diff --git a/src/.vuepress/sidebar/V2.0.x/en-Table.ts b/src/.vuepress/sidebar/V2.0.x/en-Table.ts index 2de43f1b0..cd15c118e 100644 --- a/src/.vuepress/sidebar/V2.0.x/en-Table.ts +++ b/src/.vuepress/sidebar/V2.0.x/en-Table.ts @@ -40,7 +40,7 @@ export const enSidebar = { { text: 'Common Concepts', link: 'Cluster-Concept_apache' }, { text: 'Timeseries Data Model', link: 'Navigating_Time_Series_Data' }, { text: 'Modeling Scheme Design', link: 'Data-Model-and-Terminology_apache' }, - { text: 'Data Type', link: 'Data-Type' }, + { text: 'Data Type', link: 'Data-Type_apache' }, ], }, { @@ -222,7 +222,7 @@ export const enSidebar = { collapsible: true, children: [ { text: 'overview', link: 'overview_apache' }, - { text: 'SELECT Clause', link: 'Select-Clause' }, + { text: 'SELECT Clause', link: 'Select-Clause_apache' }, { text: 'FROM&JOIN Clause', link: 'From-Join-Clause' }, { text: 'WHERE Clause', link: 'Where-Clause' }, { text: 'GROUP BY Clause', link: 'GroupBy-Clause' }, @@ -233,14 +233,14 @@ export const enSidebar = { { text: 'Nested Queries', link: 'Nested-Queries' }, ], }, - { text: 'Maintenance Statements', link: 'SQL-Maintenance-Statements' }, + { text: 'Maintenance Statements', link: 'SQL-Maintenance-Statements_apache' }, { text: 'Identifier', link: 'Identifier' }, { text: 'Keywords', link: 'Keywords' }, { text: 'Functions and Operators', collapsible: true, children: [ - { text: 'Basis Functions', link: 'Basis-Function' }, + { text: 'Basis Functions', link: 'Basis-Function_apache' }, { text: 'Featured Functions', link: 'Featured-Functions_apache' }, ], }, @@ -262,7 +262,7 @@ export const enSidebar = { prefix: 'Reference/', children: [ { text: 'Sample Data', link: 'Sample-Data' }, - { text: 'Config Manual', link: 'System-Config-Manual' }, + { text: 'Config Manual', link: 'System-Config-Manual_apache' }, { text: 'Status Codes', link: 'Status-Codes' }, { text: 'System Tables', link: 'System-Tables_apache' }, ], diff --git a/src/.vuepress/sidebar/V2.0.x/en-Tree.ts b/src/.vuepress/sidebar/V2.0.x/en-Tree.ts index b67724b0d..33a8f2897 100644 --- a/src/.vuepress/sidebar/V2.0.x/en-Tree.ts +++ b/src/.vuepress/sidebar/V2.0.x/en-Tree.ts @@ -46,7 +46,7 @@ export const enSidebar = { text: 'Modeling Scheme Design', link: 'Data-Model-and-Terminology_apache', }, - { text: 'Data Type', link: 'Data-Type' }, + { text: 'Data Type', link: 'Data-Type_apache' }, ], }, { @@ -138,7 +138,7 @@ export const enSidebar = { children: [ { text: 'Query Performance Analysis', link: 'Query-Performance-Analysis' }, { text: 'Cluster Maintenance', link: 'Load-Balance' }, - { text: 'Maintenance statement', link: 'Maintenance-commands' }, + { text: 'Maintenance statement', link: 'Maintenance-commands_apache' }, ], }, ], diff --git a/src/.vuepress/sidebar/V2.0.x/zh-Table.ts b/src/.vuepress/sidebar/V2.0.x/zh-Table.ts index 69c2879c5..7ac6579ac 100644 --- a/src/.vuepress/sidebar/V2.0.x/zh-Table.ts +++ b/src/.vuepress/sidebar/V2.0.x/zh-Table.ts @@ -40,7 +40,7 @@ export const zhSidebar = { { text: '常见概念', link: 'Cluster-Concept_apache' }, { text: '时序数据模型', link: 'Navigating_Time_Series_Data' }, { text: '建模方案设计', link: 'Data-Model-and-Terminology_apache' }, - { text: '数据类型', link: 'Data-Type' }, + { text: '数据类型', link: 'Data-Type_apache' }, ], }, { @@ -219,7 +219,7 @@ export const zhSidebar = { collapsible: true, children: [ { text: '概览', link: 'overview_apache' }, - { text: 'SELECT子句', link: 'Select-Clause' }, + { text: 'SELECT子句', link: 'Select-Clause_apache' }, { text: 'FROM&JOIN子句', link: 'From-Join-Clause' }, { text: 'WHERE子句', 
link: 'Where-Clause' }, { text: 'GROUP BY子句', link: 'GroupBy-Clause' }, @@ -230,14 +230,14 @@ export const zhSidebar = { { text: '嵌套查询', link: 'Nested-Queries' }, ], }, - { text: '运维语句', link: 'SQL-Maintenance-Statements' }, + { text: '运维语句', link: 'SQL-Maintenance-Statements_apache' }, { text: '标识符', link: 'Identifier' }, { text: '保留字&关键字', link: 'Keywords' }, { text: '函数与操作符', collapsible: true, children: [ - { text: '基础函数', link: 'Basis-Function' }, + { text: '基础函数', link: 'Basis-Function_apache' }, { text: '特色函数', link: 'Featured-Functions_apache' }, ], }, @@ -259,7 +259,7 @@ export const zhSidebar = { prefix: 'Reference/', children: [ { text: '示例数据', link: 'Sample-Data' }, - { text: '配置参数', link: 'System-Config-Manual' }, + { text: '配置参数', link: 'System-Config-Manual_apache' }, { text: '状态码', link: 'Status-Codes' }, { text: '系统表', link: 'System-Tables_apache' }, ], diff --git a/src/.vuepress/sidebar/V2.0.x/zh-Tree.ts b/src/.vuepress/sidebar/V2.0.x/zh-Tree.ts index 3e16287af..dbcaf68f4 100644 --- a/src/.vuepress/sidebar/V2.0.x/zh-Tree.ts +++ b/src/.vuepress/sidebar/V2.0.x/zh-Tree.ts @@ -40,7 +40,7 @@ export const zhSidebar = { { text: '常见概念', link: 'Cluster-Concept_apache' }, { text: '时序数据模型', link: 'Navigating_Time_Series_Data' }, { text: '建模方案设计', link: 'Data-Model-and-Terminology_apache' }, - { text: '数据类型', link: 'Data-Type' }, + { text: '数据类型', link: 'Data-Type_apache' }, ], }, { @@ -129,7 +129,7 @@ export const zhSidebar = { children: [ { text: '查询性能分析', link: 'Query-Performance-Analysis' }, { text: '集群维护', link: 'Load-Balance' }, - { text: '运维语句', link: 'Maintenance-statement' }, + { text: '运维语句', link: 'Maintenance-statement_apache' }, ], }, ], diff --git a/src/.vuepress/sidebar_timecho/V1.3.x/en.ts b/src/.vuepress/sidebar_timecho/V1.3.x/en.ts index 1034998ac..44de79af9 100644 --- a/src/.vuepress/sidebar_timecho/V1.3.x/en.ts +++ b/src/.vuepress/sidebar_timecho/V1.3.x/en.ts @@ -169,7 +169,7 @@ export const enSidebar = { children: [ { text: 'Query Performance Analysis', link: 'Query-Performance-Analysis' }, { text: 'Load Balance', link: 'Load-Balance' }, - { text: 'Maintenance statement', link: 'Maintenance-commands' }, + { text: 'Maintenance statement', link: 'Maintenance-commands_timecho' }, ], }, ], diff --git a/src/.vuepress/sidebar_timecho/V1.3.x/zh.ts b/src/.vuepress/sidebar_timecho/V1.3.x/zh.ts index 6ef9d8221..8b725e949 100644 --- a/src/.vuepress/sidebar_timecho/V1.3.x/zh.ts +++ b/src/.vuepress/sidebar_timecho/V1.3.x/zh.ts @@ -151,7 +151,7 @@ export const zhSidebar = { children: [ { text: '查询性能分析', link: 'Query-Performance-Analysis' }, { text: '负载均衡', link: 'Load-Balance' }, - { text: '运维语句', link: 'Maintenance-statement' }, + { text: '运维语句', link: 'Maintenance-statement_timecho' }, ], }, ], diff --git a/src/.vuepress/sidebar_timecho/V2.0.x/en-Table.ts b/src/.vuepress/sidebar_timecho/V2.0.x/en-Table.ts index e951a3e6b..0b33f4719 100644 --- a/src/.vuepress/sidebar_timecho/V2.0.x/en-Table.ts +++ b/src/.vuepress/sidebar_timecho/V2.0.x/en-Table.ts @@ -40,7 +40,7 @@ export const enSidebar = { { text: 'Common Concepts', link: 'Cluster-Concept_timecho' }, { text: 'Timeseries Data Model', link: 'Navigating_Time_Series_Data' }, { text: 'Modeling Scheme Design', link: 'Data-Model-and-Terminology_timecho' }, - { text: 'Data Type', link: 'Data-Type' }, + { text: 'Data Type', link: 'Data-Type_timecho' }, ], }, { @@ -83,7 +83,7 @@ export const enSidebar = { collapsible: true, children: [ { text: 'AINode Deployment(V2.0.5/6)', link: 'AINode_Deployment_timecho' }, - { text: 'AINode 
Deployment(V2.0.8-beta)', link: 'AINode_Deployment_Upgrade_timecho' }, + { text: 'AINode Deployment(V2.0.8)', link: 'AINode_Deployment_Upgrade_timecho' }, ], }, { @@ -161,9 +161,9 @@ export const enSidebar = { prefix: 'AI-capability/', children: [ { text: 'AINode(V2.0.5/6)', link: 'AINode_timecho' }, - { text: 'AINode(V2.0.8-beta)', link: 'AINode_Upgrade_timecho' }, + { text: 'AINode(V2.0.8)', link: 'AINode_Upgrade_timecho' }, { text: 'TimeSeries Large Model(V2.0.5/6)', link: 'TimeSeries-Large-Model' }, - { text: 'TimeSeries Large Model(V2.0.8-beta)', link: 'TimeSeries-Large-Model_Upgrade_timecho' }, + { text: 'TimeSeries Large Model(V2.0.8)', link: 'TimeSeries-Large-Model_Upgrade_timecho' }, ], }, { @@ -250,7 +250,7 @@ export const enSidebar = { collapsible: true, children: [ { text: 'overview', link: 'overview_timecho' }, - { text: 'SELECT Clause', link: 'Select-Clause' }, + { text: 'SELECT Clause', link: 'Select-Clause_timecho' }, { text: 'FROM&JOIN Clause', link: 'From-Join-Clause' }, { text: 'WHERE Clause', link: 'Where-Clause' }, { text: 'GROUP BY Clause', link: 'GroupBy-Clause' }, @@ -262,14 +262,14 @@ export const enSidebar = { { text: 'Pattern Query', link: 'Row-Pattern-Recognition_timecho' }, ], }, - { text: 'Maintenance Statements', link: 'SQL-Maintenance-Statements' }, + { text: 'Maintenance Statements', link: 'SQL-Maintenance-Statements_timecho' }, { text: 'Identifier', link: 'Identifier' }, { text: 'Keywords', link: 'Keywords' }, { text: 'Functions and Operators', collapsible: true, children: [ - { text: 'Basis Functions', link: 'Basis-Function' }, + { text: 'Basis Functions', link: 'Basis-Function_timecho' }, { text: 'Featured Functions', link: 'Featured-Functions_timecho' }, ], }, @@ -291,7 +291,7 @@ export const enSidebar = { prefix: 'Reference/', children: [ { text: 'Sample Data', link: 'Sample-Data' }, - { text: 'Config Manual', link: 'System-Config-Manual' }, + { text: 'Config Manual', link: 'System-Config-Manual_timecho' }, { text: 'Status Codes', link: 'Status-Codes' }, { text: 'System Tables', link: 'System-Tables_timecho' }, ], diff --git a/src/.vuepress/sidebar_timecho/V2.0.x/en-Tree.ts b/src/.vuepress/sidebar_timecho/V2.0.x/en-Tree.ts index c75f266e1..b523104d1 100644 --- a/src/.vuepress/sidebar_timecho/V2.0.x/en-Tree.ts +++ b/src/.vuepress/sidebar_timecho/V2.0.x/en-Tree.ts @@ -46,7 +46,7 @@ export const enSidebar = { text: 'Modeling Scheme Design', link: 'Data-Model-and-Terminology_timecho', }, - { text: 'Data Type', link: 'Data-Type' }, + { text: 'Data Type', link: 'Data-Type_timecho' }, ], }, { @@ -90,7 +90,7 @@ export const enSidebar = { collapsible: true, children: [ { text: 'AINode Deployment(V2.0.5/6)', link: 'AINode_Deployment_timecho' }, - { text: 'AINode Deployment(V2.0.8-beta)', link: 'AINode_Deployment_Upgrade_timecho' }, + { text: 'AINode Deployment(V2.0.8)', link: 'AINode_Deployment_Upgrade_timecho' }, ], }, { @@ -162,7 +162,7 @@ export const enSidebar = { children: [ { text: 'Query Performance Analysis', link: 'Query-Performance-Analysis' }, { text: 'Cluster Maintenance', link: 'Load-Balance' }, - { text: 'Maintenance statement', link: 'Maintenance-commands' }, + { text: 'Maintenance statement', link: 'Maintenance-commands_timecho' }, ], }, ], @@ -173,9 +173,9 @@ export const enSidebar = { prefix: 'AI-capability/', children: [ { text: 'AINode(V2.0.5/6)', link: 'AINode_timecho' }, - { text: 'AINode(V2.0.8-beta)', link: 'AINode_Upgrade_timecho' }, + { text: 'AINode(V2.0.8)', link: 'AINode_Upgrade_timecho' }, { text: 'TimeSeries Large Model(V2.0.5/6)', 
link: 'TimeSeries-Large-Model' }, - { text: 'TimeSeries Large Model(V2.0.8-beta)', link: 'TimeSeries-Large-Model_Upgrade_timecho' }, + { text: 'TimeSeries Large Model(V2.0.8)', link: 'TimeSeries-Large-Model_Upgrade_timecho' }, ], }, { diff --git a/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts b/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts index 3cd633c1e..340e66fb1 100644 --- a/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts +++ b/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts @@ -40,7 +40,7 @@ export const zhSidebar = { { text: '常见概念', link: 'Cluster-Concept_timecho' }, { text: '时序数据模型', link: 'Navigating_Time_Series_Data' }, { text: '建模方案设计', link: 'Data-Model-and-Terminology_timecho' }, - { text: '数据类型', link: 'Data-Type' }, + { text: '数据类型', link: 'Data-Type_timecho' }, ], }, { @@ -77,7 +77,7 @@ export const zhSidebar = { collapsible: true, children: [ { text: 'AINode 部署(V2.0.5/6)', link: 'AINode_Deployment_timecho' }, - { text: 'AINode 部署(V2.0.8-beta)', link: 'AINode_Deployment_Upgrade_timecho' }, + { text: 'AINode 部署(V2.0.8)', link: 'AINode_Deployment_Upgrade_timecho' }, ], }, { @@ -152,9 +152,9 @@ export const zhSidebar = { prefix: 'AI-capability/', children: [ { text: 'AINode(V2.0.5/6)', link: 'AINode_timecho' }, - { text: 'AINode(V2.0.8-beta)', link: 'AINode_Upgrade_timecho' }, + { text: 'AINode(V2.0.8)', link: 'AINode_Upgrade_timecho' }, { text: '时序大模型(V2.0.5/6)', link: 'TimeSeries-Large-Model' }, - { text: '时序大模型(V2.0.8-beta)', link: 'TimeSeries-Large-Model_Upgrade_timecho' }, + { text: '时序大模型(V2.0.8)', link: 'TimeSeries-Large-Model_Upgrade_timecho' }, ], }, { @@ -241,7 +241,7 @@ export const zhSidebar = { collapsible: true, children: [ { text: '概览', link: 'overview_timecho' }, - { text: 'SELECT子句', link: 'Select-Clause' }, + { text: 'SELECT子句', link: 'Select-Clause_timecho' }, { text: 'FROM&JOIN子句', link: 'From-Join-Clause' }, { text: 'WHERE子句', link: 'Where-Clause' }, { text: 'GROUP BY子句', link: 'GroupBy-Clause' }, @@ -253,14 +253,14 @@ export const zhSidebar = { { text: '模式查询', link: 'Row-Pattern-Recognition_timecho' }, ], }, - { text: '运维语句', link: 'SQL-Maintenance-Statements' }, + { text: '运维语句', link: 'SQL-Maintenance-Statements_timecho' }, { text: '标识符', link: 'Identifier' }, { text: '保留字&关键字', link: 'Keywords' }, { text: '函数与操作符', collapsible: true, children: [ - { text: '基础函数', link: 'Basis-Function' }, + { text: '基础函数', link: 'Basis-Function_timecho' }, { text: '特色函数', link: 'Featured-Functions_timecho' }, ], }, @@ -282,7 +282,7 @@ export const zhSidebar = { prefix: 'Reference/', children: [ { text: '示例数据', link: 'Sample-Data' }, - { text: '配置参数', link: 'System-Config-Manual' }, + { text: '配置参数', link: 'System-Config-Manual_timecho' }, { text: '状态码', link: 'Status-Codes' }, { text: '系统表', link: 'System-Tables_timecho' }, ], diff --git a/src/.vuepress/sidebar_timecho/V2.0.x/zh-Tree.ts b/src/.vuepress/sidebar_timecho/V2.0.x/zh-Tree.ts index 92310f783..35a17c9cb 100644 --- a/src/.vuepress/sidebar_timecho/V2.0.x/zh-Tree.ts +++ b/src/.vuepress/sidebar_timecho/V2.0.x/zh-Tree.ts @@ -40,7 +40,7 @@ export const zhSidebar = { { text: '常见概念', link: 'Cluster-Concept_timecho' }, { text: '时序数据模型', link: 'Navigating_Time_Series_Data' }, { text: '建模方案设计', link: 'Data-Model-and-Terminology_timecho' }, - { text: '数据类型', link: 'Data-Type' }, + { text: '数据类型', link: 'Data-Type_timecho' }, ], }, { @@ -78,7 +78,7 @@ export const zhSidebar = { collapsible: true, children: [ { text: 'AINode 部署(V2.0.5/6)', link: 'AINode_Deployment_timecho' }, - { text: 'AINode 部署(V2.0.8-beta)', link: 
'AINode_Deployment_Upgrade_timecho' }, + { text: 'AINode 部署(V2.0.8)', link: 'AINode_Deployment_Upgrade_timecho' }, ], }, { @@ -144,7 +144,7 @@ export const zhSidebar = { children: [ { text: '查询性能分析', link: 'Query-Performance-Analysis' }, { text: '集群维护', link: 'Load-Balance' }, - { text: '运维语句', link: 'Maintenance-statement' }, + { text: '运维语句', link: 'Maintenance-statement_timecho' }, ], }, ], @@ -155,9 +155,9 @@ export const zhSidebar = { prefix: 'AI-capability/', children: [ { text: 'AINode(V2.0.5/6)', link: 'AINode_timecho' }, - { text: 'AINode(V2.0.8-beta)', link: 'AINode_Upgrade_timecho' }, + { text: 'AINode(V2.0.8)', link: 'AINode_Upgrade_timecho' }, { text: '时序大模型(V2.0.5/6)', link: 'TimeSeries-Large-Model' }, - { text: '时序大模型(V2.0.8-beta)', link: 'TimeSeries-Large-Model_Upgrade_timecho' }, + { text: '时序大模型(V2.0.8)', link: 'TimeSeries-Large-Model_Upgrade_timecho' }, ], }, { diff --git a/src/UserGuide/Master/Table/API/Programming-Java-Native-API_timecho.md b/src/UserGuide/Master/Table/API/Programming-Java-Native-API_timecho.md index eff3f0974..89805d8fd 100644 --- a/src/UserGuide/Master/Table/API/Programming-Java-Native-API_timecho.md +++ b/src/UserGuide/Master/Table/API/Programming-Java-Native-API_timecho.md @@ -65,7 +65,7 @@ The `ITableSession` interface defines basic operations for interacting with IoTD **Description of Object Data Type:** -Since V2.0.8-beta, the `iTableSession.insert(Tablet tablet)` interface supports splitting a single Object-class file into multiple segments and writing them sequentially in order. When the column data type in the Tablet data structure is **`TSDataType.Object`**, you need to use the following method to populate the Tablet: +Since V2.0.8, the `iTableSession.insert(Tablet tablet)` interface supports splitting a single Object-class file into multiple segments and writing them sequentially in order. When the column data type in the Tablet data structure is **`TSDataType.Object`**, you need to use the following method to populate the Tablet: ```Java /* diff --git a/src/UserGuide/Master/Table/Background-knowledge/Data-Type.md b/src/UserGuide/Master/Table/Background-knowledge/Data-Type.md index 3ebd7d16f..49bc408ab 100644 --- a/src/UserGuide/Master/Table/Background-knowledge/Data-Type.md +++ b/src/UserGuide/Master/Table/Background-knowledge/Data-Type.md @@ -1,3 +1,6 @@ +--- +redirectTo: Data-Type_apache.html +--- - -# Data Type - -## 1. Basic Data Types - -IoTDB supports the following ten data types: - -- **BOOLEAN** (Boolean value) -- **INT32** (32-bit integer) -- **INT64** (64-bit integer) -- **FLOAT** (Single-precision floating-point number) -- **DOUBLE** (Double-precision floating-point number) -- **TEXT** (Text data, suitable for long strings) -- **STRING** (String data with additional statistical information for optimized queries) -- **BLOB** (Large binary object) -- **OBJECT** (Large Binary Object) - > Supported since V2.0.8-beta -- **TIMESTAMP** (Timestamp, representing precise moments in time) -- **DATE** (Date, storing only calendar date information) - -The difference between **STRING** and **TEXT**: - -- **STRING** stores text data and includes additional statistical information to optimize value-filtering queries. -- **TEXT** is suitable for storing long text strings without additional query optimization. 
- -The differences between **OBJECT** and **BLOB** types are as follows: - -| | **OBJECT** | **BLOB** | -|----------------------|-------------------------------------------------------------------------------------------------------------------------|--------------------------------------| -| **Write Amplification** (Lower is better) | Low (Write amplification factor is always 1) | High (Write amplification factor = 2 + number of merges) | -| **Space Amplification** (Lower is better) | Low (Merge & release on write) | High (Merge on read and release on compact) | -| **Query Results** | When querying an OBJECT column by default, returns metadata like: `(Object) XX.XX KB`. Actual OBJECT data storage path: `${data_dir}/object_data`. Use `READ_OBJECT` function to retrieve raw content | Directly returns raw binary content | - -### 1.1 Floating-Point Precision Configuration - -For **FLOAT** and **DOUBLE** series using **RLE** or **TS_2DIFF** encoding, the number of decimal places can be set via the **MAX_POINT_NUMBER** attribute during series creation. - -For example: - -```SQL -CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; -``` - -If not specified, the system will use the configuration in the `iotdb-system.properties` file under the `float_precision` item (default is 2 decimal places). - -### 1.2 Data Type Compatibility - -If the written data type does not match the registered data type of a series: - -- **Incompatible types** → The system will issue an error. -- **Compatible types** → The system will automatically convert the written data type to match the registered type. - -The compatibility of data types is shown in the table below: - -| Registered Data Type | Compatible Write Data Types | -|:---------------------|:---------------------------------------| -| BOOLEAN | BOOLEAN | -| INT32 | INT32 | -| INT64 | INT32, INT64, TIMESTAMP | -| FLOAT | INT32, FLOAT | -| DOUBLE | INT32, INT64, FLOAT, DOUBLE, TIMESTAMP | -| TEXT | TEXT, STRING | -| STRING | TEXT, STRING | -| BLOB | TEXT, STRING, BLOB | -| OBJECT | OBJECT | -| TIMESTAMP | INT32, INT64, TIMESTAMP | -| DATE | DATE | - -## 2. Timestamp Types - -A timestamp represents the moment when data is recorded. IoTDB supports two types: - -- **Absolute timestamps**: Directly specify a point in time. -- **Relative timestamps**: Define time offsets from a reference point (e.g., `now()`). - -### 2.1 Absolute Timestamp - -IoTDB supports timestamps in two formats: - -1. **LONG**: Milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). -2. **DATETIME**: Human-readable date-time strings. (including **DATETIME-INPUT** and **DATETIME-DISPLAY** subcategories). - -When entering a timestamp, users can use either a LONG value or a DATETIME string. Supported input formats include: - -
- -**DATETIME-INPUT Type Supports Format** - - -| format | -| :--------------------------- | -| yyyy-MM-dd HH:mm:ss | -| yyyy/MM/dd HH:mm:ss | -| yyyy.MM.dd HH:mm:ss | -| yyyy-MM-dd HH:mm:ssZZ | -| yyyy/MM/dd HH:mm:ssZZ | -| yyyy.MM.dd HH:mm:ssZZ | -| yyyy/MM/dd HH:mm:ss.SSS | -| yyyy-MM-dd HH:mm:ss.SSS | -| yyyy.MM.dd HH:mm:ss.SSS | -| yyyy-MM-dd HH:mm:ss.SSSZZ | -| yyyy/MM/dd HH:mm:ss.SSSZZ | -| yyyy.MM.dd HH:mm:ss.SSSZZ | -| ISO8601 standard time format | - - -
- -> **Note:** `ZZ` represents a time zone offset (e.g., `+0800` for Beijing Time, `-0500` for Eastern Standard Time). - -IoTDB supports timestamp display in **LONG** format or **DATETIME-DISPLAY** format, allowing users to customize time output. - -
- -**Syntax for Custom Time Formats in DATETIME-DISPLAY** - - -| Symbol | Meaning | Presentation | Examples | -| :----: | :-------------------------: | :----------: | :--------------------------------: | -| G | era | era | era | -| C | century of era (>=0) | number | 20 | -| Y | year of era (>=0) | year | 1996 | -| | | | | -| x | weekyear | year | 1996 | -| w | week of weekyear | number | 27 | -| e | day of week | number | 2 | -| E | day of week | text | Tuesday; Tue | -| | | | | -| y | year | year | 1996 | -| D | day of year | number | 189 | -| M | month of year | month | July; Jul; 07 | -| d | day of month | number | 10 | -| | | | | -| a | halfday of day | text | PM | -| K | hour of halfday (0~11) | number | 0 | -| h | clockhour of halfday (1~12) | number | 12 | -| | | | | -| H | hour of day (0~23) | number | 0 | -| k | clockhour of day (1~24) | number | 24 | -| m | minute of hour | number | 30 | -| s | second of minute | number | 55 | -| S | fraction of second | millis | 978 | -| | | | | -| z | time zone | text | Pacific Standard Time; PST | -| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | -| | | | | -| ' | escape for text | delimiter | | -| '' | single quote | literal | ' | - -
- -### 2.2 Relative Timestamp - -Relative timestamps allow specifying time offsets from **now()** or a **DATETIME** reference. - -The formal definition is: - -```Plain -Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ -RelativeTime = (now() | DATETIME) ((+|-) Duration)+ -``` - -
- - **The syntax of the duration unit** - - - | Symbol | Meaning | Presentation | Examples | - | :----: | :---------: | :----------------------: | :------: | - | y | year | 1y=365 days | 1y | - | mo | month | 1mo=30 days | 1mo | - | w | week | 1w=7 days | 1w | - | d | day | 1d=1 day | 1d | - | | | | | - | h | hour | 1h=3600 seconds | 1h | - | m | minute | 1m=60 seconds | 1m | - | s | second | 1s=1 second | 1s | - | | | | | - | ms | millisecond | 1ms=1000_000 nanoseconds | 1ms | - | us | microsecond | 1us=1000 nanoseconds | 1us | - | ns | nanosecond | 1ns=1 nanosecond | 1ns | - -
-**Examples:** - -```Plain -now() - 1d2h // A time 1 day and 2 hours earlier than the server time -now() - 1w // A time 1 week earlier than the server time -``` - -> **Note:** There must be spaces on both sides of `+` and `-` operators. \ No newline at end of file diff --git a/src/UserGuide/Master/Table/Background-knowledge/Data-Type_apache.md b/src/UserGuide/Master/Table/Background-knowledge/Data-Type_apache.md new file mode 100644 index 000000000..3ebd7d16f --- /dev/null +++ b/src/UserGuide/Master/Table/Background-knowledge/Data-Type_apache.md @@ -0,0 +1,212 @@ + + +# Data Type + +## 1. Basic Data Types + +IoTDB supports the following eleven data types: + +- **BOOLEAN** (Boolean value) +- **INT32** (32-bit integer) +- **INT64** (64-bit integer) +- **FLOAT** (Single-precision floating-point number) +- **DOUBLE** (Double-precision floating-point number) +- **TEXT** (Text data, suitable for long strings) +- **STRING** (String data with additional statistical information for optimized queries) +- **BLOB** (Large binary object) +- **OBJECT** (Large binary object written and stored in segments, suited to oversized payloads) + > Supported since V2.0.8-beta +- **TIMESTAMP** (Timestamp, representing precise moments in time) +- **DATE** (Date, storing only calendar date information) + +The difference between **STRING** and **TEXT**: + +- **STRING** stores text data and includes additional statistical information to optimize value-filtering queries. +- **TEXT** is suitable for storing long text strings without additional query optimization. + +The differences between **OBJECT** and **BLOB** types are as follows: + +| | **OBJECT** | **BLOB** | +|----------------------|-------------------------------------------------------------------------------------------------------------------------|--------------------------------------| +| **Write Amplification** (Lower is better) | Low (Write amplification factor is always 1) | High (Write amplification factor = 2 + number of merges) | +| **Space Amplification** (Lower is better) | Low (Merge & release on write) | High (Merge on read and release on compact) | +| **Query Results** | When querying an OBJECT column by default, returns metadata like: `(Object) XX.XX KB`. Actual OBJECT data storage path: `${data_dir}/object_data`. Use `READ_OBJECT` function to retrieve raw content | Directly returns raw binary content | + +### 1.1 Floating-Point Precision Configuration + +For **FLOAT** and **DOUBLE** series using **RLE** or **TS_2DIFF** encoding, the number of decimal places can be set via the **MAX_POINT_NUMBER** attribute during series creation. + +For example: + +```SQL +CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; +``` + +If not specified, the system will use the configuration in the `iotdb-system.properties` file under the `float_precision` item (default is 2 decimal places). + +### 1.2 Data Type Compatibility + +If the written data type does not match the registered data type of a series: + +- **Incompatible types** → The system will issue an error. +- **Compatible types** → The system will automatically convert the written data type to match the registered type.
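For example, the conversion rule can be made concrete with a short table-model sketch (hedged: `sensor_demo` and its columns are illustrative names, not part of any shipped schema):

```SQL
-- temperature is registered as DOUBLE
CREATE TABLE IF NOT EXISTS sensor_demo (
  time        TIMESTAMP TIME,
  device_id   STRING TAG,
  temperature DOUBLE FIELD
);

-- INT32 is write-compatible with DOUBLE: the literal 25 is converted on write
INSERT INTO sensor_demo(time, device_id, temperature)
VALUES (1700000000000, 'd1', 25);

-- BOOLEAN is not write-compatible with DOUBLE: writing a value such as true
-- into temperature would be rejected with an error instead of converted
```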
+ +The compatibility of data types is shown in the table below: + +| Registered Data Type | Compatible Write Data Types | +|:---------------------|:---------------------------------------| +| BOOLEAN | BOOLEAN | +| INT32 | INT32 | +| INT64 | INT32, INT64, TIMESTAMP | +| FLOAT | INT32, FLOAT | +| DOUBLE | INT32, INT64, FLOAT, DOUBLE, TIMESTAMP | +| TEXT | TEXT, STRING | +| STRING | TEXT, STRING | +| BLOB | TEXT, STRING, BLOB | +| OBJECT | OBJECT | +| TIMESTAMP | INT32, INT64, TIMESTAMP | +| DATE | DATE | + +## 2. Timestamp Types + +A timestamp represents the moment when data is recorded. IoTDB supports two types: + +- **Absolute timestamps**: Directly specify a point in time. +- **Relative timestamps**: Define time offsets from a reference point (e.g., `now()`). + +### 2.1 Absolute Timestamp + +IoTDB supports timestamps in two formats: + +1. **LONG**: Milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). +2. **DATETIME**: Human-readable date-time strings (including **DATETIME-INPUT** and **DATETIME-DISPLAY** subcategories). + +When entering a timestamp, users can use either a LONG value or a DATETIME string. Supported input formats include: + +
+ +**Supported DATETIME-INPUT Formats** + + +| Format | +| :--------------------------- | +| yyyy-MM-dd HH:mm:ss | +| yyyy/MM/dd HH:mm:ss | +| yyyy.MM.dd HH:mm:ss | +| yyyy-MM-dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ssZZ | +| yyyy.MM.dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSS | +| yyyy.MM.dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSSZZ | +| yyyy/MM/dd HH:mm:ss.SSSZZ | +| yyyy.MM.dd HH:mm:ss.SSSZZ | +| ISO8601 standard time format | + +
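For example, under a UTC+8 session the LONG value `1732696741000` and the ISO8601 string `2024-11-27T16:39:01.000+08:00` denote the same instant, so the two filters below are interchangeable (a hedged sketch; `table1` and `temperature` follow the sample schema referenced elsewhere in this guide):

```SQL
-- LONG timestamp: milliseconds since the Unix epoch
SELECT time, temperature FROM table1
WHERE time >= 1732696741000;

-- Equivalent ISO8601 DATETIME-INPUT literal
SELECT time, temperature FROM table1
WHERE time >= 2024-11-27T16:39:01.000+08:00;
```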
+ +> **Note:** `ZZ` represents a time zone offset (e.g., `+0800` for Beijing Time, `-0500` for Eastern Standard Time). + +IoTDB supports timestamp display in **LONG** format or **DATETIME-DISPLAY** format, allowing users to customize time output. + +
+ +**Syntax for Custom Time Formats in DATETIME-DISPLAY** + + +| Symbol | Meaning | Presentation | Examples | +| :----: | :-------------------------: | :----------: | :--------------------------------: | +| G | era | era | era | +| C | century of era (>=0) | number | 20 | +| Y | year of era (>=0) | year | 1996 | +| | | | | +| x | weekyear | year | 1996 | +| w | week of weekyear | number | 27 | +| e | day of week | number | 2 | +| E | day of week | text | Tuesday; Tue | +| | | | | +| y | year | year | 1996 | +| D | day of year | number | 189 | +| M | month of year | month | July; Jul; 07 | +| d | day of month | number | 10 | +| | | | | +| a | halfday of day | text | PM | +| K | hour of halfday (0~11) | number | 0 | +| h | clockhour of halfday (1~12) | number | 12 | +| | | | | +| H | hour of day (0~23) | number | 0 | +| k | clockhour of day (1~24) | number | 24 | +| m | minute of hour | number | 30 | +| s | second of minute | number | 55 | +| S | fraction of second | millis | 978 | +| | | | | +| z | time zone | text | Pacific Standard Time; PST | +| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | +| | | | | +| ' | escape for text | delimiter | | +| '' | single quote | literal | ' | + +
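As a worked illustration of the symbol table (not the output of any particular command; a UTC+8 session is assumed), the epoch millisecond 1732696741000 renders as follows under two display patterns:

```Plain
yyyy-MM-dd HH:mm:ss.SSS    ->  2024-11-27 16:39:01.000
EE, d MMM yyyy HH:mm:ss Z  ->  Wed, 27 Nov 2024 16:39:01 +0800
```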
+ +### 2.2 Relative Timestamp + +Relative timestamps allow specifying time offsets from **now()** or a **DATETIME** reference. + +The formal definition is: + +```Plain +Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ +RelativeTime = (now() | DATETIME) ((+|-) Duration)+ +``` + +
+ + **The syntax of the duration unit** + + + | Symbol | Meaning | Presentation | Examples | + | :----: | :---------: | :----------------------: | :------: | + | y | year | 1y=365 days | 1y | + | mo | month | 1mo=30 days | 1mo | + | w | week | 1w=7 days | 1w | + | d | day | 1d=1 day | 1d | + | | | | | + | h | hour | 1h=3600 seconds | 1h | + | m | minute | 1m=60 seconds | 1m | + | s | second | 1s=1 second | 1s | + | | | | | + | ms | millisecond | 1ms=1,000,000 nanoseconds | 1ms | + | us | microsecond | 1us=1000 nanoseconds | 1us | + | ns | nanosecond | 1ns=1 nanosecond | 1ns | + +
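Before the raw expression examples below, a minimal query sketch shows a relative timestamp in context (assuming the illustrative `table1` schema used earlier; a relative expression can stand wherever an absolute timestamp can):

```SQL
-- Rows from the last 30 minutes, relative to the server time
SELECT time, temperature FROM table1
WHERE time >= now() - 30m;
```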
+ +**Examples:** + +```Plain +now() - 1d2h // A time 1 day and 2 hours earlier than the server time +now() - 1w // A time 1 week earlier than the server time +``` + +> **Note:** There must be spaces on both sides of `+` and `-` operators. \ No newline at end of file diff --git a/src/UserGuide/Master/Table/Background-knowledge/Data-Type_timecho.md b/src/UserGuide/Master/Table/Background-knowledge/Data-Type_timecho.md new file mode 100644 index 000000000..e455155e6 --- /dev/null +++ b/src/UserGuide/Master/Table/Background-knowledge/Data-Type_timecho.md @@ -0,0 +1,212 @@ + + +# Data Type + +## 1. Basic Data Types + +IoTDB supports the following eleven data types: + +- **BOOLEAN** (Boolean value) +- **INT32** (32-bit integer) +- **INT64** (64-bit integer) +- **FLOAT** (Single-precision floating-point number) +- **DOUBLE** (Double-precision floating-point number) +- **TEXT** (Text data, suitable for long strings) +- **STRING** (String data with additional statistical information for optimized queries) +- **BLOB** (Large binary object) +- **OBJECT** (Large binary object written and stored in segments, suited to oversized payloads) + > Supported since V2.0.8 +- **TIMESTAMP** (Timestamp, representing precise moments in time) +- **DATE** (Date, storing only calendar date information) + +The difference between **STRING** and **TEXT**: + +- **STRING** stores text data and includes additional statistical information to optimize value-filtering queries. +- **TEXT** is suitable for storing long text strings without additional query optimization. + +The differences between **OBJECT** and **BLOB** types are as follows: + +| | **OBJECT** | **BLOB** | +|----------------------|-------------------------------------------------------------------------------------------------------------------------|--------------------------------------| +| **Write Amplification** (Lower is better) | Low (Write amplification factor is always 1) | High (Write amplification factor = 2 + number of merges) | +| **Space Amplification** (Lower is better) | Low (Merge & release on write) | High (Merge on read and release on compact) | +| **Query Results** | When querying an OBJECT column by default, returns metadata like: `(Object) XX.XX KB`. Actual OBJECT data storage path: `${data_dir}/object_data`. Use `READ_OBJECT` function to retrieve raw content | Directly returns raw binary content | + +### 1.1 Floating-Point Precision Configuration + +For **FLOAT** and **DOUBLE** series using **RLE** or **TS_2DIFF** encoding, the number of decimal places can be set via the **MAX_POINT_NUMBER** attribute during series creation. + +For example: + +```SQL +CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; +``` + +If not specified, the system will use the configuration in the `iotdb-system.properties` file under the `float_precision` item (default is 2 decimal places). + +### 1.2 Data Type Compatibility + +If the written data type does not match the registered data type of a series: + +- **Incompatible types** → The system will issue an error. +- **Compatible types** → The system will automatically convert the written data type to match the registered type.
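For example (a hedged table-model sketch; `sensor_demo` and its columns are illustrative names, not part of any shipped schema):

```SQL
-- temperature is registered as DOUBLE
CREATE TABLE IF NOT EXISTS sensor_demo (
  time        TIMESTAMP TIME,
  device_id   STRING TAG,
  temperature DOUBLE FIELD
);

-- INT32 is write-compatible with DOUBLE, so the literal 25 is converted on write
INSERT INTO sensor_demo(time, device_id, temperature)
VALUES (1700000000000, 'd1', 25);

-- BOOLEAN is not write-compatible with DOUBLE; writing true here would fail
```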
+ +The compatibility of data types is shown in the table below: + +| Registered Data Type | Compatible Write Data Types | +|:---------------------|:---------------------------------------| +| BOOLEAN | BOOLEAN | +| INT32 | INT32 | +| INT64 | INT32, INT64, TIMESTAMP | +| FLOAT | INT32, FLOAT | +| DOUBLE | INT32, INT64, FLOAT, DOUBLE, TIMESTAMP | +| TEXT | TEXT, STRING | +| STRING | TEXT, STRING | +| BLOB | TEXT, STRING, BLOB | +| OBJECT | OBJECT | +| TIMESTAMP | INT32, INT64, TIMESTAMP | +| DATE | DATE | + +## 2. Timestamp Types + +A timestamp represents the moment when data is recorded. IoTDB supports two types: + +- **Absolute timestamps**: Directly specify a point in time. +- **Relative timestamps**: Define time offsets from a reference point (e.g., `now()`). + +### 2.1 Absolute Timestamp + +IoTDB supports timestamps in two formats: + +1. **LONG**: Milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). +2. **DATETIME**: Human-readable date-time strings (including **DATETIME-INPUT** and **DATETIME-DISPLAY** subcategories). + +When entering a timestamp, users can use either a LONG value or a DATETIME string. Supported input formats include: + +
+ +**Supported DATETIME-INPUT Formats** + + +| Format | +| :--------------------------- | +| yyyy-MM-dd HH:mm:ss | +| yyyy/MM/dd HH:mm:ss | +| yyyy.MM.dd HH:mm:ss | +| yyyy-MM-dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ssZZ | +| yyyy.MM.dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSS | +| yyyy.MM.dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSSZZ | +| yyyy/MM/dd HH:mm:ss.SSSZZ | +| yyyy.MM.dd HH:mm:ss.SSSZZ | +| ISO8601 standard time format | + +
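As above, under a UTC+8 session the LONG value `1732696741000` and the ISO8601 string `2024-11-27T16:39:01.000+08:00` denote the same instant, so the two filters below are interchangeable (a hedged sketch; `table1` and `temperature` follow the sample schema referenced elsewhere in this guide):

```SQL
-- LONG timestamp: milliseconds since the Unix epoch
SELECT time, temperature FROM table1
WHERE time >= 1732696741000;

-- Equivalent ISO8601 DATETIME-INPUT literal
SELECT time, temperature FROM table1
WHERE time >= 2024-11-27T16:39:01.000+08:00;
```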
+ +> **Note:** `ZZ` represents a time zone offset (e.g., `+0800` for Beijing Time, `-0500` for Eastern Standard Time). + +IoTDB supports timestamp display in **LONG** format or **DATETIME-DISPLAY** format, allowing users to customize time output. + +
+ +**Syntax for Custom Time Formats in DATETIME-DISPLAY** + + +| Symbol | Meaning | Presentation | Examples | +| :----: | :-------------------------: | :----------: | :--------------------------------: | +| G | era | era | era | +| C | century of era (>=0) | number | 20 | +| Y | year of era (>=0) | year | 1996 | +| | | | | +| x | weekyear | year | 1996 | +| w | week of weekyear | number | 27 | +| e | day of week | number | 2 | +| E | day of week | text | Tuesday; Tue | +| | | | | +| y | year | year | 1996 | +| D | day of year | number | 189 | +| M | month of year | month | July; Jul; 07 | +| d | day of month | number | 10 | +| | | | | +| a | halfday of day | text | PM | +| K | hour of halfday (0~11) | number | 0 | +| h | clockhour of halfday (1~12) | number | 12 | +| | | | | +| H | hour of day (0~23) | number | 0 | +| k | clockhour of day (1~24) | number | 24 | +| m | minute of hour | number | 30 | +| s | second of minute | number | 55 | +| S | fraction of second | millis | 978 | +| | | | | +| z | time zone | text | Pacific Standard Time; PST | +| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | +| | | | | +| ' | escape for text | delimiter | | +| '' | single quote | literal | ' | + +
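As a worked illustration of the symbol table (not the output of any particular command; a UTC+8 session is assumed), the epoch millisecond 1732696741000 renders as follows under two display patterns:

```Plain
yyyy-MM-dd HH:mm:ss.SSS    ->  2024-11-27 16:39:01.000
EE, d MMM yyyy HH:mm:ss Z  ->  Wed, 27 Nov 2024 16:39:01 +0800
```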
+ +### 2.2 Relative Timestamp + +Relative timestamps allow specifying time offsets from **now()** or a **DATETIME** reference. + +The formal definition is: + +```Plain +Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ +RelativeTime = (now() | DATETIME) ((+|-) Duration)+ +``` + +
+ + **The syntax of the duration unit** + + + | Symbol | Meaning | Presentation | Examples | + | :----: | :---------: | :----------------------: | :------: | + | y | year | 1y=365 days | 1y | + | mo | month | 1mo=30 days | 1mo | + | w | week | 1w=7 days | 1w | + | d | day | 1d=1 day | 1d | + | | | | | + | h | hour | 1h=3600 seconds | 1h | + | m | minute | 1m=60 seconds | 1m | + | s | second | 1s=1 second | 1s | + | | | | | + | ms | millisecond | 1ms=1,000,000 nanoseconds | 1ms | + | us | microsecond | 1us=1000 nanoseconds | 1us | + | ns | nanosecond | 1ns=1 nanosecond | 1ns | + +
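Before the raw expression examples below, a minimal query sketch shows a relative timestamp in context (assuming the illustrative `table1` schema used earlier; a relative expression can stand wherever an absolute timestamp can):

```SQL
-- Rows from the last 30 minutes, relative to the server time
SELECT time, temperature FROM table1
WHERE time >= now() - 30m;
```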
+ +**Examples:** + +```Plain +now() - 1d2h // A time 1 day and 2 hours earlier than the server time +now() - 1w // A time 1 week earlier than the server time +``` + +> **Note:** There must be spaces on both sides of `+` and `-` operators. \ No newline at end of file diff --git a/src/UserGuide/Master/Table/Basic-Concept/Query-Data_apache.md b/src/UserGuide/Master/Table/Basic-Concept/Query-Data_apache.md index b3018d2f5..3f45018e6 100644 --- a/src/UserGuide/Master/Table/Basic-Concept/Query-Data_apache.md +++ b/src/UserGuide/Master/Table/Basic-Concept/Query-Data_apache.md @@ -37,7 +37,7 @@ SELECT ⟨select_list⟩ The IoTDB table model query syntax supports the following clauses: -- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause.md) +- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause_apache.md) - **FROM Clause**: Indicates the data source for the query, which can be a single table, multiple tables joined using the `JOIN` clause, or a subquery. Details: [FROM & JOIN Clause](../SQL-Manual/From-Join-Clause.md) - **WHERE Clause**: Filters rows based on specific conditions. Logically executed immediately after the `FROM` clause. Details: [WHERE Clause](../SQL-Manual/Where-Clause.md) - **GROUP BY Clause**: Used for aggregating data, specifying the columns for grouping. Details: [GROUP BY Clause](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/UserGuide/Master/Table/Basic-Concept/Query-Data_timecho.md b/src/UserGuide/Master/Table/Basic-Concept/Query-Data_timecho.md index 0a0fdb1f3..47976fb45 100644 --- a/src/UserGuide/Master/Table/Basic-Concept/Query-Data_timecho.md +++ b/src/UserGuide/Master/Table/Basic-Concept/Query-Data_timecho.md @@ -38,7 +38,7 @@ SELECT ⟨select_list⟩ The IoTDB table model query syntax supports the following clauses: -- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause.md) +- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause_timecho.md) - **FROM Clause**: Indicates the data source for the query, which can be a single table, multiple tables joined using the `JOIN` clause, or a subquery. Details: [FROM & JOIN Clause](../SQL-Manual/From-Join-Clause.md) - **WHERE Clause**: Filters rows based on specific conditions. Logically executed immediately after the `FROM` clause. Details: [WHERE Clause](../SQL-Manual/Where-Clause.md) - **GROUP BY Clause**: Used for aggregating data, specifying the columns for grouping. Details: [GROUP BY Clause](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md b/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md index d869ad08f..ef146a655 100644 --- a/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md +++ b/src/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md @@ -306,7 +306,7 @@ It costs 0.014s To avoid oversized Object write requests, values of **Object** type can be split into segments and written sequentially. In SQL, the `to_object(isEOF, offset, content)` function must be used for value insertion. 
-> Supported since V2.0.8-beta +> Supported since V2.0.8 **Syntax:** diff --git a/src/UserGuide/Master/Table/Reference/System-Config-Manual.md b/src/UserGuide/Master/Table/Reference/System-Config-Manual.md index c7006be42..660b55b42 100644 --- a/src/UserGuide/Master/Table/Reference/System-Config-Manual.md +++ b/src/UserGuide/Master/Table/Reference/System-Config-Manual.md @@ -1,3 +1,6 @@ +--- +redirectTo: System-Config-Manual_apache.html +--- -# Config Manual - -## 1. IoTDB Configuration Files - -The configuration files for IoTDB are located in the `conf` folder under the IoTDB installation directory. Key configuration files include: - -1. `confignode-env.sh` **/** `confignode-env.bat`: - 1. Environment configuration file for ConfigNode. - 2. Used to configure memory size and other environment settings for ConfigNode. -2. `datanode-env.sh` **/** `datanode-env.bat`: - 1. Environment configuration file for DataNode. - 2. Used to configure memory size and other environment settings for DataNode. -3. `iotdb-system.properties`: - 1. Main configuration file for IoTDB. - 2. Contains configurable parameters for IoTDB. -4. `iotdb-system.properties.template`: - 1. Template for the `iotdb-system.properties` file. - 2. Provides a reference for all available configuration parameters. - -## 2. Modify Configurations - -### 2.1 **Modify Existing Parameters**: - -- Parameters already present in the `iotdb-system.properties` file can be directly modified. - -### 2.2 **Adding New Parameters**: - -- For parameters not listed in `iotdb-system.properties`, you can find them in the `iotdb-system.properties.template` file. -- Copy the desired parameter from the template file to `iotdb-system.properties` and modify its value. - -### 2.3 Configuration Update Methods - -Different configuration parameters have different update methods, categorized as follows: - -1. **Modify before the first startup.**: - 1. These parameters can only be modified before the first startup of ConfigNode/DataNode. - 2. Modifying them after the first startup will prevent ConfigNode/DataNode from starting. -2. **Restart Required for Changes to Take Effect**: - 1. These parameters can be modified after ConfigNode/DataNode has started. - 2. However, a restart of ConfigNode/DataNode is required for the changes to take effect. -3. **Hot Reload**: - 1. These parameters can be modified while ConfigNode/DataNode is running. - 2. After modification, use the following SQL commands to apply the changes: - - `load configuration`: Reloads the configuration. - - `set configuration key1 = 'value1'`: Updates specific configuration parameters. - -## 3. Environment Parameters - -The environment configuration files (`confignode-env.sh/bat` and `datanode-env.sh/bat`) are used to configure Java environment parameters for ConfigNode and DataNode, such as JVM settings. These configurations are passed to the JVM when ConfigNode or DataNode starts. - -### 3.1 **confignode-env.sh/bat** - -- MEMORY_SIZE - -| Name | MEMORY_SIZE | -| ----------- | ------------------------------------------------------------ | -| Description | Memory size allocated when IoTDB ConfigNode starts. | -| Type | String | -| Default | Depends on the operating system and machine configuration. Defaults to 3/10 of the machine's memory, capped at 16G. | -| Effective | Restart required | - -- ON_HEAP_MEMORY - -| Name | ON_HEAP_MEMORY | -| ----------- | ------------------------------------------------------------ | -| Description | On-heap memory size available for IoTDB ConfigNode. 
Previously named `MAX_HEAP_SIZE`. | -| Type | String | -| Default | Depends on the `MEMORY_SIZE` configuration. | -| Effective | Restart required | - -- OFF_HEAP_MEMORY - -| Name | OFF_HEAP_MEMORY | -| ----------- | ------------------------------------------------------------ | -| Description | Off-heap memory size available for IoTDB ConfigNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. | -| Type | String | -| Default | Depends on the `MEMORY_SIZE` configuration. | -| Effective | Restart required | - -### 3.2 **datanode-env.sh/bat** - -- MEMORY_SIZE - -| Name | MEMORY_SIZE | -| ----------- | ------------------------------------------------------------ | -| Description | Memory size allocated when IoTDB DataNode starts. | -| Type | String | -| Default | Depends on the operating system and machine configuration. Defaults to 1/2 of the machine's memory. | -| Effective | Restart required | - -- ON_HEAP_MEMORY - -| Name | ON_HEAP_MEMORY | -| ----------- | ------------------------------------------------------------ | -| Description | On-heap memory size available for IoTDB DataNode. Previously named `MAX_HEAP_SIZE`. | -| Type | String | -| Default | Depends on the `MEMORY_SIZE` configuration. | -| Effective | Restart required | - -- OFF_HEAP_MEMORY - -| Name | OFF_HEAP_MEMORY | -| ----------- | ------------------------------------------------------------ | -| Description | Off-heap memory size available for IoTDB DataNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. | -| Type | String | -| Default | Depends on the `MEMORY_SIZE` configuration. | -| Effective | Restart required | - -## 4. System Parameters (`iotdb-system.properties.template`) - -The `iotdb-system.properties` file contains various configurations for managing IoTDB clusters, nodes, replication, directories, monitoring, SSL, connections, object storage, tier management, and REST services. Below is a detailed breakdown of the parameters: - -### 4.1 Cluster Configuration - -- cluster_name - -| Name | cluster_name | -| ----------- | --------------------------------------------------------- | -| Description | Name of the cluster. | -| Type | String | -| Default | default_cluster | -| Effective | Use CLI: `set configuration cluster_name='xxx'`. | -| Note | Changes are distributed across nodes. Changes may not propagate to all nodes in case of network issues or node failures. Nodes that fail to update must manually modify `cluster_name` in their configuration files and restart. Under normal circumstances, it is not recommended to modify `cluster_name` by manually modifying configuration files or to perform hot-loading via `load configuration` method. | - -### 4.2 Seed ConfigNode - -- cn_seed_config_node - -| Name | cn_seed_config_node | -| ----------- | ------------------------------------------------------------ | -| Description | Address of the seed ConfigNode for Confignode to join the cluster. | -| Type | String | -| Default | 127.0.0.1:10710 | -| Effective | Modify before the first startup. | - -- dn_seed_config_node - -| Name | dn_seed_config_node | -| ----------- | ------------------------------------------------------------ | -| Description | Address of the seed ConfigNode for Datanode to join the cluster. | -| Type | String | -| Default | 127.0.0.1:10710 | -| Effective | Modify before the first startup. | - -### 4.3 Node RPC Configuration - -- cn_internal_address - -| Name | cn_internal_address | -| ----------- | ---------------------------------------------- | -| Description | Internal address for ConfigNode communication. 
| -| Type | String | -| Default | 127.0.0.1 | -| Effective | Modify before the first startup. | - -- cn_internal_port - -| Name | cn_internal_port | -| ----------- | ------------------------------------------- | -| Description | Port for ConfigNode internal communication. | -| Type | Short Int : [0,65535] | -| Default | 10710 | -| Effective | Modify before the first startup. | - -- cn_consensus_port - -| Name | cn_consensus_port | -| ----------- | ----------------------------------------------------- | -| Description | Port for ConfigNode consensus protocol communication. | -| Type | Short Int : [0,65535] | -| Default | 10720 | -| Effective | Modify before the first startup. | - -- dn_rpc_address - -| Name | dn_rpc_address | -| ----------- |---------------------------------| -| Description | Address for client RPC service. | -| Type | String | -| Default | 127.0.0.1 | -| Effective | Restart required. | - -- dn_rpc_port - -| Name | dn_rpc_port | -| ----------- | ---------------------------- | -| Description | Port for client RPC service. | -| Type | Short Int : [0,65535] | -| Default | 6667 | -| Effective | Restart required. | - -- dn_internal_address - -| Name | dn_internal_address | -| ----------- | -------------------------------------------- | -| Description | Internal address for DataNode communication. | -| Type | string | -| Default | 127.0.0.1 | -| Effective | Modify before the first startup. | - -- dn_internal_port - -| Name | dn_internal_port | -| ----------- | ----------------------------------------- | -| Description | Port for DataNode internal communication. | -| Type | int | -| Default | 10730 | -| Effective | Modify before the first startup. | - -- dn_mpp_data_exchange_port - -| Name | dn_mpp_data_exchange_port | -| ----------- | -------------------------------- | -| Description | Port for MPP data exchange. | -| Type | int | -| Default | 10740 | -| Effective | Modify before the first startup. | - -- dn_schema_region_consensus_port - -| Name | dn_schema_region_consensus_port | -| ----------- | ------------------------------------------------------------ | -| Description | Port for Datanode SchemaRegion consensus protocol communication. | -| Type | int | -| Default | 10750 | -| Effective | Modify before the first startup. | - -- dn_data_region_consensus_port - -| Name | dn_data_region_consensus_port | -| ----------- | ------------------------------------------------------------ | -| Description | Port for Datanode DataRegion consensus protocol communication. | -| Type | int | -| Default | 10760 | -| Effective | Modify before the first startup. | - -- dn_join_cluster_retry_interval_ms - -| Name | dn_join_cluster_retry_interval_ms | -| ----------- | --------------------------------------------------- | -| Description | Interval for DataNode to retry joining the cluster. | -| Type | long | -| Default | 5000 | -| Effective | Restart required. | - -### 4.4 Replication configuration - -- config_node_consensus_protocol_class - -| Name | config_node_consensus_protocol_class | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus protocol for ConfigNode replication, only supports RatisConsensus | -| Type | String | -| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | -| Effective | Modify before the first startup. 
| - -- schema_replication_factor - -| Name | schema_replication_factor | -| ----------- | ------------------------------------------------------------ | -| Description | Default schema replication factor for databases. | -| Type | int32 | -| Default | 1 | -| Effective | Restart required. Takes effect on the new database after restarting. | - -- schema_region_consensus_protocol_class - -| Name | schema_region_consensus_protocol_class | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus protocol for schema region replication. Only supports RatisConsensus when multi-replications. | -| Type | String | -| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | -| Effective | Modify before the first startup. | - -- data_replication_factor - -| Name | data_replication_factor | -| ----------- | ------------------------------------------------------------ | -| Description | Default data replication factor for databases. | -| Type | int32 | -| Default | 1 | -| Effective | Restart required. Takes effect on the new database after restarting. | - -- data_region_consensus_protocol_class - -| Name | data_region_consensus_protocol_class | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus protocol for data region replication. Supports IoTConsensus or RatisConsensus when multi-replications. | -| Type | String | -| Default | org.apache.iotdb.consensus.iot.IoTConsensus | -| Effective | Modify before the first startup. | - -### 4.5 Directory configuration - -- cn_system_dir - -| Name | cn_system_dir | -| ----------- | ----------------------------------------------------------- | -| Description | System data storage path for ConfigNode. | -| Type | String | -| Default | data/confignode/system(Windows:data\\configndoe\\system) | -| Effective | Restart required | - -- cn_consensus_dir - -| Name | cn_consensus_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus protocol data storage path for ConfigNode. | -| Type | String | -| Default | data/confignode/consensus(Windows:data\\configndoe\\consensus) | -| Effective | Restart required | - -- cn_pipe_receiver_file_dir - -| Name | cn_pipe_receiver_file_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Directory for pipe receiver files in ConfigNode. | -| Type | String | -| Default | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) | -| Effective | Restart required | - -- dn_system_dir - -| Name | dn_system_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Schema storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | data/datanode/system(Windows:data\\datanode\\system) | -| Effective | Restart required | - -- dn_data_dirs - -| Name | dn_data_dirs | -| ----------- | ------------------------------------------------------------ | -| Description | Data storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. 
| -| Type | String | -| Default | data/datanode/data(Windows:data\\datanode\\data) | -| Effective | Restart required | - -- dn_multi_dir_strategy - -| Name | dn_multi_dir_strategy | -| ----------- | ------------------------------------------------------------ | -| Description | The strategy used by IoTDB to select directories in `data_dirs` for TsFiles. You can use either the simple class name or the fully qualified class name. The system provides the following two strategies: 1. SequenceStrategy: IoTDB selects directories sequentially, iterating through all directories in `data_dirs` in a round-robin manner. 2. MaxDiskUsableSpaceFirstStrategy IoTDB prioritizes the directory in `data_dirs` with the largest disk free space. To implement a custom strategy: 1. Inherit the `org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy `class and implement your own strategy method. 2. Fill in the configuration item with the fully qualified class name of your implementation (package name + class name, e.g., `UserDefineStrategyPackage`). 3. Add the JAR file containing your custom class to the project. | -| Type | String | -| Default | SequenceStrategy | -| Effective | Hot reload. | - -- dn_consensus_dir - -| Name | dn_consensus_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus log storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | data/datanode/consensus(Windows:data\\datanode\\consensus) | -| Effective | Restart required | - -- dn_wal_dirs - -| Name | dn_wal_dirs | -| ----------- | ------------------------------------------------------------ | -| Description | Write-ahead log (WAL) storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | data/datanode/wal(Windows:data\\datanode\\wal) | -| Effective | Restart required | - -- dn_tracing_dir - -| Name | dn_tracing_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Tracing root directory for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | datanode/tracing(Windows:datanode\\tracing) | -| Effective | Restart required | - -- dn_sync_dir - -| Name | dn_sync_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Sync storage path for DataNode.By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | data/datanode/sync(Windows:data\\datanode\\sync) | -| Effective | Restart required | - -- sort_tmp_dir - -| Name | sort_tmp_dir | -| ----------- | ------------------------------------------------- | -| Description | Temporary directory for sorting operations. 
### 4.6 Metric Configuration

- cn_metric_reporter_list

| Name | cn_metric_reporter_list |
| ----------- | ---- |
| Description | Systems to which ConfigNode metrics are reported. |
| Type | String |
| Default | None |
| Effective | Restart required. |

- cn_metric_level

| Name | cn_metric_level |
| ----------- | ---- |
| Description | Level of detail for ConfigNode metrics. |
| Type | String |
| Default | IMPORTANT |
| Effective | Restart required. |

- cn_metric_async_collect_period

| Name | cn_metric_async_collect_period |
| ----------- | ---- |
| Description | Period for asynchronous metric collection in ConfigNode (in seconds). |
| Type | int |
| Default | 5 |
| Effective | Restart required. |

- cn_metric_prometheus_reporter_port

| Name | cn_metric_prometheus_reporter_port |
| ----------- | ---- |
| Description | Port for Prometheus metric reporting in ConfigNode. |
| Type | int |
| Default | 9091 |
| Effective | Restart required. |

- dn_metric_reporter_list

| Name | dn_metric_reporter_list |
| ----------- | ---- |
| Description | Systems to which DataNode metrics are reported. |
| Type | String |
| Default | None |
| Effective | Restart required. |

- dn_metric_level

| Name | dn_metric_level |
| ----------- | ---- |
| Description | Level of detail for DataNode metrics. |
| Type | String |
| Default | IMPORTANT |
| Effective | Restart required. |

- dn_metric_async_collect_period

| Name | dn_metric_async_collect_period |
| ----------- | ---- |
| Description | Period for asynchronous metric collection in DataNode (in seconds). |
| Type | int |
| Default | 5 |
| Effective | Restart required. |

- dn_metric_prometheus_reporter_port

| Name | dn_metric_prometheus_reporter_port |
| ----------- | ---- |
| Description | Port for Prometheus metric reporting in DataNode. |
| Type | int |
| Default | 9092 |
| Effective | Restart required. |

- dn_metric_internal_reporter_type

| Name | dn_metric_internal_reporter_type |
| ----------- | ---- |
| Description | Internal reporter type for DataNode metrics, used by internal monitoring to check whether data has been written and flushed successfully. |
| Type | String |
| Default | IOTDB |
| Effective | Restart required. |
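A sketch of a Prometheus-facing metric setup; the reporter name `PROMETHEUS` is an assumption consistent with the Prometheus port options above:

```properties
# Assumed reporter name; exposes DataNode metrics for Prometheus to scrape.
dn_metric_reporter_list=PROMETHEUS
dn_metric_level=IMPORTANT
dn_metric_prometheus_reporter_port=9092
```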
### 4.7 SSL Configuration

- enable_thrift_ssl

| Name | enable_thrift_ssl |
| ----------- | ---- |
| Description | Enables SSL encryption for RPC communication. |
| Type | Boolean |
| Default | false |
| Effective | Restart required. |

- enable_https

| Name | enable_https |
| ----------- | ---- |
| Description | Enables SSL for REST services. |
| Type | Boolean |
| Default | false |
| Effective | Restart required. |

- key_store_path

| Name | key_store_path |
| ----------- | ---- |
| Description | Path to the SSL certificate. |
| Type | String |
| Default | None |
| Effective | Restart required. |

- key_store_pwd

| Name | key_store_pwd |
| ----------- | ---- |
| Description | Password for the SSL certificate. |
| Type | String |
| Default | None |
| Effective | Restart required. |
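A hedged sketch of enabling SSL for both RPC and REST; the keystore path and password are placeholders:

```properties
# One keystore used for Thrift RPC and HTTPS (REST); replace the placeholders.
enable_thrift_ssl=true
enable_https=true
key_store_path=/path/to/keystore.jks
key_store_pwd=changeit
```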
### 4.8 Connection Configuration

- cn_rpc_thrift_compression_enable

| Name | cn_rpc_thrift_compression_enable |
| ----------- | ---- |
| Description | Enables Thrift compression for RPC. |
| Type | Boolean |
| Default | false |
| Effective | Restart required. |

- cn_rpc_max_concurrent_client_num

| Name | cn_rpc_max_concurrent_client_num |
| ----------- | ---- |
| Description | Maximum number of concurrent RPC clients. |
| Type | int |
| Default | 3000 |
| Effective | Restart required. |

- cn_connection_timeout_ms

| Name | cn_connection_timeout_ms |
| ----------- | ---- |
| Description | Connection timeout for ConfigNode (in milliseconds). |
| Type | int |
| Default | 60000 |
| Effective | Restart required. |

- cn_selector_thread_nums_of_client_manager

| Name | cn_selector_thread_nums_of_client_manager |
| ----------- | ---- |
| Description | Number of selector threads for client management in ConfigNode. |
| Type | int |
| Default | 1 |
| Effective | Restart required. |

- cn_max_client_count_for_each_node_in_client_manager

| Name | cn_max_client_count_for_each_node_in_client_manager |
| ----------- | ---- |
| Description | Maximum clients per node in the ConfigNode client manager. |
| Type | int |
| Default | 300 |
| Effective | Restart required. |

- dn_session_timeout_threshold

| Name | dn_session_timeout_threshold |
| ----------- | ---- |
| Description | Maximum idle time for DataNode sessions. |
| Type | int |
| Default | 0 |
| Effective | Restart required. |

- dn_rpc_thrift_compression_enable

| Name | dn_rpc_thrift_compression_enable |
| ----------- | ---- |
| Description | Enables Thrift compression for DataNode RPC. |
| Type | Boolean |
| Default | false |
| Effective | Restart required. |

- dn_rpc_advanced_compression_enable

| Name | dn_rpc_advanced_compression_enable |
| ----------- | ---- |
| Description | Enables advanced Thrift compression for DataNode RPC. |
| Type | Boolean |
| Default | false |
| Effective | Restart required. |

- dn_rpc_selector_thread_count

| Name | dn_rpc_selector_thread_count |
| ----------- | ---- |
| Description | Number of selector threads for DataNode RPC. |
| Type | int |
| Default | 1 |
| Effective | Restart required. |

- dn_rpc_min_concurrent_client_num

| Name | dn_rpc_min_concurrent_client_num |
| ----------- | ---- |
| Description | Minimum number of concurrent RPC clients for DataNode. |
| Type | Short Int : [0,65535] |
| Default | 1 |
| Effective | Restart required. |

- dn_rpc_max_concurrent_client_num

| Name | dn_rpc_max_concurrent_client_num |
| ----------- | ---- |
| Description | Maximum number of concurrent RPC clients for DataNode. |
| Type | Short Int : [0,65535] |
| Default | 1000 |
| Effective | Restart required. |

- dn_thrift_max_frame_size

| Name | dn_thrift_max_frame_size |
| ----------- | ---- |
| Description | Maximum frame size for RPC requests/responses. |
| Type | long |
| Default | 536870912 (512 MB) |
| Effective | Restart required. |

- dn_thrift_init_buffer_size

| Name | dn_thrift_init_buffer_size |
| ----------- | ---- |
| Description | Initial buffer size for Thrift RPC. |
| Type | long |
| Default | 1024 |
| Effective | Restart required. |

- dn_connection_timeout_ms

| Name | dn_connection_timeout_ms |
| ----------- | ---- |
| Description | Connection timeout for DataNode (in milliseconds). |
| Type | int |
| Default | 60000 |
| Effective | Restart required. |

- dn_selector_thread_count_of_client_manager

| Name | dn_selector_thread_count_of_client_manager |
| ----------- | ---- |
| Description | Number of selector threads (TAsyncClientManager) for asynchronous clients in a client manager. |
| Type | int |
| Default | 1 |
| Effective | Restart required. |

- dn_max_client_count_for_each_node_in_client_manager

| Name | dn_max_client_count_for_each_node_in_client_manager |
| ----------- | ---- |
| Description | Maximum clients per node in the DataNode client manager. |
| Type | int |
| Default | 300 |
| Effective | Restart required. |

### 4.9 Object storage management

- remote_tsfile_cache_dirs

| Name | remote_tsfile_cache_dirs |
| ----------- | ---- |
| Description | Local cache directory for cloud storage. |
| Type | String |
| Default | data/datanode/data/cache |
| Effective | Restart required. |

- remote_tsfile_cache_page_size_in_kb

| Name | remote_tsfile_cache_page_size_in_kb |
| ----------- | ---- |
| Description | Block size, in KB, of cached files for cloud storage. |
| Type | int |
| Default | 20480 |
| Effective | Restart required. |

- remote_tsfile_cache_max_disk_usage_in_mb

| Name | remote_tsfile_cache_max_disk_usage_in_mb |
| ----------- | ---- |
| Description | Maximum disk usage, in MB, of the cloud storage cache. |
| Type | long |
| Default | 51200 |
| Effective | Restart required. |

- object_storage_type

| Name | object_storage_type |
| ----------- | ---- |
| Description | Type of cloud storage. |
| Type | String |
| Default | AWS_S3 |
| Effective | Restart required. |

- object_storage_endpoint

| Name | object_storage_endpoint |
| ----------- | ---- |
| Description | Endpoint for cloud storage. |
| Type | String |
| Default | None |
| Effective | Restart required. |

- object_storage_bucket

| Name | object_storage_bucket |
| ----------- | ---- |
| Description | Bucket name for cloud storage. |
| Type | String |
| Default | iotdb_data |
| Effective | Restart required. |

- object_storage_access_key

| Name | object_storage_access_key |
| ----------- | ---- |
| Description | Access key for cloud storage. |
| Type | String |
| Default | None |
| Effective | Restart required. |

- object_storage_access_secret

| Name | object_storage_access_secret |
| ----------- | ---- |
| Description | Access secret for cloud storage. |
| Type | String |
| Default | None |
| Effective | Restart required. |
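An illustrative S3-backed configuration; the endpoint, bucket, and credentials are placeholders to be replaced with real values:

```properties
# Store data on AWS S3 and cache remote TsFiles locally (placeholder values).
object_storage_type=AWS_S3
object_storage_endpoint=s3.us-east-1.amazonaws.com
object_storage_bucket=iotdb_data
object_storage_access_key=YOUR_ACCESS_KEY
object_storage_access_secret=YOUR_ACCESS_SECRET
remote_tsfile_cache_dirs=data/datanode/data/cache
remote_tsfile_cache_max_disk_usage_in_mb=51200
```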
### 4.10 Tier management

- dn_default_space_usage_thresholds

| Name | dn_default_space_usage_thresholds |
| ----------- | ---- |
| Description | Disk usage threshold; data is moved to the next tier when a tier's usage exceeds this threshold. If tiered storage is enabled, separate the thresholds of different tiers with semicolons (";"). |
| Type | double |
| Default | 0.85 |
| Effective | Hot reload |

- dn_tier_full_policy

| Name | dn_tier_full_policy |
| ----------- | ---- |
| Description | How to handle the data of the last tier when its used space exceeds its dn_default_space_usage_thresholds. |
| Type | String |
| Default | NULL |
| Effective | Hot reload |

- migrate_thread_count

| Name | migrate_thread_count |
| ----------- | ---- |
| Description | Thread pool size for migration operations in the DataNode's data directories. |
| Type | int |
| Default | 1 |
| Effective | Hot reload |

- tiered_storage_migrate_speed_limit_bytes_per_sec

| Name | tiered_storage_migrate_speed_limit_bytes_per_sec |
| ----------- | ---- |
| Description | Maximum migration speed between tiers, in bytes per second. |
| Type | int |
| Default | 10485760 |
| Effective | Hot reload |
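For illustration, a two-tier layout combining these options with `dn_data_dirs`; the paths are hypothetical:

```properties
# Fast SSD tier and capacity HDD tier; tiers are separated by ";".
dn_data_dirs=/ssd/iotdb/data;/hdd/iotdb/data
# One threshold per tier: data leaves a tier once its usage exceeds the threshold;
# the last tier is governed by dn_tier_full_policy.
dn_default_space_usage_thresholds=0.85;0.95
```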
### 4.11 REST Service Configuration

- enable_rest_service

| Name | enable_rest_service |
| ----------- | ---- |
| Description | Whether the REST service is enabled. |
| Type | Boolean |
| Default | false |
| Effective | Restart required. |

- rest_service_port

| Name | rest_service_port |
| ----------- | ---- |
| Description | The binding port of the REST service. |
| Type | int32 |
| Default | 18080 |
| Effective | Restart required. |

- enable_swagger

| Name | enable_swagger |
| ----------- | ---- |
| Description | Whether to expose the REST interface description through Swagger, e.g. http://ip:port/swagger.json. |
| Type | Boolean |
| Default | false |
| Effective | Restart required. |

- rest_query_default_row_size_limit

| Name | rest_query_default_row_size_limit |
| ----------- | ---- |
| Description | The default row limit of a REST query response when the rowSize parameter is not given in the request. |
| Type | int32 |
| Default | 10000 |
| Effective | Restart required. |

- cache_expire_in_seconds

| Name | cache_expire_in_seconds |
| ----------- | ---- |
| Description | Expiration time of the user login information cache (in seconds). |
| Type | int32 |
| Default | 28800 |
| Effective | Restart required. |

- cache_max_num

| Name | cache_max_num |
| ----------- | ---- |
| Description | Maximum number of users stored in the user login cache. |
| Type | int32 |
| Default | 100 |
| Effective | Restart required. |

- cache_init_num

| Name | cache_init_num |
| ----------- | ---- |
| Description | Initial capacity of the user login cache. |
| Type | int32 |
| Default | 10 |
| Effective | Restart required. |

- client_auth

| Name | client_auth |
| ----------- | ---- |
| Description | Whether client authentication is required. |
| Type | boolean |
| Default | false |
| Effective | Restart required. |

- trust_store_path

| Name | trust_store_path |
| ----------- | ---- |
| Description | SSL trust store path. |
| Type | String |
| Default | "" |
| Effective | Restart required. |

- trust_store_pwd

| Name | trust_store_pwd |
| ----------- | ---- |
| Description | SSL trust store password. |
| Type | String |
| Default | "" |
| Effective | Restart required. |

- idle_timeout_in_seconds

| Name | idle_timeout_in_seconds |
| ----------- | ---- |
| Description | SSL timeout (in seconds). |
| Type | int32 |
| Default | 5000 |
| Effective | Restart required. |
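A minimal REST setup for illustration; with these values the interface description is served at http://ip:18080/swagger.json, as noted above:

```properties
# Enable the REST service with Swagger on the default port.
enable_rest_service=true
rest_service_port=18080
enable_swagger=true
rest_query_default_row_size_limit=10000
```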
### 4.12 Load balancing configuration

- series_slot_num

| Name | series_slot_num |
| ----------- | ---- |
| Description | Number of SeriesPartitionSlots per Database. |
| Type | int32 |
| Default | 10000 |
| Effective | Modify before the first startup. |

- series_partition_executor_class

| Name | series_partition_executor_class |
| ----------- | ---- |
| Description | SeriesPartitionSlot executor class. |
| Type | String |
| Default | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor |
| Effective | Modify before the first startup. |

- schema_region_group_extension_policy

| Name | schema_region_group_extension_policy |
| ----------- | ---- |
| Description | The extension policy of SchemaRegionGroups for each Database. |
| Type | string |
| Default | AUTO |
| Effective | Restart required. |

- default_schema_region_group_num_per_database

| Name | default_schema_region_group_num_per_database |
| ----------- | ---- |
| Description | When schema_region_group_extension_policy=CUSTOM, this is the fixed number of SchemaRegionGroups for each Database. When schema_region_group_extension_policy=AUTO, this is the minimum number of SchemaRegionGroups for each Database. |
| Type | int |
| Default | 1 |
| Effective | Restart required. |

- schema_region_per_data_node

| Name | schema_region_per_data_node |
| ----------- | ---- |
| Description | Takes effect only when schema_region_group_extension_policy=AUTO. The maximum number of SchemaRegions each DataNode is expected to manage. |
| Type | double |
| Default | 1.0 |
| Effective | Restart required. |

- data_region_group_extension_policy

| Name | data_region_group_extension_policy |
| ----------- | ---- |
| Description | The extension policy of DataRegionGroups for each Database. |
| Type | string |
| Default | AUTO |
| Effective | Restart required. |

- default_data_region_group_num_per_database

| Name | default_data_region_group_num_per_database |
| ----------- | ---- |
| Description | When data_region_group_extension_policy=CUSTOM, this is the fixed number of DataRegionGroups for each Database. When data_region_group_extension_policy=AUTO, this is the minimum number of DataRegionGroups for each Database. |
| Type | int |
| Default | 2 |
| Effective | Restart required. |

- data_region_per_data_node

| Name | data_region_per_data_node |
| ----------- | ---- |
| Description | Takes effect only when data_region_group_extension_policy=AUTO. The maximum number of DataRegions each DataNode is expected to manage. |
| Type | double |
| Default | 5.0 |
| Effective | Restart required. |

- enable_auto_leader_balance_for_ratis_consensus

| Name | enable_auto_leader_balance_for_ratis_consensus |
| ----------- | ---- |
| Description | Whether to enable automatic leader balancing for the RatisConsensus protocol. |
| Type | Boolean |
| Default | true |
| Effective | Restart required. |

- enable_auto_leader_balance_for_iot_consensus

| Name | enable_auto_leader_balance_for_iot_consensus |
| ----------- | ---- |
| Description | Whether to enable automatic leader balancing for the IoTConsensus protocol. |
| Type | Boolean |
| Default | true |
| Effective | Restart required. |
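For illustration, a CUSTOM extension policy built from the parameters above; the counts are examples only:

```properties
# Pin each Database to exactly two DataRegionGroups instead of AUTO extension.
data_region_group_extension_policy=CUSTOM
default_data_region_group_num_per_database=2
```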
### 4.13 Cluster management

- time_partition_origin

| Name | time_partition_origin |
| ----------- | ---- |
| Description | Time partition origin in milliseconds; the default is zero. |
| Type | Long |
| Unit | ms |
| Default | 0 |
| Effective | Modify before the first startup. |

- time_partition_interval

| Name | time_partition_interval |
| ----------- | ---- |
| Description | Time partition interval in milliseconds, used to partition data inside each data region; the default equals one week. |
| Type | Long |
| Unit | ms |
| Default | 604800000 |
| Effective | Modify before the first startup. |

- heartbeat_interval_in_ms

| Name | heartbeat_interval_in_ms |
| ----------- | ---- |
| Description | The heartbeat interval in milliseconds. |
| Type | Long |
| Unit | ms |
| Default | 1000 |
| Effective | Restart required. |

- disk_space_warning_threshold

| Name | disk_space_warning_threshold |
| ----------- | ---- |
| Description | Remaining disk space ratio below which the DataNode is set to ReadOnly status. |
| Type | double (percentage) |
| Default | 0.05 |
| Effective | Restart required. |
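As a worked example of the units: the default `time_partition_interval` of 604800000 ms is 7 × 24 × 3600 × 1000, i.e. one week. A daily-partition sketch:

```properties
# Daily partitions (24 * 3600 * 1000 = 86400000 ms), anchored at epoch 0.
# Both values must be set before the first startup.
time_partition_origin=0
time_partition_interval=86400000
```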
### 4.14 Memory Control Configuration

- datanode_memory_proportion

| Name | datanode_memory_proportion |
| ----------- | ---- |
| Description | Memory allocation ratio: StorageEngine : QueryEngine : SchemaEngine : Consensus : StreamingEngine : free memory. |
| Type | Ratio |
| Default | 3:3:1:1:1:1 |
| Effective | Restart required. |

- schema_memory_proportion

| Name | schema_memory_proportion |
| ----------- | ---- |
| Description | Schema memory allocation ratio: SchemaRegion : SchemaCache : PartitionCache. |
| Type | Ratio |
| Default | 5:4:1 |
| Effective | Restart required. |

- storage_engine_memory_proportion

| Name | storage_engine_memory_proportion |
| ----------- | ---- |
| Description | Memory allocation ratio in the StorageEngine: Write : Compaction. |
| Type | Ratio |
| Default | 8:2 |
| Effective | Restart required. |

- write_memory_proportion

| Name | write_memory_proportion |
| ----------- | ---- |
| Description | Memory allocation ratio in writing: Memtable : TimePartitionInfo. |
| Type | Ratio |
| Default | 19:1 |
| Effective | Restart required. |

- primitive_array_size

| Name | primitive_array_size |
| ----------- | ---- |
| Description | Primitive array size (length of each array) in the array pool. |
| Type | int32 |
| Default | 64 |
| Effective | Restart required. |

- chunk_metadata_size_proportion

| Name | chunk_metadata_size_proportion |
| ----------- | ---- |
| Description | Ratio of compaction memory used to keep chunk metadata in memory during compaction. |
| Type | Double |
| Default | 0.1 |
| Effective | Restart required. |

- flush_proportion

| Name | flush_proportion |
| ----------- | ---- |
| Description | Ratio of memtable memory that triggers flushing to disk, 0.4 by default. If the write load is extremely high (e.g. batch=1000), it can be set lower than the default, e.g. 0.2. |
| Type | Double |
| Default | 0.4 |
| Effective | Restart required. |

- buffered_arrays_memory_proportion

| Name | buffered_arrays_memory_proportion |
| ----------- | ---- |
| Description | Ratio of memtable memory allocated for buffered arrays, 0.6 by default. |
| Type | Double |
| Default | 0.6 |
| Effective | Restart required. |

- reject_proportion

| Name | reject_proportion |
| ----------- | ---- |
| Description | Ratio of memtable memory at which insertions are rejected, 0.8 by default. If the write load is extremely high (e.g. batch=1000) and physical memory is large enough, it can be set higher than the default, e.g. 0.9. |
| Type | Double |
| Default | 0.8 |
| Effective | Restart required. |

- device_path_cache_proportion

| Name | device_path_cache_proportion |
| ----------- | ---- |
| Description | Ratio of memtable memory used for the DevicePathCache, a deviceId cache that keeps only one copy of each deviceId in memory. |
| Type | Double |
| Default | 0.05 |
| Effective | Restart required. |

- write_memory_variation_report_proportion

| Name | write_memory_variation_report_proportion |
| ----------- | ---- |
| Description | If the memory cost of a data region grows by more than this proportion of the memory allocated for writing, report it to the system. The default value is 0.001. |
| Type | Double |
| Default | 0.001 |
| Effective | Restart required. |

- check_period_when_insert_blocked

| Name | check_period_when_insert_blocked |
| ----------- | ---- |
| Description | When an insertion is rejected, the waiting period (in ms) before checking the system again, 50 by default. If insertions keep being rejected and the read load is low, it can be set larger. |
| Type | int32 |
| Default | 50 |
| Effective | Restart required. |

- io_task_queue_size_for_flushing

| Name | io_task_queue_size_for_flushing |
| ----------- | ---- |
| Description | Size of the ioTaskQueue. The default value is 10. |
| Type | int32 |
| Default | 10 |
| Effective | Restart required. |

- enable_query_memory_estimation

| Name | enable_query_memory_estimation |
| ----------- | ---- |
| Description | If true, the possible memory footprint of each query is estimated before execution, and the query is denied if the estimate exceeds the current free memory. |
| Type | bool |
| Default | true |
| Effective | Hot reload |
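To make the ratios concrete, a worked example assuming a 30 GB DataNode heap (the heap size is hypothetical):

```properties
# 3:3:1:1:1:1 over 30 GB: StorageEngine and QueryEngine get 9 GB each;
# SchemaEngine, Consensus, StreamingEngine, and free memory get 3 GB each.
datanode_memory_proportion=3:3:1:1:1:1
# 8:2 over the 9 GB StorageEngine share: 7.2 GB for writes, 1.8 GB for compaction.
storage_engine_memory_proportion=8:2
```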
### 4.15 Schema Engine Configuration

- schema_engine_mode

| Name | schema_engine_mode |
| ----------- | ---- |
| Description | Schema management mode of the schema engine. Currently Memory and PBTree are supported. This setting must be identical on all DataNodes in a cluster. |
| Type | string |
| Default | Memory |
| Effective | Modify before the first startup. |

- partition_cache_size

| Name | partition_cache_size |
| ----------- | ---- |
| Description | Cache size for partitions. |
| Type | Int32 |
| Default | 1000 |
| Effective | Restart required. |

- sync_mlog_period_in_ms

| Name | sync_mlog_period_in_ms |
| ----------- | ---- |
| Description | The period (in milliseconds) at which the metadata log is forced to disk. If sync_mlog_period_in_ms=0, the metadata log is forced to disk after every update. Setting this parameter to 0 may slow down operations on slow disks. |
| Type | Int64 |
| Default | 100 |
| Effective | Restart required. |

- tag_attribute_flush_interval

| Name | tag_attribute_flush_interval |
| ----------- | ---- |
| Description | Interval, in number of records, at which tag and attribute records are force-flushed to disk. |
| Type | int32 |
| Default | 1000 |
| Effective | Modify before the first startup. |

- tag_attribute_total_size

| Name | tag_attribute_total_size |
| ----------- | ---- |
| Description | Maximum size of a storage block for the tags and attributes of one time series. |
| Type | int32 |
| Default | 700 |
| Effective | Modify before the first startup. |

- max_measurement_num_of_internal_request

| Name | max_measurement_num_of_internal_request |
| ----------- | ---- |
| Description | Maximum measurement count of an internal request. When creating time series with Session.createMultiTimeseries, a user plan whose time series count exceeds this number is split into several plans, each containing no more than this many time series. |
| Type | Int32 |
| Default | 10000 |
| Effective | Restart required. |

- datanode_schema_cache_eviction_policy

| Name | datanode_schema_cache_eviction_policy |
| ----------- | ---- |
| Description | Eviction policy of the DataNodeSchemaCache. |
| Type | String |
| Default | FIFO |
| Effective | Restart required. |

- cluster_timeseries_limit_threshold

| Name | cluster_timeseries_limit_threshold |
| ----------- | ---- |
| Description | Maximum number of time series allowed in the cluster. |
| Type | Int32 |
| Default | -1 |
| Effective | Restart required. |

- cluster_device_limit_threshold

| Name | cluster_device_limit_threshold |
| ----------- | ---- |
| Description | Maximum number of devices allowed in the cluster. |
| Type | Int32 |
| Default | -1 |
| Effective | Restart required. |

- database_limit_threshold

| Name | database_limit_threshold |
| ----------- | ---- |
| Description | Maximum number of Databases allowed in the cluster. |
| Type | Int32 |
| Default | -1 |
| Effective | Restart required. |
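An illustrative set of cluster-wide quotas; -1 (the default) means no limit, and the figures below are examples only:

```properties
# Example quotas limiting cluster-wide schema counts.
cluster_timeseries_limit_threshold=1000000
cluster_device_limit_threshold=100000
database_limit_threshold=10
```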
### 4.16 Configurations for creating schema automatically

- enable_auto_create_schema

| Name | enable_auto_create_schema |
| ----------- | ---- |
| Description | Whether automatic schema creation is enabled. |
| Value | true or false |
| Default | true |
| Effective | Restart required. |

- default_storage_group_level

| Name | default_storage_group_level |
| ----------- | ---- |
| Description | Database level when automatic schema creation is enabled. E.g., given root.sg0.d1.s2 and a database level of 1, root.sg0 is set as the database. If an incoming path is shorter than this level, the creation/insertion fails. |
| Value | int32 |
| Default | 1 |
| Effective | Restart required. |

- boolean_string_infer_type

| Name | boolean_string_infer_type |
| ----------- | ---- |
| Description | The data type a time series is registered as when receiving the boolean string "true" or "false". |
| Value | BOOLEAN or TEXT |
| Default | BOOLEAN |
| Effective | Hot reload |

- integer_string_infer_type

| Name | integer_string_infer_type |
| ----------- | ---- |
| Description | The data type a time series is registered as when receiving an integer string, in cases where using float or double may lose precision. |
| Value | INT32, INT64, FLOAT, DOUBLE, TEXT |
| Default | DOUBLE |
| Effective | Hot reload |

- floating_string_infer_type

| Name | floating_string_infer_type |
| ----------- | ---- |
| Description | The data type a time series is registered as when receiving a floating-point string such as "6.7". |
| Value | DOUBLE, FLOAT or TEXT |
| Default | DOUBLE |
| Effective | Hot reload |

- nan_string_infer_type

| Name | nan_string_infer_type |
| ----------- | ---- |
| Description | The data type a time series is registered as when receiving the literal NaN. |
| Value | DOUBLE, FLOAT or TEXT |
| Default | DOUBLE |
| Effective | Hot reload |
- default_boolean_encoding

| Name | default_boolean_encoding |
| ----------- | ---- |
| Description | BOOLEAN encoding when automatic schema creation is enabled. |
| Value | PLAIN, RLE |
| Default | RLE |
| Effective | Hot reload |

- default_int32_encoding

| Name | default_int32_encoding |
| ----------- | ---- |
| Description | INT32 encoding when automatic schema creation is enabled. |
| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA |
| Default | TS_2DIFF |
| Effective | Hot reload |

- default_int64_encoding

| Name | default_int64_encoding |
| ----------- | ---- |
| Description | INT64 encoding when automatic schema creation is enabled. |
| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA |
| Default | TS_2DIFF |
| Effective | Hot reload |

- default_float_encoding

| Name | default_float_encoding |
| ----------- | ---- |
| Description | FLOAT encoding when automatic schema creation is enabled. |
| Value | PLAIN, RLE, TS_2DIFF, GORILLA |
| Default | GORILLA |
| Effective | Hot reload |

- default_double_encoding

| Name | default_double_encoding |
| ----------- | ---- |
| Description | DOUBLE encoding when automatic schema creation is enabled. |
| Value | PLAIN, RLE, TS_2DIFF, GORILLA |
| Default | GORILLA |
| Effective | Hot reload |

- default_text_encoding

| Name | default_text_encoding |
| ----------- | ---- |
| Description | TEXT encoding when automatic schema creation is enabled. |
| Value | PLAIN |
| Default | PLAIN |
| Effective | Hot reload |

- boolean_compressor

| Name | boolean_compressor |
| ----------- | ---- |
| Description | BOOLEAN compression when automatic schema creation is enabled (supported since V2.0.6). |
| Type | String |
| Default | LZ4 |
| Effective | Hot reload |

- int32_compressor

| Name | int32_compressor |
| ----------- | ---- |
| Description | INT32/DATE compression when automatic schema creation is enabled (supported since V2.0.6). |
| Type | String |
| Default | LZ4 |
| Effective | Hot reload |

- int64_compressor

| Name | int64_compressor |
| ----------- | ---- |
| Description | INT64/TIMESTAMP compression when automatic schema creation is enabled (supported since V2.0.6). |
| Type | String |
| Default | LZ4 |
| Effective | Hot reload |

- float_compressor

| Name | float_compressor |
| ----------- | ---- |
| Description | FLOAT compression when automatic schema creation is enabled (supported since V2.0.6). |
| Type | String |
| Default | LZ4 |
| Effective | Hot reload |

- double_compressor

| Name | double_compressor |
| ----------- | ---- |
| Description | DOUBLE compression when automatic schema creation is enabled (supported since V2.0.6). |
| Type | String |
| Default | LZ4 |
| Effective | Hot reload |

- text_compressor

| Name | text_compressor |
| ----------- | ---- |
| Description | TEXT/BINARY/BLOB compression when automatic schema creation is enabled (supported since V2.0.6). |
| Type | String |
| Default | LZ4 |
| Effective | Hot reload |
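Taken together, these settings determine the full schema of an auto-created series. For example, under the defaults below, inserting the string "6.7" into a new path creates a DOUBLE series encoded with GORILLA and compressed with LZ4:

```properties
# Defaults shown; "6.7" -> DOUBLE (floating_string_infer_type),
# encoded with GORILLA and compressed with LZ4.
enable_auto_create_schema=true
floating_string_infer_type=DOUBLE
default_double_encoding=GORILLA
double_compressor=LZ4
```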
### 4.17 Query Configurations

- read_consistency_level

| Name | read_consistency_level |
| ----------- | ---- |
| Description | The read consistency level. Currently supported: strong (default, read from the leader replica) and weak (read from a random replica). |
| Type | String |
| Default | strong |
| Effective | Restart required. |

- meta_data_cache_enable

| Name | meta_data_cache_enable |
| ----------- | ---- |
| Description | Whether to cache metadata (BloomFilter, ChunkMetadata and TimeSeriesMetadata). |
| Type | Boolean |
| Default | true |
| Effective | Restart required. |

- chunk_timeseriesmeta_free_memory_proportion

| Name | chunk_timeseriesmeta_free_memory_proportion |
| ----------- | ---- |
| Description | Read memory allocation ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others. The parameter has the form a:b:c:d:e:f:g:h, where each part is an integer, e.g. 1:1:1:1:1:1:1:1 or 1:100:200:50:200:200:200:50. |
| Type | String |
| Default | 1 : 100 : 200 : 300 : 400 |
| Effective | Restart required. |

- enable_last_cache

| Name | enable_last_cache |
| ----------- | ---- |
| Description | Whether to enable the LAST cache. |
| Type | Boolean |
| Default | true |
| Effective | Restart required. |

- mpp_data_exchange_core_pool_size

| Name | mpp_data_exchange_core_pool_size |
| ----------- | ---- |
| Description | Core size of the MPP data exchange thread pool. |
| Type | int32 |
| Default | 10 |
| Effective | Restart required. |

- mpp_data_exchange_max_pool_size

| Name | mpp_data_exchange_max_pool_size |
| ----------- | ---- |
| Description | Maximum size of the MPP data exchange thread pool. |
| Type | int32 |
| Default | 10 |
| Effective | Restart required. |

- mpp_data_exchange_keep_alive_time_in_ms

| Name | mpp_data_exchange_keep_alive_time_in_ms |
| ----------- | ---- |
| Description | Maximum waiting time for MPP data exchange. |
| Type | int32 |
| Default | 1000 |
| Effective | Restart required. |

- driver_task_execution_time_slice_in_ms

| Name | driver_task_execution_time_slice_in_ms |
| ----------- | ---- |
| Description | Maximum execution time of a DriverTask. |
| Type | int32 |
| Default | 200 |
| Effective | Restart required. |

- max_tsblock_size_in_bytes

| Name | max_tsblock_size_in_bytes |
| ----------- | ---- |
| Description | Maximum capacity of a TsBlock. |
| Type | int32 |
| Default | 131072 |
| Effective | Restart required. |
- max_tsblock_line_numbers

| Name | max_tsblock_line_numbers |
| ----------- | ---- |
| Description | Maximum number of lines in a single TsBlock. |
| Type | int32 |
| Default | 1000 |
| Effective | Restart required. |

- slow_query_threshold

| Name | slow_query_threshold |
| ----------- | ---- |
| Description | Time cost (ms) threshold for slow queries. |
| Type | long |
| Default | 10000 |
| Effective | Hot reload |

- query_cost_stat_window

| Name | query_cost_stat_window |
| ----------- | ---- |
| Description | Time window (min) for recording historical queries. |
| Type | Int32 |
| Default | 0 |
| Effective | Hot reload |

- query_timeout_threshold

| Name | query_timeout_threshold |
| ----------- | ---- |
| Description | Maximum executing time of a query, in ms. |
| Type | Int32 |
| Default | 60000 |
| Effective | Restart required. |

- max_allowed_concurrent_queries

| Name | max_allowed_concurrent_queries |
| ----------- | ---- |
| Description | Maximum number of concurrently executing queries allowed. |
| Type | Int32 |
| Default | 1000 |
| Effective | Restart required. |

- query_thread_count

| Name | query_thread_count |
| ----------- | ---- |
| Description | How many threads can concurrently execute query statements. When <= 0, the CPU core count is used. |
| Type | Int32 |
| Default | 0 |
| Effective | Restart required. |

- degree_of_query_parallelism

| Name | degree_of_query_parallelism |
| ----------- | ---- |
| Description | How many pipeline drivers are created for one fragment instance. When <= 0, CPU core count / 2 is used. |
| Type | Int32 |
| Default | 0 |
| Effective | Restart required. |

- mode_map_size_threshold

| Name | mode_map_size_threshold |
| ----------- | ---- |
| Description | Threshold of the count-map size when calculating the MODE aggregation function. |
| Type | Int32 |
| Default | 10000 |
| Effective | Restart required. |

- batch_size

| Name | batch_size |
| ----------- | ---- |
| Description | The amount of data iterated per step on the server (the number of data rows, i.e., the number of distinct timestamps). |
| Type | Int32 |
| Default | 100000 |
| Effective | Restart required. |

- sort_buffer_size_in_bytes

| Name | sort_buffer_size_in_bytes |
| ----------- | ---- |
| Description | Memory for external sort in the sort operator; when the data size is smaller than sort_buffer_size_in_bytes, the sort operator uses in-memory sort. |
| Type | long |
| Default | 1048576 (before V2.0.6); 0 (since V2.0.6). If `sort_buffer_size_in_bytes <= 0`, the default value is used, where `default value = min(32MB, memory for query operators / query_thread_count / 2)`; if `sort_buffer_size_in_bytes > 0`, the specified value is used. |
| Effective | Hot reload |

- merge_threshold_of_explain_analyze

| Name | merge_threshold_of_explain_analyze |
| ----------- | ---- |
| Description | Threshold of operator count in the result set of EXPLAIN ANALYZE; if the number of operators in the result set exceeds this threshold, operators are merged. |
| Type | int |
| Default | 10 |
| Effective | Hot reload |

### 4.18 TTL Configuration

- ttl_check_interval

| Name | ttl_check_interval |
| ----------- | ---- |
| Description | Interval of the TTL check task in each database; the task inspects and selects files with a higher volume of expired data for compaction. The default is 2 hours. |
| Type | int |
| Default | 7200000 |
| Effective | Restart required. |

- max_expired_time

| Name | max_expired_time |
| ----------- | ---- |
| Description | Maximum expiring time of a device that has a TTL; the default is 1 month. If the data elapsed time of such a device (current timestamp minus the maximum data timestamp of the device in the file) exceeds this value, the file is cleaned by compaction. |
| Type | int |
| Default | 2592000000 |
| Effective | Restart required. |

- expired_data_ratio

| Name | expired_data_ratio |
| ----------- | ---- |
| Description | Expired-device ratio. If the ratio of expired devices in a file exceeds this value, the expired data of the file is cleaned by compaction. |
| Type | float |
| Default | 0.3 |
| Effective | Restart required. |
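An illustrative tuning of query admission and slow-query logging; the values are examples, not recommendations:

```properties
# Flag queries slower than 5 s, cap concurrency, keep the default timeout.
slow_query_threshold=5000
max_allowed_concurrent_queries=500
query_timeout_threshold=60000
```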
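As a worked conversion of the defaults above: 2 hours = 2 × 3600 × 1000 = 7200000 ms, and 30 days = 30 × 86400 × 1000 = 2592000000 ms:

```properties
# The defaults, written out in milliseconds.
ttl_check_interval=7200000
max_expired_time=2592000000
expired_data_ratio=0.3
```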
### 4.19 Storage Engine Configuration

- timestamp_precision

| Name | timestamp_precision |
| ----------- | ---- |
| Description | Timestamp precision: "ms", "us" or "ns". |
| Type | String |
| Default | ms |
| Effective | Modify before the first startup. |

- timestamp_precision_check_enabled

| Name | timestamp_precision_check_enabled |
| ----------- | ---- |
| Description | When the timestamp precision check is enabled, timestamps with more than 13 digits under ms precision, or more than 16 digits under us precision, are rejected on insertion. |
| Type | Boolean |
| Default | true |
| Effective | Modify before the first startup. |

- max_waiting_time_when_insert_blocked

| Name | max_waiting_time_when_insert_blocked |
| ----------- | ---- |
| Description | If the waiting time (in ms) of an insertion exceeds this value, an exception is thrown. 10000 by default. |
| Type | Int32 |
| Default | 10000 |
| Effective | Restart required. |

- handle_system_error

| Name | handle_system_error |
| ----------- | ---- |
| Description | What the system does when an unrecoverable error occurs. |
| Type | String |
| Default | CHANGE_TO_READ_ONLY |
| Effective | Restart required. |

- enable_timed_flush_seq_memtable

| Name | enable_timed_flush_seq_memtable |
| ----------- | ---- |
| Description | Whether to flush sequence TsFiles' memtables periodically. |
| Type | Boolean |
| Default | true |
| Effective | Hot reload |

- seq_memtable_flush_interval_in_ms

| Name | seq_memtable_flush_interval_in_ms |
| ----------- | ---- |
| Description | If a memtable's last update time is older than the current time minus this value, the memtable is flushed to disk. |
| Type | long |
| Default | 600000 |
| Effective | Hot reload |

- seq_memtable_flush_check_interval_in_ms

| Name | seq_memtable_flush_check_interval_in_ms |
| ----------- | ---- |
| Description | The interval at which to check whether sequence memtables need flushing. |
| Type | long |
| Default | 30000 |
| Effective | Hot reload |

- enable_timed_flush_unseq_memtable

| Name | enable_timed_flush_unseq_memtable |
| ----------- | ---- |
| Description | Whether to flush unsequence TsFiles' memtables periodically. |
| Type | Boolean |
| Default | true |
| Effective | Hot reload |

- unseq_memtable_flush_interval_in_ms

| Name | unseq_memtable_flush_interval_in_ms |
| ----------- | ---- |
| Description | If a memtable's last update time is older than the current time minus this value, the memtable is flushed to disk. |
| Type | long |
| Default | 600000 |
| Effective | Hot reload |

- unseq_memtable_flush_check_interval_in_ms

| Name | unseq_memtable_flush_check_interval_in_ms |
| ----------- | ---- |
| Description | The interval at which to check whether unsequence memtables need flushing. |
| Type | long |
| Default | 30000 |
| Effective | Hot reload |

- tvlist_sort_algorithm

| Name | tvlist_sort_algorithm |
| ----------- | ---- |
| Description | The sort algorithm used in the memtable's TVList. |
| Type | String |
| Default | TIM |
| Effective | Restart required. |

- avg_series_point_number_threshold

| Name | avg_series_point_number_threshold |
| ----------- | ---- |
| Description | When the average point count of time series in a memtable exceeds this value, the memtable is flushed to disk. |
| Type | int32 |
| Default | 100000 |
| Effective | Restart required. |

- flush_thread_count

| Name | flush_thread_count |
| ----------- | ---- |
| Description | How many threads can flush concurrently. When <= 0, the CPU core count is used. |
| Type | int32 |
| Default | 0 |
| Effective | Restart required. |

- enable_partial_insert

| Name | enable_partial_insert |
| ----------- | ---- |
| Description | In one insert (one device, one timestamp, multiple measurements), if partial insert is enabled, the failure of one measurement does not impact the other measurements. |
| Type | Boolean |
| Default | true |
| Effective | Restart required. |
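For illustration, the timed-flush defaults written out: idle sequence memtables are flushed after 10 minutes (600000 ms), checked every 30 seconds (30000 ms):

```properties
enable_timed_flush_seq_memtable=true
seq_memtable_flush_interval_in_ms=600000
seq_memtable_flush_check_interval_in_ms=30000
```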
- recovery_log_interval_in_ms

| Name | recovery_log_interval_in_ms |
| ----------- | ---- |
| Description | The interval at which the recovery progress of each vsg is logged when starting IoTDB. |
| Type | Int32 |
| Default | 5000 |
| Effective | Restart required. |

- 0.13_data_insert_adapt

| Name | 0.13_data_insert_adapt |
| ----------- | ---- |
| Description | If a v0.13 client is used to insert data, set this configuration to true. |
| Type | Boolean |
| Default | false |
| Effective | Restart required. |

- enable_tsfile_validation

| Name | enable_tsfile_validation |
| ----------- | ---- |
| Description | Verify that TsFiles generated by flush, load, and compaction are correct. |
| Type | boolean |
| Default | false |
| Effective | Hot reload |

- tier_ttl_in_ms

| Name | tier_ttl_in_ms |
| ----------- | ---- |
| Description | Default TTL of a tier. When the survival time of data exceeds this threshold, it is migrated to the next tier. |
| Type | long |
| Default | -1 |
| Effective | Restart required. |

- max_object_file_size_in_byte

| Name | max_object_file_size_in_byte |
| ----------- | ---- |
| Description | Maximum size of a single object file (supported since V2.0.8-beta). |
| Type | long |
| Default | 4294967296 (4 GB in bytes) |
| Effective | Hot reload |

- restrict_object_limit

| Name | restrict_object_limit |
| ----------- | ---- |
| Description | Whether to restrict naming for the `OBJECT` type (supported since V2.0.8-beta). When `false` (default), there are no special restrictions on table names, column names, or device identifiers for `OBJECT` columns. When set to `true` and the table contains `OBJECT` columns, the following restrictions apply:<br>1. Naming rules: values in TAG columns, table names, and field names must not use `.` or `..`; the character sequences `./` and `.\` are prohibited, otherwise metadata creation fails; names containing filesystem-unsupported characters cause write errors.<br>2. Case sensitivity: if the underlying filesystem is case-insensitive, device identifiers such as `'d1'` and `'D1'` are treated as identical; creating similar identifiers may overwrite `OBJECT` data files, leading to data corruption.<br>3. Storage path: the actual storage path has the format `${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin`. |
| Type | boolean |
| Default | false |
| Effective | Modify before the first startup. |
### 4.20 Compaction Configurations

- enable_seq_space_compaction

| Name | enable_seq_space_compaction |
| ----------- | ---- |
| Description | Sequence space compaction: compact only sequence files. |
| Type | Boolean |
| Default | true |
| Effective | Hot reload |

- enable_unseq_space_compaction

| Name | enable_unseq_space_compaction |
| ----------- | ---- |
| Description | Unsequence space compaction: compact only unsequence files. |
| Type | Boolean |
| Default | true |
| Effective | Hot reload |

- enable_cross_space_compaction

| Name | enable_cross_space_compaction |
| ----------- | ---- |
| Description | Cross space compaction: merge unsequence files into the overlapping sequence files. |
| Type | Boolean |
| Default | true |
| Effective | Hot reload |

- enable_auto_repair_compaction

| Name | enable_auto_repair_compaction |
| ----------- | ---- |
| Description | Automatically repair unsorted files by compaction. |
| Type | Boolean |
| Default | true |
| Effective | Hot reload |

- cross_selector

| Name | cross_selector |
| ----------- | ---- |
| Description | Selector of cross space compaction tasks. |
| Type | String |
| Default | rewrite |
| Effective | Restart required. |

- cross_performer

| Name | cross_performer |
| ----------- | ---- |
| Description | Performer of cross space compaction tasks. Options: read_point, fast. |
| Type | String |
| Default | fast |
| Effective | Hot reload |

- inner_seq_selector

| Name | inner_seq_selector |
| ----------- | ---- |
| Description | Selector of inner sequence space compaction tasks. Options: size_tiered_single_target, size_tiered_multi_target. |
| Type | String |
| Default | size_tiered_multi_target |
| Effective | Hot reload |

- inner_seq_performer

| Name | inner_seq_performer |
| ----------- | ---- |
| Description | Performer of inner sequence space compaction tasks. Options: read_chunk, fast. |
| Type | String |
| Default | read_chunk |
| Effective | Hot reload |

- inner_unseq_selector

| Name | inner_unseq_selector |
| ----------- | ---- |
| Description | Selector of inner unsequence space compaction tasks. Options: size_tiered_single_target, size_tiered_multi_target. |
| Type | String |
| Default | size_tiered_multi_target |
| Effective | Hot reload |

- inner_unseq_performer

| Name | inner_unseq_performer |
| ----------- | ---- |
| Description | Performer of inner unsequence space compaction tasks. Options: read_point, fast. |
| Type | String |
| Default | fast |
| Effective | Hot reload |

- compaction_priority

| Name | compaction_priority |
| ----------- | ---- |
| Description | Priority of compaction execution. INNER_CROSS: prioritize inner space compaction, reducing the file count first. CROSS_INNER: prioritize cross space compaction, eliminating unsequence files first. BALANCE: alternate between the two compaction types. |
| Type | String |
| Default | INNER_CROSS |
| Effective | Restart required. |

- candidate_compaction_task_queue_size

| Name | candidate_compaction_task_queue_size |
| ----------- | ---- |
| Description | Size of the candidate compaction task queue. |
| Type | int32 |
| Default | 50 |
| Effective | Restart required. |

- target_compaction_file_size

| Name | target_compaction_file_size |
| ----------- | ---- |
| Description | This parameter is used in two places: the target TsFile size of inner space compaction, and the candidate size of sequence TsFiles in cross space compaction, which must be smaller than target_compaction_file_size * 1.5. In most cases the target file size of cross compaction does not exceed this threshold, and when it does, not by much. |
| Type | Int64 |
| Default | 2147483648 |
| Effective | Hot reload |

- inner_compaction_total_file_size_threshold

| Name | inner_compaction_total_file_size_threshold |
| ----------- | ---- |
| Description | Total file size limit in inner space compaction. |
| Type | int64 |
| Default | 10737418240 |
| Effective | Hot reload |

- inner_compaction_total_file_num_threshold

| Name | inner_compaction_total_file_num_threshold |
| ----------- | ---- |
| Description | Total file count limit in inner space compaction. |
| Type | int32 |
| Default | 100 |
| Effective | Hot reload |
-
-- max_level_gap_in_inner_compaction
-
-| Name | max_level_gap_in_inner_compaction |
-| ----------- | ----------- |
-| Description | The max level gap in inner compaction selection |
-| Type | int32 |
-| Default | 2 |
-| Effective | Hot reload |
-
-- target_chunk_size
-
-| Name | target_chunk_size |
-| ----------- | ----------- |
-| Description | The target chunk size in flushing and compaction. If the size of a timeseries in memtable exceeds this, the data will be flushed to multiple chunks. |
-| Type | Int64 |
-| Default | 1600000 |
-| Effective | Restart required. |
-
-- target_chunk_point_num
-
-| Name | target_chunk_point_num |
-| ----------- | ----------- |
-| Description | The target point num in one chunk in flushing and compaction. If the point number of a timeseries in memtable exceeds this, the data will be flushed to multiple chunks. |
-| Type | Int64 |
-| Default | 100000 |
-| Effective | Restart required. |
-
-- chunk_size_lower_bound_in_compaction
-
-| Name | chunk_size_lower_bound_in_compaction |
-| ----------- | ----------- |
-| Description | If the chunk size is lower than this threshold, it will be deserialized into points |
-| Type | Int64 |
-| Default | 128 |
-| Effective | Restart required. |
-
-- chunk_point_num_lower_bound_in_compaction
-
-| Name | chunk_point_num_lower_bound_in_compaction |
-| ----------- | ----------- |
-| Description | If the chunk point num is lower than this threshold, it will be deserialized into points |
-| Type | Int64 |
-| Default | 100 |
-| Effective | Restart required. |
-
-- inner_compaction_candidate_file_num
-
-| Name | inner_compaction_candidate_file_num |
-| ----------- | ----------- |
-| Description | The file num requirement when selecting inner space compaction candidate files |
-| Type | int32 |
-| Default | 30 |
-| Effective | Hot reload |
-
-- max_cross_compaction_candidate_file_num
-
-| Name | max_cross_compaction_candidate_file_num |
-| ----------- | ----------- |
-| Description | The max file num when selecting cross space compaction candidate files |
-| Type | int32 |
-| Default | 500 |
-| Effective | Hot reload |
-
-- max_cross_compaction_candidate_file_size
-
-| Name | max_cross_compaction_candidate_file_size |
-| ----------- | ----------- |
-| Description | The max total size when selecting cross space compaction candidate files |
-| Type | Int64 |
-| Default | 5368709120 |
-| Effective | Hot reload |
-
-- min_cross_compaction_unseq_file_level
-
-| Name | min_cross_compaction_unseq_file_level |
-| ----------- | ----------- |
-| Description | The min inner compaction level of an unsequence file which can be selected as a candidate |
-| Type | int32 |
-| Default | 1 |
-| Effective | Hot reload |
-
-- compaction_thread_count
-
-| Name | compaction_thread_count |
-| ----------- | ----------- |
-| Description | How many threads will be set up to perform compaction, 10 by default. |
-| Type | int32 |
-| Default | 10 |
-| Effective | Hot reload |
-
-- compaction_max_aligned_series_num_in_one_batch
-
-| Name | compaction_max_aligned_series_num_in_one_batch |
-| ----------- | ----------- |
-| Description | How many aligned series will be compacted in one batch in aligned series compaction, 10 by default. |
-| Type | int32 |
-| Default | 10 |
-| Effective | Hot reload |
-
-- compaction_schedule_interval_in_ms
-
-| Name | compaction_schedule_interval_in_ms |
-| ----------- | ----------- |
-| Description | The interval of compaction task scheduling |
-| Type | Int64 |
-| Default | 60000 |
-| Effective | Restart required. |
-
-- compaction_write_throughput_mb_per_sec
-
-| Name | compaction_write_throughput_mb_per_sec |
-| ----------- | ----------- |
-| Description | The limit of write throughput merge can reach per second |
-| Type | int32 |
-| Default | 16 |
-| Effective | Restart required. |
-
-- compaction_read_throughput_mb_per_sec
-
-| Name | compaction_read_throughput_mb_per_sec |
-| ----------- | ----------- |
-| Description | The limit of read throughput merge can reach per second |
-| Type | int32 |
-| Default | 0 |
-| Effective | Hot reload |
-
-- compaction_read_operation_per_sec
-
-| Name | compaction_read_operation_per_sec |
-| ----------- | ----------- |
-| Description | The limit of read operations merge can reach per second |
-| Type | int32 |
-| Default | 0 |
-| Effective | Hot reload |
-
-- sub_compaction_thread_count
-
-| Name | sub_compaction_thread_count |
-| ----------- | ----------- |
-| Description | The number of sub compaction threads to be set up to perform compaction. |
-| Type | int32 |
-| Default | 4 |
-| Effective | Hot reload |
-
-- inner_compaction_task_selection_disk_redundancy
-
-| Name | inner_compaction_task_selection_disk_redundancy |
-| ----------- | ----------- |
-| Description | Redundancy value of disk availability, only used for inner compaction. |
-| Type | double |
-| Default | 0.05 |
-| Effective | Hot reload |
-
-- inner_compaction_task_selection_mods_file_threshold
-
-| Name | inner_compaction_task_selection_mods_file_threshold |
-| ----------- | ----------- |
-| Description | Mods file size threshold, only used for inner compaction. |
-| Type | long |
-| Default | 131072 |
-| Effective | Hot reload |
-
-- compaction_schedule_thread_num
-
-| Name | compaction_schedule_thread_num |
-| ----------- | ----------- |
-| Description | The number of threads to be set up to select compaction tasks. |
-| Type | int32 |
-| Default | 4 |
-| Effective | Hot reload |
-
-### 4.21 Write Ahead Log Configuration
-
-- wal_mode
-
-| Name | wal_mode |
-| ----------- | ----------- |
-| Description | The details of the three modes are as follows. DISABLE: the system will disable wal. SYNC: the system will submit wal synchronously; a write request will not return until its wal is fsynced to the disk successfully. ASYNC: the system will submit wal asynchronously; a write request will return immediately regardless of whether its wal has been fsynced to the disk successfully. |
-| Type | String |
-| Default | ASYNC |
-| Effective | Restart required. |
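The trade-off between the WAL modes is durability versus write latency. A sketch with the documented defaults (the fsync delay keys are described in the parameters that follow):

```properties
# SYNC trades write latency for durability; ASYNC (the default) does the opposite
wal_mode=ASYNC
# How long a WAL flush waits before calling fsync in async mode (ms)
wal_async_mode_fsync_delay_in_ms=1000
```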
-
-- max_wal_nodes_num
-
-| Name | max_wal_nodes_num |
-| ----------- | ----------- |
-| Description | Each wal node corresponds to one wal directory. The default value 0 means the number is determined by the system, in the range of [data region num / 2, data region num]. |
-| Type | int32 |
-| Default | 0 |
-| Effective | Restart required. |
-
-- wal_async_mode_fsync_delay_in_ms
-
-| Name | wal_async_mode_fsync_delay_in_ms |
-| ----------- | ----------- |
-| Description | Duration a wal flush operation will wait before calling fsync in the async mode |
-| Type | int32 |
-| Default | 1000 |
-| Effective | Hot reload |
-
-- wal_sync_mode_fsync_delay_in_ms
-
-| Name | wal_sync_mode_fsync_delay_in_ms |
-| ----------- | ----------- |
-| Description | Duration a wal flush operation will wait before calling fsync in the sync mode |
-| Type | int32 |
-| Default | 3 |
-| Effective | Hot reload |
-
-- wal_buffer_size_in_byte
-
-| Name | wal_buffer_size_in_byte |
-| ----------- | ----------- |
-| Description | Buffer size of each wal node |
-| Type | int32 |
-| Default | 33554432 |
-| Effective | Restart required. |
-
-- wal_buffer_queue_capacity
-
-| Name | wal_buffer_queue_capacity |
-| ----------- | ----------- |
-| Description | Buffer capacity of each wal queue |
-| Type | int32 |
-| Default | 500 |
-| Effective | Restart required. |
-
-- wal_file_size_threshold_in_byte
-
-| Name | wal_file_size_threshold_in_byte |
-| ----------- | ----------- |
-| Description | Size threshold of each wal file |
-| Type | int32 |
-| Default | 31457280 |
-| Effective | Hot reload |
-
-- wal_min_effective_info_ratio
-
-| Name | wal_min_effective_info_ratio |
-| ----------- | ----------- |
-| Description | Minimum ratio of effective information in wal files |
-| Type | double |
-| Default | 0.1 |
-| Effective | Hot reload |
-
-- wal_memtable_snapshot_threshold_in_byte
-
-| Name | wal_memtable_snapshot_threshold_in_byte |
-| ----------- | ----------- |
-| Description | MemTable size threshold for triggering a MemTable snapshot in wal |
-| Type | int64 |
-| Default | 8388608 |
-| Effective | Hot reload |
-
-- max_wal_memtable_snapshot_num
-
-| Name | max_wal_memtable_snapshot_num |
-| ----------- | ----------- |
-| Description | MemTable's max snapshot number in wal |
-| Type | int32 |
-| Default | 1 |
-| Effective | Hot reload |
-
-- delete_wal_files_period_in_ms
-
-| Name | delete_wal_files_period_in_ms |
-| ----------- | ----------- |
-| Description | The period at which outdated wal files are periodically deleted |
-| Type | int64 |
-| Default | 20000 |
-| Effective | Hot reload |
-
-- wal_throttle_threshold_in_byte
-
-| Name | wal_throttle_threshold_in_byte |
-| ----------- | ----------- |
-| Description | The minimum size of wal files when throttling down in IoTConsensus |
-| Type | long |
-| Default | 53687091200 |
-| Effective | Hot reload |
-
-- iot_consensus_cache_window_time_in_ms
-
-| Name | iot_consensus_cache_window_time_in_ms |
-| ----------- | ----------- |
-| Description | Maximum wait time of write cache in IoTConsensus |
-| Type | long |
-| Default | -1 |
-| Effective | Hot reload |
-
-- enable_wal_compression
-
-| Name | enable_wal_compression |
-| ----------- | ----------- |
-| Description | Enable Write Ahead Log compression. |
-| Type | boolean |
-| Default | true |
-| Effective | Hot reload |
-
-### 4.22 **IoTConsensus Configuration**
-
-- data_region_iot_max_log_entries_num_per_batch
-
-| Name | data_region_iot_max_log_entries_num_per_batch |
-| ----------- | ----------- |
-| Description | The maximum log entries num in an IoTConsensus batch |
-| Type | int32 |
-| Default | 1024 |
-| Effective | Restart required. |
-
-- data_region_iot_max_size_per_batch
-
-| Name | data_region_iot_max_size_per_batch |
-| ----------- | ----------- |
-| Description | The maximum size of an IoTConsensus batch |
-| Type | int32 |
-| Default | 16777216 |
-| Effective | Restart required. |
-
-- data_region_iot_max_pending_batches_num
-
-| Name | data_region_iot_max_pending_batches_num |
-| ----------- | ----------- |
-| Description | The maximum pending batches num in IoTConsensus |
-| Type | int32 |
-| Default | 5 |
-| Effective | Restart required. |
-
-- data_region_iot_max_memory_ratio_for_queue
-
-| Name | data_region_iot_max_memory_ratio_for_queue |
-| ----------- | ----------- |
-| Description | The maximum memory ratio for the queue in IoTConsensus |
-| Type | double |
-| Default | 0.6 |
-| Effective | Restart required. |
-
-- region_migration_speed_limit_bytes_per_second
-
-| Name | region_migration_speed_limit_bytes_per_second |
-| ----------- | ----------- |
-| Description | The maximum transit size in bytes per second for region migration |
-| Type | long |
-| Default | 33554432 |
-| Effective | Restart required. |
-
-### 4.23 TsFile Configurations
-
-- group_size_in_byte
-
-| Name | group_size_in_byte |
-| ----------- | ----------- |
-| Description | The maximum number of bytes written to disk each time the data in memory is written to disk |
-| Type | int32 |
-| Default | 134217728 |
-| Effective | Hot reload |
-
-- page_size_in_byte
-
-| Name | page_size_in_byte |
-| ----------- | ----------- |
-| Description | The memory size for each series writer to pack a page, default value is 64KB |
-| Type | int32 |
-| Default | 65536 |
-| Effective | Hot reload |
-
-- max_number_of_points_in_page
-
-| Name | max_number_of_points_in_page |
-| ----------- | ----------- |
-| Description | The maximum number of data points in a page |
-| Type | int32 |
-| Default | 10000 |
-| Effective | Hot reload |
-
-- pattern_matching_threshold
-
-| Name | pattern_matching_threshold |
-| ----------- | ----------- |
-| Description | The threshold for pattern matching in regex |
-| Type | int32 |
-| Default | 1000000 |
-| Effective | Hot reload |
-
-- float_precision
-
-| Name | float_precision |
-| ----------- | ----------- |
-| Description | Floating-point precision of query results. Only effective for RLE and TS_2DIFF encodings. Due to the limitation of machine precision, some values may not be interpreted strictly. |
-| Type | int32 |
-| Default | 2 |
-| Effective | Hot reload |
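The three write-path sizes above nest: a memtable flush is capped by `group_size_in_byte`, each series within it is packed into pages no larger than `page_size_in_byte`, and each page holds at most `max_number_of_points_in_page` points. A sketch with the documented defaults:

```properties
# Flush granularity: 128 MiB flush -> 64 KiB pages -> <= 10000 points per page
group_size_in_byte=134217728
page_size_in_byte=65536
max_number_of_points_in_page=10000
```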
-
-- value_encoder
-
-| Name | value_encoder |
-| ----------- | ----------- |
-| Description | Encoder of value series. Default value is PLAIN. For int and long data types, TS_2DIFF, RLE (run-length encoding), GORILLA and ZIGZAG are also supported. |
-| Type | String |
-| Default | PLAIN |
-| Effective | Hot reload |
-
-- compressor
-
-| Name | compressor |
-| ----------- | ----------- |
-| Description | Data compression method, also used as the default compressor of the time column in aligned timeseries. Supports UNCOMPRESSED, SNAPPY, ZSTD, LZMA2 or LZ4. |
-| Type | String |
-| Default | LZ4 |
-| Effective | Hot reload |
-
-- encrypt_flag
-
-| Name | encrypt_flag |
-| ----------- | ----------- |
-| Description | Enable data encryption |
-| Type | Boolean |
-| Default | false |
-| Effective | Restart required. |
-
-- encrypt_type
-
-| Name | encrypt_type |
-| ----------- | ----------- |
-| Description | The method of data encryption |
-| Type | String |
-| Default | org.apache.tsfile.encrypt.UNENCRYPTED |
-| Effective | Restart required. |
-
-- encrypt_key_path
-
-| Name | encrypt_key_path |
-| ----------- | ----------- |
-| Description | The path of the key for data encryption |
-| Type | String |
-| Default | None |
-| Effective | Restart required. |
-
-### 4.24 Authorization Configuration
-
-- authorizer_provider_class
-
-| Name | authorizer_provider_class |
-| ----------- | ----------- |
-| Description | which class to serve for authorization. |
-| Type | String |
-| Default | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer |
-| Effective | Restart required. |
-| Other options | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer |
-
-- openID_url
-
-| Name | openID_url |
-| ----------- | ----------- |
-| Description | The url of the openID server. If OpenIdAuthorizer is enabled, then openID_url must be set. |
-| Type | String (a http link) |
-| Default | None |
-| Effective | Restart required. |
-
-- iotdb_server_encrypt_decrypt_provider
-
-| Name | iotdb_server_encrypt_decrypt_provider |
-| ----------- | ----------- |
-| Description | encryption provider class |
-| Type | String |
-| Default | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt |
-| Effective | Modify before the first startup. |
-
-- iotdb_server_encrypt_decrypt_provider_parameter
-
-| Name | iotdb_server_encrypt_decrypt_provider_parameter |
-| ----------- | ----------- |
-| Description | encryption provider class parameter |
-| Type | String |
-| Default | None |
-| Effective | Modify before the first startup. |
-
-- author_cache_size
-
-| Name | author_cache_size |
-| ----------- | ----------- |
-| Description | Cache size of user and role |
-| Type | int32 |
-| Default | 1000 |
-| Effective | Restart required. |
-
-- author_cache_expire_time
-
-| Name | author_cache_expire_time |
-| ----------- | ----------- |
-| Description | Cache expire time of user and role |
-| Type | int32 |
-| Default | 30 |
-| Effective | Restart required. |
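To switch to OpenID-based authorization, the two parameters above are set together (restart required). A sketch; the URL below is a placeholder, not a real endpoint:

```properties
# Replace the default LocalFileAuthorizer with the OpenID authorizer
authorizer_provider_class=org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer
# Mandatory when OpenIdAuthorizer is enabled (placeholder address)
openID_url=http://openid.example.com:8080/auth
```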
-
-### 4.25 UDF Configuration
-
-- udf_initial_byte_array_length_for_memory_control
-
-| Name | udf_initial_byte_array_length_for_memory_control |
-| ----------- | ----------- |
-| Description | Used to estimate the memory usage of text fields in a UDF query. It is recommended to set this value to be slightly larger than the average length of all text records. |
-| Type | int32 |
-| Default | 48 |
-| Effective | Restart required. |
-
-- udf_memory_budget_in_mb
-
-| Name | udf_memory_budget_in_mb |
-| ----------- | ----------- |
-| Description | How much memory may be used in ONE UDF query (in MB). The upper limit is 20% of the memory allocated for reads. |
-| Type | Float |
-| Default | 30.0 |
-| Effective | Restart required. |
-
-- udf_reader_transformer_collector_memory_proportion
-
-| Name | udf_reader_transformer_collector_memory_proportion |
-| ----------- | ----------- |
-| Description | UDF memory allocation ratio. The parameter form is a:b:c, where a, b, and c are integers. |
-| Type | String |
-| Default | 1:1:1 |
-| Effective | Restart required. |
-
-- udf_lib_dir
-
-| Name | udf_lib_dir |
-| ----------- | ----------- |
-| Description | the udf lib directory |
-| Type | String |
-| Default | ext/udf(Windows:ext\\udf) |
-| Effective | Restart required. |
-
-### 4.26 Trigger Configuration
-
-- trigger_lib_dir
-
-| Name | trigger_lib_dir |
-| ----------- | ----------- |
-| Description | the trigger lib directory |
-| Type | String |
-| Default | ext/trigger |
-| Effective | Restart required. |
-
-- stateful_trigger_retry_num_when_not_found
-
-| Name | stateful_trigger_retry_num_when_not_found |
-| ----------- | ----------- |
-| Description | How many times we will retry to find an instance of a stateful trigger on DataNodes |
-| Type | Int32 |
-| Default | 3 |
-| Effective | Restart required. |
-
-### 4.27 **Select-Into Configuration**
-
-- into_operation_buffer_size_in_byte
-
-| Name | into_operation_buffer_size_in_byte |
-| ----------- | ----------- |
-| Description | The maximum memory occupied by the data to be written when executing select-into statements. |
-| Type | long |
-| Default | 104857600 |
-| Effective | Hot reload |
-
-- select_into_insert_tablet_plan_row_limit
-
-| Name | select_into_insert_tablet_plan_row_limit |
-| ----------- | ----------- |
-| Description | The maximum number of rows that can be processed in an insert-tablet-plan when executing select-into statements. |
-| Type | int32 |
-| Default | 10000 |
-| Effective | Hot reload |
-
-- into_operation_execution_thread_count
-
-| Name | into_operation_execution_thread_count |
-| ----------- | ----------- |
-| Description | The number of threads in the thread pool that execute insert-tablet tasks |
-| Type | int32 |
-| Default | 2 |
-| Effective | Restart required. |
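Reading the UDF memory parameters together: one UDF query gets at most `udf_memory_budget_in_mb`, and that budget is split between the reader, transformer, and collector stages according to the a:b:c proportion. A sketch with the documented defaults:

```properties
# Per-query UDF memory budget (MB), split 1:1:1 across reader:transformer:collector
udf_memory_budget_in_mb=30.0
udf_reader_transformer_collector_memory_proportion=1:1:1
```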
-
-### 4.28 Continuous Query Configuration
-
-- continuous_query_submit_thread_count
-
-| Name | continuous_query_submit_thread_count |
-| ----------- | ----------- |
-| Description | The number of threads in the scheduled thread pool that submit continuous query tasks periodically |
-| Type | int32 |
-| Default | 2 |
-| Effective | Restart required. |
-
-- continuous_query_min_every_interval_in_ms
-
-| Name | continuous_query_min_every_interval_in_ms |
-| ----------- | ----------- |
-| Description | The minimum value of the continuous query execution time interval |
-| Type | long (duration) |
-| Default | 1000 |
-| Effective | Restart required. |
-
-### 4.29 Pipe Configuration
-
-- pipe_lib_dir
-
-| Name | pipe_lib_dir |
-| ----------- | ----------- |
-| Description | the pipe lib directory. |
-| Type | string |
-| Default | ext/pipe |
-| Effective | Modification not supported |
-
-- pipe_subtask_executor_max_thread_num
-
-| Name | pipe_subtask_executor_max_thread_num |
-| ----------- | ----------- |
-| Description | The maximum number of threads that can be used to execute the pipe subtasks in PipeSubtaskExecutor. The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)). |
-| Type | int |
-| Default | 5 |
-| Effective | Restart required. |
-
-- pipe_sink_timeout_ms
-
-| Name | pipe_sink_timeout_ms |
-| ----------- | ----------- |
-| Description | The connection timeout (in milliseconds) for the thrift client. |
-| Type | int |
-| Default | 900000 |
-| Effective | Restart required. |
-
-- pipe_sink_selector_number
-
-| Name | pipe_sink_selector_number |
-| ----------- | ----------- |
-| Description | The maximum number of selectors that can be used in the sink. It is recommended to set this value to be less than or equal to pipe_sink_max_client_number. |
-| Type | int |
-| Default | 4 |
-| Effective | Restart required. |
-
-- pipe_sink_max_client_number
-
-| Name | pipe_sink_max_client_number |
-| ----------- | ----------- |
-| Description | The maximum number of clients that can be used in the sink. |
-| Type | int |
-| Default | 16 |
-| Effective | Restart required. |
-
-- pipe_air_gap_receiver_enabled
-
-| Name | pipe_air_gap_receiver_enabled |
-| ----------- | ----------- |
-| Description | Whether to enable receiving pipe data through air gap. The receiver can only return 0 or 1 in TCP mode to indicate whether the data is received successfully. |
-| Type | Boolean |
-| Default | false |
-| Effective | Restart required. |
-
-- pipe_air_gap_receiver_port
-
-| Name | pipe_air_gap_receiver_port |
-| ----------- | ----------- |
-| Description | The port for the server to receive pipe data through air gap. |
-| Type | int |
-| Default | 9780 |
-| Effective | Restart required. |
-
-- pipe_all_sinks_rate_limit_bytes_per_second
-
-| Name | pipe_all_sinks_rate_limit_bytes_per_second |
-| ----------- | ----------- |
-| Description | The total bytes that all pipe sinks can transfer per second. A value less than or equal to 0 means no limit. The default value is -1 (no limit). |
-| Type | double |
-| Default | -1 |
-| Effective | Hot reload |
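For instance, to cap total pipe sink traffic, only the rate-limit entry needs to change; since it is hot-reloadable it can also be applied at runtime via `set configuration`. The value below is illustrative (about 50 MB/s), not a recommendation:

```properties
# Cap total transfer of all pipe sinks; -1 (default) means unlimited
pipe_all_sinks_rate_limit_bytes_per_second=52428800
pipe_sink_max_client_number=16
```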
-
-### 4.30 RatisConsensus Configuration
-
-- config_node_ratis_log_appender_buffer_size_max
-
-| Name | config_node_ratis_log_appender_buffer_size_max |
-| ----------- | ----------- |
-| Description | max payload size for a single log-sync-RPC from leader to follower of ConfigNode (in bytes, by default 16MB) |
-| Type | int32 |
-| Default | 16777216 |
-| Effective | Restart required. |
-
-- schema_region_ratis_log_appender_buffer_size_max
-
-| Name | schema_region_ratis_log_appender_buffer_size_max |
-| ----------- | ----------- |
-| Description | max payload size for a single log-sync-RPC from leader to follower of SchemaRegion (in bytes, by default 16MB) |
-| Type | int32 |
-| Default | 16777216 |
-| Effective | Restart required. |
-
-- data_region_ratis_log_appender_buffer_size_max
-
-| Name | data_region_ratis_log_appender_buffer_size_max |
-| ----------- | ----------- |
-| Description | max payload size for a single log-sync-RPC from leader to follower of DataRegion (in bytes, by default 16MB) |
-| Type | int32 |
-| Default | 16777216 |
-| Effective | Restart required. |
-
-- config_node_ratis_snapshot_trigger_threshold
-
-| Name | config_node_ratis_snapshot_trigger_threshold |
-| ----------- | ----------- |
-| Description | max number of logs to trigger a snapshot of ConfigNode |
-| Type | int32 |
-| Default | 400000 |
-| Effective | Restart required. |
-
-- schema_region_ratis_snapshot_trigger_threshold
-
-| Name | schema_region_ratis_snapshot_trigger_threshold |
-| ----------- | ----------- |
-| Description | max number of logs to trigger a snapshot of SchemaRegion |
-| Type | int32 |
-| Default | 400000 |
-| Effective | Restart required. |
-
-- data_region_ratis_snapshot_trigger_threshold
-
-| Name | data_region_ratis_snapshot_trigger_threshold |
-| ----------- | ----------- |
-| Description | max number of logs to trigger a snapshot of DataRegion |
-| Type | int32 |
-| Default | 400000 |
-| Effective | Restart required. |
-
-- config_node_ratis_log_unsafe_flush_enable
-
-| Name | config_node_ratis_log_unsafe_flush_enable |
-| ----------- | ----------- |
-| Description | Whether ConfigNode is allowed to flush Raft logs asynchronously |
-| Type | boolean |
-| Default | false |
-| Effective | Restart required. |
-
-- schema_region_ratis_log_unsafe_flush_enable
-
-| Name | schema_region_ratis_log_unsafe_flush_enable |
-| ----------- | ----------- |
-| Description | Whether SchemaRegion is allowed to flush Raft logs asynchronously |
-| Type | boolean |
-| Default | false |
-| Effective | Restart required. |
-
-- data_region_ratis_log_unsafe_flush_enable
-
-| Name | data_region_ratis_log_unsafe_flush_enable |
-| ----------- | ----------- |
-| Description | Whether DataRegion is allowed to flush Raft logs asynchronously |
-| Type | boolean |
-| Default | false |
-| Effective | Restart required. |
-
-- config_node_ratis_log_segment_size_max_in_byte
-
-| Name | config_node_ratis_log_segment_size_max_in_byte |
-| ----------- | ----------- |
-| Description | max capacity of a RaftLog segment file of confignode (in bytes, by default 24MB) |
-| Type | int32 |
-| Default | 25165824 |
-| Effective | Restart required. |
-
-- schema_region_ratis_log_segment_size_max_in_byte
-
-| Name | schema_region_ratis_log_segment_size_max_in_byte |
-| ----------- | ----------- |
-| Description | max capacity of a RaftLog segment file of schemaregion (in bytes, by default 24MB) |
-| Type | int32 |
-| Default | 25165824 |
-| Effective | Restart required. |
-
-- data_region_ratis_log_segment_size_max_in_byte
-
-| Name | data_region_ratis_log_segment_size_max_in_byte |
-| ----------- | ----------- |
-| Description | max capacity of a RaftLog segment file of dataregion (in bytes, by default 24MB) |
-| Type | int32 |
-| Default | 25165824 |
-| Effective | Restart required. |
-
-- config_node_simple_consensus_log_segment_size_max_in_byte
-
-| Name | config_node_simple_consensus_log_segment_size_max_in_byte |
-| ----------- | ----------- |
-| Description | max capacity of a simple log segment file of confignode (in bytes, by default 24MB) |
-| Type | int32 |
-| Default | 25165824 |
-| Effective | Restart required. |
-
-- config_node_ratis_grpc_flow_control_window
-
-| Name | config_node_ratis_grpc_flow_control_window |
-| ----------- | ----------- |
-| Description | confignode flow control window for ratis grpc log appender |
-| Type | int32 |
-| Default | 4194304 |
-| Effective | Restart required. |
-
-- schema_region_ratis_grpc_flow_control_window
-
-| Name | schema_region_ratis_grpc_flow_control_window |
-| ----------- | ----------- |
-| Description | schema region flow control window for ratis grpc log appender |
-| Type | int32 |
-| Default | 4194304 |
-| Effective | Restart required. |
-
-- data_region_ratis_grpc_flow_control_window
-
-| Name | data_region_ratis_grpc_flow_control_window |
-| ----------- | ----------- |
-| Description | data region flow control window for ratis grpc log appender |
-| Type | int32 |
-| Default | 4194304 |
-| Effective | Restart required. |
-
-- config_node_ratis_grpc_leader_outstanding_appends_max
-
-| Name | config_node_ratis_grpc_leader_outstanding_appends_max |
-| ----------- | ----------- |
-| Description | config node grpc pipeline concurrency threshold |
-| Type | int32 |
-| Default | 128 |
-| Effective | Restart required. |
-
-- schema_region_ratis_grpc_leader_outstanding_appends_max
-
-| Name | schema_region_ratis_grpc_leader_outstanding_appends_max |
-| ----------- | ----------- |
-| Description | schema region grpc pipeline concurrency threshold |
-| Type | int32 |
-| Default | 128 |
-| Effective | Restart required. |
-
-- data_region_ratis_grpc_leader_outstanding_appends_max
-
-| Name | data_region_ratis_grpc_leader_outstanding_appends_max |
-| ----------- | ----------- |
-| Description | data region grpc pipeline concurrency threshold |
-| Type | int32 |
-| Default | 128 |
-| Effective | Restart required. |
-
-- config_node_ratis_log_force_sync_num
-
-| Name | config_node_ratis_log_force_sync_num |
-| ----------- | ----------- |
-| Description | config node fsync threshold |
-| Type | int32 |
-| Default | 128 |
-| Effective | Restart required. |
-
-- schema_region_ratis_log_force_sync_num
-
-| Name | schema_region_ratis_log_force_sync_num |
-| ----------- | ----------- |
-| Description | schema region fsync threshold |
-| Type | int32 |
-| Default | 128 |
-| Effective | Restart required. |
-
-- data_region_ratis_log_force_sync_num
-
-| Name | data_region_ratis_log_force_sync_num |
-| ----------- | ----------- |
-| Description | data region fsync threshold |
-| Type | int32 |
-| Default | 128 |
-| Effective | Restart required. |
-
-- config_node_ratis_rpc_leader_election_timeout_min_ms
-
-| Name | config_node_ratis_rpc_leader_election_timeout_min_ms |
-| ----------- | ----------- |
-| Description | confignode leader min election timeout |
-| Type | int32 |
-| Default | 2000 (ms) |
-| Effective | Restart required. |
-
-- schema_region_ratis_rpc_leader_election_timeout_min_ms
-
-| Name | schema_region_ratis_rpc_leader_election_timeout_min_ms |
-| ----------- | ----------- |
-| Description | schema region leader min election timeout |
-| Type | int32 |
-| Default | 2000 (ms) |
-| Effective | Restart required. |
-
-- data_region_ratis_rpc_leader_election_timeout_min_ms
-
-| Name | data_region_ratis_rpc_leader_election_timeout_min_ms |
-| ----------- | ----------- |
-| Description | data region leader min election timeout |
-| Type | int32 |
-| Default | 2000 (ms) |
-| Effective | Restart required. |
-
-- config_node_ratis_rpc_leader_election_timeout_max_ms
-
-| Name | config_node_ratis_rpc_leader_election_timeout_max_ms |
-| ----------- | ----------- |
-| Description | confignode leader max election timeout |
-| Type | int32 |
-| Default | 4000 (ms) |
-| Effective | Restart required. |
-
-- schema_region_ratis_rpc_leader_election_timeout_max_ms
-
-| Name | schema_region_ratis_rpc_leader_election_timeout_max_ms |
-| ----------- | ----------- |
-| Description | schema region leader max election timeout |
-| Type | int32 |
-| Default | 4000 (ms) |
-| Effective | Restart required. |
-
-- data_region_ratis_rpc_leader_election_timeout_max_ms
-
-| Name | data_region_ratis_rpc_leader_election_timeout_max_ms |
-| ----------- | ----------- |
-| Description | data region leader max election timeout |
-| Type | int32 |
-| Default | 4000 (ms) |
-| Effective | Restart required. |
-
-- config_node_ratis_request_timeout_ms
-
-| Name | config_node_ratis_request_timeout_ms |
-| ----------- | ----------- |
-| Description | confignode ratis client retry threshold |
-| Type | int32 |
-| Default | 10000 |
-| Effective | Restart required. |
-
-- schema_region_ratis_request_timeout_ms
-
-| Name | schema_region_ratis_request_timeout_ms |
-| ----------- | ----------- |
-| Description | schema region ratis client retry threshold |
-| Type | int32 |
-| Default | 10000 |
-| Effective | Restart required. |
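The election timeouts work as a min/max pair: each follower picks a random timeout in that window, which avoids split votes. A sketch of widening the window for a DataRegion group (documented defaults shown; restart required):

```properties
# Leader election timeout window for DataRegion Ratis groups (ms)
data_region_ratis_rpc_leader_election_timeout_min_ms=2000
data_region_ratis_rpc_leader_election_timeout_max_ms=4000
```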
-
-- data_region_ratis_request_timeout_ms
-
-| Name | data_region_ratis_request_timeout_ms |
-| ----------- | ----------- |
-| Description | data region ratis client retry threshold |
-| Type | int32 |
-| Default | 10000 |
-| Effective | Restart required. |
-
-- config_node_ratis_max_retry_attempts
-
-| Name | config_node_ratis_max_retry_attempts |
-| ----------- | ----------- |
-| Description | confignode ratis client max retry times |
-| Type | int32 |
-| Default | 10 |
-| Effective | Restart required. |
-
-- config_node_ratis_initial_sleep_time_ms
-
-| Name | config_node_ratis_initial_sleep_time_ms |
-| ----------- | ----------- |
-| Description | confignode ratis client initial sleep time |
-| Type | int32 |
-| Default | 100 (ms) |
-| Effective | Restart required. |
-
-- config_node_ratis_max_sleep_time_ms
-
-| Name | config_node_ratis_max_sleep_time_ms |
-| ----------- | ----------- |
-| Description | confignode ratis client max retry sleep time |
-| Type | int32 |
-| Default | 10000 |
-| Effective | Restart required. |
-
-- schema_region_ratis_max_retry_attempts
-
-| Name | schema_region_ratis_max_retry_attempts |
-| ----------- | ----------- |
-| Description | schema region ratis client max retry times |
-| Type | int32 |
-| Default | 10 |
-| Effective | Restart required. |
-
-- schema_region_ratis_initial_sleep_time_ms
-
-| Name | schema_region_ratis_initial_sleep_time_ms |
-| ----------- | ----------- |
-| Description | schema region ratis client initial sleep time |
-| Type | int32 |
-| Default | 100 (ms) |
-| Effective | Restart required. |
-
-- schema_region_ratis_max_sleep_time_ms
-
-| Name | schema_region_ratis_max_sleep_time_ms |
-| ----------- | ----------- |
-| Description | schema region ratis client max sleep time |
-| Type | int32 |
-| Default | 1000 |
-| Effective | Restart required. |
-
-- data_region_ratis_max_retry_attempts
-
-| Name | data_region_ratis_max_retry_attempts |
-| ----------- | ----------- |
-| Description | data region ratis client max retry times |
-| Type | int32 |
-| Default | 10 |
-| Effective | Restart required. |
-
-- data_region_ratis_initial_sleep_time_ms
-
-| Name | data_region_ratis_initial_sleep_time_ms |
-| ----------- | ----------- |
-| Description | data region ratis client initial sleep time |
-| Type | int32 |
-| Default | 100 (ms) |
-| Effective | Restart required. |
-
-- data_region_ratis_max_sleep_time_ms
-
-| Name | data_region_ratis_max_sleep_time_ms |
-| ----------- | ----------- |
-| Description | data region ratis client max retry sleep time |
-| Type | int32 |
-| Default | 1000 |
-| Effective | Restart required. |
-
-- ratis_first_election_timeout_min_ms
-
-| Name | ratis_first_election_timeout_min_ms |
-| ----------- | ----------- |
-| Description | Ratis first election min timeout |
-| Type | int64 |
-| Default | 50 (ms) |
-| Effective | Restart required. |
-
-- ratis_first_election_timeout_max_ms
-
-| Name | ratis_first_election_timeout_max_ms |
-| ----------- | ----------- |
-| Description | Ratis first election max timeout |
-| Type | int64 |
-| Default | 150 (ms) |
-| Effective | Restart required. |
-
-- config_node_ratis_preserve_logs_num_when_purge
-
-| Name | config_node_ratis_preserve_logs_num_when_purge |
-| ----------- | ----------- |
-| Description | confignode preserves a certain number of logs when taking a snapshot and purging |
-| Type | int32 |
-| Default | 1000 |
-| Effective | Restart required. |
-
-- schema_region_ratis_preserve_logs_num_when_purge
-
-| Name | schema_region_ratis_preserve_logs_num_when_purge |
-| ----------- | ----------- |
-| Description | schema region preserves a certain number of logs when taking a snapshot and purging |
-| Type | int32 |
-| Default | 1000 |
-| Effective | Restart required. |
-
-- data_region_ratis_preserve_logs_num_when_purge
-
-| Name | data_region_ratis_preserve_logs_num_when_purge |
-| ----------- | ----------- |
-| Description | data region preserves a certain number of logs when taking a snapshot and purging |
-| Type | int32 |
-| Default | 1000 |
-| Effective | Restart required. |
-
-- config_node_ratis_log_max_size
-
-| Name | config_node_ratis_log_max_size |
-| ----------- | ----------- |
-| Description | config node Raft Log disk size control |
-| Type | int64 |
-| Default | 2147483648 (2GB) |
-| Effective | Restart required. |
-
-- schema_region_ratis_log_max_size
-
-| Name | schema_region_ratis_log_max_size |
-| ----------- | ----------- |
-| Description | schema region Raft Log disk size control |
-| Type | int64 |
-| Default | 2147483648 (2GB) |
-| Effective | Restart required. |
-
-- data_region_ratis_log_max_size
-
-| Name | data_region_ratis_log_max_size |
-| ----------- | ----------- |
-| Description | data region Raft Log disk size control |
-| Type | int64 |
-| Default | 21474836480 (20GB) |
-| Effective | Restart required. |
-
-- config_node_ratis_periodic_snapshot_interval
-
-| Name | config_node_ratis_periodic_snapshot_interval |
-| ----------- | ----------- |
-| Description | config node Raft periodic snapshot interval |
-| Type | int64 |
-| Default | 86400 (s) |
-| Effective | Restart required. |
-
-- schema_region_ratis_periodic_snapshot_interval
-
-| Name | schema_region_ratis_periodic_snapshot_interval |
-| ----------- | ----------- |
-| Description | schema region Raft periodic snapshot interval |
-| Type | int64 |
-| Default | 86400 (s) |
-| Effective | Restart required. |
-
-- data_region_ratis_periodic_snapshot_interval
-
-| Name | data_region_ratis_periodic_snapshot_interval |
-| ----------- | ----------- |
-| Description | data region Raft periodic snapshot interval |
-| Type | int64 |
-| Default | 86400 (s) |
-| Effective | Restart required. |
-
-### 4.31 IoTConsensusV2 Configuration
-
-- iot_consensus_v2_pipeline_size
-
-| Name | iot_consensus_v2_pipeline_size |
-| ----------- | ----------- |
-| Description | Default event buffer size for the connector and receiver in IoTConsensusV2 |
-| Type | int |
-| Default | 5 |
-| Effective | Restart required. |
-
-- iot_consensus_v2_mode
-
-| Name | iot_consensus_v2_mode |
-| ----------- | ----------- |
-| Description | IoTConsensusV2 mode. |
-| Type | String |
-| Default | batch |
-| Effective | Restart required. |
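A minimal sketch of the two IoTConsensusV2 entries above, using the documented defaults (both require a restart):

```properties
# IoTConsensusV2: replication mode and event buffer size
iot_consensus_v2_mode=batch
iot_consensus_v2_pipeline_size=5
```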
-
-### 4.32 Procedure Configuration
-
-- procedure_core_worker_thread_count
-
-| Name | procedure_core_worker_thread_count |
-| ----------- | ----------- |
-| Description | Default number of worker threads |
-| Type | int32 |
-| Default | 4 |
-| Effective | Restart required. |
-
-- procedure_completed_clean_interval
-
-| Name | procedure_completed_clean_interval |
-| ----------- | ----------- |
-| Description | Default time interval of completed procedure cleaner work, time unit is second |
-| Type | int32 |
-| Default | 30(s) |
-| Effective | Restart required. |
-
-- procedure_completed_evict_ttl
-
-| Name | procedure_completed_evict_ttl |
-| ----------- | ----------- |
-| Description | Default ttl of completed procedures, time unit is second |
-| Type | int32 |
-| Default | 60(s) |
-| Effective | Restart required. |
-
-### 4.33 MQTT Broker Configuration
-
-- enable_mqtt_service
-
-| Name | enable_mqtt_service |
-| ----------- | ----------- |
-| Description | whether to enable the mqtt service. |
-| Type | Boolean |
-| Default | false |
-| Effective | Hot reload |
-
-- mqtt_host
-
-| Name | mqtt_host |
-| ----------- | ----------- |
-| Description | the mqtt service binding host. |
-| Type | String |
-| Default | 127.0.0.1 |
-| Effective | Hot reload |
-
-- mqtt_port
-
-| Name | mqtt_port |
-| ----------- | ----------- |
-| Description | the mqtt service binding port. |
-| Type | int32 |
-| Default | 1883 |
-| Effective | Hot reload |
-
-- mqtt_handler_pool_size
-
-| Name | mqtt_handler_pool_size |
-| ----------- | ----------- |
-| Description | the handler pool size for handling the mqtt messages. |
-| Type | int32 |
-| Default | 1 |
-| Effective | Hot reload |
-
-- mqtt_payload_formatter
-
-| Name | mqtt_payload_formatter |
-| ----------- | ----------- |
-| Description | the mqtt message payload formatter. |
-| Type | String |
-| Default | json |
-| Effective | Hot reload |
-
-- mqtt_max_message_size
-
-| Name | mqtt_max_message_size |
-| ----------- | ----------- |
-| Description | max length of an mqtt message in bytes |
-| Type | int32 |
-| Default | 1048576 |
-| Effective | Hot reload |
-
-### 4.34 Audit log Configuration
-
-- enable_audit_log
-
-| Name | enable_audit_log |
-| ----------- | ----------- |
-| Description | whether to enable the audit log. |
-| Type | Boolean |
-| Default | false |
-| Effective | Restart required. |
-
-- audit_log_storage
-
-| Name | audit_log_storage |
-| ----------- | ----------- |
-| Description | Output location of audit logs |
-| Type | String |
-| Default | IOTDB,LOGGER |
-| Effective | Restart required. |
-
-- audit_log_operation
-
-| Name | audit_log_operation |
-| ----------- | ----------- |
-| Description | whether to enable the audit log for DML operations on data, for DDL operations on schema, and for QUERY operations on data and schema |
-| Type | String |
-| Default | DML,DDL,QUERY |
-| Effective | Restart required. |
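Putting the MQTT parameters together, a sketch that exposes the built-in broker on all interfaces (the `0.0.0.0` binding is illustrative; the default binds to 127.0.0.1 only):

```properties
# Built-in MQTT broker (all entries are hot-reloadable)
enable_mqtt_service=true
mqtt_host=0.0.0.0
mqtt_port=1883
mqtt_payload_formatter=json
```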
-
-- enable_audit_log_for_native_insert_api
-
-| Name | enable_audit_log_for_native_insert_api |
-| ----------- | ----------- |
-| Description | whether the native insert API records audit logs |
-| Type | Boolean |
-| Default | true |
-| Effective | Restart required. |
-
-### 4.35 White List Configuration
-
-- enable_white_list
-
-| Name | enable_white_list |
-| ----------- | ----------- |
-| Description | whether to enable the white list |
-| Type | Boolean |
-| Default | false |
-| Effective | Hot reload |
-
-### 4.36 IoTDB-AI Configuration
-
-- model_inference_execution_thread_count
-
-| Name | model_inference_execution_thread_count |
-| ----------- | ----------- |
-| Description | The thread count which can be used for model inference operations. |
-| Type | int |
-| Default | 5 |
-| Effective | Restart required. |
-
-### 4.37 Load TsFile Configuration
-
-- load_clean_up_task_execution_delay_time_seconds
-
-| Name | load_clean_up_task_execution_delay_time_seconds |
-| ----------- | ----------- |
-| Description | The load clean up task is used to clean up unsuccessfully loaded tsfiles after a certain period of time. |
-| Type | int |
-| Default | 1800 |
-| Effective | Hot reload |
-
-- load_write_throughput_bytes_per_second
-
-| Name | load_write_throughput_bytes_per_second |
-| ----------- | ----------- |
-| Description | The maximum bytes per second of disk write throughput when loading tsfiles. |
-| Type | int |
-| Default | -1 |
-| Effective | Hot reload |
-
-- load_active_listening_enable
-
-| Name | load_active_listening_enable |
-| ----------- | ----------- |
-| Description | Whether to enable the active listening mode for tsfile loading. |
-| Type | Boolean |
-| Default | true |
-| Effective | Hot reload |
-
-- load_active_listening_dirs
-
-| Name | load_active_listening_dirs |
-| ----------- | ----------- |
-| Description | The directories to be actively listened to for tsfile loading. Multiple directories should be separated by a ','. |
-| Type | String |
-| Default | ext/load/pending |
-| Effective | Hot reload |
-
-- load_active_listening_fail_dir
-
-| Name | load_active_listening_fail_dir |
-| ----------- | ----------- |
-| Description | The directory where tsfiles are moved if the active listening mode fails to load them. |
-| Type | String |
-| Default | ext/load/failed |
-| Effective | Hot reload |
-
-- load_active_listening_max_thread_num
-
-| Name | load_active_listening_max_thread_num |
-| ----------- | ----------- |
-| Description | The maximum number of threads that can be used to load tsfiles actively. When this parameter is commented out or set to a value <= 0, the CPU core number is used. |
-| Type | Long |
-| Default | 0 |
-| Effective | Restart required. |
-
-- load_active_listening_check_interval_seconds
-
-| Name | load_active_listening_check_interval_seconds |
-| ----------- | ----------- |
-| Description | The interval, in seconds, at which the active listening mode checks the directories specified in `load_active_listening_dirs`. The directories are checked every `load_active_listening_check_interval_seconds` seconds. |
-| Type | Long |
-| Default | 5 |
-| Effective | Restart required. |
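The active-listening parameters above form a small pipeline: TsFiles dropped into the pending directory are picked up on each check interval, and files that fail to load are moved to the fail directory. A sketch using the documented defaults:

```properties
# Active listening for TsFile loading
load_active_listening_enable=true
load_active_listening_dirs=ext/load/pending
load_active_listening_fail_dir=ext/load/failed
load_active_listening_check_interval_seconds=5
```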
-
-* last_cache_operation_on_load
-
-|Name| last_cache_operation_on_load |
-|:---:|:---|
-|Description| The operation performed on LastCache when a TsFile is successfully loaded. `UPDATE`: use the data in the TsFile to update LastCache; `UPDATE_NO_BLOB`: similar to UPDATE, but will invalidate LastCache for blob series; `CLEAN_DEVICE`: invalidate LastCache of devices contained in the TsFile; `CLEAN_ALL`: clean the whole LastCache. |
-|Type| String |
-|Default| UPDATE_NO_BLOB |
-|Effective| Restart required. |
-
-* cache_last_values_for_load
-
-|Name| cache_last_values_for_load |
-|:---:|:---|
-|Description| Whether to cache last values before loading a TsFile. Only effective when `last_cache_operation_on_load=UPDATE_NO_BLOB` or `last_cache_operation_on_load=UPDATE`. When set to true, blob series will be ignored even with `last_cache_operation_on_load=UPDATE`. Enabling this will increase the memory footprint during loading TsFiles. |
-|Type| Boolean |
-|Default| true |
-|Effective| Restart required. |
-
-* cache_last_values_memory_budget_in_byte
-
-|Name| cache_last_values_memory_budget_in_byte |
-|:---:|:---|
-|Description| When `cache_last_values_for_load=true`, the maximum memory that can be used to cache last values. If this value is exceeded, the cached values will be abandoned and last values will be read from the TsFile in a streaming manner. |
-|Type| int32 |
-|Default| 4194304 |
-|Effective| Restart required. |
-
-
-### 4.38 Dispatch Retry Configuration
-
-- write_request_remote_dispatch_max_retry_duration_in_ms
-
-| Name | write_request_remote_dispatch_max_retry_duration_in_ms |
-| ----------- | ----------- |
-| Description | The maximum retrying time for write request remote dispatching, time unit is milliseconds. |
-| Type | Long |
-| Default | 60000 |
-| Effective | Hot reload |
-
-- enable_retry_for_unknown_error
-
-| Name | enable_retry_for_unknown_error |
-| ----------- | ----------- |
-| Description | Whether to retry for unknown errors. |
-| Type | boolean |
-| Default | false |
-| Effective | Hot reload |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Reference/System-Config-Manual_apache.md b/src/UserGuide/Master/Table/Reference/System-Config-Manual_apache.md
new file mode 100644
index 000000000..c7006be42
--- /dev/null
+++ b/src/UserGuide/Master/Table/Reference/System-Config-Manual_apache.md
@@ -0,0 +1,3383 @@
+
+# Config Manual
+
+## 1. IoTDB Configuration Files
+
+The configuration files for IoTDB are located in the `conf` folder under the IoTDB installation directory.
+Key configuration files include:
+
+1. `confignode-env.sh` **/** `confignode-env.bat`:
+   1. Environment configuration file for ConfigNode.
+   2. Used to configure memory size and other environment settings for ConfigNode.
+2. `datanode-env.sh` **/** `datanode-env.bat`:
+   1. Environment configuration file for DataNode.
+   2. Used to configure memory size and other environment settings for DataNode.
+3. `iotdb-system.properties`:
+   1. Main configuration file for IoTDB.
+   2. Contains configurable parameters for IoTDB.
+4. `iotdb-system.properties.template`:
+   1. Template for the `iotdb-system.properties` file.
+   2. Provides a reference for all available configuration parameters.
+
+## 2. Modify Configurations
+
+### 2.1 **Modify Existing Parameters**
+
+- Parameters already present in the `iotdb-system.properties` file can be directly modified.
+
+### 2.2 **Adding New Parameters**
+
+- For parameters not listed in `iotdb-system.properties`, you can find them in the `iotdb-system.properties.template` file.
+- Copy the desired parameter from the template file to `iotdb-system.properties` and modify its value.
+
+### 2.3 Configuration Update Methods
+
+Different configuration parameters have different update methods, categorized as follows:
+
+1. **Modify before the first startup**:
+   1. These parameters can only be modified before the first startup of ConfigNode/DataNode.
+   2. Modifying them after the first startup will prevent ConfigNode/DataNode from starting.
+2. **Restart Required for Changes to Take Effect**:
+   1. These parameters can be modified after ConfigNode/DataNode has started.
+   2. However, a restart of ConfigNode/DataNode is required for the changes to take effect.
+3. **Hot Reload**:
+   1. These parameters can be modified while ConfigNode/DataNode is running.
+   2. After modification, use the following SQL commands to apply the changes:
+      - `load configuration`: Reloads the configuration.
+      - `set configuration key1 = 'value1'`: Updates specific configuration parameters.
+
+## 3. Environment Parameters
+
+The environment configuration files (`confignode-env.sh/bat` and `datanode-env.sh/bat`) are used to configure Java environment parameters for ConfigNode and DataNode, such as JVM settings. These configurations are passed to the JVM when ConfigNode or DataNode starts.
+
+### 3.1 **confignode-env.sh/bat**
+
+- MEMORY_SIZE
+
+| Name | MEMORY_SIZE |
+| ----------- | ----------- |
+| Description | Memory size allocated when IoTDB ConfigNode starts. |
+| Type | String |
+| Default | Depends on the operating system and machine configuration. Defaults to 3/10 of the machine's memory, capped at 16G. |
+| Effective | Restart required |
+
+- ON_HEAP_MEMORY
+
+| Name | ON_HEAP_MEMORY |
+| ----------- | ----------- |
+| Description | On-heap memory size available for IoTDB ConfigNode. Previously named `MAX_HEAP_SIZE`. |
+| Type | String |
+| Default | Depends on the `MEMORY_SIZE` configuration. |
+| Effective | Restart required |
+
+- OFF_HEAP_MEMORY
+
+| Name | OFF_HEAP_MEMORY |
+| ----------- | ----------- |
+| Description | Off-heap memory size available for IoTDB ConfigNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. |
+| Type | String |
+| Default | Depends on the `MEMORY_SIZE` configuration. |
+| Effective | Restart required |
+
+### 3.2 **datanode-env.sh/bat**
+
+- MEMORY_SIZE
+
+| Name | MEMORY_SIZE |
+| ----------- | ----------- |
+| Description | Memory size allocated when IoTDB DataNode starts. |
+| Type | String |
+| Default | Depends on the operating system and machine configuration. Defaults to 1/2 of the machine's memory. |
+| Effective | Restart required |
+
+- ON_HEAP_MEMORY
+
+| Name | ON_HEAP_MEMORY |
+| ----------- | ----------- |
+| Description | On-heap memory size available for IoTDB DataNode. Previously named `MAX_HEAP_SIZE`. |
+| Type | String |
+| Default | Depends on the `MEMORY_SIZE` configuration. |
+| Effective | Restart required |
+
+- OFF_HEAP_MEMORY
+
+| Name | OFF_HEAP_MEMORY |
+| ----------- | ----------- |
+| Description | Off-heap memory size available for IoTDB DataNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. |
+| Type | String |
+| Default | Depends on the `MEMORY_SIZE` configuration. |
+| Effective | Restart required |
+
+## 4. System Parameters (`iotdb-system.properties.template`)
+
+The `iotdb-system.properties` file contains various configurations for managing IoTDB clusters, nodes, replication, directories, monitoring, SSL, connections, object storage, tier management, and REST services. Below is a detailed breakdown of the parameters:
+
+### 4.1 Cluster Configuration
+
+- cluster_name
+
+| Name | cluster_name |
+| ----------- | ----------- |
+| Description | Name of the cluster. |
+| Type | String |
+| Default | default_cluster |
+| Effective | Use CLI: `set configuration cluster_name='xxx'`. |
+| Note | The change is distributed across nodes, but may not reach every node in case of network issues or node failures. Nodes that fail to update must have `cluster_name` modified manually in their configuration files, followed by a restart. Under normal circumstances, it is not recommended to change `cluster_name` by manually modifying configuration files, nor to hot-load it via the `load configuration` method. |
+
+### 4.2 Seed ConfigNode
+
+- cn_seed_config_node
+
+| Name | cn_seed_config_node |
+| ----------- | ----------- |
+| Description | Address of the seed ConfigNode, used by a ConfigNode to join the cluster. |
+| Type | String |
+| Default | 127.0.0.1:10710 |
+| Effective | Modify before the first startup. |
+
+- dn_seed_config_node
+
+| Name | dn_seed_config_node |
+| ----------- | ----------- |
+| Description | Address of the seed ConfigNode, used by a DataNode to join the cluster. |
+| Type | String |
+| Default | 127.0.0.1:10710 |
+| Effective | Modify before the first startup. |
+
+### 4.3 Node RPC Configuration
+
+- cn_internal_address
+
+| Name | cn_internal_address |
+| ----------- | ----------- |
+| Description | Internal address for ConfigNode communication. |
+| Type | String |
+| Default | 127.0.0.1 |
+| Effective | Modify before the first startup. |
+
+- cn_internal_port
+
+| Name | cn_internal_port |
+| ----------- | ----------- |
+| Description | Port for ConfigNode internal communication. |
+| Type | Short Int : [0,65535] |
+| Default | 10710 |
+| Effective | Modify before the first startup. |
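When bootstrapping a multi-node cluster, every joining node points its seed entry at the same first ConfigNode. A sketch (the IP below is a placeholder for your seed node's `cn_internal_address:cn_internal_port`; set before the first startup):

```properties
# On every ConfigNode and DataNode joining the cluster (placeholder address)
cn_seed_config_node=192.168.1.10:10710
dn_seed_config_node=192.168.1.10:10710
```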
| + +- cn_consensus_port + +| Name | cn_consensus_port | +| ----------- | ----------------------------------------------------- | +| Description | Port for ConfigNode consensus protocol communication. | +| Type | Short Int : [0,65535] | +| Default | 10720 | +| Effective | Modify before the first startup. | + +- dn_rpc_address + +| Name | dn_rpc_address | +| ----------- |---------------------------------| +| Description | Address for client RPC service. | +| Type | String | +| Default | 127.0.0.1 | +| Effective | Restart required. | + +- dn_rpc_port + +| Name | dn_rpc_port | +| ----------- | ---------------------------- | +| Description | Port for client RPC service. | +| Type | Short Int : [0,65535] | +| Default | 6667 | +| Effective | Restart required. | + +- dn_internal_address + +| Name | dn_internal_address | +| ----------- | -------------------------------------------- | +| Description | Internal address for DataNode communication. | +| Type | string | +| Default | 127.0.0.1 | +| Effective | Modify before the first startup. | + +- dn_internal_port + +| Name | dn_internal_port | +| ----------- | ----------------------------------------- | +| Description | Port for DataNode internal communication. | +| Type | int | +| Default | 10730 | +| Effective | Modify before the first startup. | + +- dn_mpp_data_exchange_port + +| Name | dn_mpp_data_exchange_port | +| ----------- | -------------------------------- | +| Description | Port for MPP data exchange. | +| Type | int | +| Default | 10740 | +| Effective | Modify before the first startup. | + +- dn_schema_region_consensus_port + +| Name | dn_schema_region_consensus_port | +| ----------- | ------------------------------------------------------------ | +| Description | Port for Datanode SchemaRegion consensus protocol communication. | +| Type | int | +| Default | 10750 | +| Effective | Modify before the first startup. | + +- dn_data_region_consensus_port + +| Name | dn_data_region_consensus_port | +| ----------- | ------------------------------------------------------------ | +| Description | Port for Datanode DataRegion consensus protocol communication. | +| Type | int | +| Default | 10760 | +| Effective | Modify before the first startup. | + +- dn_join_cluster_retry_interval_ms + +| Name | dn_join_cluster_retry_interval_ms | +| ----------- | --------------------------------------------------- | +| Description | Interval for DataNode to retry joining the cluster. | +| Type | long | +| Default | 5000 | +| Effective | Restart required. | + +### 4.4 Replication configuration + +- config_node_consensus_protocol_class + +| Name | config_node_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for ConfigNode replication, only supports RatisConsensus | +| Type | String | +| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | +| Effective | Modify before the first startup. | + +- schema_replication_factor + +| Name | schema_replication_factor | +| ----------- | ------------------------------------------------------------ | +| Description | Default schema replication factor for databases. | +| Type | int32 | +| Default | 1 | +| Effective | Restart required. Takes effect on the new database after restarting. | + +- schema_region_consensus_protocol_class + +| Name | schema_region_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for schema region replication. 
Only supports RatisConsensus when multi-replications. | +| Type | String | +| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | +| Effective | Modify before the first startup. | + +- data_replication_factor + +| Name | data_replication_factor | +| ----------- | ------------------------------------------------------------ | +| Description | Default data replication factor for databases. | +| Type | int32 | +| Default | 1 | +| Effective | Restart required. Takes effect on the new database after restarting. | + +- data_region_consensus_protocol_class + +| Name | data_region_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for data region replication. Supports IoTConsensus or RatisConsensus when multi-replications. | +| Type | String | +| Default | org.apache.iotdb.consensus.iot.IoTConsensus | +| Effective | Modify before the first startup. | + +### 4.5 Directory configuration + +- cn_system_dir + +| Name | cn_system_dir | +| ----------- | ----------------------------------------------------------- | +| Description | System data storage path for ConfigNode. | +| Type | String | +| Default | data/confignode/system(Windows:data\\configndoe\\system) | +| Effective | Restart required | + +- cn_consensus_dir + +| Name | cn_consensus_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol data storage path for ConfigNode. | +| Type | String | +| Default | data/confignode/consensus(Windows:data\\configndoe\\consensus) | +| Effective | Restart required | + +- cn_pipe_receiver_file_dir + +| Name | cn_pipe_receiver_file_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for pipe receiver files in ConfigNode. | +| Type | String | +| Default | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) | +| Effective | Restart required | + +- dn_system_dir + +| Name | dn_system_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Schema storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/system(Windows:data\\datanode\\system) | +| Effective | Restart required | + +- dn_data_dirs + +| Name | dn_data_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Data storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/data(Windows:data\\datanode\\data) | +| Effective | Restart required | + +- dn_multi_dir_strategy + +| Name | dn_multi_dir_strategy | +| ----------- | ------------------------------------------------------------ | +| Description | The strategy used by IoTDB to select directories in `data_dirs` for TsFiles. You can use either the simple class name or the fully qualified class name. The system provides the following two strategies: 1. SequenceStrategy: IoTDB selects directories sequentially, iterating through all directories in `data_dirs` in a round-robin manner. 2. 
MaxDiskUsableSpaceFirstStrategy IoTDB prioritizes the directory in `data_dirs` with the largest disk free space. To implement a custom strategy: 1. Inherit the `org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy `class and implement your own strategy method. 2. Fill in the configuration item with the fully qualified class name of your implementation (package name + class name, e.g., `UserDefineStrategyPackage`). 3. Add the JAR file containing your custom class to the project. | +| Type | String | +| Default | SequenceStrategy | +| Effective | Hot reload. | + +- dn_consensus_dir + +| Name | dn_consensus_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus log storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/consensus(Windows:data\\datanode\\consensus) | +| Effective | Restart required | + +- dn_wal_dirs + +| Name | dn_wal_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Write-ahead log (WAL) storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/wal(Windows:data\\datanode\\wal) | +| Effective | Restart required | + +- dn_tracing_dir + +| Name | dn_tracing_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Tracing root directory for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | datanode/tracing(Windows:datanode\\tracing) | +| Effective | Restart required | + +- dn_sync_dir + +| Name | dn_sync_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Sync storage path for DataNode.By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/sync(Windows:data\\datanode\\sync) | +| Effective | Restart required | + +- sort_tmp_dir + +| Name | sort_tmp_dir | +| ----------- | ------------------------------------------------- | +| Description | Temporary directory for sorting operations. | +| Type | String | +| Default | data/datanode/tmp(Windows:data\\datanode\\tmp) | +| Effective | Restart required | + +- dn_pipe_receiver_file_dirs + +| Name | dn_pipe_receiver_file_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for pipe receiver files in DataNode. | +| Type | String | +| Default | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | +| Effective | Restart required | + +- iot_consensus_v2_receiver_file_dirs + +| Name | iot_consensus_v2_receiver_file_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for IoTConsensus V2 receiver files. 
| +| Type | String | +| Default | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | +| Effective | Restart required | + +- iot_consensus_v2_deletion_file_dir + +| Name | iot_consensus_v2_deletion_file_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for IoTConsensus V2 deletion files. | +| Type | String | +| Default | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | +| Effective | Restart required | + +### 4.6 Metric Configuration + +- cn_metric_reporter_list + +| Name | cn_metric_reporter_list | +| ----------- | ----------------------------------------- | +| Description | Systems for reporting ConfigNode metrics. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- cn_metric_level + +| Name | cn_metric_level | +| ----------- | --------------------------------------- | +| Description | Level of detail for ConfigNode metrics. | +| Type | String | +| Default | IMPORTANT | +| Effective | Restart required. | + +- cn_metric_async_collect_period + +| Name | cn_metric_async_collect_period | +| ----------- | ------------------------------------------------------------ | +| Description | Period for asynchronous metric collection in ConfigNode (in seconds). | +| Type | int | +| Default | 5 | +| Effective | Restart required. | + +- cn_metric_prometheus_reporter_port + +| Name | cn_metric_prometheus_reporter_port | +| ----------- | --------------------------------------------------- | +| Description | Port for Prometheus metric reporting in ConfigNode. | +| Type | int | +| Default | 9091 | +| Effective | Restart required. | + +- dn_metric_reporter_list + +| Name | dn_metric_reporter_list | +| ----------- | --------------------------------------- | +| Description | Systems for reporting DataNode metrics. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- dn_metric_level + +| Name | dn_metric_level | +| ----------- | ------------------------------------- | +| Description | Level of detail for DataNode metrics. | +| Type | String | +| Default | IMPORTANT | +| Effective | Restart required. | + +- dn_metric_async_collect_period + +| Name | dn_metric_async_collect_period | +| ----------- | ------------------------------------------------------------ | +| Description | Period for asynchronous metric collection in DataNode (in seconds). | +| Type | int | +| Default | 5 | +| Effective | Restart required. | + +- dn_metric_prometheus_reporter_port + +| Name | dn_metric_prometheus_reporter_port | +| ----------- | ------------------------------------------------- | +| Description | Port for Prometheus metric reporting in DataNode. | +| Type | int | +| Default | 9092 | +| Effective | Restart required. | + +- dn_metric_internal_reporter_type + +| Name | dn_metric_internal_reporter_type | +| ----------- | ------------------------------------------------------------ | +| Description | Internal reporter types for DataNode metrics. For internal monitoring and checking that the data has been successfully written and refreshed. | +| Type | String | +| Default | IOTDB | +| Effective | Restart required. | + +### 4.7 SSL Configuration + +- enable_thrift_ssl + +| Name | enable_thrift_ssl | +| ----------- | --------------------------------------------- | +| Description | Enables SSL encryption for RPC communication. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. 
| + +- enable_https + +| Name | enable_https | +| ----------- | ------------------------------ | +| Description | Enables SSL for REST services. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- key_store_path + +| Name | key_store_path | +| ----------- | ---------------------------- | +| Description | Path to the SSL certificate. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- key_store_pwd + +| Name | key_store_pwd | +| ----------- | --------------------------------- | +| Description | Password for the SSL certificate. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +### 4.8 Connection Configuration + +- cn_rpc_thrift_compression_enable + +| Name | cn_rpc_thrift_compression_enable | +| ----------- | ----------------------------------- | +| Description | Enables Thrift compression for RPC. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- cn_rpc_max_concurrent_client_num + +| Name | cn_rpc_max_concurrent_client_num | +| ----------- |-------------------------------------------| +| Description | Maximum number of concurrent RPC clients. | +| Type | int | +| Default | 3000 | +| Effective | Restart required. | + +- cn_connection_timeout_ms + +| Name | cn_connection_timeout_ms | +| ----------- | ---------------------------------------------------- | +| Description | Connection timeout for ConfigNode (in milliseconds). | +| Type | int | +| Default | 60000 | +| Effective | Restart required. | + +- cn_selector_thread_nums_of_client_manager + +| Name | cn_selector_thread_nums_of_client_manager | +| ----------- | ------------------------------------------------------------ | +| Description | Number of selector threads for client management in ConfigNode. | +| Type | int | +| Default | 1 | +| Effective | Restart required. | + +- cn_max_client_count_for_each_node_in_client_manager + +| Name | cn_max_client_count_for_each_node_in_client_manager | +| ----------- | ------------------------------------------------------ | +| Description | Maximum clients per node in ConfigNode client manager. | +| Type | int | +| Default | 300 | +| Effective | Restart required. | + +- dn_session_timeout_threshold + +| Name | dn_session_timeout_threshold | +| ----------- | ---------------------------------------- | +| Description | Maximum idle time for DataNode sessions. | +| Type | int | +| Default | 0 | +| Effective | Restart required.t required. | + +- dn_rpc_thrift_compression_enable + +| Name | dn_rpc_thrift_compression_enable | +| ----------- | -------------------------------------------- | +| Description | Enables Thrift compression for DataNode RPC. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- dn_rpc_advanced_compression_enable + +| Name | dn_rpc_advanced_compression_enable | +| ----------- | ----------------------------------------------------- | +| Description | Enables advanced Thrift compression for DataNode RPC. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- dn_rpc_selector_thread_count + +| Name | rpc_selector_thread_count | +| ----------- | -------------------------------------------- | +| Description | Number of selector threads for DataNode RPC. | +| Type | int | +| Default | 1 | +| Effective | Restart required.t required. 
| + +- dn_rpc_min_concurrent_client_num + +| Name | rpc_min_concurrent_client_num | +| ----------- | ------------------------------------------------------ | +| Description | Minimum number of concurrent RPC clients for DataNode. | +| Type | Short Int : [0,65535] | +| Default | 1 | +| Effective | Restart required. | + +- dn_rpc_max_concurrent_client_num + +| Name | dn_rpc_max_concurrent_client_num | +| ----------- |--------------------------------------------------------| +| Description | Maximum number of concurrent RPC clients for DataNode. | +| Type | Short Int : [0,65535] | +| Default | 1000 | +| Effective | Restart required. | + +- dn_thrift_max_frame_size + +| Name | dn_thrift_max_frame_size | +| ----------- |------------------------------------------------| +| Description | Maximum frame size for RPC requests/responses. | +| Type | long | +| Default | 536870912 (Default 512MB) | +| Effective | Restart required. | + +- dn_thrift_init_buffer_size + +| Name | dn_thrift_init_buffer_size | +| ----------- | ----------------------------------- | +| Description | Initial buffer size for Thrift RPC. | +| Type | long | +| Default | 1024 | +| Effective | Restart required. | + +- dn_connection_timeout_ms + +| Name | dn_connection_timeout_ms | +| ----------- | -------------------------------------------------- | +| Description | Connection timeout for DataNode (in milliseconds). | +| Type | int | +| Default | 60000 | +| Effective | Restart required. | + +- dn_selector_thread_count_of_client_manager + +| Name | dn_selector_thread_count_of_client_manager | +| ----------- | ------------------------------------------------------------ | +| Description | selector thread (TAsyncClientManager) nums for async thread in a clientManager | +| Type | int | +| Default | 1 | +| Effective | Restart required.t required. | + +- dn_max_client_count_for_each_node_in_client_manager + +| Name | dn_max_client_count_for_each_node_in_client_manager | +| ----------- | --------------------------------------------------- | +| Description | Maximum clients per node in DataNode clientmanager. | +| Type | int | +| Default | 300 | +| Effective | Restart required. | + +### 4.9 Object storage management + +- remote_tsfile_cache_dirs + +| Name | remote_tsfile_cache_dirs | +| ----------- | ---------------------------------------- | +| Description | Local cache directory for cloud storage. | +| Type | String | +| Default | data/datanode/data/cache | +| Effective | Restart required. | + +- remote_tsfile_cache_page_size_in_kb + +| Name | remote_tsfile_cache_page_size_in_kb | +| ----------- | --------------------------------------------- | +| Description | Block size for cached files in cloud storage. | +| Type | int | +| Default | 20480 | +| Effective | Restart required. | + +- remote_tsfile_cache_max_disk_usage_in_mb + +| Name | remote_tsfile_cache_max_disk_usage_in_mb | +| ----------- | ------------------------------------------- | +| Description | Maximum disk usage for cloud storage cache. | +| Type | long | +| Default | 51200 | +| Effective | Restart required. | + +- object_storage_type + +| Name | object_storage_type | +| ----------- | ---------------------- | +| Description | Type of cloud storage. | +| Type | String | +| Default | AWS_S3 | +| Effective | Restart required. | + +- object_storage_endpoint + +| Name | object_storage_endpoint | +| ----------- | --------------------------- | +| Description | Endpoint for cloud storage. | +| Type | String | +| Default | None | +| Effective | Restart required. 
| + +- object_storage_bucket + +| Name | object_storage_bucket | +| ----------- | ------------------------------ | +| Description | Bucket name for cloud storage. | +| Type | String | +| Default | iotdb_data | +| Effective | Restart required. | + +- object_storage_access_key + +| Name | object_storage_access_key | +| ----------- | ----------------------------- | +| Description | Access key for cloud storage. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- object_storage_access_secret + +| Name | object_storage_access_secret | +| ----------- | -------------------------------- | +| Description | Access secret for cloud storage. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +### 4.10 Tier management + +- dn_default_space_usage_thresholds + +| Name | dn_default_space_usage_thresholds | +| ----------- | ------------------------------------------------------------ | +| Description | Disk usage threshold, data will be moved to the next tier when the usage of the tier is higher than this threshold.If tiered storage is enabled, please separate thresholds of different tiers by semicolons ";". | +| Type | double | +| Default | 0.85 | +| Effective | Hot reload. | + +- dn_tier_full_policy + +| Name | dn_tier_full_policy | +| ----------- | ------------------------------------------------------------ | +| Description | How to deal with the last tier's data when its used space has been higher than its dn_default_space_usage_thresholds. | +| Type | String | +| Default | NULL | +| Effective | Hot reload. | + +- migrate_thread_count + +| Name | migrate_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | thread pool size for migrate operation in the DataNode's data directories. | +| Type | int | +| Default | 1 | +| Effective | Hot reload. | + +- tiered_storage_migrate_speed_limit_bytes_per_sec + +| Name | tiered_storage_migrate_speed_limit_bytes_per_sec | +| ----------- | ------------------------------------------------------------ | +| Description | The migrate speed limit of different tiers can reach per second | +| Type | int | +| Default | 10485760 | +| Effective | Hot reload. | + +### 4.11 REST Service Configuration + +- enable_rest_service + +| Name | enable_rest_service | +| ----------- | --------------------------- | +| Description | Is the REST service enabled | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- rest_service_port + +| Name | rest_service_port | +| ----------- | ------------------------------------ | +| Description | the binding port of the REST service | +| Type | int32 | +| Default | 18080 | +| Effective | Restart required. | + +- enable_swagger + +| Name | enable_swagger | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to display rest service interface information through swagger. eg: http://ip:port/swagger.json | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- rest_query_default_row_size_limit + +| Name | rest_query_default_row_size_limit | +| ----------- | ------------------------------------------------------------ | +| Description | the default row limit to a REST query response when the rowSize parameter is not given in request | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. 
| + +- cache_expire_in_seconds + +| Name | cache_expire_in_seconds | +| ----------- | ------------------------------------------------------------ | +| Description | The expiration time of the user login information cache (in seconds) | +| Type | int32 | +| Default | 28800 | +| Effective | Restart required. | + +- cache_max_num + +| Name | cache_max_num | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum number of users can be stored in the user login cache. | +| Type | int32 | +| Default | 100 | +| Effective | Restart required. | + +- cache_init_num + +| Name | cache_init_num | +| ----------- | ------------------------------------------------------------ | +| Description | The initial capacity of users can be stored in the user login cache. | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- client_auth + +| Name | client_auth | +| ----------- | --------------------------------- | +| Description | Is client authentication required | +| Type | boolean | +| Default | false | +| Effective | Restart required. | + +- trust_store_path + +| Name | trust_store_path | +| ----------- | -------------------- | +| Description | SSL trust store path | +| Type | String | +| Default | "" | +| Effective | Restart required. | + +- trust_store_pwd + +| Name | trust_store_pwd | +| ----------- | ------------------------- | +| Description | SSL trust store password. | +| Type | String | +| Default | "" | +| Effective | Restart required. | + +- idle_timeout_in_seconds + +| Name | idle_timeout_in_seconds | +| ----------- | ------------------------ | +| Description | SSL timeout (in seconds) | +| Type | int32 | +| Default | 5000 | +| Effective | Restart required. | + +### 4.12 Load balancing configuration + +- series_slot_num + +| Name | series_slot_num | +| ----------- | ------------------------------------------- | +| Description | Number of SeriesPartitionSlots per Database | +| Type | int32 | +| Default | 10000 | +| Effective | Modify before the first startup. | + +- series_partition_executor_class + +| Name | series_partition_executor_class | +| ----------- | ------------------------------------------------------------ | +| Description | SeriesPartitionSlot executor class | +| Type | String | +| Default | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor | +| Effective | Modify before the first startup. | + +- schema_region_group_extension_policy + +| Name | schema_region_group_extension_policy | +| ----------- | ------------------------------------------------------------ | +| Description | The policy of extension SchemaRegionGroup for each Database. | +| Type | string | +| Default | AUTO | +| Effective | Restart required. | + +- default_schema_region_group_num_per_database + +| Name | default_schema_region_group_num_per_database | +| ----------- | ------------------------------------------------------------ | +| Description | When set schema_region_group_extension_policy=CUSTOM, this parameter is the default number of SchemaRegionGroups for each Database.When set schema_region_group_extension_policy=AUTO, this parameter is the default minimal number of SchemaRegionGroups for each Database. | +| Type | int | +| Default | 1 | +| Effective | Restart required. 
| + +- schema_region_per_data_node + +| Name | schema_region_per_data_node | +| ----------- | ------------------------------------------------------------ | +| Description | It only takes effect when set schema_region_group_extension_policy=AUTO.This parameter is the maximum number of SchemaRegions expected to be managed by each DataNode. | +| Type | double | +| Default | 1.0 | +| Effective | Restart required. | + +- data_region_group_extension_policy + +| Name | data_region_group_extension_policy | +| ----------- | ---------------------------------------------------------- | +| Description | The policy of extension DataRegionGroup for each Database. | +| Type | string | +| Default | AUTO | +| Effective | Restart required. | + +- default_data_region_group_num_per_database + +| Name | default_data_region_group_per_database | +| ----------- | ------------------------------------------------------------ | +| Description | When set data_region_group_extension_policy=CUSTOM, this parameter is the default number of DataRegionGroups for each Database.When set data_region_group_extension_policy=AUTO, this parameter is the default minimal number of DataRegionGroups for each Database. | +| Type | int | +| Default | 2 | +| Effective | Restart required. | + +- data_region_per_data_node + +| Name | data_region_per_data_node | +| ----------- | ------------------------------------------------------------ | +| Description | It only takes effect when set data_region_group_extension_policy=AUTO.This parameter is the maximum number of DataRegions expected to be managed by each DataNode. | +| Type | double | +| Default | 5.0 | +| Effective | Restart required. | + +- enable_auto_leader_balance_for_ratis_consensus + +| Name | enable_auto_leader_balance_for_ratis_consensus | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to enable auto leader balance for Ratis consensus protocol. | +| Type | Boolean | +| Default | true | +| Effective | Restart required. | + +- enable_auto_leader_balance_for_iot_consensus + +| Name | enable_auto_leader_balance_for_iot_consensus | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to enable auto leader balance for IoTConsensus protocol. | +| Type | Boolean | +| Default | true | +| Effective | Restart required. | + +### 4.13 Cluster management + +- time_partition_origin + +| Name | time_partition_origin | +| ----------- | ------------------------------------------------------------ | +| Description | Time partition origin in milliseconds, default is equal to zero. | +| Type | Long | +| Unit | ms | +| Default | 0 | +| Effective | Modify before the first startup. | + +- time_partition_interval + +| Name | time_partition_interval | +| ----------- | ------------------------------------------------------------ | +| Description | Time partition interval in milliseconds, and partitioning data inside each data region, default is equal to one week | +| Type | Long | +| Unit | ms | +| Default | 604800000 | +| Effective | Modify before the first startup. | + +- heartbeat_interval_in_ms + +| Name | heartbeat_interval_in_ms | +| ----------- | -------------------------------------- | +| Description | The heartbeat interval in milliseconds | +| Type | Long | +| Unit | ms | +| Default | 1000 | +| Effective | Restart required. 
| + +- disk_space_warning_threshold + +| Name | disk_space_warning_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | Disk remaining threshold at which DataNode is set to ReadOnly status | +| Type | double(percentage) | +| Default | 0.05 | +| Effective | Restart required. | + +### 4.14 Memory Control Configuration + +- datanode_memory_proportion + +| Name | datanode_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Memory Allocation Ratio: StorageEngine, QueryEngine, SchemaEngine, Consensus, StreamingEngine and Free Memory. | +| Type | Ratio | +| Default | 3:3:1:1:1:1 | +| Effective | Restart required. | + +- schema_memory_proportion + +| Name | schema_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Schema Memory Allocation Ratio: SchemaRegion, SchemaCache, and PartitionCache. | +| Type | Ratio | +| Default | 5:4:1 | +| Effective | Restart required. | + +- storage_engine_memory_proportion + +| Name | storage_engine_memory_proportion | +| ----------- | ----------------------------------------------------------- | +| Description | Memory allocation ratio in StorageEngine: Write, Compaction | +| Type | Ratio | +| Default | 8:2 | +| Effective | Restart required. | + +- write_memory_proportion + +| Name | write_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Memory allocation ratio in writing: Memtable, TimePartitionInfo | +| Type | Ratio | +| Default | 19:1 | +| Effective | Restart required. | + +- primitive_array_size + +| Name | primitive_array_size | +| ----------- | --------------------------------------------------------- | +| Description | primitive array size (length of each array) in array pool | +| Type | int32 | +| Default | 64 | +| Effective | Restart required. | + +- chunk_metadata_size_proportion + +| Name | chunk_metadata_size_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of compaction memory for chunk metadata maintains in memory when doing compaction | +| Type | Double | +| Default | 0.1 | +| Effective | Restart required. | + +- flush_proportion + +| Name | flush_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of memtable memory for invoking flush disk, 0.4 by defaultIf you have extremely high write load (like batch=1000), it can be set lower than the default value like 0.2 | +| Type | Double | +| Default | 0.4 | +| Effective | Restart required. | + +- buffered_arrays_memory_proportion + +| Name | buffered_arrays_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of memtable memory allocated for buffered arrays, 0.6 by default | +| Type | Double | +| Default | 0.6 | +| Effective | Restart required. | + +- reject_proportion + +| Name | reject_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of memtable memory for rejecting insertion, 0.8 by defaultIf you have extremely high write load (like batch=1000) and the physical memory size is large enough, it can be set higher than the default value like 0.9 | +| Type | Double | +| Default | 0.8 | +| Effective | Restart required. 
| + +- device_path_cache_proportion + +| Name | device_path_cache_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of memtable memory for the DevicePathCache. DevicePathCache is the deviceId cache, keeping only one copy of the same deviceId in memory | +| Type | Double | +| Default | 0.05 | +| Effective | Restart required. | + +- write_memory_variation_report_proportion + +| Name | write_memory_variation_report_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | If memory cost of data region increased more than proportion of allocated memory for writing, report to system. The default value is 0.001 | +| Type | Double | +| Default | 0.001 | +| Effective | Restart required. | + +- check_period_when_insert_blocked + +| Name | check_period_when_insert_blocked | +| ----------- | ------------------------------------------------------------ | +| Description | When an insertion is rejected, the waiting period (in ms) to check system again, 50 by default.If the insertion has been rejected and the read load is low, it can be set larger. | +| Type | int32 | +| Default | 50 | +| Effective | Restart required. | + +- io_task_queue_size_for_flushing + +| Name | io_task_queue_size_for_flushing | +| ----------- | -------------------------------------------- | +| Description | size of ioTaskQueue. The default value is 10 | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- enable_query_memory_estimation + +| Name | enable_query_memory_estimation | +| ----------- | ------------------------------------------------------------ | +| Description | If true, we will estimate each query's possible memory footprint before executing it and deny it if its estimated memory exceeds current free memory | +| Type | bool | +| Default | true | +| Effective | Hot reload. | + +### 4.15 Schema Engine Configuration + +- schema_engine_mode + +| Name | schema_engine_mode | +| ----------- | ------------------------------------------------------------ | +| Description | The schema management mode of schema engine. Currently, support Memory and PBTree.This config of all DataNodes in one cluster must keep same. | +| Type | string | +| Default | Memory | +| Effective | Modify before the first startup. | + +- partition_cache_size + +| Name | partition_cache_size | +| ----------- | ------------------------- | +| Description | cache size for partition. | +| Type | Int32 | +| Default | 1000 | +| Effective | Restart required. | + +- sync_mlog_period_in_ms + +| Name | sync_mlog_period_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | The cycle when metadata log is periodically forced to be written to disk(in milliseconds)If sync_mlog_period_in_ms=0 it means force metadata log to be written to disk after each refreshmentSetting this parameter to 0 may slow down the operation on slow disk. | +| Type | Int64 | +| Default | 100 | +| Effective | Restart required. | + +- tag_attribute_flush_interval + +| Name | tag_attribute_flush_interval | +| ----------- | ------------------------------------------------------------ | +| Description | interval num for tag and attribute records when force flushing to disk | +| Type | int32 | +| Default | 1000 | +| Effective | Modify before the first startup. 
| + +- tag_attribute_total_size + +| Name | tag_attribute_total_size | +| ----------- | ------------------------------------------------------------ | +| Description | max size for a storage block for tags and attributes of a one-time series | +| Type | int32 | +| Default | 700 | +| Effective | Modify before the first startup. | + +- max_measurement_num_of_internal_request + +| Name | max_measurement_num_of_internal_request | +| ----------- | ------------------------------------------------------------ | +| Description | max measurement num of internal requestWhen creating timeseries with Session.createMultiTimeseries, the user input plan, the timeseries num ofwhich exceeds this num, will be split to several plans with timeseries no more than this num. | +| Type | Int32 | +| Default | 10000 | +| Effective | Restart required. | + +- datanode_schema_cache_eviction_policy + +| Name | datanode_schema_cache_eviction_policy | +| ----------- | --------------------------------------- | +| Description | Policy of DataNodeSchemaCache eviction. | +| Type | String | +| Default | FIFO | +| Effective | Restart required. | + +- cluster_timeseries_limit_threshold + +| Name | cluster_timeseries_limit_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | This configuration parameter sets the maximum number of time series allowed in the cluster. | +| Type | Int32 | +| Default | -1 | +| Effective | Restart required. | + +- cluster_device_limit_threshold + +| Name | cluster_device_limit_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | This configuration parameter sets the maximum number of devices allowed in the cluster. | +| Type | Int32 | +| Default | -1 | +| Effective | Restart required. | + +- database_limit_threshold + +| Name | database_limit_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | This configuration parameter sets the maximum number of Cluster Databases allowed. | +| Type | Int32 | +| Default | -1 | +| Effective | Restart required. | + +### 4.16 Configurations for creating schema automatically + +- enable_auto_create_schema + +| Name | enable_auto_create_schema | +| ----------- | ------------------------------------------------ | +| Description | Whether creating schema automatically is enabled | +| Value | true or false | +| Default | true | +| Effective | Restart required. | + +- default_storage_group_level + +| Name | default_storage_group_level | +| ----------- | ------------------------------------------------------------ | +| Description | Database level when creating schema automatically is enabled e.g. root.sg0.d1.s2We will set root.sg0 as the database if database level is 1If the incoming path is shorter than this value, the creation/insertion will fail. | +| Value | int32 | +| Default | 1 | +| Effective | Restart required. 
| + +- boolean_string_infer_type + +| Name | boolean_string_infer_type | +| ----------- |------------------------------------------------------------------------------------| +| Description | register time series as which type when receiving boolean string "true" or "false" | +| Value | BOOLEAN or TEXT | +| Default | BOOLEAN | +| Effective | Hot_reload | + +- integer_string_infer_type + +| Name | integer_string_infer_type | +| ----------- |------------------------------------------------------------------------------------------------------------------| +| Description | register time series as which type when receiving an integer string and using float or double may lose precision | +| Value | INT32, INT64, FLOAT, DOUBLE, TEXT | +| Default | DOUBLE | +| Effective | Hot_reload | + +- floating_string_infer_type + +| Name | floating_string_infer_type | +| ----------- |----------------------------------------------------------------------------------| +| Description | register time series as which type when receiving a floating number string "6.7" | +| Value | DOUBLE, FLOAT or TEXT | +| Default | DOUBLE | +| Effective | Hot_reload | + +- nan_string_infer_type + +| Name | nan_string_infer_type | +| ----------- |--------------------------------------------------------------------| +| Description | register time series as which type when receiving the Literal NaN. | +| Value | DOUBLE, FLOAT or TEXT | +| Default | DOUBLE | +| Effective | Hot_reload | + +- default_boolean_encoding + +| Name | default_boolean_encoding | +| ----------- |----------------------------------------------------------------| +| Description | BOOLEAN encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE | +| Default | RLE | +| Effective | Hot_reload | + +- default_int32_encoding + +| Name | default_int32_encoding | +| ----------- |--------------------------------------------------------------| +| Description | INT32 encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| Default | TS_2DIFF | +| Effective | Hot_reload | + +- default_int64_encoding + +| Name | default_int64_encoding | +| ----------- |--------------------------------------------------------------| +| Description | INT64 encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| Default | TS_2DIFF | +| Effective | Hot_reload | + +- default_float_encoding + +| Name | default_float_encoding | +| ----------- |--------------------------------------------------------------| +| Description | FLOAT encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE, TS_2DIFF, GORILLA | +| Default | GORILLA | +| Effective | Hot_reload | + +- default_double_encoding + +| Name | default_double_encoding | +| ----------- |---------------------------------------------------------------| +| Description | DOUBLE encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE, TS_2DIFF, GORILLA | +| Default | GORILLA | +| Effective | Hot_reload | + +- default_text_encoding + +| Name | default_text_encoding | +| ----------- |-------------------------------------------------------------| +| Description | TEXT encoding when creating schema automatically is enabled | +| Value | PLAIN | +| Default | PLAIN | +| Effective | Hot_reload | + + +* boolean_compressor + +| Name | boolean_compressor | +|------------------|-----------------------------------------------------------------------------------------| +| Description | 
BOOLEAN compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* int32_compressor + +| Name | int32_compressor | +|----------------------|--------------------------------------------------------------------------------------------| +| Description | INT32/DATE compression when creating schema automatically is enabled(Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* int64_compressor + +| Name | int64_compressor | +|--------------------|-------------------------------------------------------------------------------------------------| +| Description | INT64/TIMESTAMP compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* float_compressor + +| Name | float_compressor | +|-----------------------|---------------------------------------------------------------------------------------| +| Description | FLOAT compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* double_compressor + +| Name | double_compressor | +|-------------------|----------------------------------------------------------------------------------------| +| Description | DOUBLE compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* text_compressor + +| Name | text_compressor | +|--------------------|--------------------------------------------------------------------------------------------------| +| Description | TEXT/BINARY/BLOB compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + + +### 4.17 Query Configurations + +- read_consistency_level + +| Name | read_consistency_level | +| ----------- | ------------------------------------------------------------ | +| Description | The read consistency levelThese consistency levels are currently supported:strong(Default, read from the leader replica)weak(Read from a random replica) | +| Type | String | +| Default | strong | +| Effective | Restart required. | + +- meta_data_cache_enable + +| Name | meta_data_cache_enable | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to cache meta data (BloomFilter, ChunkMetadata and TimeSeriesMetadata) or not. | +| Type | Boolean | +| Default | true | +| Effective | Restart required. | + +- chunk_timeseriesmeta_free_memory_proportion + +| Name | chunk_timeseriesmeta_free_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Read memory Allocation Ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others.The parameter form is a:b:c:d:e:f:g:h, where a, b, c, d, e, f, g and h are integers. for example: 1:1:1:1:1:1:1:1 , 1:100:200:50:200:200:200:50 | +| Type | String | +| Default | 1 : 100 : 200 : 300 : 400 | +| Effective | Restart required. | + +- enable_last_cache + +| Name | enable_last_cache | +| ----------- | ---------------------------- | +| Description | Whether to enable LAST cache | +| Type | Boolean | +| Default | true | +| Effective | Restart required. 
| + +- mpp_data_exchange_core_pool_size + +| Name | mpp_data_exchange_core_pool_size | +| ----------- | -------------------------------------------- | +| Description | Core size of ThreadPool of MPP data exchange | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- mpp_data_exchange_max_pool_size + +| Name | mpp_data_exchange_max_pool_size | +| ----------- | ------------------------------------------- | +| Description | Max size of ThreadPool of MPP data exchange | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- mpp_data_exchange_keep_alive_time_in_ms + +| Name | mpp_data_exchange_keep_alive_time_in_ms | +| ----------- | --------------------------------------- | +| Description | Max waiting time for MPP data exchange | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- driver_task_execution_time_slice_in_ms + +| Name | driver_task_execution_time_slice_in_ms | +| ----------- | -------------------------------------- | +| Description | The max execution time of a DriverTask | +| Type | int32 | +| Default | 200 | +| Effective | Restart required. | + +- max_tsblock_size_in_bytes + +| Name | max_tsblock_size_in_bytes | +| ----------- | ----------------------------- | +| Description | The max capacity of a TsBlock | +| Type | int32 | +| Default | 131072 | +| Effective | Restart required. | + +- max_tsblock_line_numbers + +| Name | max_tsblock_line_numbers | +| ----------- | ------------------------------------------- | +| Description | The max number of lines in a single TsBlock | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- slow_query_threshold + +| Name | slow_query_threshold | +| ----------- | -------------------------------------- | +| Description | Time cost(ms) threshold for slow query | +| Type | long | +| Default | 10000 | +| Effective | Hot reload | + +- query_cost_stat_window + +| Name | query_cost_stat_window | +|-------------|--------------------| +| Description | Time window threshold(min) for record of history queries. | +| Type | Int32 | +| Default | 0 | +| Effective | Hot reload | + +- query_timeout_threshold + +| Name | query_timeout_threshold | +| ----------- | ----------------------------------------- | +| Description | The max executing time of query. unit: ms | +| Type | Int32 | +| Default | 60000 | +| Effective | Restart required. | + +- max_allowed_concurrent_queries + +| Name | max_allowed_concurrent_queries | +| ----------- | -------------------------------------------------- | +| Description | The maximum allowed concurrently executing queries | +| Type | Int32 | +| Default | 1000 | +| Effective | Restart required. | + +- query_thread_count + +| Name | query_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | How many threads can concurrently execute query statement. When <= 0, use CPU core number. | +| Type | Int32 | +| Default | 0 | +| Effective | Restart required. | + +- degree_of_query_parallelism + +| Name | degree_of_query_parallelism | +| ----------- | ------------------------------------------------------------ | +| Description | How many pipeline drivers will be created for one fragment instance. When <= 0, use CPU core number / 2. | +| Type | Int32 | +| Default | 0 | +| Effective | Restart required. 
| + +- mode_map_size_threshold + +| Name | mode_map_size_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | The threshold of count map size when calculating the MODE aggregation function | +| Type | Int32 | +| Default | 10000 | +| Effective | Restart required. | + +- batch_size + +| Name | batch_size | +| ----------- | ------------------------------------------------------------ | +| Description | The amount of data iterate each time in server (the number of data strips, that is, the number of different timestamps.) | +| Type | Int32 | +| Default | 100000 | +| Effective | Restart required. | + +- sort_buffer_size_in_bytes + +| Name | sort_buffer_size_in_bytes | +| ----------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Description | The memory for external sort in sort operator, when the data size is smaller than sort_buffer_size_in_bytes, the sort operator will use in-memory sort. | +| Type | long | +| Default | 1048576(Before V2.0.6)
0(Supports from V2.0.6), if `sort_buffer_size_in_bytes <= 0`, default value will be used, `default value = min(32MB, memory for query operators / query_thread_count / 2)`, if `sort_buffer_size_in_bytes > 0`, the specified value will be used. | +| Effective | Hot_reload | + +- merge_threshold_of_explain_analyze + +| Name | merge_threshold_of_explain_analyze | +| ----------- | ------------------------------------------------------------ | +| Description | The threshold of operator count in the result set of EXPLAIN ANALYZE, if the number of operator in the result set is larger than this threshold, operator will be merged. | +| Type | int | +| Default | 10 | +| Effective | Hot reload | + +### 4.18 TTL Configuration + +- ttl_check_interval + +| Name | ttl_check_interval | +| ----------- | ------------------------------------------------------------ | +| Description | The interval of TTL check task in each database. The TTL check task will inspect and select files with a higher volume of expired data for compaction. Default is 2 hours. | +| Type | int | +| Default | 7200000 | +| Effective | Restart required. | + +- max_expired_time + +| Name | max_expired_time | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum expiring time of device which has a ttl. Default is 1 month.If the data elapsed time (current timestamp minus the maximum data timestamp of the device in the file) of such devices exceeds this value, then the file will be cleaned by compaction. | +| Type | int | +| Default | 2592000000 | +| Effective | Restart required. | + +- expired_data_ratio + +| Name | expired_data_ratio | +| ----------- | ------------------------------------------------------------ | +| Description | The expired device ratio. If the ratio of expired devices in one file exceeds this value, then expired data of this file will be cleaned by compaction. | +| Type | float | +| Default | 0.3 | +| Effective | Restart required. | + +### 4.19 Storage Engine Configuration + +- timestamp_precision + +| Name | timestamp_precision | +| ----------- | ------------------------------------------------------------ | +| Description | Use this value to set timestamp precision as "ms", "us" or "ns". | +| Type | String | +| Default | ms | +| Effective | Modify before the first startup. | + +- timestamp_precision_check_enabled + +| Name | timestamp_precision_check_enabled | +| ----------- | ------------------------------------------------------------ | +| Description | When the timestamp precision check is enabled, the timestamps those are over 13 digits for ms precision, or over 16 digits for us precision are not allowed to be inserted. | +| Type | Boolean | +| Default | true | +| Effective | Modify before the first startup. | + +- max_waiting_time_when_insert_blocked + +| Name | max_waiting_time_when_insert_blocked | +| ----------- | ------------------------------------------------------------ | +| Description | When the waiting time (in ms) of an inserting exceeds this, throw an exception. 10000 by default. | +| Type | Int32 | +| Default | 10000 | +| Effective | Restart required. | + +- handle_system_error + +| Name | handle_system_error | +| ----------- | -------------------------------------------------------- | +| Description | What will the system do when unrecoverable error occurs. | +| Type | String | +| Default | CHANGE_TO_READ_ONLY | +| Effective | Restart required. 
| + +- enable_timed_flush_seq_memtable + +| Name | enable_timed_flush_seq_memtable | +| ----------- | --------------------------------------------------- | +| Description | Whether to timed flush sequence tsfiles' memtables. | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- seq_memtable_flush_interval_in_ms + +| Name | seq_memtable_flush_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | If a memTable's last update time is older than current time minus this, the memtable will be flushed to disk. | +| Type | long | +| Default | 600000 | +| Effective | Hot reload | + +- seq_memtable_flush_check_interval_in_ms + +| Name | seq_memtable_flush_check_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | The interval to check whether sequence memtables need flushing. | +| Type | long | +| Default | 30000 | +| Effective | Hot reload | + +- enable_timed_flush_unseq_memtable + +| Name | enable_timed_flush_unseq_memtable | +| ----------- | ----------------------------------------------------- | +| Description | Whether to timed flush unsequence tsfiles' memtables. | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- unseq_memtable_flush_interval_in_ms + +| Name | unseq_memtable_flush_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | If a memTable's last update time is older than current time minus this, the memtable will be flushed to disk. | +| Type | long | +| Default | 600000 | +| Effective | Hot reload | + +- unseq_memtable_flush_check_interval_in_ms + +| Name | unseq_memtable_flush_check_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | The interval to check whether unsequence memtables need flushing. | +| Type | long | +| Default | 30000 | +| Effective | Hot reload | + +- tvlist_sort_algorithm + +| Name | tvlist_sort_algorithm | +| ----------- | ------------------------------------------------- | +| Description | The sort algorithms used in the memtable's TVList | +| Type | String | +| Default | TIM | +| Effective | Restart required. | + +- avg_series_point_number_threshold + +| Name | avg_series_point_number_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | When the average point number of timeseries in memtable exceeds this, the memtable is flushed to disk. | +| Type | int32 | +| Default | 100000 | +| Effective | Restart required. | + +- flush_thread_count + +| Name | flush_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | How many threads can concurrently flush. When <= 0, use CPU core number. | +| Type | int32 | +| Default | 0 | +| Effective | Restart required. | + +- enable_partial_insert + +| Name | enable_partial_insert | +| ----------- | ------------------------------------------------------------ | +| Description | In one insert (one device, one timestamp, multiple measurements), if enable partial insert, one measurement failure will not impact other measurements | +| Type | Boolean | +| Default | true | +| Effective | Restart required. 
|
+
+- recovery_log_interval_in_ms
+
+| Name | recovery_log_interval_in_ms |
+| ----------- | ----------- |
+| Description | The interval at which the recovery progress of each data region is logged when starting IoTDB. |
+| Type | Int32 |
+| Default | 5000 |
+| Effective | Restart required. |
+
+- 0.13_data_insert_adapt
+
+| Name | 0.13_data_insert_adapt |
+| ----------- | ----------- |
+| Description | If using a v0.13 client to insert data, please set this configuration to true. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- enable_tsfile_validation
+
+| Name | enable_tsfile_validation |
+| ----------- | ----------- |
+| Description | Verify that TsFiles generated by Flush, Load, and Compaction are correct. |
+| Type | boolean |
+| Default | false |
+| Effective | Hot reload |
+
+- tier_ttl_in_ms
+
+| Name | tier_ttl_in_ms |
+| ----------- | ----------- |
+| Description | Default tier TTL. When the survival time of the data exceeds this threshold, the data will be migrated to the next tier. |
+| Type | long |
+| Default | -1 |
+| Effective | Restart required. |
+
+- max_object_file_size_in_byte
+
+| Name | max_object_file_size_in_byte |
+| ----------- | ----------- |
+| Description | Maximum size limit for a single object file (supported since V2.0.8-beta). |
+| Type | long |
+| Default | 4294967296 (4 GB in bytes) |
+| Effective | Hot reload |
+
+- restrict_object_limit
+
+| Name | restrict_object_limit |
+| ----------- | ----------- |
+| Description | Whether to restrict table names, column names, and device identifiers for the `OBJECT` type (supported since V2.0.8-beta). When set to `false` (default), there are no special restrictions. When set to `true` and the table contains `OBJECT` columns, the following restrictions apply:
1. Naming Rules: Values in TAG columns, table names, and field names must not use `.` or `..`; Prohibited characters include `./` or `.\`, otherwise metadata creation will fail; Names containing filesystem-unsupported characters will cause write errors.
2. Case Sensitivity: If the underlying filesystem is case-insensitive, device identifiers like `'d1'` and `'D1'` are treated as identical; Creating similar identifiers may overwrite `OBJECT` data files, leading to data corruption.
3. Storage Path: Actual storage path format: `${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin` | +| Type | boolean | +| Default | false | +| Effective | Can only be modified before the first service startup. | + +### 4.20 Compaction Configurations + +- enable_seq_space_compaction + +| Name | enable_seq_space_compaction | +| ----------- | ---------------------------------------------------------- | +| Description | sequence space compaction: only compact the sequence files | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- enable_unseq_space_compaction + +| Name | enable_unseq_space_compaction | +| ----------- | ------------------------------------------------------------ | +| Description | unsequence space compaction: only compact the unsequence files | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- enable_cross_space_compaction + +| Name | enable_cross_space_compaction | +| ----------- | ------------------------------------------------------------ | +| Description | cross space compaction: compact the unsequence files into the overlapped sequence files | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- enable_auto_repair_compaction + +| Name | enable_auto_repair_compaction | +| ----------- | ---------------------------------------------- | +| Description | enable auto repair unsorted file by compaction | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- cross_selector + +| Name | cross_selector | +| ----------- | ------------------------------------------- | +| Description | the selector of cross space compaction task | +| Type | String | +| Default | rewrite | +| Effective | Restart required. | + +- cross_performer + +| Name | cross_performer | +| ----------- |-----------------------------------------------------------| +| Description | the compaction performer of cross space compaction task, Options: read_point, fast | +| Type | String | +| Default | fast | +| Effective | Hot reload . 
|
+
+- inner_seq_selector
+
+| Name | inner_seq_selector |
+| ----------- | ----------- |
+| Description | The selector of the inner sequence space compaction task. Options: size_tiered_single_target, size_tiered_multi_target. |
+| Type | String |
+| Default | size_tiered_multi_target |
+| Effective | Hot reload |
+
+- inner_seq_performer
+
+| Name | inner_seq_performer |
+| ----------- | ----------- |
+| Description | The performer of the inner sequence space compaction task. Options: read_chunk, fast. |
+| Type | String |
+| Default | read_chunk |
+| Effective | Hot reload |
+
+- inner_unseq_selector
+
+| Name | inner_unseq_selector |
+| ----------- | ----------- |
+| Description | The selector of the inner unsequence space compaction task. Options: size_tiered_single_target, size_tiered_multi_target. |
+| Type | String |
+| Default | size_tiered_multi_target |
+| Effective | Hot reload |
+
+- inner_unseq_performer
+
+| Name | inner_unseq_performer |
+| ----------- | ----------- |
+| Description | The performer of the inner unsequence space compaction task. Options: read_point, fast. |
+| Type | String |
+| Default | fast |
+| Effective | Hot reload |
+
+- compaction_priority
+
+| Name | compaction_priority |
+| ----------- | ----------- |
+| Description | The priority of compaction execution. INNER_CROSS: prioritize inner space compaction, reducing the number of files first. CROSS_INNER: prioritize cross space compaction, eliminating unsequence files first. BALANCE: alternate between the two compaction types. |
+| Type | String |
+| Default | INNER_CROSS |
+| Effective | Restart required. |
+
+- candidate_compaction_task_queue_size
+
+| Name | candidate_compaction_task_queue_size |
+| ----------- | ----------- |
+| Description | The size of the candidate compaction task queue. |
+| Type | int32 |
+| Default | 50 |
+| Effective | Restart required. |
+
+- target_compaction_file_size
+
+| Name | target_compaction_file_size |
+| ----------- | ----------- |
+| Description | This parameter is used in two places: (1) the target TsFile size of inner space compaction; (2) in cross space compaction, the candidate size of a sequence TsFile must be smaller than target_compaction_file_size * 1.5. In most cases, the target file size of cross compaction won't exceed this threshold, and if it does, it will not be much larger. |
+| Type | Int64 |
+| Default | 2147483648 |
+| Effective | Hot reload |
+
+- inner_compaction_total_file_size_threshold
+
+| Name | inner_compaction_total_file_size_threshold |
+| ----------- | ----------- |
+| Description | The total file size limit in inner space compaction. |
+| Type | int64 |
+| Default | 10737418240 |
+| Effective | Hot reload |
+
+- inner_compaction_total_file_num_threshold
+
+| Name | inner_compaction_total_file_num_threshold |
+| ----------- | ----------- |
+| Description | The total file num limit in inner space compaction.
| +| Type | int32 | +| Default | 100 | +| Effective | Hot reload | + +- max_level_gap_in_inner_compaction + +| Name | max_level_gap_in_inner_compaction | +| ----------- | ----------------------------------------------- | +| Description | The max level gap in inner compaction selection | +| Type | int32 | +| Default | 2 | +| Effective | Hot reload | + +- target_chunk_size + +| Name | target_chunk_size | +| ----------- | ------------------------------------------------------------ | +| Description | The target chunk size in flushing and compaction. If the size of a timeseries in memtable exceeds this, the data will be flushed to multiple chunks.| +| Type | Int64 | +| Default | 1600000 | +| Effective | Restart required. | + +- target_chunk_point_num + +| Name | target_chunk_point_num | +| ----------- |-----------------------------------------------------------------| +| Description | The target point nums in one chunk in flushing and compaction. If the point number of a timeseries in memtable exceeds this, the data will be flushed to multiple chunks. | +| Type | Int64 | +| Default | 100000 | +| Effective | Restart required. | + +- chunk_size_lower_bound_in_compaction + +| Name | chunk_size_lower_bound_in_compaction | +| ----------- | ------------------------------------------------------------ | +| Description | If the chunk size is lower than this threshold, it will be deserialized into points | +| Type | Int64 | +| Default | 128 | +| Effective | Restart required. | + +- chunk_point_num_lower_bound_in_compaction + +| Name | chunk_point_num_lower_bound_in_compaction | +| ----------- |------------------------------------------------------------------------------------------| +| Description | If the chunk point num is lower than this threshold, it will be deserialized into points | +| Type | Int64 | +| Default | 100 | +| Effective | Restart required. | + +- inner_compaction_candidate_file_num + +| Name | inner_compaction_candidate_file_num | +| ----------- | ------------------------------------------------------------ | +| Description | The file num requirement when selecting inner space compaction candidate files | +| Type | int32 | +| Default | 30 | +| Effective | Hot reload | + +- max_cross_compaction_candidate_file_num + +| Name | max_cross_compaction_candidate_file_num | +| ----------- | ------------------------------------------------------------ | +| Description | The max file when selecting cross space compaction candidate files | +| Type | int32 | +| Default | 500 | +| Effective | Hot reload | + +- max_cross_compaction_candidate_file_size + +| Name | max_cross_compaction_candidate_file_size | +| ----------- | ------------------------------------------------------------ | +| Description | The max total size when selecting cross space compaction candidate files | +| Type | Int64 | +| Default | 5368709120 | +| Effective | Hot reload | + +- min_cross_compaction_unseq_file_level + +| Name | min_cross_compaction_unseq_file_level | +| ----------- | ------------------------------------------------------------ | +| Description | The min inner compaction level of unsequence file which can be selected as candidate | +| Type | int32 | +| Default | 1 | +| Effective | Hot reload | + +- compaction_thread_count + +| Name | compaction_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | How many threads will be set up to perform compaction, 10 by default. 
|
+| Type | int32 |
+| Default | 10 |
+| Effective | Hot reload |
+
+- compaction_max_aligned_series_num_in_one_batch
+
+| Name | compaction_max_aligned_series_num_in_one_batch |
+| ----------- | ----------- |
+| Description | How many chunks will be compacted in aligned series compaction, 10 by default. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Hot reload |
+
+- compaction_schedule_interval_in_ms
+
+| Name | compaction_schedule_interval_in_ms |
+| ----------- | ----------- |
+| Description | The interval of compaction task scheduling. |
+| Type | Int64 |
+| Default | 60000 |
+| Effective | Restart required. |
+
+- compaction_write_throughput_mb_per_sec
+
+| Name | compaction_write_throughput_mb_per_sec |
+| ----------- | ----------- |
+| Description | The write throughput limit (MB/s) that compaction can reach per second. |
+| Type | int32 |
+| Default | 16 |
+| Effective | Restart required. |
+
+- compaction_read_throughput_mb_per_sec
+
+| Name | compaction_read_throughput_mb_per_sec |
+| ----------- | ----------- |
+| Description | The read throughput limit (MB/s) that compaction can reach per second. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Hot reload |
+
+- compaction_read_operation_per_sec
+
+| Name | compaction_read_operation_per_sec |
+| ----------- | ----------- |
+| Description | The limit of read operations that compaction can reach per second. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Hot reload |
+
+- sub_compaction_thread_count
+
+| Name | sub_compaction_thread_count |
+| ----------- | ----------- |
+| Description | The number of sub compaction threads to be set up to perform compaction. |
+| Type | int32 |
+| Default | 4 |
+| Effective | Hot reload |
+
+- inner_compaction_task_selection_disk_redundancy
+
+| Name | inner_compaction_task_selection_disk_redundancy |
+| ----------- | ----------- |
+| Description | Redundancy value of disk availability, only used for inner compaction. |
+| Type | double |
+| Default | 0.05 |
+| Effective | Hot reload |
+
+- inner_compaction_task_selection_mods_file_threshold
+
+| Name | inner_compaction_task_selection_mods_file_threshold |
+| ----------- | ----------- |
+| Description | Mods file size threshold, only used for inner compaction. |
+| Type | long |
+| Default | 131072 |
+| Effective | Hot reload |
+
+- compaction_schedule_thread_num
+
+| Name | compaction_schedule_thread_num |
+| ----------- | ----------- |
+| Description | The number of threads to be set up to select compaction tasks. |
+| Type | int32 |
+| Default | 4 |
+| Effective | Hot reload |
+
+### 4.21 Write Ahead Log Configuration
+
+- wal_mode
+
+| Name | wal_mode |
+| ----------- | ----------- |
+| Description | The write-ahead log mode. DISABLE: the system disables WAL. SYNC: the system submits WAL synchronously; a write request does not return until its WAL is fsynced to disk successfully. ASYNC: the system submits WAL asynchronously; a write request returns immediately regardless of whether its WAL has been fsynced to disk successfully. |
+| Type | String |
+| Default | ASYNC |
+| Effective | Restart required. |
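+
+For example, a deployment that cannot tolerate losing acknowledged writes might switch to synchronous WAL. A minimal sketch of the relevant `iotdb-system.properties` lines (values are illustrative, and `wal_mode` requires a restart to take effect):
+
+```properties
+# fsync every WAL entry before acknowledging the write
+wal_mode=SYNC
+# how long a WAL flush waits before calling fsync in sync mode (see below)
+wal_sync_mode_fsync_delay_in_ms=3
+```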
+
+- max_wal_nodes_num
+
+| Name | max_wal_nodes_num |
+| ----------- | ----------- |
+| Description | Each node corresponds to one WAL directory. The default value 0 means the number is determined by the system, within the range [data region num / 2, data region num]. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Restart required. |
+
+- wal_async_mode_fsync_delay_in_ms
+
+| Name | wal_async_mode_fsync_delay_in_ms |
+| ----------- | ----------- |
+| Description | Duration a WAL flush operation will wait before calling fsync in the async mode. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Hot reload |
+
+- wal_sync_mode_fsync_delay_in_ms
+
+| Name | wal_sync_mode_fsync_delay_in_ms |
+| ----------- | ----------- |
+| Description | Duration a WAL flush operation will wait before calling fsync in the sync mode. |
+| Type | int32 |
+| Default | 3 |
+| Effective | Hot reload |
+
+- wal_buffer_size_in_byte
+
+| Name | wal_buffer_size_in_byte |
+| ----------- | ----------- |
+| Description | Buffer size of each WAL node. |
+| Type | int32 |
+| Default | 33554432 |
+| Effective | Restart required. |
+
+- wal_buffer_queue_capacity
+
+| Name | wal_buffer_queue_capacity |
+| ----------- | ----------- |
+| Description | Buffer capacity of each WAL queue. |
+| Type | int32 |
+| Default | 500 |
+| Effective | Restart required. |
+
+- wal_file_size_threshold_in_byte
+
+| Name | wal_file_size_threshold_in_byte |
+| ----------- | ----------- |
+| Description | Size threshold of each WAL file. |
+| Type | int32 |
+| Default | 31457280 |
+| Effective | Hot reload |
+
+- wal_min_effective_info_ratio
+
+| Name | wal_min_effective_info_ratio |
+| ----------- | ----------- |
+| Description | Minimum ratio of effective information in WAL files. |
+| Type | double |
+| Default | 0.1 |
+| Effective | Hot reload |
+
+- wal_memtable_snapshot_threshold_in_byte
+
+| Name | wal_memtable_snapshot_threshold_in_byte |
+| ----------- | ----------- |
+| Description | MemTable size threshold for triggering a MemTable snapshot in WAL. |
+| Type | int64 |
+| Default | 8388608 |
+| Effective | Hot reload |
+
+- max_wal_memtable_snapshot_num
+
+| Name | max_wal_memtable_snapshot_num |
+| ----------- | ----------- |
+| Description | A MemTable's max snapshot number in WAL. |
+| Type | int32 |
+| Default | 1 |
+| Effective | Hot reload |
+
+- delete_wal_files_period_in_ms
+
+| Name | delete_wal_files_period_in_ms |
+| ----------- | ----------- |
+| Description | The period at which outdated WAL files are periodically deleted. |
+| Type | int64 |
+| Default | 20000 |
+| Effective | Hot reload |
+
+- wal_throttle_threshold_in_byte
+
+| Name | wal_throttle_threshold_in_byte |
+| ----------- | ----------- |
+| Description | The minimum size of WAL files when throttling down in IoTConsensus. |
+| Type | long |
+| Default | 53687091200 |
+| Effective | Hot reload |
+
+- iot_consensus_cache_window_time_in_ms
+
+| Name | iot_consensus_cache_window_time_in_ms |
+| ----------- | ----------- |
+| Description | Maximum wait time of the write cache in IoTConsensus. |
+| Type | long |
+| Default | -1 |
+| Effective | Hot reload |
+
+- enable_wal_compression
+
+| Name | enable_wal_compression |
+| ----------- | ----------- |
+| Description | Enable Write Ahead Log compression. |
+| Type | boolean |
+| Default | true |
+| Effective | Hot reload |
+
+### 4.22 **IoTConsensus Configuration**
+
+- data_region_iot_max_log_entries_num_per_batch
+
+| Name | data_region_iot_max_log_entries_num_per_batch |
+| ----------- | ----------- |
+| Description | The maximum number of log entries in an IoTConsensus batch. |
+| Type | int32 |
+| Default | 1024 |
+| Effective | Restart required. |
+
+- data_region_iot_max_size_per_batch
+
+| Name | data_region_iot_max_size_per_batch |
+| ----------- | ----------- |
+| Description | The maximum size of an IoTConsensus batch. |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- data_region_iot_max_pending_batches_num
+
+| Name | data_region_iot_max_pending_batches_num |
+| ----------- | ----------- |
+| Description | The maximum number of pending batches in IoTConsensus. |
+| Type | int32 |
+| Default | 5 |
+| Effective | Restart required. |
+
+- data_region_iot_max_memory_ratio_for_queue
+
+| Name | data_region_iot_max_memory_ratio_for_queue |
+| ----------- | ----------- |
+| Description | The maximum memory ratio for the queue in IoTConsensus. |
+| Type | double |
+| Default | 0.6 |
+| Effective | Restart required. |
+
+- region_migration_speed_limit_bytes_per_second
+
+| Name | region_migration_speed_limit_bytes_per_second |
+| ----------- | ----------- |
+| Description | The maximum transfer size in bytes per second for region migration. |
+| Type | long |
+| Default | 33554432 |
+| Effective | Restart required. |
+
+### 4.23 TsFile Configurations
+
+- group_size_in_byte
+
+| Name | group_size_in_byte |
+| ----------- | ----------- |
+| Description | The maximum number of bytes written to disk each time the data in memory is flushed to disk. |
+| Type | int32 |
+| Default | 134217728 |
+| Effective | Hot reload |
+
+- page_size_in_byte
+
+| Name | page_size_in_byte |
+| ----------- | ----------- |
+| Description | The memory size for each series writer to pack a page; default value is 64KB. |
+| Type | int32 |
+| Default | 65536 |
+| Effective | Hot reload |
+
+- max_number_of_points_in_page
+
+| Name | max_number_of_points_in_page |
+| ----------- | ----------- |
+| Description | The maximum number of data points in a page. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Hot reload |
+
+- pattern_matching_threshold
+
+| Name | pattern_matching_threshold |
+| ----------- | ----------- |
+| Description | The threshold for pattern matching in regex. |
+| Type | int32 |
+| Default | 1000000 |
+| Effective | Hot reload |
+
+- float_precision
+
+| Name | float_precision |
+| ----------- | ----------- |
+| Description | Floating-point precision of query results. Only effective for RLE and TS_2DIFF encodings. Due to the limitation of machine precision, some values may not be interpreted strictly. |
+| Type | int32 |
+| Default | 2 |
+| Effective | Hot reload |
+
+- value_encoder
+
+| Name | value_encoder |
+| ----------- | ----------- |
+| Description | Encoder of value series; default value is PLAIN. For int and long data types, TS_2DIFF, RLE (run-length encoding), GORILLA and ZIGZAG are also supported. |
+| Type | String |
+| Default | PLAIN |
+| Effective | Hot reload |
+
+- compressor
+
+| Name | compressor |
+| ----------- | ----------- |
+| Description | Data compression method; also used as the default compressor of the time column in aligned timeseries. Supports UNCOMPRESSED, SNAPPY, ZSTD, LZMA2 or LZ4. |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- encrypt_flag
+
+| Name | encrypt_flag |
+| ----------- | ----------- |
+| Description | Enable data encryption. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- encrypt_type
+
+| Name | encrypt_type |
+| ----------- | ----------- |
+| Description | The method of data encryption. |
+| Type | String |
+| Default | org.apache.tsfile.encrypt.UNENCRYPTED |
+| Effective | Restart required. |
+
+- encrypt_key_path
+
+| Name | encrypt_key_path |
+| ----------- | ----------- |
+| Description | The path of the key for data encryption. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+### 4.24 Authorization Configuration
+
+- authorizer_provider_class
+
+| Name | authorizer_provider_class |
+| ----------- | ----------- |
+| Description | Which class to serve for authorization. |
+| Type | String |
+| Default | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer |
+| Effective | Restart required. |
+| Other options | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer |
+
+- openID_url
+
+| Name | openID_url |
+| ----------- | ----------- |
+| Description | The URL of the OpenID server. If OpenIdAuthorizer is enabled, openID_url must be set. |
+| Type | String (an HTTP link) |
+| Default | None |
+| Effective | Restart required. |
+
+- iotdb_server_encrypt_decrypt_provider
+
+| Name | iotdb_server_encrypt_decrypt_provider |
+| ----------- | ----------- |
+| Description | Encryption provider class. |
+| Type | String |
+| Default | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt |
+| Effective | Modify before the first startup. |
+
+- iotdb_server_encrypt_decrypt_provider_parameter
+
+| Name | iotdb_server_encrypt_decrypt_provider_parameter |
+| ----------- | ----------- |
+| Description | Parameter of the encryption provider class. |
+| Type | String |
+| Default | None |
+| Effective | Modify before the first startup. |
+
+- author_cache_size
+
+| Name | author_cache_size |
+| ----------- | ----------- |
+| Description | Cache size of users and roles. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- author_cache_expire_time
+
+| Name | author_cache_expire_time |
+| ----------- | ----------- |
+| Description | Cache expiration time of users and roles. |
+| Type | int32 |
+| Default | 30 |
+| Effective | Restart required. |
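+
+If OpenID-based authorization is used instead of the default local-file authorizer, the two parameters above must be set together. A minimal sketch, where the server URL is a hypothetical placeholder:
+
+```properties
+# switch from the default LocalFileAuthorizer to OpenID
+authorizer_provider_class=org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer
+# hypothetical OpenID provider endpoint; replace with your identity server
+openID_url=https://auth.example.com/realms/iotdb
+```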
+
+### 4.25 UDF Configuration
+
+- udf_initial_byte_array_length_for_memory_control
+
+| Name | udf_initial_byte_array_length_for_memory_control |
+| ----------- | ----------- |
+| Description | Used to estimate the memory usage of text fields in a UDF query. It is recommended to set this value slightly larger than the average length of all text records. |
+| Type | int32 |
+| Default | 48 |
+| Effective | Restart required. |
+
+- udf_memory_budget_in_mb
+
+| Name | udf_memory_budget_in_mb |
+| ----------- | ----------- |
+| Description | How much memory may be used in ONE UDF query (in MB). The upper limit is 20% of the memory allocated for reads. |
+| Type | Float |
+| Default | 30.0 |
+| Effective | Restart required. |
+
+- udf_reader_transformer_collector_memory_proportion
+
+| Name | udf_reader_transformer_collector_memory_proportion |
+| ----------- | ----------- |
+| Description | UDF memory allocation ratio. The parameter takes the form a:b:c, where a, b, and c are integers. |
+| Type | String |
+| Default | 1:1:1 |
+| Effective | Restart required. |
+
+- udf_lib_dir
+
+| Name | udf_lib_dir |
+| ----------- | ----------- |
+| Description | The UDF lib directory. |
+| Type | String |
+| Default | ext/udf (Windows: ext\\udf) |
+| Effective | Restart required. |
+
+### 4.26 Trigger Configuration
+
+- trigger_lib_dir
+
+| Name | trigger_lib_dir |
+| ----------- | ----------- |
+| Description | The trigger lib directory. |
+| Type | String |
+| Default | ext/trigger |
+| Effective | Restart required. |
+
+- stateful_trigger_retry_num_when_not_found
+
+| Name | stateful_trigger_retry_num_when_not_found |
+| ----------- | ----------- |
+| Description | How many times to retry finding an instance of a stateful trigger on DataNodes. |
+| Type | Int32 |
+| Default | 3 |
+| Effective | Restart required. |
+
+### 4.27 **Select-Into Configuration**
+
+- into_operation_buffer_size_in_byte
+
+| Name | into_operation_buffer_size_in_byte |
+| ----------- | ----------- |
+| Description | The maximum memory occupied by the data to be written when executing select-into statements. |
+| Type | long |
+| Default | 104857600 |
+| Effective | Hot reload |
+
+- select_into_insert_tablet_plan_row_limit
+
+| Name | select_into_insert_tablet_plan_row_limit |
+| ----------- | ----------- |
+| Description | The maximum number of rows that can be processed in one insert-tablet-plan when executing select-into statements. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Hot reload |
+
+- into_operation_execution_thread_count
+
+| Name | into_operation_execution_thread_count |
+| ----------- | ----------- |
+| Description | The number of threads in the thread pool that executes insert-tablet tasks. |
+| Type | int32 |
+| Default | 2 |
+| Effective | Restart required. |
+
+### 4.28 Continuous Query Configuration
+
+- continuous_query_submit_thread_count
+
+| Name | continuous_query_submit_thread_count |
+| ----------- | ----------- |
+| Description | The number of threads in the scheduled thread pool that submit continuous query tasks periodically. |
+| Type | int32 |
+| Default | 2 |
+| Effective | Restart required. |
+
+- continuous_query_min_every_interval_in_ms
+
+| Name | continuous_query_min_every_interval_in_ms |
+| ----------- | ----------- |
+| Description | The minimum value of the continuous query execution time interval. |
+| Type | long (duration) |
+| Default | 1000 |
+| Effective | Restart required. |
+
+### 4.29 Pipe Configuration
+
+- pipe_lib_dir
+
+| Name | pipe_lib_dir |
+| ----------- | ----------- |
+| Description | The pipe lib directory. |
+| Type | string |
+| Default | ext/pipe |
+| Effective | Cannot be modified. |
+
+- pipe_subtask_executor_max_thread_num
+
+| Name | pipe_subtask_executor_max_thread_num |
+| ----------- | ----------- |
+| Description | The maximum number of threads that can be used to execute pipe subtasks in PipeSubtaskExecutor. The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)). |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+- pipe_sink_timeout_ms
+
+| Name | pipe_sink_timeout_ms |
+| ----------- | ----------- |
+| Description | The connection timeout (in milliseconds) for the Thrift client. |
+| Type | int |
+| Default | 900000 |
+| Effective | Restart required. |
+
+- pipe_sink_selector_number
+
+| Name | pipe_sink_selector_number |
+| ----------- | ----------- |
+| Description | The maximum number of selectors that can be used in the sink. It is recommended to set this value less than or equal to pipe_sink_max_client_number. |
+| Type | int |
+| Default | 4 |
+| Effective | Restart required. |
+
+- pipe_sink_max_client_number
+
+| Name | pipe_sink_max_client_number |
+| ----------- | ----------- |
+| Description | The maximum number of clients that can be used in the sink. |
+| Type | int |
+| Default | 16 |
+| Effective | Restart required. |
+
+- pipe_air_gap_receiver_enabled
+
+| Name | pipe_air_gap_receiver_enabled |
+| ----------- | ----------- |
+| Description | Whether to enable receiving pipe data through an air gap. The receiver can only return 0 or 1 in TCP mode to indicate whether the data is received successfully. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- pipe_air_gap_receiver_port
+
+| Name | pipe_air_gap_receiver_port |
+| ----------- | ----------- |
+| Description | The port for the server to receive pipe data through an air gap. |
+| Type | int |
+| Default | 9780 |
+| Effective | Restart required. |
+
+- pipe_all_sinks_rate_limit_bytes_per_second
+
+| Name | pipe_all_sinks_rate_limit_bytes_per_second |
+| ----------- | ----------- |
+| Description | The total number of bytes that all pipe sinks can transfer per second. A value less than or equal to 0 means no limit; the default is -1 (no limit). |
+| Type | double |
+| Default | -1 |
+| Effective | Hot reload |
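+
+For example, to cap the aggregate bandwidth of all pipe sinks at roughly 10 MB/s (an illustrative value), one could set the line below; since this parameter supports hot reload, it can also be applied at runtime with `set configuration`:
+
+```properties
+# 10 MB/s = 10 * 1024 * 1024 bytes per second
+pipe_all_sinks_rate_limit_bytes_per_second=10485760
+```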
+
+### 4.30 RatisConsensus Configuration
+
+- config_node_ratis_log_appender_buffer_size_max
+
+| Name | config_node_ratis_log_appender_buffer_size_max |
+| ----------- | ----------- |
+| Description | Max payload size for a single log-sync RPC from leader to follower of ConfigNode (in bytes, 16MB by default). |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_appender_buffer_size_max
+
+| Name | schema_region_ratis_log_appender_buffer_size_max |
+| ----------- | ----------- |
+| Description | Max payload size for a single log-sync RPC from leader to follower of SchemaRegion (in bytes, 16MB by default). |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- data_region_ratis_log_appender_buffer_size_max
+
+| Name | data_region_ratis_log_appender_buffer_size_max |
+| ----------- | ----------- |
+| Description | Max payload size for a single log-sync RPC from leader to follower of DataRegion (in bytes, 16MB by default). |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- config_node_ratis_snapshot_trigger_threshold
+
+| Name | config_node_ratis_snapshot_trigger_threshold |
+| ----------- | ----------- |
+| Description | The maximum number of logs to trigger a snapshot of ConfigNode. |
+| Type | int32 |
+| Default | 400,000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_snapshot_trigger_threshold
+
+| Name | schema_region_ratis_snapshot_trigger_threshold |
+| ----------- | ----------- |
+| Description | The maximum number of logs to trigger a snapshot of SchemaRegion. |
+| Type | int32 |
+| Default | 400,000 |
+| Effective | Restart required. |
+
+- data_region_ratis_snapshot_trigger_threshold
+
+| Name | data_region_ratis_snapshot_trigger_threshold |
+| ----------- | ----------- |
+| Description | The maximum number of logs to trigger a snapshot of DataRegion. |
+| Type | int32 |
+| Default | 400,000 |
+| Effective | Restart required. |
+
+- config_node_ratis_log_unsafe_flush_enable
+
+| Name | config_node_ratis_log_unsafe_flush_enable |
+| ----------- | ----------- |
+| Description | Whether ConfigNode is allowed to flush Raft logs asynchronously. |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_unsafe_flush_enable
+
+| Name | schema_region_ratis_log_unsafe_flush_enable |
+| ----------- | ----------- |
+| Description | Whether SchemaRegion is allowed to flush Raft logs asynchronously. |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- data_region_ratis_log_unsafe_flush_enable
+
+| Name | data_region_ratis_log_unsafe_flush_enable |
+| ----------- | ----------- |
+| Description | Whether DataRegion is allowed to flush Raft logs asynchronously. |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- config_node_ratis_log_segment_size_max_in_byte
+
+| Name | config_node_ratis_log_segment_size_max_in_byte |
+| ----------- | ----------- |
+| Description | Max capacity of a RaftLog segment file of ConfigNode (in bytes, 24MB by default). |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_segment_size_max_in_byte
+
+| Name | schema_region_ratis_log_segment_size_max_in_byte |
+| ----------- | ----------- |
+| Description | Max capacity of a RaftLog segment file of SchemaRegion (in bytes, 24MB by default). |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- data_region_ratis_log_segment_size_max_in_byte
+
+| Name | data_region_ratis_log_segment_size_max_in_byte |
+| ----------- | ----------- |
+| Description | Max capacity of a RaftLog segment file of DataRegion (in bytes, 24MB by default). |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- config_node_simple_consensus_log_segment_size_max_in_byte
+
+| Name | config_node_simple_consensus_log_segment_size_max_in_byte |
+| ----------- | ----------- |
+| Description | Max capacity of a simple log segment file of ConfigNode (in bytes, 24MB by default). |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- config_node_ratis_grpc_flow_control_window
+
+| Name | config_node_ratis_grpc_flow_control_window |
+| ----------- | ----------- |
+| Description | ConfigNode flow control window for the Ratis gRPC log appender. |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+- schema_region_ratis_grpc_flow_control_window
+
+| Name | schema_region_ratis_grpc_flow_control_window |
+| ----------- | ----------- |
+| Description | Schema region flow control window for the Ratis gRPC log appender. |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+- data_region_ratis_grpc_flow_control_window
+
+| Name | data_region_ratis_grpc_flow_control_window |
+| ----------- | ----------- |
+| Description | Data region flow control window for the Ratis gRPC log appender. |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+- config_node_ratis_grpc_leader_outstanding_appends_max
+
+| Name | config_node_ratis_grpc_leader_outstanding_appends_max |
+| ----------- | ----------- |
+| Description | ConfigNode gRPC pipeline concurrency threshold. |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- schema_region_ratis_grpc_leader_outstanding_appends_max
+
+| Name | schema_region_ratis_grpc_leader_outstanding_appends_max |
+| ----------- | ----------- |
+| Description | Schema region gRPC pipeline concurrency threshold. |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- data_region_ratis_grpc_leader_outstanding_appends_max
+
+| Name | data_region_ratis_grpc_leader_outstanding_appends_max |
+| ----------- | ----------- |
+| Description | Data region gRPC pipeline concurrency threshold. |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required.
| + +- config_node_ratis_log_force_sync_num + +| Name | config_node_ratis_log_force_sync_num | +| ----------- | ------------------------------------ | +| Description | config node fsync threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- schema_region_ratis_log_force_sync_num + +| Name | schema_region_ratis_log_force_sync_num | +| ----------- | -------------------------------------- | +| Description | schema region fsync threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- data_region_ratis_log_force_sync_num + +| Name | data_region_ratis_log_force_sync_num | +| ----------- | ------------------------------------ | +| Description | data region fsync threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- config_node_ratis_rpc_leader_election_timeout_min_ms + +| Name | config_node_ratis_rpc_leader_election_timeout_min_ms | +| ----------- | ---------------------------------------------------- | +| Description | confignode leader min election timeout | +| Type | int32 | +| Default | 2000ms | +| Effective | Restart required. | + +- schema_region_ratis_rpc_leader_election_timeout_min_ms + +| Name | schema_region_ratis_rpc_leader_election_timeout_min_ms | +| ----------- | ------------------------------------------------------ | +| Description | schema region leader min election timeout | +| Type | int32 | +| Default | 2000ms | +| Effective | Restart required. | + +- data_region_ratis_rpc_leader_election_timeout_min_ms + +| Name | data_region_ratis_rpc_leader_election_timeout_min_ms | +| ----------- | ---------------------------------------------------- | +| Description | data region leader min election timeout | +| Type | int32 | +| Default | 2000ms | +| Effective | Restart required. | + +- config_node_ratis_rpc_leader_election_timeout_max_ms + +| Name | config_node_ratis_rpc_leader_election_timeout_max_ms | +| ----------- | ---------------------------------------------------- | +| Description | confignode leader max election timeout | +| Type | int32 | +| Default | 4000ms | +| Effective | Restart required. | + +- schema_region_ratis_rpc_leader_election_timeout_max_ms + +| Name | schema_region_ratis_rpc_leader_election_timeout_max_ms | +| ----------- | ------------------------------------------------------ | +| Description | schema region leader max election timeout | +| Type | int32 | +| Default | 4000ms | +| Effective | Restart required. | + +- data_region_ratis_rpc_leader_election_timeout_max_ms + +| Name | data_region_ratis_rpc_leader_election_timeout_max_ms | +| ----------- | ---------------------------------------------------- | +| Description | data region leader max election timeout | +| Type | int32 | +| Default | 4000ms | +| Effective | Restart required. | + +- config_node_ratis_request_timeout_ms + +| Name | config_node_ratis_request_timeout_ms | +| ----------- | --------------------------------------- | +| Description | confignode ratis client retry threshold | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. | + +- schema_region_ratis_request_timeout_ms + +| Name | schema_region_ratis_request_timeout_ms | +| ----------- | ------------------------------------------ | +| Description | schema region ratis client retry threshold | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. 
|
+
+- data_region_ratis_request_timeout_ms
+
+| Name | data_region_ratis_request_timeout_ms |
+| ----------- | ----------- |
+| Description | Data region Ratis client retry threshold. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- config_node_ratis_max_retry_attempts
+
+| Name | config_node_ratis_max_retry_attempts |
+| ----------- | ----------- |
+| Description | ConfigNode Ratis client max retry times. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- config_node_ratis_initial_sleep_time_ms
+
+| Name | config_node_ratis_initial_sleep_time_ms |
+| ----------- | ----------- |
+| Description | ConfigNode Ratis client initial sleep time. |
+| Type | int32 |
+| Default | 100 (ms) |
+| Effective | Restart required. |
+
+- config_node_ratis_max_sleep_time_ms
+
+| Name | config_node_ratis_max_sleep_time_ms |
+| ----------- | ----------- |
+| Description | ConfigNode Ratis client max retry sleep time. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_max_retry_attempts
+
+| Name | schema_region_ratis_max_retry_attempts |
+| ----------- | ----------- |
+| Description | Schema region Ratis client max retry times. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- schema_region_ratis_initial_sleep_time_ms
+
+| Name | schema_region_ratis_initial_sleep_time_ms |
+| ----------- | ----------- |
+| Description | Schema region Ratis client initial sleep time. |
+| Type | int32 |
+| Default | 100 (ms) |
+| Effective | Restart required. |
+
+- schema_region_ratis_max_sleep_time_ms
+
+| Name | schema_region_ratis_max_sleep_time_ms |
+| ----------- | ----------- |
+| Description | Schema region Ratis client max sleep time. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- data_region_ratis_max_retry_attempts
+
+| Name | data_region_ratis_max_retry_attempts |
+| ----------- | ----------- |
+| Description | Data region Ratis client max retry times. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- data_region_ratis_initial_sleep_time_ms
+
+| Name | data_region_ratis_initial_sleep_time_ms |
+| ----------- | ----------- |
+| Description | Data region Ratis client initial sleep time. |
+| Type | int32 |
+| Default | 100 (ms) |
+| Effective | Restart required. |
+
+- data_region_ratis_max_sleep_time_ms
+
+| Name | data_region_ratis_max_sleep_time_ms |
+| ----------- | ----------- |
+| Description | Data region Ratis client max retry sleep time. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- ratis_first_election_timeout_min_ms
+
+| Name | ratis_first_election_timeout_min_ms |
+| ----------- | ----------- |
+| Description | Ratis first election min timeout. |
+| Type | int64 |
+| Default | 50 (ms) |
+| Effective | Restart required. |
+
+- ratis_first_election_timeout_max_ms
+
+| Name | ratis_first_election_timeout_max_ms |
+| ----------- | ----------- |
+| Description | Ratis first election max timeout. |
+| Type | int64 |
+| Default | 150 (ms) |
+| Effective | Restart required. |
+
+- config_node_ratis_preserve_logs_num_when_purge
+
+| Name | config_node_ratis_preserve_logs_num_when_purge |
+| ----------- | ----------- |
+| Description | The number of logs ConfigNode preserves when taking a snapshot and purging. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_preserve_logs_num_when_purge
+
+| Name | schema_region_ratis_preserve_logs_num_when_purge |
+| ----------- | ----------- |
+| Description | The number of logs the schema region preserves when taking a snapshot and purging. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- data_region_ratis_preserve_logs_num_when_purge
+
+| Name | data_region_ratis_preserve_logs_num_when_purge |
+| ----------- | ----------- |
+| Description | The number of logs the data region preserves when taking a snapshot and purging. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- config_node_ratis_log_max_size
+
+| Name | config_node_ratis_log_max_size |
+| ----------- | ----------- |
+| Description | ConfigNode Raft log disk size control. |
+| Type | int64 |
+| Default | 2147483648 (2GB) |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_max_size
+
+| Name | schema_region_ratis_log_max_size |
+| ----------- | ----------- |
+| Description | Schema region Raft log disk size control. |
+| Type | int64 |
+| Default | 2147483648 (2GB) |
+| Effective | Restart required. |
+
+- data_region_ratis_log_max_size
+
+| Name | data_region_ratis_log_max_size |
+| ----------- | ----------- |
+| Description | Data region Raft log disk size control. |
+| Type | int64 |
+| Default | 21474836480 (20GB) |
+| Effective | Restart required. |
+
+- config_node_ratis_periodic_snapshot_interval
+
+| Name | config_node_ratis_periodic_snapshot_interval |
+| ----------- | ----------- |
+| Description | ConfigNode Raft periodic snapshot interval. |
+| Type | int64 |
+| Default | 86400 (s) |
+| Effective | Restart required. |
+
+- schema_region_ratis_periodic_snapshot_interval
+
+| Name | schema_region_ratis_periodic_snapshot_interval |
+| ----------- | ----------- |
+| Description | Schema region Raft periodic snapshot interval. |
+| Type | int64 |
+| Default | 86400 (s) |
+| Effective | Restart required. |
+
+- data_region_ratis_periodic_snapshot_interval
+
+| Name | data_region_ratis_periodic_snapshot_interval |
+| ----------- | ----------- |
+| Description | Data region Raft periodic snapshot interval. |
+| Type | int64 |
+| Default | 86400 (s) |
+| Effective | Restart required. |
+
+### 4.31 IoTConsensusV2 Configuration
+
+- iot_consensus_v2_pipeline_size
+
+| Name | iot_consensus_v2_pipeline_size |
+| ----------- | ----------- |
+| Description | Default event buffer size for the connector and receiver in IoTConsensus V2. |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+- iot_consensus_v2_mode
+
+| Name | iot_consensus_v2_mode |
+| ----------- | ----------- |
+| Description | IoTConsensusV2 mode. |
+| Type | String |
+| Default | batch |
+| Effective | Restart required. |
+
+### 4.32 Procedure Configuration
+
+- procedure_core_worker_thread_count
+
+| Name | procedure_core_worker_thread_count |
+| ----------- | ----------- |
+| Description | Default number of worker threads. |
+| Type | int32 |
+| Default | 4 |
+| Effective | Restart required. |
+
+- procedure_completed_clean_interval
+
+| Name | procedure_completed_clean_interval |
+| ----------- | ----------- |
+| Description | Default time interval at which completed procedures are cleaned up, in seconds. |
+| Type | int32 |
+| Default | 30 (s) |
+| Effective | Restart required. |
+
+- procedure_completed_evict_ttl
+
+| Name | procedure_completed_evict_ttl |
+| ----------- | ----------- |
+| Description | Default TTL of completed procedures, in seconds. |
+| Type | int32 |
+| Default | 60 (s) |
+| Effective | Restart required. |
+
+### 4.33 MQTT Broker Configuration
+
+- enable_mqtt_service
+
+| Name | enable_mqtt_service |
+| ----------- | ----------- |
+| Description | Whether to enable the MQTT service. |
+| Type | Boolean |
+| Default | false |
+| Effective | Hot reload |
+
+- mqtt_host
+
+| Name | mqtt_host |
+| ----------- | ----------- |
+| Description | The MQTT service binding host. |
+| Type | String |
+| Default | 127.0.0.1 |
+| Effective | Hot reload |
+
+- mqtt_port
+
+| Name | mqtt_port |
+| ----------- | ----------- |
+| Description | The MQTT service binding port. |
+| Type | int32 |
+| Default | 1883 |
+| Effective | Hot reload |
+
+- mqtt_handler_pool_size
+
+| Name | mqtt_handler_pool_size |
+| ----------- | ----------- |
+| Description | The handler pool size for handling MQTT messages. |
+| Type | int32 |
+| Default | 1 |
+| Effective | Hot reload |
+
+- mqtt_payload_formatter
+
+| Name | mqtt_payload_formatter |
+| ----------- | ----------- |
+| Description | The MQTT message payload formatter. |
+| Type | String |
+| Default | json |
+| Effective | Hot reload |
+
+- mqtt_max_message_size
+
+| Name | mqtt_max_message_size |
+| ----------- | ----------- |
+| Description | Max length of an MQTT message in bytes. |
+| Type | int32 |
+| Default | 1048576 |
+| Effective | Hot reload |
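+
+With the default `json` payload formatter, messages published to the broker are parsed as JSON. As a non-authoritative sketch, a commonly documented payload shape looks like the following (field names follow the tree-model formatter and are shown for illustration only; consult the MQTT API documentation for the exact schema expected by this version):
+
+```json
+{
+  "device": "root.sg.d1",
+  "timestamp": 1586076045524,
+  "measurements": ["s1", "s2"],
+  "values": [0.53, 25.4]
+}
+```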
+
+### 4.34 Audit log Configuration
+
+- enable_audit_log
+
+| Name | enable_audit_log |
+| ----------- | ----------- |
+| Description | Whether to enable the audit log. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- audit_log_storage
+
+| Name | audit_log_storage |
+| ----------- | ----------- |
+| Description | Output location of audit logs. |
+| Type | String |
+| Default | IOTDB,LOGGER |
+| Effective | Restart required. |
+
+- audit_log_operation
+
+| Name | audit_log_operation |
+| ----------- | ----------- |
+| Description | Which operations are recorded in the audit log: DML (data writes), DDL (schema changes), and QUERY (data and schema queries). |
+| Type | String |
+| Default | DML,DDL,QUERY |
+| Effective | Restart required. |
+
+- enable_audit_log_for_native_insert_api
+
+| Name | enable_audit_log_for_native_insert_api |
+| ----------- | ----------- |
+| Description | Whether the native write API records audit logs. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+### 4.35 White List Configuration
+
+- enable_white_list
+
+| Name | enable_white_list |
+| ----------- | ----------- |
+| Description | Whether to enable the white list. |
+| Type | Boolean |
+| Default | false |
+| Effective | Hot reload |
+
+### 4.36 IoTDB-AI Configuration
+
+- model_inference_execution_thread_count
+
+| Name | model_inference_execution_thread_count |
+| ----------- | ----------- |
+| Description | The number of threads that can be used for model inference operations. |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+### 4.37 Load TsFile Configuration
+
+- load_clean_up_task_execution_delay_time_seconds
+
+| Name | load_clean_up_task_execution_delay_time_seconds |
+| ----------- | ----------- |
+| Description | The load clean-up task cleans up unsuccessfully loaded TsFiles after a certain period of time. |
+| Type | int |
+| Default | 1800 |
+| Effective | Hot reload |
+
+- load_write_throughput_bytes_per_second
+
+| Name | load_write_throughput_bytes_per_second |
+| ----------- | ----------- |
+| Description | The maximum disk write throughput in bytes per second when loading TsFiles. |
+| Type | int |
+| Default | -1 |
+| Effective | Hot reload |
+
+- load_active_listening_enable
+
+| Name | load_active_listening_enable |
+| ----------- | ----------- |
+| Description | Whether to enable the active listening mode for TsFile loading. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- load_active_listening_dirs
+
+| Name | load_active_listening_dirs |
+| ----------- | ----------- |
+| Description | The directories to be actively listened to for TsFile loading. Multiple directories should be separated by ','. |
+| Type | String |
+| Default | ext/load/pending |
+| Effective | Hot reload |
+
+- load_active_listening_fail_dir
+
+| Name | load_active_listening_fail_dir |
+| ----------- | ----------- |
+| Description | The directory where TsFiles are moved if the active listening mode fails to load them. |
+| Type | String |
+| Default | ext/load/failed |
+| Effective | Hot reload |
+
+- load_active_listening_max_thread_num
+
+| Name | load_active_listening_max_thread_num |
+| ----------- | ----------- |
+| Description | The maximum number of threads that can be used to load TsFiles actively. When this parameter is commented out or set to <= 0, the CPU core number is used. |
+| Type | Long |
+| Default | 0 |
+| Effective | Restart required. |
+
+- load_active_listening_check_interval_seconds
+
+| Name | load_active_listening_check_interval_seconds |
+| ----------- | ----------- |
+| Description | The interval, in seconds, at which the active listening mode checks the directories specified in `load_active_listening_dirs`. |
+| Type | Long |
+| Default | 5 |
+| Effective | Restart required. |
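+
+As a minimal sketch, enabling active listening with an additional custom directory might look like this in `iotdb-system.properties` (the second path is an illustrative placeholder):
+
+```properties
+load_active_listening_enable=true
+# TsFiles dropped into these directories are loaded automatically
+load_active_listening_dirs=ext/load/pending,/data/incoming_tsfiles
+# TsFiles that fail to load are moved here for inspection
+load_active_listening_fail_dir=ext/load/failed
+```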
+
+* last_cache_operation_on_load
+
+|Name| last_cache_operation_on_load |
+|:---:|:-----------|
+|Description| The operation performed on LastCache when a TsFile is successfully loaded. `UPDATE`: use the data in the TsFile to update LastCache; `UPDATE_NO_BLOB`: similar to UPDATE, but invalidates LastCache for blob series; `CLEAN_DEVICE`: invalidate LastCache of devices contained in the TsFile; `CLEAN_ALL`: clean the whole LastCache. |
+|Type| String |
+|Default| UPDATE_NO_BLOB |
+|Effective| Effective after restart |
+
+* cache_last_values_for_load
+
+|Name| cache_last_values_for_load |
+|:---:|:-----------|
+|Description| Whether to cache last values before loading a TsFile. Only effective when `last_cache_operation_on_load=UPDATE_NO_BLOB` or `last_cache_operation_on_load=UPDATE`. When set to true, blob series will be ignored even with `last_cache_operation_on_load=UPDATE`. Enabling this will increase the memory footprint while loading TsFiles. |
+|Type| Boolean |
+|Default| true |
+|Effective| Effective after restart |
+
+* cache_last_values_memory_budget_in_byte
+
+|Name| cache_last_values_memory_budget_in_byte |
+|:---:|:-----------|
+|Description| When `cache_last_values_for_load=true`, the maximum memory that can be used to cache last values. If this value is exceeded, the cached values will be abandoned and last values will be read from the TsFile in a streaming manner. |
+|Type| int32 |
+|Default| 4194304 |
+|Effective| Effective after restart |
+
+
+### 4.38 Dispatch Retry Configuration
+
+- write_request_remote_dispatch_max_retry_duration_in_ms
+
+| Name | write_request_remote_dispatch_max_retry_duration_in_ms |
+| ----------- | ----------- |
+| Description | The maximum retry duration for remote dispatching of write requests, in milliseconds. |
+| Type | Long |
+| Default | 60000 |
+| Effective | Hot reload |
+
+- enable_retry_for_unknown_error
+
+| Name | enable_retry_for_unknown_error |
+| ----------- | ----------- |
+| Description | Whether to retry on unknown errors. |
+| Type | boolean |
+| Default | false |
+| Effective | Hot reload |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Reference/System-Config-Manual_timecho.md b/src/UserGuide/Master/Table/Reference/System-Config-Manual_timecho.md
new file mode 100644
index 000000000..d32adb558
--- /dev/null
+++ b/src/UserGuide/Master/Table/Reference/System-Config-Manual_timecho.md
@@ -0,0 +1,3384 @@
+
+
+# Config Manual
+
IoTDB Configuration Files + +The configuration files for IoTDB are located in the `conf` folder under the IoTDB installation directory. Key configuration files include: + +1. `confignode-env.sh` **/** `confignode-env.bat`: + 1. Environment configuration file for ConfigNode. + 2. Used to configure memory size and other environment settings for ConfigNode. +2. `datanode-env.sh` **/** `datanode-env.bat`: + 1. Environment configuration file for DataNode. + 2. Used to configure memory size and other environment settings for DataNode. +3. `iotdb-system.properties`: + 1. Main configuration file for IoTDB. + 2. Contains configurable parameters for IoTDB. +4. `iotdb-system.properties.template`: + 1. Template for the `iotdb-system.properties` file. + 2. Provides a reference for all available configuration parameters. + +## 2. Modify Configurations + +### 2.1 **Modify Existing Parameters**: + +- Parameters already present in the `iotdb-system.properties` file can be directly modified. + +### 2.2 **Adding New Parameters**: + +- For parameters not listed in `iotdb-system.properties`, you can find them in the `iotdb-system.properties.template` file. +- Copy the desired parameter from the template file to `iotdb-system.properties` and modify its value. + +### 2.3 Configuration Update Methods + +Different configuration parameters have different update methods, categorized as follows: + +1. **Modify before the first startup.**: + 1. These parameters can only be modified before the first startup of ConfigNode/DataNode. + 2. Modifying them after the first startup will prevent ConfigNode/DataNode from starting. +2. **Restart Required for Changes to Take Effect**: + 1. These parameters can be modified after ConfigNode/DataNode has started. + 2. However, a restart of ConfigNode/DataNode is required for the changes to take effect. +3. **Hot Reload**: + 1. These parameters can be modified while ConfigNode/DataNode is running. + 2. After modification, use the following SQL commands to apply the changes: + - `load configuration`: Reloads the configuration. + - `set configuration key1 = 'value1'`: Updates specific configuration parameters. + +## 3. Environment Parameters + +The environment configuration files (`confignode-env.sh/bat` and `datanode-env.sh/bat`) are used to configure Java environment parameters for ConfigNode and DataNode, such as JVM settings. These configurations are passed to the JVM when ConfigNode or DataNode starts. + +### 3.1 **confignode-env.sh/bat** + +- MEMORY_SIZE + +| Name | MEMORY_SIZE | +| ----------- | ------------------------------------------------------------ | +| Description | Memory size allocated when IoTDB ConfigNode starts. | +| Type | String | +| Default | Depends on the operating system and machine configuration. Defaults to 3/10 of the machine's memory, capped at 16G. | +| Effective | Restart required | + +- ON_HEAP_MEMORY + +| Name | ON_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | On-heap memory size available for IoTDB ConfigNode. Previously named `MAX_HEAP_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. | +| Effective | Restart required | + +- OFF_HEAP_MEMORY + +| Name | OFF_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | Off-heap memory size available for IoTDB ConfigNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. 
| +| Effective | Restart required | + +### 3.2 **datanode-env.sh/bat** + +- MEMORY_SIZE + +| Name | MEMORY_SIZE | +| ----------- | ------------------------------------------------------------ | +| Description | Memory size allocated when IoTDB DataNode starts. | +| Type | String | +| Default | Depends on the operating system and machine configuration. Defaults to 1/2 of the machine's memory. | +| Effective | Restart required | + +- ON_HEAP_MEMORY + +| Name | ON_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | On-heap memory size available for IoTDB DataNode. Previously named `MAX_HEAP_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. | +| Effective | Restart required | + +- OFF_HEAP_MEMORY + +| Name | OFF_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | Off-heap memory size available for IoTDB DataNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. | +| Effective | Restart required | + +## 4. System Parameters (`iotdb-system.properties.template`) + +The `iotdb-system.properties` file contains various configurations for managing IoTDB clusters, nodes, replication, directories, monitoring, SSL, connections, object storage, tier management, and REST services. Below is a detailed breakdown of the parameters: + +### 4.1 Cluster Configuration + +- cluster_name + +| Name | cluster_name | +| ----------- | --------------------------------------------------------- | +| Description | Name of the cluster. | +| Type | String | +| Default | default_cluster | +| Effective | Use CLI: `set configuration cluster_name='xxx'`. | +| Note | Changes are distributed across nodes. Changes may not propagate to all nodes in case of network issues or node failures. Nodes that fail to update must manually modify `cluster_name` in their configuration files and restart. Under normal circumstances, it is not recommended to modify `cluster_name` by manually modifying configuration files or to perform hot-loading via `load configuration` method. | + +### 4.2 Seed ConfigNode + +- cn_seed_config_node + +| Name | cn_seed_config_node | +| ----------- | ------------------------------------------------------------ | +| Description | Address of the seed ConfigNode for Confignode to join the cluster. | +| Type | String | +| Default | 127.0.0.1:10710 | +| Effective | Modify before the first startup. | + +- dn_seed_config_node + +| Name | dn_seed_config_node | +| ----------- | ------------------------------------------------------------ | +| Description | Address of the seed ConfigNode for Datanode to join the cluster. | +| Type | String | +| Default | 127.0.0.1:10710 | +| Effective | Modify before the first startup. | + +### 4.3 Node RPC Configuration + +- cn_internal_address + +| Name | cn_internal_address | +| ----------- | ---------------------------------------------- | +| Description | Internal address for ConfigNode communication. | +| Type | String | +| Default | 127.0.0.1 | +| Effective | Modify before the first startup. | + +- cn_internal_port + +| Name | cn_internal_port | +| ----------- | ------------------------------------------- | +| Description | Port for ConfigNode internal communication. | +| Type | Short Int : [0,65535] | +| Default | 10710 | +| Effective | Modify before the first startup. 
| + +- cn_consensus_port + +| Name | cn_consensus_port | +| ----------- | ----------------------------------------------------- | +| Description | Port for ConfigNode consensus protocol communication. | +| Type | Short Int : [0,65535] | +| Default | 10720 | +| Effective | Modify before the first startup. | + +- dn_rpc_address + +| Name | dn_rpc_address | +| ----------- |---------------------------------| +| Description | Address for client RPC service. | +| Type | String | +| Default | 127.0.0.1 | +| Effective | Restart required. | + +- dn_rpc_port + +| Name | dn_rpc_port | +| ----------- | ---------------------------- | +| Description | Port for client RPC service. | +| Type | Short Int : [0,65535] | +| Default | 6667 | +| Effective | Restart required. | + +- dn_internal_address + +| Name | dn_internal_address | +| ----------- | -------------------------------------------- | +| Description | Internal address for DataNode communication. | +| Type | string | +| Default | 127.0.0.1 | +| Effective | Modify before the first startup. | + +- dn_internal_port + +| Name | dn_internal_port | +| ----------- | ----------------------------------------- | +| Description | Port for DataNode internal communication. | +| Type | int | +| Default | 10730 | +| Effective | Modify before the first startup. | + +- dn_mpp_data_exchange_port + +| Name | dn_mpp_data_exchange_port | +| ----------- | -------------------------------- | +| Description | Port for MPP data exchange. | +| Type | int | +| Default | 10740 | +| Effective | Modify before the first startup. | + +- dn_schema_region_consensus_port + +| Name | dn_schema_region_consensus_port | +| ----------- | ------------------------------------------------------------ | +| Description | Port for Datanode SchemaRegion consensus protocol communication. | +| Type | int | +| Default | 10750 | +| Effective | Modify before the first startup. | + +- dn_data_region_consensus_port + +| Name | dn_data_region_consensus_port | +| ----------- | ------------------------------------------------------------ | +| Description | Port for Datanode DataRegion consensus protocol communication. | +| Type | int | +| Default | 10760 | +| Effective | Modify before the first startup. | + +- dn_join_cluster_retry_interval_ms + +| Name | dn_join_cluster_retry_interval_ms | +| ----------- | --------------------------------------------------- | +| Description | Interval for DataNode to retry joining the cluster. | +| Type | long | +| Default | 5000 | +| Effective | Restart required. | + +### 4.4 Replication configuration + +- config_node_consensus_protocol_class + +| Name | config_node_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for ConfigNode replication, only supports RatisConsensus | +| Type | String | +| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | +| Effective | Modify before the first startup. | + +- schema_replication_factor + +| Name | schema_replication_factor | +| ----------- | ------------------------------------------------------------ | +| Description | Default schema replication factor for databases. | +| Type | int32 | +| Default | 1 | +| Effective | Restart required. Takes effect on the new database after restarting. | + +- schema_region_consensus_protocol_class + +| Name | schema_region_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for schema region replication. 
Only supports RatisConsensus when multi-replications. | +| Type | String | +| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | +| Effective | Modify before the first startup. | + +- data_replication_factor + +| Name | data_replication_factor | +| ----------- | ------------------------------------------------------------ | +| Description | Default data replication factor for databases. | +| Type | int32 | +| Default | 1 | +| Effective | Restart required. Takes effect on the new database after restarting. | + +- data_region_consensus_protocol_class + +| Name | data_region_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for data region replication. Supports IoTConsensus or RatisConsensus when multi-replications. | +| Type | String | +| Default | org.apache.iotdb.consensus.iot.IoTConsensus | +| Effective | Modify before the first startup. | + +### 4.5 Directory configuration + +- cn_system_dir + +| Name | cn_system_dir | +| ----------- | ----------------------------------------------------------- | +| Description | System data storage path for ConfigNode. | +| Type | String | +| Default | data/confignode/system(Windows:data\\configndoe\\system) | +| Effective | Restart required | + +- cn_consensus_dir + +| Name | cn_consensus_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol data storage path for ConfigNode. | +| Type | String | +| Default | data/confignode/consensus(Windows:data\\configndoe\\consensus) | +| Effective | Restart required | + +- cn_pipe_receiver_file_dir + +| Name | cn_pipe_receiver_file_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for pipe receiver files in ConfigNode. | +| Type | String | +| Default | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) | +| Effective | Restart required | + +- dn_system_dir + +| Name | dn_system_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Schema storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/system(Windows:data\\datanode\\system) | +| Effective | Restart required | + +- dn_data_dirs + +| Name | dn_data_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Data storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/data(Windows:data\\datanode\\data) | +| Effective | Restart required | + +- dn_multi_dir_strategy + +| Name | dn_multi_dir_strategy | +| ----------- | ------------------------------------------------------------ | +| Description | The strategy used by IoTDB to select directories in `data_dirs` for TsFiles. You can use either the simple class name or the fully qualified class name. The system provides the following two strategies: 1. SequenceStrategy: IoTDB selects directories sequentially, iterating through all directories in `data_dirs` in a round-robin manner. 2. 
MaxDiskUsableSpaceFirstStrategy IoTDB prioritizes the directory in `data_dirs` with the largest disk free space. To implement a custom strategy: 1. Inherit the `org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy `class and implement your own strategy method. 2. Fill in the configuration item with the fully qualified class name of your implementation (package name + class name, e.g., `UserDefineStrategyPackage`). 3. Add the JAR file containing your custom class to the project. | +| Type | String | +| Default | SequenceStrategy | +| Effective | Hot reload. | + +- dn_consensus_dir + +| Name | dn_consensus_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus log storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/consensus(Windows:data\\datanode\\consensus) | +| Effective | Restart required | + +- dn_wal_dirs + +| Name | dn_wal_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Write-ahead log (WAL) storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/wal(Windows:data\\datanode\\wal) | +| Effective | Restart required | + +- dn_tracing_dir + +| Name | dn_tracing_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Tracing root directory for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | datanode/tracing(Windows:datanode\\tracing) | +| Effective | Restart required | + +- dn_sync_dir + +| Name | dn_sync_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Sync storage path for DataNode.By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/sync(Windows:data\\datanode\\sync) | +| Effective | Restart required | + +- sort_tmp_dir + +| Name | sort_tmp_dir | +| ----------- | ------------------------------------------------- | +| Description | Temporary directory for sorting operations. | +| Type | String | +| Default | data/datanode/tmp(Windows:data\\datanode\\tmp) | +| Effective | Restart required | + +- dn_pipe_receiver_file_dirs + +| Name | dn_pipe_receiver_file_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for pipe receiver files in DataNode. | +| Type | String | +| Default | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | +| Effective | Restart required | + +- iot_consensus_v2_receiver_file_dirs + +| Name | iot_consensus_v2_receiver_file_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for IoTConsensus V2 receiver files. 
| +| Type | String | +| Default | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | +| Effective | Restart required | + +- iot_consensus_v2_deletion_file_dir + +| Name | iot_consensus_v2_deletion_file_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for IoTConsensus V2 deletion files. | +| Type | String | +| Default | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | +| Effective | Restart required | + +### 4.6 Metric Configuration + +- cn_metric_reporter_list + +| Name | cn_metric_reporter_list | +| ----------- | ----------------------------------------- | +| Description | Systems for reporting ConfigNode metrics. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- cn_metric_level + +| Name | cn_metric_level | +| ----------- | --------------------------------------- | +| Description | Level of detail for ConfigNode metrics. | +| Type | String | +| Default | IMPORTANT | +| Effective | Restart required. | + +- cn_metric_async_collect_period + +| Name | cn_metric_async_collect_period | +| ----------- | ------------------------------------------------------------ | +| Description | Period for asynchronous metric collection in ConfigNode (in seconds). | +| Type | int | +| Default | 5 | +| Effective | Restart required. | + +- cn_metric_prometheus_reporter_port + +| Name | cn_metric_prometheus_reporter_port | +| ----------- | --------------------------------------------------- | +| Description | Port for Prometheus metric reporting in ConfigNode. | +| Type | int | +| Default | 9091 | +| Effective | Restart required. | + +- dn_metric_reporter_list + +| Name | dn_metric_reporter_list | +| ----------- | --------------------------------------- | +| Description | Systems for reporting DataNode metrics. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- dn_metric_level + +| Name | dn_metric_level | +| ----------- | ------------------------------------- | +| Description | Level of detail for DataNode metrics. | +| Type | String | +| Default | IMPORTANT | +| Effective | Restart required. | + +- dn_metric_async_collect_period + +| Name | dn_metric_async_collect_period | +| ----------- | ------------------------------------------------------------ | +| Description | Period for asynchronous metric collection in DataNode (in seconds). | +| Type | int | +| Default | 5 | +| Effective | Restart required. | + +- dn_metric_prometheus_reporter_port + +| Name | dn_metric_prometheus_reporter_port | +| ----------- | ------------------------------------------------- | +| Description | Port for Prometheus metric reporting in DataNode. | +| Type | int | +| Default | 9092 | +| Effective | Restart required. | + +- dn_metric_internal_reporter_type + +| Name | dn_metric_internal_reporter_type | +| ----------- | ------------------------------------------------------------ | +| Description | Internal reporter types for DataNode metrics. For internal monitoring and checking that the data has been successfully written and refreshed. | +| Type | String | +| Default | IOTDB | +| Effective | Restart required. | + +### 4.7 SSL Configuration + +- enable_thrift_ssl + +| Name | enable_thrift_ssl | +| ----------- | --------------------------------------------- | +| Description | Enables SSL encryption for RPC communication. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. 
| + +- enable_https + +| Name | enable_https | +| ----------- | ------------------------------ | +| Description | Enables SSL for REST services. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- key_store_path + +| Name | key_store_path | +| ----------- | ---------------------------- | +| Description | Path to the SSL certificate. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- key_store_pwd + +| Name | key_store_pwd | +| ----------- | --------------------------------- | +| Description | Password for the SSL certificate. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +### 4.8 Connection Configuration + +- cn_rpc_thrift_compression_enable + +| Name | cn_rpc_thrift_compression_enable | +| ----------- | ----------------------------------- | +| Description | Enables Thrift compression for RPC. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- cn_rpc_max_concurrent_client_num + +| Name | cn_rpc_max_concurrent_client_num | +| ----------- |-------------------------------------------| +| Description | Maximum number of concurrent RPC clients. | +| Type | int | +| Default | 3000 | +| Effective | Restart required. | + +- cn_connection_timeout_ms + +| Name | cn_connection_timeout_ms | +| ----------- | ---------------------------------------------------- | +| Description | Connection timeout for ConfigNode (in milliseconds). | +| Type | int | +| Default | 60000 | +| Effective | Restart required. | + +- cn_selector_thread_nums_of_client_manager + +| Name | cn_selector_thread_nums_of_client_manager | +| ----------- | ------------------------------------------------------------ | +| Description | Number of selector threads for client management in ConfigNode. | +| Type | int | +| Default | 1 | +| Effective | Restart required. | + +- cn_max_client_count_for_each_node_in_client_manager + +| Name | cn_max_client_count_for_each_node_in_client_manager | +| ----------- | ------------------------------------------------------ | +| Description | Maximum clients per node in ConfigNode client manager. | +| Type | int | +| Default | 300 | +| Effective | Restart required. | + +- dn_session_timeout_threshold + +| Name | dn_session_timeout_threshold | +| ----------- | ---------------------------------------- | +| Description | Maximum idle time for DataNode sessions. | +| Type | int | +| Default | 0 | +| Effective | Restart required.t required. | + +- dn_rpc_thrift_compression_enable + +| Name | dn_rpc_thrift_compression_enable | +| ----------- | -------------------------------------------- | +| Description | Enables Thrift compression for DataNode RPC. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- dn_rpc_advanced_compression_enable + +| Name | dn_rpc_advanced_compression_enable | +| ----------- | ----------------------------------------------------- | +| Description | Enables advanced Thrift compression for DataNode RPC. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- dn_rpc_selector_thread_count + +| Name | rpc_selector_thread_count | +| ----------- | -------------------------------------------- | +| Description | Number of selector threads for DataNode RPC. | +| Type | int | +| Default | 1 | +| Effective | Restart required.t required. 
| + +- dn_rpc_min_concurrent_client_num + +| Name | rpc_min_concurrent_client_num | +| ----------- | ------------------------------------------------------ | +| Description | Minimum number of concurrent RPC clients for DataNode. | +| Type | Short Int : [0,65535] | +| Default | 1 | +| Effective | Restart required. | + +- dn_rpc_max_concurrent_client_num + +| Name | dn_rpc_max_concurrent_client_num | +| ----------- |--------------------------------------------------------| +| Description | Maximum number of concurrent RPC clients for DataNode. | +| Type | Short Int : [0,65535] | +| Default | 1000 | +| Effective | Restart required. | + +- dn_thrift_max_frame_size + +| Name | dn_thrift_max_frame_size | +| ----------- |------------------------------------------------| +| Description | Maximum frame size for RPC requests/responses. | +| Type | long | +| Default | 536870912 (Default 512MB) | +| Effective | Restart required. | + +- dn_thrift_init_buffer_size + +| Name | dn_thrift_init_buffer_size | +| ----------- | ----------------------------------- | +| Description | Initial buffer size for Thrift RPC. | +| Type | long | +| Default | 1024 | +| Effective | Restart required. | + +- dn_connection_timeout_ms + +| Name | dn_connection_timeout_ms | +| ----------- | -------------------------------------------------- | +| Description | Connection timeout for DataNode (in milliseconds). | +| Type | int | +| Default | 60000 | +| Effective | Restart required. | + +- dn_selector_thread_count_of_client_manager + +| Name | dn_selector_thread_count_of_client_manager | +| ----------- | ------------------------------------------------------------ | +| Description | selector thread (TAsyncClientManager) nums for async thread in a clientManager | +| Type | int | +| Default | 1 | +| Effective | Restart required.t required. | + +- dn_max_client_count_for_each_node_in_client_manager + +| Name | dn_max_client_count_for_each_node_in_client_manager | +| ----------- | --------------------------------------------------- | +| Description | Maximum clients per node in DataNode clientmanager. | +| Type | int | +| Default | 300 | +| Effective | Restart required. | + +### 4.9 Object storage management + +- remote_tsfile_cache_dirs + +| Name | remote_tsfile_cache_dirs | +| ----------- | ---------------------------------------- | +| Description | Local cache directory for cloud storage. | +| Type | String | +| Default | data/datanode/data/cache | +| Effective | Restart required. | + +- remote_tsfile_cache_page_size_in_kb + +| Name | remote_tsfile_cache_page_size_in_kb | +| ----------- | --------------------------------------------- | +| Description | Block size for cached files in cloud storage. | +| Type | int | +| Default | 20480 | +| Effective | Restart required. | + +- remote_tsfile_cache_max_disk_usage_in_mb + +| Name | remote_tsfile_cache_max_disk_usage_in_mb | +| ----------- | ------------------------------------------- | +| Description | Maximum disk usage for cloud storage cache. | +| Type | long | +| Default | 51200 | +| Effective | Restart required. | + +- object_storage_type + +| Name | object_storage_type | +| ----------- | ---------------------- | +| Description | Type of cloud storage. | +| Type | String | +| Default | AWS_S3 | +| Effective | Restart required. | + +- object_storage_endpoint + +| Name | object_storage_endpoint | +| ----------- | --------------------------- | +| Description | Endpoint for cloud storage. | +| Type | String | +| Default | None | +| Effective | Restart required. 
| + +- object_storage_bucket + +| Name | object_storage_bucket | +| ----------- | ------------------------------ | +| Description | Bucket name for cloud storage. | +| Type | String | +| Default | iotdb_data | +| Effective | Restart required. | + +- object_storage_access_key + +| Name | object_storage_access_key | +| ----------- | ----------------------------- | +| Description | Access key for cloud storage. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- object_storage_access_secret + +| Name | object_storage_access_secret | +| ----------- | -------------------------------- | +| Description | Access secret for cloud storage. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +### 4.10 Tier management + +- dn_default_space_usage_thresholds + +| Name | dn_default_space_usage_thresholds | +| ----------- | ------------------------------------------------------------ | +| Description | Disk usage threshold, data will be moved to the next tier when the usage of the tier is higher than this threshold.If tiered storage is enabled, please separate thresholds of different tiers by semicolons ";". | +| Type | double | +| Default | 0.85 | +| Effective | Hot reload. | + +- dn_tier_full_policy + +| Name | dn_tier_full_policy | +| ----------- | ------------------------------------------------------------ | +| Description | How to deal with the last tier's data when its used space has been higher than its dn_default_space_usage_thresholds. | +| Type | String | +| Default | NULL | +| Effective | Hot reload. | + +- migrate_thread_count + +| Name | migrate_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | thread pool size for migrate operation in the DataNode's data directories. | +| Type | int | +| Default | 1 | +| Effective | Hot reload. | + +- tiered_storage_migrate_speed_limit_bytes_per_sec + +| Name | tiered_storage_migrate_speed_limit_bytes_per_sec | +| ----------- | ------------------------------------------------------------ | +| Description | The migrate speed limit of different tiers can reach per second | +| Type | int | +| Default | 10485760 | +| Effective | Hot reload. | + +### 4.11 REST Service Configuration + +- enable_rest_service + +| Name | enable_rest_service | +| ----------- | --------------------------- | +| Description | Is the REST service enabled | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- rest_service_port + +| Name | rest_service_port | +| ----------- | ------------------------------------ | +| Description | the binding port of the REST service | +| Type | int32 | +| Default | 18080 | +| Effective | Restart required. | + +- enable_swagger + +| Name | enable_swagger | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to display rest service interface information through swagger. eg: http://ip:port/swagger.json | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- rest_query_default_row_size_limit + +| Name | rest_query_default_row_size_limit | +| ----------- | ------------------------------------------------------------ | +| Description | the default row limit to a REST query response when the rowSize parameter is not given in request | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. 
| + +- cache_expire_in_seconds + +| Name | cache_expire_in_seconds | +| ----------- | ------------------------------------------------------------ | +| Description | The expiration time of the user login information cache (in seconds) | +| Type | int32 | +| Default | 28800 | +| Effective | Restart required. | + +- cache_max_num + +| Name | cache_max_num | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum number of users can be stored in the user login cache. | +| Type | int32 | +| Default | 100 | +| Effective | Restart required. | + +- cache_init_num + +| Name | cache_init_num | +| ----------- | ------------------------------------------------------------ | +| Description | The initial capacity of users can be stored in the user login cache. | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- client_auth + +| Name | client_auth | +| ----------- | --------------------------------- | +| Description | Is client authentication required | +| Type | boolean | +| Default | false | +| Effective | Restart required. | + +- trust_store_path + +| Name | trust_store_path | +| ----------- | -------------------- | +| Description | SSL trust store path | +| Type | String | +| Default | "" | +| Effective | Restart required. | + +- trust_store_pwd + +| Name | trust_store_pwd | +| ----------- | ------------------------- | +| Description | SSL trust store password. | +| Type | String | +| Default | "" | +| Effective | Restart required. | + +- idle_timeout_in_seconds + +| Name | idle_timeout_in_seconds | +| ----------- | ------------------------ | +| Description | SSL timeout (in seconds) | +| Type | int32 | +| Default | 5000 | +| Effective | Restart required. | + +### 4.12 Load balancing configuration + +- series_slot_num + +| Name | series_slot_num | +| ----------- | ------------------------------------------- | +| Description | Number of SeriesPartitionSlots per Database | +| Type | int32 | +| Default | 10000 | +| Effective | Modify before the first startup. | + +- series_partition_executor_class + +| Name | series_partition_executor_class | +| ----------- | ------------------------------------------------------------ | +| Description | SeriesPartitionSlot executor class | +| Type | String | +| Default | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor | +| Effective | Modify before the first startup. | + +- schema_region_group_extension_policy + +| Name | schema_region_group_extension_policy | +| ----------- | ------------------------------------------------------------ | +| Description | The policy of extension SchemaRegionGroup for each Database. | +| Type | string | +| Default | AUTO | +| Effective | Restart required. | + +- default_schema_region_group_num_per_database + +| Name | default_schema_region_group_num_per_database | +| ----------- | ------------------------------------------------------------ | +| Description | When set schema_region_group_extension_policy=CUSTOM, this parameter is the default number of SchemaRegionGroups for each Database.When set schema_region_group_extension_policy=AUTO, this parameter is the default minimal number of SchemaRegionGroups for each Database. | +| Type | int | +| Default | 1 | +| Effective | Restart required. 
| + +- schema_region_per_data_node + +| Name | schema_region_per_data_node | +| ----------- | ------------------------------------------------------------ | +| Description | It only takes effect when set schema_region_group_extension_policy=AUTO.This parameter is the maximum number of SchemaRegions expected to be managed by each DataNode. | +| Type | double | +| Default | 1.0 | +| Effective | Restart required. | + +- data_region_group_extension_policy + +| Name | data_region_group_extension_policy | +| ----------- | ---------------------------------------------------------- | +| Description | The policy of extension DataRegionGroup for each Database. | +| Type | string | +| Default | AUTO | +| Effective | Restart required. | + +- default_data_region_group_num_per_database + +| Name | default_data_region_group_per_database | +| ----------- | ------------------------------------------------------------ | +| Description | When set data_region_group_extension_policy=CUSTOM, this parameter is the default number of DataRegionGroups for each Database.When set data_region_group_extension_policy=AUTO, this parameter is the default minimal number of DataRegionGroups for each Database. | +| Type | int | +| Default | 2 | +| Effective | Restart required. | + +- data_region_per_data_node + +| Name | data_region_per_data_node | +| ----------- | ------------------------------------------------------------ | +| Description | It only takes effect when set data_region_group_extension_policy=AUTO.This parameter is the maximum number of DataRegions expected to be managed by each DataNode. | +| Type | double | +| Default | 5.0 | +| Effective | Restart required. | + +- enable_auto_leader_balance_for_ratis_consensus + +| Name | enable_auto_leader_balance_for_ratis_consensus | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to enable auto leader balance for Ratis consensus protocol. | +| Type | Boolean | +| Default | true | +| Effective | Restart required. | + +- enable_auto_leader_balance_for_iot_consensus + +| Name | enable_auto_leader_balance_for_iot_consensus | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to enable auto leader balance for IoTConsensus protocol. | +| Type | Boolean | +| Default | true | +| Effective | Restart required. | + +### 4.13 Cluster management + +- time_partition_origin + +| Name | time_partition_origin | +| ----------- | ------------------------------------------------------------ | +| Description | Time partition origin in milliseconds, default is equal to zero. | +| Type | Long | +| Unit | ms | +| Default | 0 | +| Effective | Modify before the first startup. | + +- time_partition_interval + +| Name | time_partition_interval | +| ----------- | ------------------------------------------------------------ | +| Description | Time partition interval in milliseconds, and partitioning data inside each data region, default is equal to one week | +| Type | Long | +| Unit | ms | +| Default | 604800000 | +| Effective | Modify before the first startup. | + +- heartbeat_interval_in_ms + +| Name | heartbeat_interval_in_ms | +| ----------- | -------------------------------------- | +| Description | The heartbeat interval in milliseconds | +| Type | Long | +| Unit | ms | +| Default | 1000 | +| Effective | Restart required. 
| + +- disk_space_warning_threshold + +| Name | disk_space_warning_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | Disk remaining threshold at which DataNode is set to ReadOnly status | +| Type | double(percentage) | +| Default | 0.05 | +| Effective | Restart required. | + +### 4.14 Memory Control Configuration + +- datanode_memory_proportion + +| Name | datanode_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Memory Allocation Ratio: StorageEngine, QueryEngine, SchemaEngine, Consensus, StreamingEngine and Free Memory. | +| Type | Ratio | +| Default | 3:3:1:1:1:1 | +| Effective | Restart required. | + +- schema_memory_proportion + +| Name | schema_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Schema Memory Allocation Ratio: SchemaRegion, SchemaCache, and PartitionCache. | +| Type | Ratio | +| Default | 5:4:1 | +| Effective | Restart required. | + +- storage_engine_memory_proportion + +| Name | storage_engine_memory_proportion | +| ----------- | ----------------------------------------------------------- | +| Description | Memory allocation ratio in StorageEngine: Write, Compaction | +| Type | Ratio | +| Default | 8:2 | +| Effective | Restart required. | + +- write_memory_proportion + +| Name | write_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Memory allocation ratio in writing: Memtable, TimePartitionInfo | +| Type | Ratio | +| Default | 19:1 | +| Effective | Restart required. | + +- primitive_array_size + +| Name | primitive_array_size | +| ----------- | --------------------------------------------------------- | +| Description | primitive array size (length of each array) in array pool | +| Type | int32 | +| Default | 64 | +| Effective | Restart required. | + +- chunk_metadata_size_proportion + +| Name | chunk_metadata_size_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of compaction memory for chunk metadata maintains in memory when doing compaction | +| Type | Double | +| Default | 0.1 | +| Effective | Restart required. | + +- flush_proportion + +| Name | flush_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of memtable memory for invoking flush disk, 0.4 by defaultIf you have extremely high write load (like batch=1000), it can be set lower than the default value like 0.2 | +| Type | Double | +| Default | 0.4 | +| Effective | Restart required. | + +- buffered_arrays_memory_proportion + +| Name | buffered_arrays_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of memtable memory allocated for buffered arrays, 0.6 by default | +| Type | Double | +| Default | 0.6 | +| Effective | Restart required. | + +- reject_proportion + +| Name | reject_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of memtable memory for rejecting insertion, 0.8 by defaultIf you have extremely high write load (like batch=1000) and the physical memory size is large enough, it can be set higher than the default value like 0.9 | +| Type | Double | +| Default | 0.8 | +| Effective | Restart required. 
| + +- device_path_cache_proportion + +| Name | device_path_cache_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Ratio of memtable memory for the DevicePathCache. DevicePathCache is the deviceId cache, keeping only one copy of the same deviceId in memory | +| Type | Double | +| Default | 0.05 | +| Effective | Restart required. | + +- write_memory_variation_report_proportion + +| Name | write_memory_variation_report_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | If memory cost of data region increased more than proportion of allocated memory for writing, report to system. The default value is 0.001 | +| Type | Double | +| Default | 0.001 | +| Effective | Restart required. | + +- check_period_when_insert_blocked + +| Name | check_period_when_insert_blocked | +| ----------- | ------------------------------------------------------------ | +| Description | When an insertion is rejected, the waiting period (in ms) to check system again, 50 by default.If the insertion has been rejected and the read load is low, it can be set larger. | +| Type | int32 | +| Default | 50 | +| Effective | Restart required. | + +- io_task_queue_size_for_flushing + +| Name | io_task_queue_size_for_flushing | +| ----------- | -------------------------------------------- | +| Description | size of ioTaskQueue. The default value is 10 | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- enable_query_memory_estimation + +| Name | enable_query_memory_estimation | +| ----------- | ------------------------------------------------------------ | +| Description | If true, we will estimate each query's possible memory footprint before executing it and deny it if its estimated memory exceeds current free memory | +| Type | bool | +| Default | true | +| Effective | Hot reload. | + +### 4.15 Schema Engine Configuration + +- schema_engine_mode + +| Name | schema_engine_mode | +| ----------- | ------------------------------------------------------------ | +| Description | The schema management mode of schema engine. Currently, support Memory and PBTree.This config of all DataNodes in one cluster must keep same. | +| Type | string | +| Default | Memory | +| Effective | Modify before the first startup. | + +- partition_cache_size + +| Name | partition_cache_size | +| ----------- | ------------------------- | +| Description | cache size for partition. | +| Type | Int32 | +| Default | 1000 | +| Effective | Restart required. | + +- sync_mlog_period_in_ms + +| Name | sync_mlog_period_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | The cycle when metadata log is periodically forced to be written to disk(in milliseconds)If sync_mlog_period_in_ms=0 it means force metadata log to be written to disk after each refreshmentSetting this parameter to 0 may slow down the operation on slow disk. | +| Type | Int64 | +| Default | 100 | +| Effective | Restart required. | + +- tag_attribute_flush_interval + +| Name | tag_attribute_flush_interval | +| ----------- | ------------------------------------------------------------ | +| Description | interval num for tag and attribute records when force flushing to disk | +| Type | int32 | +| Default | 1000 | +| Effective | Modify before the first startup. 
| + +- tag_attribute_total_size + +| Name | tag_attribute_total_size | +| ----------- | ------------------------------------------------------------ | +| Description | max size for a storage block for tags and attributes of a one-time series | +| Type | int32 | +| Default | 700 | +| Effective | Modify before the first startup. | + +- max_measurement_num_of_internal_request + +| Name | max_measurement_num_of_internal_request | +| ----------- | ------------------------------------------------------------ | +| Description | max measurement num of internal requestWhen creating timeseries with Session.createMultiTimeseries, the user input plan, the timeseries num ofwhich exceeds this num, will be split to several plans with timeseries no more than this num. | +| Type | Int32 | +| Default | 10000 | +| Effective | Restart required. | + +- datanode_schema_cache_eviction_policy + +| Name | datanode_schema_cache_eviction_policy | +| ----------- | --------------------------------------- | +| Description | Policy of DataNodeSchemaCache eviction. | +| Type | String | +| Default | FIFO | +| Effective | Restart required. | + +- cluster_timeseries_limit_threshold + +| Name | cluster_timeseries_limit_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | This configuration parameter sets the maximum number of time series allowed in the cluster. | +| Type | Int32 | +| Default | -1 | +| Effective | Restart required. | + +- cluster_device_limit_threshold + +| Name | cluster_device_limit_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | This configuration parameter sets the maximum number of devices allowed in the cluster. | +| Type | Int32 | +| Default | -1 | +| Effective | Restart required. | + +- database_limit_threshold + +| Name | database_limit_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | This configuration parameter sets the maximum number of Cluster Databases allowed. | +| Type | Int32 | +| Default | -1 | +| Effective | Restart required. | + +### 4.16 Configurations for creating schema automatically + +- enable_auto_create_schema + +| Name | enable_auto_create_schema | +| ----------- | ------------------------------------------------ | +| Description | Whether creating schema automatically is enabled | +| Value | true or false | +| Default | true | +| Effective | Restart required. | + +- default_storage_group_level + +| Name | default_storage_group_level | +| ----------- | ------------------------------------------------------------ | +| Description | Database level when creating schema automatically is enabled e.g. root.sg0.d1.s2We will set root.sg0 as the database if database level is 1If the incoming path is shorter than this value, the creation/insertion will fail. | +| Value | int32 | +| Default | 1 | +| Effective | Restart required. 
| + +- boolean_string_infer_type + +| Name | boolean_string_infer_type | +| ----------- |------------------------------------------------------------------------------------| +| Description | register time series as which type when receiving boolean string "true" or "false" | +| Value | BOOLEAN or TEXT | +| Default | BOOLEAN | +| Effective | Hot_reload | + +- integer_string_infer_type + +| Name | integer_string_infer_type | +| ----------- |------------------------------------------------------------------------------------------------------------------| +| Description | register time series as which type when receiving an integer string and using float or double may lose precision | +| Value | INT32, INT64, FLOAT, DOUBLE, TEXT | +| Default | DOUBLE | +| Effective | Hot_reload | + +- floating_string_infer_type + +| Name | floating_string_infer_type | +| ----------- |----------------------------------------------------------------------------------| +| Description | register time series as which type when receiving a floating number string "6.7" | +| Value | DOUBLE, FLOAT or TEXT | +| Default | DOUBLE | +| Effective | Hot_reload | + +- nan_string_infer_type + +| Name | nan_string_infer_type | +| ----------- |--------------------------------------------------------------------| +| Description | register time series as which type when receiving the Literal NaN. | +| Value | DOUBLE, FLOAT or TEXT | +| Default | DOUBLE | +| Effective | Hot_reload | + +- default_boolean_encoding + +| Name | default_boolean_encoding | +| ----------- |----------------------------------------------------------------| +| Description | BOOLEAN encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE | +| Default | RLE | +| Effective | Hot_reload | + +- default_int32_encoding + +| Name | default_int32_encoding | +| ----------- |--------------------------------------------------------------| +| Description | INT32 encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| Default | TS_2DIFF | +| Effective | Hot_reload | + +- default_int64_encoding + +| Name | default_int64_encoding | +| ----------- |--------------------------------------------------------------| +| Description | INT64 encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| Default | TS_2DIFF | +| Effective | Hot_reload | + +- default_float_encoding + +| Name | default_float_encoding | +| ----------- |--------------------------------------------------------------| +| Description | FLOAT encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE, TS_2DIFF, GORILLA | +| Default | GORILLA | +| Effective | Hot_reload | + +- default_double_encoding + +| Name | default_double_encoding | +| ----------- |---------------------------------------------------------------| +| Description | DOUBLE encoding when creating schema automatically is enabled | +| Value | PLAIN, RLE, TS_2DIFF, GORILLA | +| Default | GORILLA | +| Effective | Hot_reload | + +- default_text_encoding + +| Name | default_text_encoding | +| ----------- |-------------------------------------------------------------| +| Description | TEXT encoding when creating schema automatically is enabled | +| Value | PLAIN | +| Default | PLAIN | +| Effective | Hot_reload | + + +* boolean_compressor + +| Name | boolean_compressor | +|------------------|-----------------------------------------------------------------------------------------| +| Description | 
BOOLEAN compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* int32_compressor + +| Name | int32_compressor | +|----------------------|--------------------------------------------------------------------------------------------| +| Description | INT32/DATE compression when creating schema automatically is enabled(Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* int64_compressor + +| Name | int64_compressor | +|--------------------|-------------------------------------------------------------------------------------------------| +| Description | INT64/TIMESTAMP compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* float_compressor + +| Name | float_compressor | +|-----------------------|---------------------------------------------------------------------------------------| +| Description | FLOAT compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* double_compressor + +| Name | double_compressor | +|-------------------|----------------------------------------------------------------------------------------| +| Description | DOUBLE compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + +* text_compressor + +| Name | text_compressor | +|--------------------|--------------------------------------------------------------------------------------------------| +| Description | TEXT/BINARY/BLOB compression when creating schema automatically is enabled (Supports from V2.0.6) | +| Type | String | +| Default | LZ4 | +| Effective | Hot_reload | + + +### 4.17 Query Configurations + +- read_consistency_level + +| Name | read_consistency_level | +| ----------- | ------------------------------------------------------------ | +| Description | The read consistency levelThese consistency levels are currently supported:strong(Default, read from the leader replica)weak(Read from a random replica) | +| Type | String | +| Default | strong | +| Effective | Restart required. | + +- meta_data_cache_enable + +| Name | meta_data_cache_enable | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to cache meta data (BloomFilter, ChunkMetadata and TimeSeriesMetadata) or not. | +| Type | Boolean | +| Default | true | +| Effective | Restart required. | + +- chunk_timeseriesmeta_free_memory_proportion + +| Name | chunk_timeseriesmeta_free_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | Read memory Allocation Ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others.The parameter form is a:b:c:d:e:f:g:h, where a, b, c, d, e, f, g and h are integers. for example: 1:1:1:1:1:1:1:1 , 1:100:200:50:200:200:200:50 | +| Type | String | +| Default | 1 : 100 : 200 : 300 : 400 | +| Effective | Restart required. | + +- enable_last_cache + +| Name | enable_last_cache | +| ----------- | ---------------------------- | +| Description | Whether to enable LAST cache | +| Type | Boolean | +| Default | true | +| Effective | Restart required. 
| + +- mpp_data_exchange_core_pool_size + +| Name | mpp_data_exchange_core_pool_size | +| ----------- | -------------------------------------------- | +| Description | Core size of ThreadPool of MPP data exchange | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- mpp_data_exchange_max_pool_size + +| Name | mpp_data_exchange_max_pool_size | +| ----------- | ------------------------------------------- | +| Description | Max size of ThreadPool of MPP data exchange | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- mpp_data_exchange_keep_alive_time_in_ms + +| Name | mpp_data_exchange_keep_alive_time_in_ms | +| ----------- | --------------------------------------- | +| Description | Max waiting time for MPP data exchange | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- driver_task_execution_time_slice_in_ms + +| Name | driver_task_execution_time_slice_in_ms | +| ----------- | -------------------------------------- | +| Description | The max execution time of a DriverTask | +| Type | int32 | +| Default | 200 | +| Effective | Restart required. | + +- max_tsblock_size_in_bytes + +| Name | max_tsblock_size_in_bytes | +| ----------- | ----------------------------- | +| Description | The max capacity of a TsBlock | +| Type | int32 | +| Default | 131072 | +| Effective | Restart required. | + +- max_tsblock_line_numbers + +| Name | max_tsblock_line_numbers | +| ----------- | ------------------------------------------- | +| Description | The max number of lines in a single TsBlock | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- slow_query_threshold + +| Name | slow_query_threshold | +| ----------- | -------------------------------------- | +| Description | Time cost(ms) threshold for slow query | +| Type | long | +| Default | 10000 | +| Effective | Hot reload | + +- query_cost_stat_window + +| Name | query_cost_stat_window | +|-------------|--------------------| +| Description | Time window threshold(min) for record of history queries. | +| Type | Int32 | +| Default | 0 | +| Effective | Hot reload | + +- query_timeout_threshold + +| Name | query_timeout_threshold | +| ----------- | ----------------------------------------- | +| Description | The max executing time of query. unit: ms | +| Type | Int32 | +| Default | 60000 | +| Effective | Restart required. | + +- max_allowed_concurrent_queries + +| Name | max_allowed_concurrent_queries | +| ----------- | -------------------------------------------------- | +| Description | The maximum allowed concurrently executing queries | +| Type | Int32 | +| Default | 1000 | +| Effective | Restart required. | + +- query_thread_count + +| Name | query_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | How many threads can concurrently execute query statement. When <= 0, use CPU core number. | +| Type | Int32 | +| Default | 0 | +| Effective | Restart required. | + +- degree_of_query_parallelism + +| Name | degree_of_query_parallelism | +| ----------- | ------------------------------------------------------------ | +| Description | How many pipeline drivers will be created for one fragment instance. When <= 0, use CPU core number / 2. | +| Type | Int32 | +| Default | 0 | +| Effective | Restart required. 
| + +- mode_map_size_threshold + +| Name | mode_map_size_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | The threshold of count map size when calculating the MODE aggregation function | +| Type | Int32 | +| Default | 10000 | +| Effective | Restart required. | + +- batch_size + +| Name | batch_size | +| ----------- | ------------------------------------------------------------ | +| Description | The amount of data iterate each time in server (the number of data strips, that is, the number of different timestamps.) | +| Type | Int32 | +| Default | 100000 | +| Effective | Restart required. | + +- sort_buffer_size_in_bytes + +| Name | sort_buffer_size_in_bytes | +| ----------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Description | The memory for external sort in sort operator, when the data size is smaller than sort_buffer_size_in_bytes, the sort operator will use in-memory sort. | +| Type | long | +| Default | 1048576(Before V2.0.6)
0(Supports from V2.0.6), if `sort_buffer_size_in_bytes <= 0`, default value will be used, `default value = min(32MB, memory for query operators / query_thread_count / 2)`, if `sort_buffer_size_in_bytes > 0`, the specified value will be used. | +| Effective | Hot_reload | + +- merge_threshold_of_explain_analyze + +| Name | merge_threshold_of_explain_analyze | +| ----------- | ------------------------------------------------------------ | +| Description | The threshold of operator count in the result set of EXPLAIN ANALYZE, if the number of operator in the result set is larger than this threshold, operator will be merged. | +| Type | int | +| Default | 10 | +| Effective | Hot reload | + +### 4.18 TTL Configuration + +- ttl_check_interval + +| Name | ttl_check_interval | +| ----------- | ------------------------------------------------------------ | +| Description | The interval of TTL check task in each database. The TTL check task will inspect and select files with a higher volume of expired data for compaction. Default is 2 hours. | +| Type | int | +| Default | 7200000 | +| Effective | Restart required. | + +- max_expired_time + +| Name | max_expired_time | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum expiring time of device which has a ttl. Default is 1 month.If the data elapsed time (current timestamp minus the maximum data timestamp of the device in the file) of such devices exceeds this value, then the file will be cleaned by compaction. | +| Type | int | +| Default | 2592000000 | +| Effective | Restart required. | + +- expired_data_ratio + +| Name | expired_data_ratio | +| ----------- | ------------------------------------------------------------ | +| Description | The expired device ratio. If the ratio of expired devices in one file exceeds this value, then expired data of this file will be cleaned by compaction. | +| Type | float | +| Default | 0.3 | +| Effective | Restart required. | + +### 4.19 Storage Engine Configuration + +- timestamp_precision + +| Name | timestamp_precision | +| ----------- | ------------------------------------------------------------ | +| Description | Use this value to set timestamp precision as "ms", "us" or "ns". | +| Type | String | +| Default | ms | +| Effective | Modify before the first startup. | + +- timestamp_precision_check_enabled + +| Name | timestamp_precision_check_enabled | +| ----------- | ------------------------------------------------------------ | +| Description | When the timestamp precision check is enabled, the timestamps those are over 13 digits for ms precision, or over 16 digits for us precision are not allowed to be inserted. | +| Type | Boolean | +| Default | true | +| Effective | Modify before the first startup. | + +- max_waiting_time_when_insert_blocked + +| Name | max_waiting_time_when_insert_blocked | +| ----------- | ------------------------------------------------------------ | +| Description | When the waiting time (in ms) of an inserting exceeds this, throw an exception. 10000 by default. | +| Type | Int32 | +| Default | 10000 | +| Effective | Restart required. | + +- handle_system_error + +| Name | handle_system_error | +| ----------- | -------------------------------------------------------- | +| Description | What will the system do when unrecoverable error occurs. | +| Type | String | +| Default | CHANGE_TO_READ_ONLY | +| Effective | Restart required. 
| + +- enable_timed_flush_seq_memtable + +| Name | enable_timed_flush_seq_memtable | +| ----------- | --------------------------------------------------- | +| Description | Whether to timed flush sequence tsfiles' memtables. | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- seq_memtable_flush_interval_in_ms + +| Name | seq_memtable_flush_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | If a memTable's last update time is older than current time minus this, the memtable will be flushed to disk. | +| Type | long | +| Default | 600000 | +| Effective | Hot reload | + +- seq_memtable_flush_check_interval_in_ms + +| Name | seq_memtable_flush_check_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | The interval to check whether sequence memtables need flushing. | +| Type | long | +| Default | 30000 | +| Effective | Hot reload | + +- enable_timed_flush_unseq_memtable + +| Name | enable_timed_flush_unseq_memtable | +| ----------- | ----------------------------------------------------- | +| Description | Whether to timed flush unsequence tsfiles' memtables. | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- unseq_memtable_flush_interval_in_ms + +| Name | unseq_memtable_flush_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | If a memTable's last update time is older than current time minus this, the memtable will be flushed to disk. | +| Type | long | +| Default | 600000 | +| Effective | Hot reload | + +- unseq_memtable_flush_check_interval_in_ms + +| Name | unseq_memtable_flush_check_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | The interval to check whether unsequence memtables need flushing. | +| Type | long | +| Default | 30000 | +| Effective | Hot reload | + +- tvlist_sort_algorithm + +| Name | tvlist_sort_algorithm | +| ----------- | ------------------------------------------------- | +| Description | The sort algorithms used in the memtable's TVList | +| Type | String | +| Default | TIM | +| Effective | Restart required. | + +- avg_series_point_number_threshold + +| Name | avg_series_point_number_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | When the average point number of timeseries in memtable exceeds this, the memtable is flushed to disk. | +| Type | int32 | +| Default | 100000 | +| Effective | Restart required. | + +- flush_thread_count + +| Name | flush_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | How many threads can concurrently flush. When <= 0, use CPU core number. | +| Type | int32 | +| Default | 0 | +| Effective | Restart required. | + +- enable_partial_insert + +| Name | enable_partial_insert | +| ----------- | ------------------------------------------------------------ | +| Description | In one insert (one device, one timestamp, multiple measurements), if enable partial insert, one measurement failure will not impact other measurements | +| Type | Boolean | +| Default | true | +| Effective | Restart required. 
| + +- recovery_log_interval_in_ms + +| Name | recovery_log_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | the interval to log recover progress of each vsg when starting iotdb | +| Type | Int32 | +| Default | 5000 | +| Effective | Restart required. | + +- 0.13_data_insert_adapt + +| Name | 0.13_data_insert_adapt | +| ----------- | ------------------------------------------------------------ | +| Description | If using a v0.13 client to insert data, please set this configuration to true. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- enable_tsfile_validation + +| Name | enable_tsfile_validation | +| ----------- | ------------------------------------------------------------ | +| Description | Verify that TSfiles generated by Flush, Load, and Compaction are correct. | +| Type | boolean | +| Default | false | +| Effective | Hot reload | + +- tier_ttl_in_ms + +| Name | tier_ttl_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | Default tier TTL. When the survival time of the data exceeds the threshold, it will be migrated to the next tier. | +| Type | long | +| Default | -1 | +| Effective | Restart required. | + +- max_object_file_size_in_byte + +| Name | max_object_file_size_in_byte | +|-------------|-----------------------------------------------------------------------| +| Description | Maximum size limit for a single object file (supported since V2.0.8). | +| Type | long | +| Default | 4294967296 (4 GB in bytes) | +| Effective | Hot reload | + +- restrict_object_limit + +| Name | restrict_object_limit | +|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Description | No special restrictions on table names, column names, or device identifiers for `OBJECT` type (supported since V2.0.8). When set to `true` and the table contains `OBJECT` columns, the following restrictions apply:
1. Naming Rules: Values in TAG columns, table names, and field names must not use `.` or `..`; Prohibited characters include `./` or `.\`, otherwise metadata creation will fail; Names containing filesystem-unsupported characters will cause write errors.
2. Case Sensitivity: If the underlying filesystem is case-insensitive, device identifiers like `'d1'` and `'D1'` are treated as identical; Creating similar identifiers may overwrite `OBJECT` data files, leading to data corruption.
3. Storage Path: Actual storage path format: `${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin` | +| Type | boolean | +| Default | false | +| Effective | Can only be modified before the first service startup. | + +### 4.20 Compaction Configurations + +- enable_seq_space_compaction + +| Name | enable_seq_space_compaction | +| ----------- | ---------------------------------------------------------- | +| Description | sequence space compaction: only compact the sequence files | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- enable_unseq_space_compaction + +| Name | enable_unseq_space_compaction | +| ----------- | ------------------------------------------------------------ | +| Description | unsequence space compaction: only compact the unsequence files | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- enable_cross_space_compaction + +| Name | enable_cross_space_compaction | +| ----------- | ------------------------------------------------------------ | +| Description | cross space compaction: compact the unsequence files into the overlapped sequence files | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- enable_auto_repair_compaction + +| Name | enable_auto_repair_compaction | +| ----------- | ---------------------------------------------- | +| Description | enable auto repair unsorted file by compaction | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- cross_selector + +| Name | cross_selector | +| ----------- | ------------------------------------------- | +| Description | the selector of cross space compaction task | +| Type | String | +| Default | rewrite | +| Effective | Restart required. | + +- cross_performer + +| Name | cross_performer | +| ----------- |-----------------------------------------------------------| +| Description | the compaction performer of cross space compaction task, Options: read_point, fast | +| Type | String | +| Default | fast | +| Effective | Hot reload . 
| + +- inner_seq_selector + +| Name | inner_seq_selector | +| ----------- |--------------------------------------------------------| +| Description | the selector of inner sequence space compaction task, Options: size_tiered_single_target,size_tiered_multi_target | +| Type | String | +| Default | size_tiered_multi_target | +| Effective | Hot reload | + +- inner_seq_performer + +| Name | inner_seq_performer | +| ----------- |---------------------------------------------------------| +| Description | the performer of inner sequence space compaction task, Options: read_chunk, fast | +| Type | String | +| Default | read_chunk | +| Effective | Hot reload | + +- inner_unseq_selector + +| Name | inner_unseq_selector | +| ----------- |----------------------------------------------------------| +| Description | the selector of inner unsequence space compaction task, Options: size_tiered_single_target,size_tiered_multi_target | +| Type | String | +| Default | size_tiered_multi_target | +| Effective | Hot reload | + +- inner_unseq_performer + +| Name | inner_unseq_performer | +| ----------- |-----------------------------------------------------------| +| Description | the performer of inner unsequence space compaction task, Options: read_point, fast | +| Type | String | +| Default | fast | +| Effective | Hot reload | + +- compaction_priority + +| Name | compaction_priority | +| ----------- | ------------------------------------------------------------ | +| Description | The priority of compaction executionINNER_CROSS: prioritize inner space compaction, reduce the number of files firstCROSS_INNER: prioritize cross space compaction, eliminate the unsequence files firstBALANCE: alternate two compaction types | +| Type | String | +| Default | INNER_CROSS | +| Effective | Restart required. | + +- candidate_compaction_task_queue_size + +| Name | candidate_compaction_task_queue_size | +| ----------- | -------------------------------------------- | +| Description | The size of candidate compaction task queue. | +| Type | int32 | +| Default | 50 | +| Effective | Restart required. | + +- target_compaction_file_size + +| Name | target_compaction_file_size | +| ----------- | ------------------------------------------------------------ | +| Description | This parameter is used in two places:The target tsfile size of inner space compaction.The candidate size of seq tsfile in cross space compaction will be smaller than target_compaction_file_size * 1.5.In most cases, the target file size of cross compaction won't exceed this threshold, and if it does, it will not be much larger than it. | +| Type | Int64 | +| Default | 2147483648 | +| Effective | Hot reload | + +- inner_compaction_total_file_size_threshold + +| Name | inner_compaction_total_file_size_threshold | +| ----------- | ---------------------------------------------------- | +| Description | The total file size limit in inner space compaction. | +| Type | int64 | +| Default | 10737418240 | +| Effective | Hot reload | + +- inner_compaction_total_file_num_threshold + +| Name | inner_compaction_total_file_num_threshold | +| ----------- | --------------------------------------------------- | +| Description | The total file num limit in inner space compaction. 
| +| Type | int32 | +| Default | 100 | +| Effective | Hot reload | + +- max_level_gap_in_inner_compaction + +| Name | max_level_gap_in_inner_compaction | +| ----------- | ----------------------------------------------- | +| Description | The max level gap in inner compaction selection | +| Type | int32 | +| Default | 2 | +| Effective | Hot reload | + +- target_chunk_size + +| Name | target_chunk_size | +| ----------- | ------------------------------------------------------------ | +| Description | The target chunk size in flushing and compaction. If the size of a timeseries in memtable exceeds this, the data will be flushed to multiple chunks.| +| Type | Int64 | +| Default | 1600000 | +| Effective | Restart required. | + +- target_chunk_point_num + +| Name | target_chunk_point_num | +| ----------- |-----------------------------------------------------------------| +| Description | The target point nums in one chunk in flushing and compaction. If the point number of a timeseries in memtable exceeds this, the data will be flushed to multiple chunks. | +| Type | Int64 | +| Default | 100000 | +| Effective | Restart required. | + +- chunk_size_lower_bound_in_compaction + +| Name | chunk_size_lower_bound_in_compaction | +| ----------- | ------------------------------------------------------------ | +| Description | If the chunk size is lower than this threshold, it will be deserialized into points | +| Type | Int64 | +| Default | 128 | +| Effective | Restart required. | + +- chunk_point_num_lower_bound_in_compaction + +| Name | chunk_point_num_lower_bound_in_compaction | +| ----------- |------------------------------------------------------------------------------------------| +| Description | If the chunk point num is lower than this threshold, it will be deserialized into points | +| Type | Int64 | +| Default | 100 | +| Effective | Restart required. | + +- inner_compaction_candidate_file_num + +| Name | inner_compaction_candidate_file_num | +| ----------- | ------------------------------------------------------------ | +| Description | The file num requirement when selecting inner space compaction candidate files | +| Type | int32 | +| Default | 30 | +| Effective | Hot reload | + +- max_cross_compaction_candidate_file_num + +| Name | max_cross_compaction_candidate_file_num | +| ----------- | ------------------------------------------------------------ | +| Description | The max file when selecting cross space compaction candidate files | +| Type | int32 | +| Default | 500 | +| Effective | Hot reload | + +- max_cross_compaction_candidate_file_size + +| Name | max_cross_compaction_candidate_file_size | +| ----------- | ------------------------------------------------------------ | +| Description | The max total size when selecting cross space compaction candidate files | +| Type | Int64 | +| Default | 5368709120 | +| Effective | Hot reload | + +- min_cross_compaction_unseq_file_level + +| Name | min_cross_compaction_unseq_file_level | +| ----------- | ------------------------------------------------------------ | +| Description | The min inner compaction level of unsequence file which can be selected as candidate | +| Type | int32 | +| Default | 1 | +| Effective | Hot reload | + +- compaction_thread_count + +| Name | compaction_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | How many threads will be set up to perform compaction, 10 by default. 
| +| Type | int32 | +| Default | 10 | +| Effective | Hot reload | + +- compaction_max_aligned_series_num_in_one_batch + +| Name | compaction_max_aligned_series_num_in_one_batch | +| ----------- | ------------------------------------------------------------ | +| Description | How many chunk will be compacted in aligned series compaction, 10 by default. | +| Type | int32 | +| Default | 10 | +| Effective | Hot reload | + +- compaction_schedule_interval_in_ms + +| Name | compaction_schedule_interval_in_ms | +| ----------- | ---------------------------------------- | +| Description | The interval of compaction task schedule | +| Type | Int64 | +| Default | 60000 | +| Effective | Restart required. | + +- compaction_write_throughput_mb_per_sec + +| Name | compaction_write_throughput_mb_per_sec | +| ----------- | -------------------------------------------------------- | +| Description | The limit of write throughput merge can reach per second | +| Type | int32 | +| Default | 16 | +| Effective | Restart required. | + +- compaction_read_throughput_mb_per_sec + +| Name | compaction_read_throughput_mb_per_sec | +| ----------- | ------------------------------------------------------- | +| Description | The limit of read throughput merge can reach per second | +| Type | int32 | +| Default | 0 | +| Effective | Hot reload | + +- compaction_read_operation_per_sec + +| Name | compaction_read_operation_per_sec | +| ----------- | ------------------------------------------------------ | +| Description | The limit of read operation merge can reach per second | +| Type | int32 | +| Default | 0 | +| Effective | Hot reload | + +- sub_compaction_thread_count + +| Name | sub_compaction_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | The number of sub compaction threads to be set up to perform compaction. | +| Type | int32 | +| Default | 4 | +| Effective | Hot reload | + +- inner_compaction_task_selection_disk_redundancy + +| Name | inner_compaction_task_selection_disk_redundancy | +| ----------- | ------------------------------------------------------------ | +| Description | Redundancy value of disk availability, only use for inner compaction. | +| Type | double | +| Default | 0.05 | +| Effective | Hot reload | + +- inner_compaction_task_selection_mods_file_threshold + +| Name | inner_compaction_task_selection_mods_file_threshold | +| ----------- | -------------------------------------------------------- | +| Description | Mods file size threshold, only use for inner compaction. | +| Type | long | +| Default | 131072 | +| Effective | Hot reload | + +- compaction_schedule_thread_num + +| Name | compaction_schedule_thread_num | +| ----------- | ------------------------------------------------------------ | +| Description | The number of threads to be set up to select compaction task. | +| Type | int32 | +| Default | 4 | +| Effective | Hot reload | + +### 4.21 Write Ahead Log Configuration + +- wal_mode + +| Name | wal_mode | +| ----------- | ------------------------------------------------------------ | +| Description | The details of these three modes are as follows:DISABLE: the system will disable wal.SYNC: the system will submit wal synchronously, write request will not return until its wal is fsynced to the disk successfully.ASYNC: the system will submit wal asynchronously, write request will return immediately no matter its wal is fsynced to the disk successfully. | +| Type | String | +| Default | ASYNC | +| Effective | Restart required. 
| + +- max_wal_nodes_num + +| Name | max_wal_nodes_num | +| ----------- | ------------------------------------------------------------ | +| Description | each node corresponds to one wal directory The default value 0 means the number is determined by the system, the number is in the range of [data region num / 2, data region num]. | +| Type | int32 | +| Default | 0 | +| Effective | Restart required. | + +- wal_async_mode_fsync_delay_in_ms + +| Name | wal_async_mode_fsync_delay_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | Duration a wal flush operation will wait before calling fsync in the async mode | +| Type | int32 | +| Default | 1000 | +| Effective | Hot reload | + +- wal_sync_mode_fsync_delay_in_ms + +| Name | wal_sync_mode_fsync_delay_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | Duration a wal flush operation will wait before calling fsync in the sync mode | +| Type | int32 | +| Default | 3 | +| Effective | Hot reload | + +- wal_buffer_size_in_byte + +| Name | wal_buffer_size_in_byte | +| ----------- | ---------------------------- | +| Description | Buffer size of each wal node | +| Type | int32 | +| Default | 33554432 | +| Effective | Restart required. | + +- wal_buffer_queue_capacity + +| Name | wal_buffer_queue_capacity | +| ----------- | --------------------------------- | +| Description | Buffer capacity of each wal queue | +| Type | int32 | +| Default | 500 | +| Effective | Restart required. | + +- wal_file_size_threshold_in_byte + +| Name | wal_file_size_threshold_in_byte | +| ----------- | ------------------------------- | +| Description | Size threshold of each wal file | +| Type | int32 | +| Default | 31457280 | +| Effective | Hot reload | + +- wal_min_effective_info_ratio + +| Name | wal_min_effective_info_ratio | +| ----------- | --------------------------------------------------- | +| Description | Minimum ratio of effective information in wal files | +| Type | double | +| Default | 0.1 | +| Effective | Hot reload | + +- wal_memtable_snapshot_threshold_in_byte + +| Name | wal_memtable_snapshot_threshold_in_byte | +| ----------- | ------------------------------------------------------------ | +| Description | MemTable size threshold for triggering MemTable snapshot in wal | +| Type | int64 | +| Default | 8388608 | +| Effective | Hot reload | + +- max_wal_memtable_snapshot_num + +| Name | max_wal_memtable_snapshot_num | +| ----------- | ------------------------------------- | +| Description | MemTable's max snapshot number in wal | +| Type | int32 | +| Default | 1 | +| Effective | Hot reload | + +- delete_wal_files_period_in_ms + +| Name | delete_wal_files_period_in_ms | +| ----------- | ----------------------------------------------------------- | +| Description | The period when outdated wal files are periodically deleted | +| Type | int64 | +| Default | 20000 | +| Effective | Hot reload | + +- wal_throttle_threshold_in_byte + +| Name | wal_throttle_threshold_in_byte | +| ----------- | ------------------------------------------------------------ | +| Description | The minimum size of wal files when throttle down in IoTConsensus | +| Type | long | +| Default | 53687091200 | +| Effective | Hot reload | + +- iot_consensus_cache_window_time_in_ms + +| Name | iot_consensus_cache_window_time_in_ms | +| ----------- | ------------------------------------------------ | +| Description | Maximum wait time of write cache in IoTConsensus | +| Type | long | +| Default 
| -1 | +| Effective | Hot reload | + +- enable_wal_compression + +| Name | iot_consensus_cache_window_time_in_ms | +| ----------- | ------------------------------------- | +| Description | Enable Write Ahead Log compression. | +| Type | boolean | +| Default | true | +| Effective | Hot reload | + +### 4.22 **IoTConsensus Configuration** + +- data_region_iot_max_log_entries_num_per_batch + +| Name | data_region_iot_max_log_entries_num_per_batch | +| ----------- | ------------------------------------------------- | +| Description | The maximum log entries num in IoTConsensus Batch | +| Type | int32 | +| Default | 1024 | +| Effective | Restart required. | + +- data_region_iot_max_size_per_batch + +| Name | data_region_iot_max_size_per_batch | +| ----------- | -------------------------------------- | +| Description | The maximum size in IoTConsensus Batch | +| Type | int32 | +| Default | 16777216 | +| Effective | Restart required. | + +- data_region_iot_max_pending_batches_num + +| Name | data_region_iot_max_pending_batches_num | +| ----------- | ----------------------------------------------- | +| Description | The maximum pending batches num in IoTConsensus | +| Type | int32 | +| Default | 5 | +| Effective | Restart required. | + +- data_region_iot_max_memory_ratio_for_queue + +| Name | data_region_iot_max_memory_ratio_for_queue | +| ----------- | -------------------------------------------------- | +| Description | The maximum memory ratio for queue in IoTConsensus | +| Type | double | +| Default | 0.6 | +| Effective | Restart required. | + +- region_migration_speed_limit_bytes_per_second + +| Name | region_migration_speed_limit_bytes_per_second | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum transit size in byte per second for region migration | +| Type | long | +| Default | 33554432 | +| Effective | Restart required. | + +### 4.23 TsFile Configurations + +- group_size_in_byte + +| Name | group_size_in_byte | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum number of bytes written to disk each time the data in memory is written to disk | +| Type | int32 | +| Default | 134217728 | +| Effective | Hot reload | + +- page_size_in_byte + +| Name | page_size_in_byte | +| ----------- | ------------------------------------------------------------ | +| Description | The memory size for each series writer to pack page, default value is 64KB | +| Type | int32 | +| Default | 65536 | +| Effective | Hot reload | + +- max_number_of_points_in_page + +| Name | max_number_of_points_in_page | +| ----------- | ------------------------------------------- | +| Description | The maximum number of data points in a page | +| Type | int32 | +| Default | 10000 | +| Effective | Hot reload | + +- pattern_matching_threshold + +| Name | pattern_matching_threshold | +| ----------- | ------------------------------------------- | +| Description | The threshold for pattern matching in regex | +| Type | int32 | +| Default | 1000000 | +| Effective | Hot reload | + +- float_precision + +| Name | float_precision | +| ----------- | ------------------------------------------------------------ | +| Description | Floating-point precision of query results.Only effective for RLE and TS_2DIFF encodings.Due to the limitation of machine precision, some values may not be interpreted strictly. 
| +| Type | int32 | +| Default | 2 | +| Effective | Hot reload | + +- value_encoder + +| Name | value_encoder | +| ----------- | ------------------------------------------------------------ | +| Description | Encoder of value series. default value is PLAIN. | +| Type | For int, long data type, also supports TS_2DIFF and RLE(run-length encoding), GORILLA and ZIGZAG. | +| Default | PLAIN | +| Effective | Hot reload | + +- compressor + +| Name | compressor | +| ----------- | ------------------------------------------------------------ | +| Description | Compression configuration And it is also used as the default compressor of time column in aligned timeseries. | +| Type | Data compression method, supports UNCOMPRESSED, SNAPPY, ZSTD, LZMA2 or LZ4. Default value is LZ4 | +| Default | LZ4 | +| Effective | Hot reload | + +- encrypt_flag + +| Name | encrypt_flag | +| ----------- | ---------------------- | +| Description | Enable data encryption | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- encrypt_type + +| Name | encrypt_type | +| ----------- |---------------------------------------| +| Description | The method of data encrytion | +| Type | String | +| Default | org.apache.tsfile.encrypt.UNENCRYPTED | +| Effective | Restart required. | + +- encrypt_key_path + +| Name | encrypt_key_path | +| ----------- | ----------------------------------- | +| Description | The path of key for data encryption | +| Type | String | +| Default | None | +| Effective | Restart required. | + +### 4.24 Authorization Configuration + +- authorizer_provider_class + +| Name | authorizer_provider_class | +| ----------- | ------------------------------------------------------------ | +| Description | which class to serve for authorization. | +| Type | String | +| Default | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer | +| Effective | Restart required. | +| 其他可选值 | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer | + +- openID_url + +| Name | openID_url | +| ----------- | ------------------------------------------------------------ | +| Description | The url of openID server If OpenIdAuthorizer is enabled, then openID_url must be set. | +| Type | String(a http link) | +| Default | None | +| Effective | Restart required. | + +- iotdb_server_encrypt_decrypt_provider + +| Name | iotdb_server_encrypt_decrypt_provider | +| ----------- | ------------------------------------------------------------ | +| Description | encryption provider class | +| Type | String | +| Default | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt | +| Effective | Modify before the first startup. | + +- iotdb_server_encrypt_decrypt_provider_parameter + +| Name | iotdb_server_encrypt_decrypt_provider_parameter | +| ----------- | ----------------------------------------------- | +| Description | encryption provided class parameter | +| Type | String | +| Default | None | +| Effective | Modify before the first startup. | + +- author_cache_size + +| Name | author_cache_size | +| ----------- | --------------------------- | +| Description | Cache size of user and role | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- author_cache_expire_time + +| Name | author_cache_expire_time | +| ----------- | ---------------------------------- | +| Description | Cache expire time of user and role | +| Type | int32 | +| Default | 30 | +| Effective | Restart required. 
| + +### 4.25 UDF Configuration + +- udf_initial_byte_array_length_for_memory_control + +| Name | udf_initial_byte_array_length_for_memory_control | +| ----------- | ------------------------------------------------------------ | +| Description | Used to estimate the memory usage of text fields in a UDF query.It is recommended to set this value to be slightly larger than the average length of all text records. | +| Type | int32 | +| Default | 48 | +| Effective | Restart required. | + +- udf_memory_budget_in_mb + +| Name | udf_memory_budget_in_mb | +| ----------- | ------------------------------------------------------------ | +| Description | How much memory may be used in ONE UDF query (in MB). The upper limit is 20% of allocated memory for read. | +| Type | Float | +| Default | 30.0 | +| Effective | Restart required. | + +- udf_reader_transformer_collector_memory_proportion + +| Name | udf_reader_transformer_collector_memory_proportion | +| ----------- | ------------------------------------------------------------ | +| Description | UDF memory allocation ratio.The parameter form is a:b:c, where a, b, and c are integers. | +| Type | String | +| Default | 1:1:1 | +| Effective | Restart required. | + +- udf_lib_dir + +| Name | udf_lib_dir | +| ----------- | ---------------------------- | +| Description | the udf lib directory | +| Type | String | +| Default | ext/udf(Windows:ext\\udf) | +| Effective | Restart required. | + +### 4.26 Trigger Configuration + +- trigger_lib_dir + +| Name | trigger_lib_dir | +| ----------- | ------------------------- | +| Description | the trigger lib directory | +| Type | String | +| Default | ext/trigger | +| Effective | Restart required. | + +- stateful_trigger_retry_num_when_not_found + +| Name | stateful_trigger_retry_num_when_not_found | +| ----------- | ------------------------------------------------------------ | +| Description | How many times will we retry to found an instance of stateful trigger on DataNodes | +| Type | Int32 | +| Default | 3 | +| Effective | Restart required. | + +### 4.27 **Select-Into Configuration** + +- into_operation_buffer_size_in_byte + +| Name | into_operation_buffer_size_in_byte | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum memory occupied by the data to be written when executing select-into statements. | +| Type | long | +| Default | 104857600 | +| Effective | Hot reload | + +- select_into_insert_tablet_plan_row_limit + +| Name | select_into_insert_tablet_plan_row_limit | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum number of rows can be processed in insert-tablet-plan when executing select-into statements. | +| Type | int32 | +| Default | 10000 | +| Effective | Hot reload | + +- into_operation_execution_thread_count + +| Name | into_operation_execution_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | The number of threads in the thread pool that execute insert-tablet tasks | +| Type | int32 | +| Default | 2 | +| Effective | Restart required. 
| + +### 4.28 Continuous Query Configuration + +- continuous_query_submit_thread_count + +| Name | continuous_query_execution_thread | +| ----------- | ------------------------------------------------------------ | +| Description | The number of threads in the scheduled thread pool that submit continuous query tasks periodically | +| Type | int32 | +| Default | 2 | +| Effective | Restart required. | + +- continuous_query_min_every_interval_in_ms + +| Name | continuous_query_min_every_interval_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | The minimum value of the continuous query execution time interval | +| Type | long (duration) | +| Default | 1000 | +| Effective | Restart required. | + +### 4.29 Pipe Configuration + +- pipe_lib_dir + +| Name | pipe_lib_dir | +| ----------- | ----------------------- | +| Description | the pipe lib directory. | +| Type | string | +| Default | ext/pipe | +| Effective | Not support modify | + +- pipe_subtask_executor_max_thread_num + +| Name | pipe_subtask_executor_max_thread_num | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum number of threads that can be used to execute the pipe subtasks in PipeSubtaskExecutor. The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)). | +| Type | int | +| Default | 5 | +| Effective | Restart required. | + +- pipe_sink_timeout_ms + +| Name | pipe_sink_timeout_ms | +| ----------- | ------------------------------------------------------------ | +| Description | The connection timeout (in milliseconds) for the thrift client. | +| Type | int | +| Default | 900000 | +| Effective | Restart required. | + +- pipe_sink_selector_number + +| Name | pipe_sink_selector_number | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum number of selectors that can be used in the sink.Recommend to set this value to less than or equal to pipe_sink_max_client_number. | +| Type | int | +| Default | 4 | +| Effective | Restart required. | + +- pipe_sink_max_client_number + +| Name | pipe_sink_max_client_number | +| ----------- | ----------------------------------------------------------- | +| Description | The maximum number of clients that can be used in the sink. | +| Type | int | +| Default | 16 | +| Effective | Restart required. | + +- pipe_air_gap_receiver_enabled + +| Name | pipe_air_gap_receiver_enabled | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to enable receiving pipe data through air gap.The receiver can only return 0 or 1 in TCP mode to indicate whether the data is received successfully. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- pipe_air_gap_receiver_port + +| Name | pipe_air_gap_receiver_port | +| ----------- | ------------------------------------------------------------ | +| Description | The port for the server to receive pipe data through air gap. | +| Type | int | +| Default | 9780 | +| Effective | Restart required. | + +- pipe_all_sinks_rate_limit_bytes_per_second + +| Name | pipe_all_sinks_rate_limit_bytes_per_second | +| ----------- | ------------------------------------------------------------ | +| Description | The total bytes that all pipe sinks can transfer per second.When given a value less than or equal to 0, it means no limit. default value is -1, which means no limit. 
| +| Type | double | +| Default | -1 | +| Effective | Hot reload | + +### 4.30 RatisConsensus Configuration + +- config_node_ratis_log_appender_buffer_size_max + +| Name | config_node_ratis_log_appender_buffer_size_max | +| ----------- | ------------------------------------------------------------ | +| Description | max payload size for a single log-sync-RPC from leader to follower of ConfigNode (in byte, by default 16MB) | +| Type | int32 | +| Default | 16777216 | +| Effective | Restart required. | + +- schema_region_ratis_log_appender_buffer_size_max + +| Name | schema_region_ratis_log_appender_buffer_size_max | +| ----------- | ------------------------------------------------------------ | +| Description | max payload size for a single log-sync-RPC from leader to follower of SchemaRegion (in byte, by default 16MB) | +| Type | int32 | +| Default | 16777216 | +| Effective | Restart required. | + +- data_region_ratis_log_appender_buffer_size_max + +| Name | data_region_ratis_log_appender_buffer_size_max | +| ----------- | ------------------------------------------------------------ | +| Description | max payload size for a single log-sync-RPC from leader to follower of DataRegion (in byte, by default 16MB) | +| Type | int32 | +| Default | 16777216 | +| Effective | Restart required. | + +- config_node_ratis_snapshot_trigger_threshold + +| Name | config_node_ratis_snapshot_trigger_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | max numbers of snapshot_trigger_threshold logs to trigger a snapshot of Confignode | +| Type | int32 | +| Default | 400,000 | +| Effective | Restart required. | + +- schema_region_ratis_snapshot_trigger_threshold + +| Name | schema_region_ratis_snapshot_trigger_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | max numbers of snapshot_trigger_threshold logs to trigger a snapshot of SchemaRegion | +| Type | int32 | +| Default | 400,000 | +| Effective | Restart required. | + +- data_region_ratis_snapshot_trigger_threshold + +| Name | data_region_ratis_snapshot_trigger_threshold | +| ----------- | ------------------------------------------------------------ | +| Description | max numbers of snapshot_trigger_threshold logs to trigger a snapshot of DataRegion | +| Type | int32 | +| Default | 400,000 | +| Effective | Restart required. | + +- config_node_ratis_log_unsafe_flush_enable + +| Name | config_node_ratis_log_unsafe_flush_enable | +| ----------- | ------------------------------------------------------ | +| Description | Is confignode allowed flushing Raft Log asynchronously | +| Type | boolean | +| Default | false | +| Effective | Restart required. | + +- schema_region_ratis_log_unsafe_flush_enable + +| Name | schema_region_ratis_log_unsafe_flush_enable | +| ----------- | -------------------------------------------------------- | +| Description | Is schemaregion allowed flushing Raft Log asynchronously | +| Type | boolean | +| Default | false | +| Effective | Restart required. | + +- data_region_ratis_log_unsafe_flush_enable + +| Name | data_region_ratis_log_unsafe_flush_enable | +| ----------- | ------------------------------------------------------ | +| Description | Is dataregion allowed flushing Raft Log asynchronously | +| Type | boolean | +| Default | false | +| Effective | Restart required. 
| + +- config_node_ratis_log_segment_size_max_in_byte + +| Name | config_node_ratis_log_segment_size_max_in_byte | +| ----------- | ------------------------------------------------------------ | +| Description | max capacity of a RaftLog segment file of confignode (in byte, by default 24MB) | +| Type | int32 | +| Default | 25165824 | +| Effective | Restart required. | + +- schema_region_ratis_log_segment_size_max_in_byte + +| Name | schema_region_ratis_log_segment_size_max_in_byte | +| ----------- | ------------------------------------------------------------ | +| Description | max capacity of a RaftLog segment file of schemaregion (in byte, by default 24MB) | +| Type | int32 | +| Default | 25165824 | +| Effective | Restart required. | + +- data_region_ratis_log_segment_size_max_in_byte + +| Name | data_region_ratis_log_segment_size_max_in_byte | +| ----------- | ------------------------------------------------------------ | +| Description | max capacity of a RaftLog segment file of dataregion(in byte, by default 24MB) | +| Type | int32 | +| Default | 25165824 | +| Effective | Restart required. | + +- config_node_simple_consensus_log_segment_size_max_in_byte + +| Name | data_region_ratis_log_segment_size_max_in_byte | +| ----------- | ------------------------------------------------------------ | +| Description | max capacity of a simple log segment file of confignode(in byte, by default 24MB) | +| Type | int32 | +| Default | 25165824 | +| Effective | Restart required. | + +- config_node_ratis_grpc_flow_control_window + +| Name | config_node_ratis_grpc_flow_control_window | +| ----------- | ---------------------------------------------------------- | +| Description | confignode flow control window for ratis grpc log appender | +| Type | int32 | +| Default | 4194304 | +| Effective | Restart required. | + +- schema_region_ratis_grpc_flow_control_window + +| Name | schema_region_ratis_grpc_flow_control_window | +| ----------- | ------------------------------------------------------------ | +| Description | schema region flow control window for ratis grpc log appender | +| Type | int32 | +| Default | 4194304 | +| Effective | Restart required. | + +- data_region_ratis_grpc_flow_control_window + +| Name | data_region_ratis_grpc_flow_control_window | +| ----------- | ----------------------------------------------------------- | +| Description | data region flow control window for ratis grpc log appender | +| Type | int32 | +| Default | 4194304 | +| Effective | Restart required. | + +- config_node_ratis_grpc_leader_outstanding_appends_max + +| Name | config_node_ratis_grpc_leader_outstanding_appends_max | +| ----------- | ----------------------------------------------------- | +| Description | config node grpc line concurrency threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- schema_region_ratis_grpc_leader_outstanding_appends_max + +| Name | schema_region_ratis_grpc_leader_outstanding_appends_max | +| ----------- | ------------------------------------------------------- | +| Description | schema region grpc line concurrency threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- data_region_ratis_grpc_leader_outstanding_appends_max + +| Name | data_region_ratis_grpc_leader_outstanding_appends_max | +| ----------- | ----------------------------------------------------- | +| Description | data region grpc line concurrency threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. 
| + +- config_node_ratis_log_force_sync_num + +| Name | config_node_ratis_log_force_sync_num | +| ----------- | ------------------------------------ | +| Description | config node fsync threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- schema_region_ratis_log_force_sync_num + +| Name | schema_region_ratis_log_force_sync_num | +| ----------- | -------------------------------------- | +| Description | schema region fsync threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- data_region_ratis_log_force_sync_num + +| Name | data_region_ratis_log_force_sync_num | +| ----------- | ------------------------------------ | +| Description | data region fsync threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- config_node_ratis_rpc_leader_election_timeout_min_ms + +| Name | config_node_ratis_rpc_leader_election_timeout_min_ms | +| ----------- | ---------------------------------------------------- | +| Description | confignode leader min election timeout | +| Type | int32 | +| Default | 2000ms | +| Effective | Restart required. | + +- schema_region_ratis_rpc_leader_election_timeout_min_ms + +| Name | schema_region_ratis_rpc_leader_election_timeout_min_ms | +| ----------- | ------------------------------------------------------ | +| Description | schema region leader min election timeout | +| Type | int32 | +| Default | 2000ms | +| Effective | Restart required. | + +- data_region_ratis_rpc_leader_election_timeout_min_ms + +| Name | data_region_ratis_rpc_leader_election_timeout_min_ms | +| ----------- | ---------------------------------------------------- | +| Description | data region leader min election timeout | +| Type | int32 | +| Default | 2000ms | +| Effective | Restart required. | + +- config_node_ratis_rpc_leader_election_timeout_max_ms + +| Name | config_node_ratis_rpc_leader_election_timeout_max_ms | +| ----------- | ---------------------------------------------------- | +| Description | confignode leader max election timeout | +| Type | int32 | +| Default | 4000ms | +| Effective | Restart required. | + +- schema_region_ratis_rpc_leader_election_timeout_max_ms + +| Name | schema_region_ratis_rpc_leader_election_timeout_max_ms | +| ----------- | ------------------------------------------------------ | +| Description | schema region leader max election timeout | +| Type | int32 | +| Default | 4000ms | +| Effective | Restart required. | + +- data_region_ratis_rpc_leader_election_timeout_max_ms + +| Name | data_region_ratis_rpc_leader_election_timeout_max_ms | +| ----------- | ---------------------------------------------------- | +| Description | data region leader max election timeout | +| Type | int32 | +| Default | 4000ms | +| Effective | Restart required. | + +- config_node_ratis_request_timeout_ms + +| Name | config_node_ratis_request_timeout_ms | +| ----------- | --------------------------------------- | +| Description | confignode ratis client retry threshold | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. | + +- schema_region_ratis_request_timeout_ms + +| Name | schema_region_ratis_request_timeout_ms | +| ----------- | ------------------------------------------ | +| Description | schema region ratis client retry threshold | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. 
| + +- data_region_ratis_request_timeout_ms + +| Name | data_region_ratis_request_timeout_ms | +| ----------- | ---------------------------------------- | +| Description | data region ratis client retry threshold | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. | + +- config_node_ratis_max_retry_attempts + +| Name | config_node_ratis_max_retry_attempts | +| ----------- | ------------------------------------ | +| Description | confignode ratis client retry times | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- config_node_ratis_initial_sleep_time_ms + +| Name | config_node_ratis_initial_sleep_time_ms | +| ----------- | ------------------------------------------ | +| Description | confignode ratis client initial sleep time | +| Type | int32 | +| Default | 100ms | +| Effective | Restart required. | + +- config_node_ratis_max_sleep_time_ms + +| Name | config_node_ratis_max_sleep_time_ms | +| ----------- | -------------------------------------------- | +| Description | confignode ratis client max retry sleep time | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. | + +- schema_region_ratis_max_retry_attempts + +| Name | schema_region_ratis_max_retry_attempts | +| ----------- | ------------------------------------------ | +| Description | schema region ratis client max retry times | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- schema_region_ratis_initial_sleep_time_ms + +| Name | schema_region_ratis_initial_sleep_time_ms | +| ----------- | ------------------------------------------ | +| Description | schema region ratis client init sleep time | +| Type | int32 | +| Default | 100ms | +| Effective | Restart required. | + +- schema_region_ratis_max_sleep_time_ms + +| Name | schema_region_ratis_max_sleep_time_ms | +| ----------- | ----------------------------------------- | +| Description | schema region ratis client max sleep time | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- data_region_ratis_max_retry_attempts + +| Name | data_region_ratis_max_retry_attempts | +| ----------- | --------------------------------------------- | +| Description | data region ratis client max retry sleep time | +| Type | int32 | +| Default | 10 | +| Effective | Restart required. | + +- data_region_ratis_initial_sleep_time_ms + +| Name | data_region_ratis_initial_sleep_time_ms | +| ----------- | ---------------------------------------- | +| Description | data region ratis client init sleep time | +| Type | int32 | +| Default | 100ms | +| Effective | Restart required. | + +- data_region_ratis_max_sleep_time_ms + +| Name | data_region_ratis_max_sleep_time_ms | +| ----------- | --------------------------------------------- | +| Description | data region ratis client max retry sleep time | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- ratis_first_election_timeout_min_ms + +| Name | ratis_first_election_timeout_min_ms | +| ----------- | ----------------------------------- | +| Description | Ratis first election min timeout | +| Type | int64 | +| Default | 50 (ms) | +| Effective | Restart required. | + +- ratis_first_election_timeout_max_ms + +| Name | ratis_first_election_timeout_max_ms | +| ----------- | ----------------------------------- | +| Description | Ratis first election max timeout | +| Type | int64 | +| Default | 150 (ms) | +| Effective | Restart required. 
| + +- config_node_ratis_preserve_logs_num_when_purge + +| Name | config_node_ratis_preserve_logs_num_when_purge | +| ----------- | ------------------------------------------------------------ | +| Description | confignode snapshot preserves certain logs when taking snapshot and purge | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- schema_region_ratis_preserve_logs_num_when_purge + +| Name | schema_region_ratis_preserve_logs_num_when_purge | +| ----------- | ------------------------------------------------------------ | +| Description | schema region snapshot preserves certain logs when taking snapshot and purge | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- data_region_ratis_preserve_logs_num_when_purge + +| Name | data_region_ratis_preserve_logs_num_when_purge | +| ----------- | ------------------------------------------------------------ | +| Description | data region snapshot preserves certain logs when taking snapshot and purge | +| Type | int32 | +| Default | 1000 | +| Effective | Restart required. | + +- config_node_ratis_log_max_size + +| Name | config_node_ratis_log_max_size | +| ----------- | -------------------------------------- | +| Description | config node Raft Log disk size control | +| Type | int64 | +| Default | 2147483648 (2GB) | +| Effective | Restart required. | + +- schema_region_ratis_log_max_size + +| Name | schema_region_ratis_log_max_size | +| ----------- | ---------------------------------------- | +| Description | schema region Raft Log disk size control | +| Type | int64 | +| Default | 2147483648 (2GB) | +| Effective | Restart required. | + +- data_region_ratis_log_max_size + +| Name | data_region_ratis_log_max_size | +| ----------- | -------------------------------------- | +| Description | data region Raft Log disk size control | +| Type | int64 | +| Default | 21474836480 (20GB) | +| Effective | Restart required. | + +- config_node_ratis_periodic_snapshot_interval + +| Name | config_node_ratis_periodic_snapshot_interval | +| ----------- | -------------------------------------------- | +| Description | config node Raft periodic snapshot interval | +| Type | int64 | +| Default | 86400 (s) | +| Effective | Restart required. | + +- schema_region_ratis_periodic_snapshot_interval + +| Name | schema_region_ratis_preserve_logs_num_when_purge | +| ----------- | ------------------------------------------------ | +| Description | schema region Raft periodic snapshot interval | +| Type | int64 | +| Default | 86400 (s) | +| Effective | Restart required. | + +- data_region_ratis_periodic_snapshot_interval + +| Name | data_region_ratis_preserve_logs_num_when_purge | +| ----------- | ---------------------------------------------- | +| Description | data region Raft periodic snapshot interval | +| Type | int64 | +| Default | 86400 (s) | +| Effective | Restart required. | + +### 4.31 IoTConsensusV2 Configuration + +- iot_consensus_v2_pipeline_size + +| Name | iot_consensus_v2_pipeline_size | +| ----------- | ------------------------------------------------------------ | +| Description | Default event buffer size for connector and receiver in iot consensus v2 | +| Type | int | +| Default | 5 | +| Effective | Restart required. | + +- iot_consensus_v2_mode + +| Name | iot_consensus_v2_pipeline_size | +| ----------- | ------------------------------ | +| Description | IoTConsensusV2 mode. | +| Type | String | +| Default | batch | +| Effective | Restart required. 
+ +### 4.32 Procedure Configuration + +- procedure_core_worker_thread_count + +| Name | procedure_core_worker_thread_count | +| ----------- | ------------------------------------- | +| Description | Default number of worker threads | +| Type | int32 | +| Default | 4 | +| Effective | Restart required. | + +- procedure_completed_clean_interval + +| Name | procedure_completed_clean_interval | +| ----------- | ------------------------------------------------------------ | +| Description | Time interval at which the completed-procedure cleaner runs, in seconds | +| Type | int32 | +| Default | 30(s) | +| Effective | Restart required. | + +- procedure_completed_evict_ttl + +| Name | procedure_completed_evict_ttl | +| ----------- | ------------------------------------------------------- | +| Description | TTL of completed procedures, in seconds | +| Type | int32 | +| Default | 60(s) | +| Effective | Restart required. | + +### 4.33 MQTT Broker Configuration + +- enable_mqtt_service + +| Name | enable_mqtt_service | +| ----------- | ----------------------------------- | +| Description | whether to enable the MQTT service. | +| Type | Boolean | +| Default | false | +| Effective | Hot reload | + +- mqtt_host + +| Name | mqtt_host | +| ----------- | ------------------------------ | +| Description | the MQTT service binding host. | +| Type | String | +| Default | 127.0.0.1 | +| Effective | Hot reload | + +- mqtt_port + +| Name | mqtt_port | +| ----------- | ------------------------------ | +| Description | the MQTT service binding port. | +| Type | int32 | +| Default | 1883 | +| Effective | Hot reload | + +- mqtt_handler_pool_size + +| Name | mqtt_handler_pool_size | +| ----------- | ---------------------------------------------------- | +| Description | the handler pool size for handling MQTT messages. | +| Type | int32 | +| Default | 1 | +| Effective | Hot reload | + +- mqtt_payload_formatter + +| Name | mqtt_payload_formatter | +| ----------- | ----------------------------------- | +| Description | the MQTT message payload formatter. | +| Type | String | +| Default | json | +| Effective | Hot reload | + +- mqtt_max_message_size + +| Name | mqtt_max_message_size | +| ----------- | ---------------------------------- | +| Description | max length of an MQTT message, in bytes | +| Type | int32 | +| Default | 1048576 | +| Effective | Hot reload | + +### 4.34 Audit log Configuration + +- enable_audit_log + +| Name | enable_audit_log | +| ----------- | -------------------------------- | +| Description | whether to enable the audit log. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- audit_log_storage + +| Name | audit_log_storage | +| ----------- | ----------------------------- | +| Description | Output location of audit logs | +| Type | String | +| Default | IOTDB,LOGGER | +| Effective | Restart required. | + +- audit_log_operation + +| Name | audit_log_operation | +| ----------- | ------------------------------------------------------------ | +| Description | which operation categories are audited: DML operations on data, DDL operations on schema, and QUERY operations on data and schema | +| Type | String | +| Default | DML,DDL,QUERY | +| Effective | Restart required.
| + +- enable_audit_log_for_native_insert_api + +| Name | enable_audit_log_for_native_insert_api | +| ----------- | ---------------------------------------------- | +| Description | whether the native insert API records audit logs | +| Type | Boolean | +| Default | true | +| Effective | Restart required. | + +### 4.35 White List Configuration + +- enable_white_list + +| Name | enable_white_list | +| ----------- | --------------------------------- | +| Description | whether to enable the white list | +| Type | Boolean | +| Default | false | +| Effective | Hot reload | + +### 4.36 IoTDB-AI Configuration + +- model_inference_execution_thread_count + +| Name | model_inference_execution_thread_count | +| ----------- | ------------------------------------------------------------ | +| Description | The number of threads available for model inference operations. | +| Type | int | +| Default | 5 | +| Effective | Restart required. | + +### 4.37 Load TsFile Configuration + +- load_clean_up_task_execution_delay_time_seconds + +| Name | load_clean_up_task_execution_delay_time_seconds | +| ----------- | ------------------------------------------------------------ | +| Description | The delay after which the clean-up task removes unsuccessfully loaded TsFiles, in seconds. | +| Type | int | +| Default | 1800 | +| Effective | Hot reload | + +- load_write_throughput_bytes_per_second + +| Name | load_write_throughput_bytes_per_second | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum disk write throughput when loading TsFiles, in bytes per second. | +| Type | int | +| Default | -1 | +| Effective | Hot reload | + +- load_active_listening_enable + +| Name | load_active_listening_enable | +| ----------- | ------------------------------------------------------------ | +| Description | Whether to enable the active listening mode for TsFile loading. | +| Type | Boolean | +| Default | true | +| Effective | Hot reload | + +- load_active_listening_dirs + +| Name | load_active_listening_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | The directories actively listened to for TsFile loading. Multiple directories should be separated by ','. | +| Type | String | +| Default | ext/load/pending | +| Effective | Hot reload | + +- load_active_listening_fail_dir + +| Name | load_active_listening_fail_dir | +| ----------- | ------------------------------------------------------------ | +| Description | The directory where TsFiles are moved if the active listening mode fails to load them. | +| Type | String | +| Default | ext/load/failed | +| Effective | Hot reload | + +- load_active_listening_max_thread_num + +| Name | load_active_listening_max_thread_num | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum number of threads used to actively load TsFiles. When this parameter is commented out or <= 0, the number of CPU cores is used. | +| Type | Long | +| Default | 0 | +| Effective | Restart required. | + +- load_active_listening_check_interval_seconds + +| Name | load_active_listening_check_interval_seconds | +| ----------- | ------------------------------------------------------------ | +| Description | The interval, in seconds, at which the active listening mode checks the directories specified in `load_active_listening_dirs`. | +| Type | Long | +| Default | 5 | +| Effective | Restart required. |
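+ +As a minimal sketch, the active listening parameters above could be set in `conf/iotdb-system.properties` as follows (paths are the documented defaults; adjust them to your deployment): + +```Properties +load_active_listening_enable=true +# directories watched for TsFiles to load, comma-separated +load_active_listening_dirs=ext/load/pending +# TsFiles that fail to load are moved here +load_active_listening_fail_dir=ext/load/failed +load_active_listening_check_interval_seconds=5 +```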
+ +* last_cache_operation_on_load + +|Name| last_cache_operation_on_load | +|:---:|:---| +|Description| The operation performed to LastCache when a TsFile is successfully loaded. `UPDATE`: use the data in the TsFile to update LastCache; `UPDATE_NO_BLOB`: similar to UPDATE, but will invalidate LastCache for blob series; `CLEAN_DEVICE`: invalidate LastCache of devices contained in the TsFile; `CLEAN_ALL`: clean the whole LastCache. | +|Type| String | +|Default| UPDATE_NO_BLOB | +|Effective| Effective after restart | + +* cache_last_values_for_load + +|Name| cache_last_values_for_load | +|:---:|:---| +|Description| Whether to cache last values before loading a TsFile. Only effective when `last_cache_operation_on_load=UPDATE_NO_BLOB` or `last_cache_operation_on_load=UPDATE`. When set to true, blob series will be ignored even with `last_cache_operation_on_load=UPDATE`. Enabling this will increase the memory footprint during loading TsFiles. | +|Type| Boolean | +|Default| true | +|Effective| Effective after restart | + +* cache_last_values_memory_budget_in_byte + +|Name| cache_last_values_memory_budget_in_byte | +|:---:|:---| +|Description| When `cache_last_values_for_load=true`, the maximum memory that can be used to cache last values. If this value is exceeded, the cached values will be abandoned and last values will be read from the TsFile in a streaming manner. | +|Type| int32 | +|Default| 4194304 | +|Effective| Effective after restart | + + +### 4.38 Dispatch Retry Configuration + +- write_request_remote_dispatch_max_retry_duration_in_ms + +| Name | write_request_remote_dispatch_max_retry_duration_in_ms | +| ----------- | ------------------------------------------------------------ | +| Description | The maximum retrying duration for remote dispatching of write requests, in milliseconds. | +| Type | Long | +| Default | 60000 | +| Effective | Hot reload | + +- enable_retry_for_unknown_error + +| Name | enable_retry_for_unknown_error | +| ----------- | ------------------------------------ | +| Description | Whether to retry on unknown errors. | +| Type | boolean | +| Default | false | +| Effective | Hot reload | \ No newline at end of file diff --git a/src/UserGuide/Master/Table/SQL-Manual/Basis-Function.md b/src/UserGuide/Master/Table/SQL-Manual/Basis-Function.md index 4b4cdf97d..46a8529cd 100644 --- a/src/UserGuide/Master/Table/SQL-Manual/Basis-Function.md +++ b/src/UserGuide/Master/Table/SQL-Manual/Basis-Function.md @@ -1,3 +1,6 @@ +--- +redirectTo: Basis-Function_apache.html +--- - -# Basic Functions - -## 1.
Comparison Functions and Operators - -### 1.1 Basic Comparison Operators - -Comparison operators are used to compare two values and return the comparison result (`true` or `false`). - -| Operators | Description | -| :-------- | :----------------------- | -| < | Less than | -| > | Greater than | -| <= | Less than or equal to | -| >= | Greater than or equal to | -| = | Equal to | -| <> | Not equal to | -| != | Not equal to | - -#### 1.1.1 Comparison rules: - -1. All types can be compared with themselves. -2. Numeric types (INT32, INT64, FLOAT, DOUBLE, TIMESTAMP) can be compared with each other. -3. Character types (STRING, TEXT) can also be compared with each other. -4. Comparisons between types other than those mentioned above will result in an error. - -### 1.2 BETWEEN Operator - -1. The `BETWEEN` operator is used to determine whether a value falls within a specified range. -2. The `NOT BETWEEN` operator is used to determine whether a value does not fall within a specified range. -3. The `BETWEEN` and `NOT BETWEEN` operators can be used to evaluate any sortable type. -4. The value, minimum, and maximum parameters for `BETWEEN` and `NOT BETWEEN` must be of the same type, otherwise an error will occur. - -Syntax: - -```SQL - value BETWEEN min AND max - value NOT BETWEEN min AND max -``` - -Example 1: BETWEEN - -```SQL --- Query records where temperature is between 85.0 and 90.0 -SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0; -``` - -Example 2: NOT BETWEEN - -```SQL --- Query records where humidity is not between 35.0 and 40.0 -SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0; -``` - -### 1.3 IS NULL Operator - -The `IS NULL` and `IS NOT NULL` operators apply to all data types. - -Example 1: Query records where temperature is NULL - -```SQL -SELECT * FROM table1 WHERE temperature IS NULL; -``` - -Example 2: Query records where humidity is not NULL - -```SQL -SELECT * FROM table1 WHERE humidity IS NOT NULL; -``` - -### 1.4 IN Operator - -1. The `IN` operator can be used in the `WHERE` clause to compare a column with a list of values. -2. These values can be provided by a static array or scalar expressions. - -Syntax: - -```SQL -... WHERE column [NOT] IN ('value1','value2', expression1) -``` - -Example 1: Static array: Query records where region is 'Beijing' or 'Shanghai' - -```SQL -SELECT * FROM table1 WHERE region IN ('Beijing', 'Shanghai'); ---Equivalent to -SELECT * FROM table1 WHERE region = 'Beijing' OR region = 'Shanghai'; -``` - -Example 2: Scalar expression: Query records where temperature is among specific values - -```SQL -SELECT * FROM table1 WHERE temperature IN (85.0, 90.0); -``` - -Example 3: Query records where region is not 'Beijing' or 'Shanghai' - -```SQL -SELECT * FROM table1 WHERE region NOT IN ('Beijing', 'Shanghai'); -``` - -### 1.5 GREATEST and LEAST - -The `GREATEST` function returns the maximum value from a list of arguments, while the `LEAST` function returns the minimum value. The return type matches the input data type. - -Key Behaviors: -1. NULL Handling: Returns NULL if all arguments are NULL. -2. Parameter Requirements: Requires at least 2 arguments. -3. Type Constraints: All arguments must have the same data type. -4.
Supported Types: `BOOLEAN`, `FLOAT`, `DOUBLE`, `INT32`, `INT64`, `STRING`, `TEXT`, `TIMESTAMP`, `DATE` - -**Syntax:** - -```sql - greatest(value1, value2, ..., valueN) - least(value1, value2, ..., valueN) -``` - -**Examples:** - -```sql --- Retrieve the maximum value between `temperature` and `humidity` in `table2` -SELECT GREATEST(temperature,humidity) FROM table2; - --- Retrieve the minimum value between `temperature` and `humidity` in `table2` -SELECT LEAST(temperature,humidity) FROM table2; -``` - -## 2. Aggregate functions - -### 2.1 Overview - -1. Aggregate functions are many-to-one functions. They perform aggregate calculations on a set of values to obtain a single aggregate result. - -2. Except for `COUNT()`, all other aggregate functions ignore null values and return null when there are no input rows or all values are null. For example, `SUM()` returns null instead of zero, and `AVG()` does not include null values in the count. - -### 2.2 Supported Aggregate Functions - -| Function Name | Description | Allowed Input Types | Output Type | -|:-----------------------|:---|:---|:---| -| COUNT | Counts the number of data points. | All types | INT64 | -| COUNT_IF | COUNT_IF(exp) counts the number of rows that satisfy a specified boolean expression. | `exp` must be a boolean expression (e.g. `count_if(temperature>20)`) | INT64 | -| APPROX_COUNT_DISTINCT | The APPROX_COUNT_DISTINCT(x[, maxStandardError]) function provides an approximation of COUNT(DISTINCT x), returning the estimated number of distinct input values. | `x`: The target column to be calculated, supports all data types.
`maxStandardError` (optional): Specifies the maximum standard error allowed for the function's result. Valid range is [0.0040625, 0.26]. Defaults to 0.023 if not specified. | INT64 | -| APPROX_MOST_FREQUENT | The APPROX_MOST_FREQUENT(x, k, capacity) function is used to approximately calculate the top k most frequent elements in a dataset. It returns a JSON-formatted string where the keys are the element values and the values are their corresponding approximate frequencies. (Available since V2.0.5.1) | `x` : The column to be calculated, supporting all existing data types in IoTDB;
`k`: The number of top-k most frequent values to return;
`capacity`: The number of buckets used for computation, which relates to memory usage—a larger value reduces error but consumes more memory, while a smaller value increases error but uses less memory. | STRING | -| SUM | Calculates the sum. | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| AVG | Calculates the average. | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| MAX | Finds the maximum value. | All types | Same as input type | -| MIN | Finds the minimum value. | All types | Same as input type | -| FIRST | Finds the value with the smallest timestamp that is not NULL. | All types | Same as input type | -| LAST | Finds the value with the largest timestamp that is not NULL. | All types | Same as input type | -| STDDEV | Alias for STDDEV_SAMP, calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| STDDEV_POP | Calculates the population standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| STDDEV_SAMP | Calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| VARIANCE | Alias for VAR_SAMP, calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| VAR_POP | Calculates the population variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| VAR_SAMP | Calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| EXTREME | Finds the value with the largest absolute value. If the largest absolute values of positive and negative values are equal, returns the positive value. | INT32 INT64 FLOAT DOUBLE | Same as input type | -| MODE | Finds the mode. Note: 1. There is a risk of memory exception when the number of distinct values in the input sequence is too large; 2. If all elements have the same frequency, i.e., there is no mode, a random element is returned; 3. If there are multiple modes, a random mode is returned; 4. NULL values are also counted in frequency, so even if not all values in the input sequence are NULL, the final result may still be NULL. | All types | Same as input type | -| MAX_BY | MAX_BY(x, y) finds the value of x corresponding to the maximum y in the binary input x and y. MAX_BY(time, x) returns the timestamp when x is at its maximum. | x and y can be of any type | Same as the data type of the first input x | -| MIN_BY | MIN_BY(x, y) finds the value of x corresponding to the minimum y in the binary input x and y. MIN_BY(time, x) returns the timestamp when x is at its minimum. | x and y can be of any type | Same as the data type of the first input x | -| FIRST_BY | FIRST_BY(x, y) finds the value of x in the same row when y is the first non-null value. | x and y can be of any type | Same as the data type of the first input x | -| LAST_BY | LAST_BY(x, y) finds the value of x in the same row when y is the last non-null value. | x and y can be of any type | Same as the data type of the first input x | - - -### 2.3 Examples - -#### 2.3.1 Example Data - -The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results. - -#### 2.3.2 Count - -Counts the number of rows in the entire table and the number of non-null values in the `temperature` column. - -```SQL -IoTDB> select count(*), count(temperature) from table1; -``` - -The execution result is as follows: - -> Note: Only the COUNT function can be used with *, otherwise an error will occur. 
- -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 18| 12| -+-----+-----+ -Total line number = 1 -It costs 0.834s -``` - - -#### 2.3.3 Count_if - -Counts non-null `arrival_time` records in `table2`. - -```sql -select count_if(arrival_time is not null) from table2; -``` - -The execution result is as follows: - -```sql -+-----+ -|_col0| -+-----+ -| 4| -+-----+ -Total line number = 1 -It costs 0.047s -``` - -#### 2.3.4 Approx_count_distinct - -Retrieve the number of distinct values in the `temperature` column from `table1`. - -```sql -IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1; -IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1; -``` - -The execution result is as follows: - -```sql -+------+------+ -|origin|approx| -+------+------+ -| 3| 3| -+------+------+ -Total line number = 1 -It costs 0.022s -``` - -#### 2.3.5 Approx_most_frequent - -Query the top 2 most frequent values in the `temperature` column of `table1`. - -```sql -IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1; -``` - -The execution result is as follows: - -```sql -+-------------------+ -| topk| -+-------------------+ -|{"85.0":6,"90.0":5}| -+-------------------+ -Total line number = 1 -It costs 0.064s -``` - - -#### 2.3.6 First - -Finds the values with the smallest timestamp that are not NULL in the `temperature` and `humidity` columns. - -```SQL -IoTDB> select first(temperature), first(humidity) from table1; -``` - -The execution result is as follows: - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 90.0| 35.1| -+-----+-----+ -Total line number = 1 -It costs 0.170s -``` - -#### 2.3.7 Last - -Finds the values with the largest timestamp that are not NULL in the `temperature` and `humidity` columns. - -```SQL -IoTDB> select last(temperature), last(humidity) from table1; -``` - -The execution result is as follows: - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 90.0| 34.8| -+-----+-----+ -Total line number = 1 -It costs 0.211s -``` - -#### 2.3.8 First_by - -Finds the `time` value of the row with the smallest timestamp that is not NULL in the `temperature` column, and the `humidity` value of that same row. - -```SQL -IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1; -``` - -The execution result is as follows: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-26T13:37:00.000+08:00| 35.1| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.269s -``` - -#### 2.3.9 Last_by - -Queries the `time` value of the row with the largest timestamp that is not NULL in the `temperature` column, and the `humidity` value of that same row. - -```SQL -IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1; -``` - -The execution result is as follows: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-30T14:30:00.000+08:00| 34.8| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.070s -``` - -#### 2.3.10 Max_by - -Queries the `time` value of the row where the `temperature` column is at its maximum, and the `humidity` value of that same row.
- -```SQL -IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; -``` - -The execution result is as follows: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-30T09:30:00.000+08:00| 35.2| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.172s -``` - -#### 2.3.11 Min_by - -Queries the `time` value of the row where the `temperature` column is at its minimum, and the `humidity` value of the row where the `temperature` column is at its minimum. - -```SQL -select min_by(time, temperature), min_by(humidity, temperature) from table1; -``` - -The execution result is as follows: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-29T10:00:00.000+08:00| null| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.244s -``` - - -## 3. Logical operators - -### 3.1 Overview - -Logical operators are used to combine conditions or negate conditions, returning a Boolean result (`true` or `false`). - -Below are the commonly used logical operators along with their descriptions: - -| Operator | Description | Example | -| :------- | :-------------------------------- | :------ | -| AND | True only if both values are true | a AND b | -| OR | True if either value is true | a OR b | -| NOT | True when the value is false | NOT a | - -### 3.2 Impact of NULL on Logical Operators - -#### 3.2.1 AND Operator - -- If one or both sides of the expression are `NULL`, the result may be `NULL`. -- If one side of the `AND` operator is `FALSE`, the expression result is `FALSE`. - -Examples: - -```SQL -NULL AND true -- null -NULL AND false -- false -NULL AND NULL -- null -``` - -#### 3.2.2 OR Operator - -- If one or both sides of the expression are `NULL`, the result may be `NULL`. -- If one side of the `OR` operator is `TRUE`, the expression result is `TRUE`. - -Examples: - -```SQL -NULL OR NULL -- null -NULL OR false -- null -NULL OR true -- true -``` - -##### 3.2.2.1 Truth Table - -The following truth table illustrates how `NULL` is handled in `AND` and `OR` operators: - -| a | b | a AND b | a OR b | -| :---- | :---- | :------ | :----- | -| TRUE | TRUE | TRUE | TRUE | -| TRUE | FALSE | FALSE | TRUE | -| TRUE | NULL | NULL | TRUE | -| FALSE | TRUE | FALSE | TRUE | -| FALSE | FALSE | FALSE | FALSE | -| FALSE | NULL | FALSE | NULL | -| NULL | TRUE | NULL | TRUE | -| NULL | FALSE | FALSE | NULL | -| NULL | NULL | NULL | NULL | - -#### 3.2.3 NOT Operator - -The logical negation of `NULL` remains `NULL`. - -Example: - -```SQL -NOT NULL -- null -``` - -##### 3.2.3.1 Truth Table - -The following truth table illustrates how `NULL` is handled in the `NOT` operator: - -| a | NOT a | -| :---- | :---- | -| TRUE | FALSE | -| FALSE | TRUE | -| NULL | NULL | - -## 4. Date and Time Functions and Operators - -### 4.1 now() -> Timestamp - -Returns the current timestamp. - -### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp - -The `date_bin` function is used for handling time data by rounding a timestamp (`Timestamp`) to the boundary of a specified time interval (`interval`). - -#### **Syntax:** - -```SQL --- Calculates the time interval starting from timestamp 0 and returns the nearest interval boundary to the specified timestamp. -date_bin(interval,source) - --- Calculates the time interval starting from the origin timestamp and returns the nearest interval boundary to the specified timestamp. 
-date_bin(interval,source,origin) - ---Supported time units for interval: ---Years (y), months (mo), weeks (week), days (d), hours (h), minutes (M), seconds (s), milliseconds (ms), microseconds (µs), nanoseconds (ns). ---source: Must be of timestamp type. -``` - -#### **Parameters**: - -| Parameter | Description | -| :-------- | :----------------------------------------------------------- | -| interval | 1. Time interval 2. Supported units: `y`, `mo`, `week`, `d`, `h`, `M`, `s`, `ms`, `µs`, `ns`. | -| source | 1. The timestamp column or expression to be calculated. 2. Must be of timestamp type. | -| origin | The reference timestamp. | - -#### 4.2.1 Syntax Rules: - -1. If `origin` is not specified, the default reference timestamp is `1970-01-01T00:00:00Z` (Beijing time: `1970-01-01 08:00:00`). -2. `interval` must be a non-negative number with a time unit. If `interval` is `0ms`, the function returns `source` directly without calculation. -3. If `origin` or `source` is negative, it represents a time point before the epoch. `date_bin` will calculate and return the relevant time period. -4. If `source` is `null`, the function returns `null`. -5. Mixing months and non-month time units (e.g., `1 MONTH 1 DAY`) is not supported due to ambiguity. - -> For example, if the starting point is **April 30, 2000**, calculating `1 DAY` first and then `1 MONTH` results in **June 1, 2000**, whereas calculating `1 MONTH` first and then `1 DAY` results in **May 31, 2000**. The resulting dates are different. - -#### 4.2.2 Examples - -##### Example Data - -The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results.
- -#### Example 1: Without Specifying the Origin Timestamp - -```SQL -SELECT - time, - date_bin(1h,time) as time_bin -FROM - table1; -``` - -Result**:** - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.683s -``` - -#### Example 2: Specifying the Origin Timestamp - -```SQL -SELECT - time, - date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin -FROM - table1; -``` - -Result: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.056s -``` - -#### Example 3: Negative Origin - -```SQL -SELECT - time, - date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin -FROM - table1; -``` - -Result: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| 
-|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.203s -``` - -#### Example 4: Interval of 0 - -```SQL -SELECT - time, - date_bin(0ms, time) as time_bin -FROM - table1; -``` - -Result**:** - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.107s -``` - -#### Example 5: Source is NULL - -```SQL -SELECT - arrival_time, - date_bin(1h,arrival_time) as time_bin -FROM - table1; -``` - -Result: - -```Plain -+-----------------------------+-----------------------------+ -| arrival_time| time_bin| -+-----------------------------+-----------------------------+ -| null| null| -|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -| null| null| -|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| -| null| null| 
|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.319s -``` - -### 4.3 Extract Function - -This function is used to extract the value of a specific part of a date. (Supported from version V2.0.6) - -#### 4.3.1 Syntax Definition - -```SQL -EXTRACT (identifier FROM expression) -``` - -* Parameter Description - * **expression**: `TIMESTAMP` type or a time constant - * **identifier**: The valid ranges and corresponding return value types are shown in the table below. - - | Valid Range | Return Type | Return Range | - |----------------------|---------------|--------------------| - | `YEAR` | `INT64` | `/` | - | `QUARTER` | `INT64` | `1-4` | - | `MONTH` | `INT64` | `1-12` | - | `WEEK` | `INT64` | `1-53` | - | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` | - | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` | - | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` | - | `HOUR` | `INT64` | `0-23` | - | `MINUTE` | `INT64` | `0-59` | - | `SECOND` | `INT64` | `0-59` | - | `MS` | `INT64` | `0-999` | - | `US` | `INT64` | `0-999` | - | `NS` | `INT64` | `0-999` | - - -#### 4.3.2 Usage Example - -Using table1 from the [Sample Data](../Reference/Sample-Data.md) as the source data, query the average temperature for the first 12 hours of each day within a certain period. - -```SQL -IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time) -+----------+-----+ -| fmtdate|avgtp| -+----------+-----+ -|2024-11-28| 86.0| -|2024-11-29| 85.0| -|2024-11-30| 90.0| -+----------+-----+ -Total line number = 3 -It costs 0.041s -``` - -Introduction to the `Format` function: [Format Function](../SQL-Manual/Basis-Function.md#_8-2-format-function) - -Introduction to the `Date_bin` function: [Date_bin Function](../SQL-Manual/Basis-Function.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp) - - -## 5.
Mathematical Functions and Operators - -### 5.1 Mathematical Operators - -| **Operator** | **Description** | -| :----------- | :---------------------------------------------- | -| + | Addition | -| - | Subtraction | -| * | Multiplication | -| / | Division (integer division performs truncation) | -| % | Modulus (remainder) | -| - (unary) | Negation | - -### 5.2 Mathematical Functions - -| Function Name | Description | Input | Output | Usage | -|:--------------|:---|:---|:---|:---| -| sin | Sine | double, float, INT64, INT32 | double | sin(x) | -| cos | Cosine | double, float, INT64, INT32 | double | cos(x) | -| tan | Tangent | double, float, INT64, INT32 | double | tan(x) | -| asin | Inverse Sine | double, float, INT64, INT32 | double | asin(x) | -| acos | Inverse Cosine | double, float, INT64, INT32 | double | acos(x) | -| atan | Inverse Tangent | double, float, INT64, INT32 | double | atan(x) | -| sinh | Hyperbolic Sine | double, float, INT64, INT32 | double | sinh(x) | -| cosh | Hyperbolic Cosine | double, float, INT64, INT32 | double | cosh(x) | -| tanh | Hyperbolic Tangent | double, float, INT64, INT32 | double | tanh(x) | -| degrees | Converts angle `x` in radians to degrees | double, float, INT64, INT32 | double | degrees(x) | -| radians | Converts angle `x` in degrees to radians | double, float, INT64, INT32 | double | radians(x) | -| abs | Absolute Value | double, float, INT64, INT32 | Same as input type | abs(x) | -| sign | Returns the sign of `x`: - If `x = 0`, returns `0` - If `x > 0`, returns `1` - If `x < 0`, returns `-1` For `double/float` inputs: - If `x = NaN`, returns `NaN` - If `x = +Infinity`, returns `1.0` - If `x = -Infinity`, returns `-1.0` | double, float, INT64, INT32 | Same as input type | sign(x) | -| ceil | Rounds `x` up to the nearest integer | double, float, INT64, INT32 | double | ceil(x) | -| floor | Rounds `x` down to the nearest integer | double, float, INT64, INT32 | double | floor(x) | -| exp | Returns `e^x` (Euler's number raised to the power of `x`) | double, float, INT64, INT32 | double | exp(x) | -| ln | Returns the natural logarithm of `x` | double, float, INT64, INT32 | double | ln(x) | -| log10 | Returns the base 10 logarithm of `x` | double, float, INT64, INT32 | double | log10(x) | -| round | Rounds `x` to the nearest integer | double, float, INT64, INT32 | double | round(x) | -| round | Rounds `x` to `d` decimal places | double, float, INT64, INT32 | double | round(x, d) | -| sqrt | Returns the square root of `x`. | double, float, INT64, INT32 | double | sqrt(x) | -| e | Returns Euler's number `e`. | | double | e() | -| pi | Pi (π) | | double | pi() |
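- -As a quick illustration (a sketch against the sample `table1` used in the examples above; the result values depend on the data), several of these functions can be combined in a single query: - -```SQL --- round to one decimal place, absolute deviation from 90, and square root -SELECT round(temperature, 1), abs(temperature - 90), sqrt(humidity) FROM table1 LIMIT 5; -```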
- -## 6. Bitwise Functions - -> Supported from version V2.0.6 - -Example raw data is as follows: - -``` -IoTDB:database1> select * from bit_table -+-----------------------------+---------+------+-----+ -| time|device_id|length|width| -+-----------------------------+---------+------+-----+ -|2025-10-29T15:59:42.957+08:00| d1| 14| 12| -|2025-10-29T15:58:59.399+08:00| d3| 15| 10| -|2025-10-29T15:59:32.769+08:00| d2| 13| 12| -+-----------------------------+---------+------+-----+ - --- Table creation statement -CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD); - --- Write data -INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12); -``` - -### 6.1 bit\_count(num, bits) - -The `bit_count(num, bits)` function counts the number of 1s in the binary representation of the integer `num` under the specified bit width `bits`. - -#### 6.1.1 Syntax Definition - -``` -bit_count(num, bits) -> INT64 -- The return type is INT64 -``` - -* Parameter Description - - * **num**: Any integer value (INT32 or INT64) - * **bits**: Integer value, with a valid range of 2~64 - -Note: An error will be raised if the number of `bits` is insufficient to represent `num` (using **two's complement signed representation**): `Argument exception, the scalar function num must be representable with the bits specified. [num] cannot be represented with [bits] bits.` - -* Usage Methods - - * Two specific numbers: `bit_count(9, 64)` - * Column and a number: `bit_count(column1, 64)` - * Between two columns: `bit_count(column1, column2)` - -#### 6.1.2 Usage Examples - -``` --- Two specific numbers -IoTDB:database1> select distinct bit_count(2,8) from bit_table -+-----+ -|_col0| -+-----+ -| 1| -+-----+ --- Two specific numbers -IoTDB:database1> select distinct bit_count(-5,8) from bit_table -+-----+ -|_col0| -+-----+ -| 7| -+-----+ --- Column and a number -IoTDB:database1> select length,bit_count(length,8) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 3| -| 15| 4| -| 13| 3| -+------+-----+ --- Insufficient bits -IoTDB:database1> select length,bit_count(length,2) from bit_table -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits. -``` - -### 6.2 bitwise\_and(x, y) - -The `bitwise_and(x, y)` function performs a logical AND operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise AND operation result.
- -#### 6.2.1 Syntax Definition - -``` -bitwise_and(x, y) -> INT64 -- The return type is INT64 -``` - -* Parameter Description - - * **x, y**: Must be integer values of data type INT32 or INT64 -* Usage Methods - - * Two specific numbers: `bitwise_and(19, 25)` - * Column and a number: `bitwise_and(column1, 25)` - * Between two columns: `bitwise_and(column1, column2)` - -#### 6.2.2 Usage Examples - -``` ---Two specific numbers -IoTDB:database1> select distinct bitwise_and(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 17| -+-----+ ---Column and a number -IoTDB:database1> select length, bitwise_and(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 8| -| 15| 9| -| 13| 9| -+------+-----+ ---Between two columns -IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 12| -| 15| 10| 10| -| 13| 12| 12| -+------+-----+-----+ -``` - -### 6.3 bitwise\_not(x) - -The `bitwise_not(x)` function performs a logical NOT operation on each bit of the integer x based on its two's complement representation, and returns the bitwise NOT operation result. - -#### 6.3.1 Syntax Definition - -``` -bitwise_not(x) -> INT64 -- The return type is INT64 -``` - -* Parameter Description - - * **x**: Must be an integer value of data type INT32 or INT64 -* Usage Methods - - * Specific number: `bitwise_not(5)` - * Single column operation: `bitwise_not(column1)` - -#### 6.3.2 Usage Examples - -``` --- Specific number -IoTDB:database1> select distinct bitwise_not(5) from bit_table -+-----+ -|_col0| -+-----+ -| -6| -+-----+ --- Single column -IoTDB:database1> select length, bitwise_not(length) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| -15| -| 15| -16| -| 13| -14| -+------+-----+ -``` - -### 6.4 bitwise\_or(x, y) - -The `bitwise_or(x, y)` function performs a logical OR operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise OR operation result. - -#### 6.4.1 Syntax Definition - -``` -bitwise_or(x, y) -> INT64 -- The return type is INT64 -``` - -* Parameter Description - - * **x, y**: Must be integer values of data type INT32 or INT64 -* Usage Methods - - * Two specific numbers: `bitwise_or(19, 25)` - * Column and a number: `bitwise_or(column1, 25)` - * Between two columns: `bitwise_or(column1, column2)` - -#### 6.4.2 Usage Examples - -``` --- Two specific numbers -IoTDB:database1> select distinct bitwise_or(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 27| -+-----+ --- Column and a number -IoTDB:database1> select length,bitwise_or(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 31| -| 15| 31| -| 13| 29| -+------+-----+ --- Between two columns -IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 14| -| 15| 10| 15| -| 13| 12| 13| -+------+-----+-----+ -``` - -### 6.5 bitwise\_xor(x, y) - -The `bitwise_xor(x, y)` function performs a logical XOR (exclusive OR) operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise XOR operation result. XOR rule: same bits result in 0, different bits result in 1.
- -#### 6.5.1 Syntax Definition - -``` -bitwise_xor(x, y) -> INT64 -- The return type is INT64 -``` - -* Parameter Description - - * **x, y**: Must be integer values of data type INT32 or INT64 -* Usage Methods - - * Two specific numbers: `bitwise_xor(19, 25)` - * Column and a number: `bitwise_xor(column1, 25)` - * Between two columns: `bitwise_xor(column1, column2)` - -#### 6.5.2 Usage Examples - -``` --- Two specific numbers -IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 10| -+-----+ --- Column and a number -IoTDB:database1> select length,bitwise_xor(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 23| -| 15| 22| -| 13| 20| -+------+-----+ --- Between two columns -IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 2| -| 15| 10| 5| -| 13| 12| 1| -+------+-----+-----+ -``` - -### 6.6 bitwise\_left\_shift(value, shift) - -The `bitwise_left_shift(value, shift)` function returns the result of shifting the binary representation of integer `value` left by `shift` bits. The left shift operation moves bits towards the higher-order direction, filling the vacated lower-order bits with 0s, and discarding the higher-order bits that overflow. Equivalent to: `value << shift`. - -#### 6.6.1 Syntax Definition - -``` -bitwise_left_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value -``` - -* Parameter Description - - * **value**: The integer value to shift left. Must be of data type INT32 or INT64. - * **shift**: The number of bits to shift. Must be of data type INT32 or INT64. -* Usage Methods - - * Two specific numbers: `bitwise_left_shift(1, 2)` - * Column and a number: `bitwise_left_shift(column1, 2)` - * Between two columns: `bitwise_left_shift(column1, column2)` - -#### 6.6.2 Usage Examples - -``` ---Two specific numbers -IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table -+-----+ -|_col0| -+-----+ -| 4| -+-----+ --- Column and a number -IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 56| -| 15| 60| -| 13| 52| -+------+-----+ --- Between two columns -IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -+------+-----+-----+ -``` - -### 6.7 bitwise\_right\_shift(value, shift) - -The `bitwise_right_shift(value, shift)` function returns the result of logically (unsigned) right shifting the binary representation of integer `value` by `shift` bits. The logical right shift operation moves bits towards the lower-order direction, filling the vacated higher-order bits with 0s, and discarding the lower-order bits that overflow. - -#### 6.7.1 Syntax Definition - -``` -bitwise_right_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value -``` - -* Parameter Description - - * **value**: The integer value to shift right. Must be of data type INT32 or INT64. - * **shift**: The number of bits to shift. Must be of data type INT32 or INT64.
-* Usage Methods - - * Two specific numbers: `bitwise_right_shift(8, 3)` - * Column and a number: `bitwise_right_shift(column1, 3)` - * Between two columns: `bitwise_right_shift(column1, column2)` - -#### 6.7.2 Usage Examples - -``` ---Two specific numbers -IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table -+-----+ -|_col0| -+-----+ -| 1| -+-----+ ---Column and a number -IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 1| -| 15| 1| -| 13| 1| -+------+-----+ ---Between two columns -IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -+------+-----+-----+ -``` - -### 6.8 bitwise\_right\_shift\_arithmetic(value, shift) - -The `bitwise_right_shift_arithmetic(value, shift)` function returns the result of arithmetically right shifting the binary representation of integer `value` by `shift` bits. The arithmetic right shift operation moves bits towards the lower-order direction, discarding the lower-order bits that overflow, and filling the vacated higher-order bits with the sign bit (0 for positive numbers, 1 for negative numbers) to preserve the sign of the number. - -#### 6.8.1 Syntax Definition - -``` -bitwise_right_shift_arithmetic(value, shift) -> [same as value] -- The return type is the same as the data type of value -``` - -* Parameter Description - - * **value**: The integer value to shift right. Must be of data type INT32 or INT64. - * **shift**: The number of bits to shift. Must be of data type INT32 or INT64. -* Usage Methods: - - * Two specific numbers: `bitwise_right_shift_arithmetic(12, 2)` - * Column and a number: `bitwise_right_shift_arithmetic(column1, 64)` - * Between two columns: `bitwise_right_shift_arithmetic(column1, column2)` - -#### 6.8.2 Usage Examples - -``` ---Two specific numbers -IoTDB:database1> select distinct bitwise_right_shift_arithmetic(12,2) from bit_table -+-----+ -|_col0| -+-----+ -| 3| -+-----+ --- Column and a number -IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 1| -| 15| 1| -| 13| 1| -+------+-----+ ---Between two columns -IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -+------+-----+-----+ -``` - - -## 7. Conditional Expressions - -### 7.1 CASE - -CASE expressions come in two forms: **Simple CASE** and **Searched CASE**. - -#### 7.1.1 Simple CASE - -The simple form evaluates each value expression from left to right until it finds a match with the given expression: - -```SQL -CASE expression - WHEN value THEN result - [ WHEN ... ] - [ ELSE result ] -END -``` - -If a matching value is found, the corresponding result is returned. If no match is found, the result from the `ELSE` clause (if provided) is returned; otherwise, `NULL` is returned. - -Example: - -```SQL -SELECT a, - CASE a - WHEN 1 THEN 'one' - WHEN 2 THEN 'two' - ELSE 'many' - END -``` - -#### 7.1.2 Searched CASE - -The searched form evaluates each Boolean condition from left to right until a `TRUE` condition is found, then returns the corresponding result: - -```SQL -CASE - WHEN condition THEN result - [ WHEN ...
] - [ ELSE result ] -END -``` - -If no condition evaluates to `TRUE`, the `ELSE` clause result (if provided) is returned; otherwise, `NULL` is returned. - -Example: - -```SQL -SELECT a, b, - CASE - WHEN a = 1 THEN 'aaa' - WHEN b = 2 THEN 'bbb' - ELSE 'ccc' - END -``` - -### 7.2 COALESCE - -Returns the first non-null value from the given list of parameters. - -```SQL -coalesce(value1, value2[, ...]) -``` - -## 8. Conversion Functions - -### 8.1 Conversion Functions - -#### 8.1.1 cast(value AS type) → type - -Explicitly converts a value to the specified type. This can be used to convert strings (`VARCHAR`) to numeric types or numeric values to string types. Starting from V2.0.8-beta, OBJECT type can be explicitly cast to STRING type. - -If the conversion fails, a runtime error is thrown. - -Example: - -```SQL -SELECT * - FROM table1 - WHERE CAST(time AS DATE) - IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE)); -``` - -#### 8.1.2 try_cast(value AS type) → type - -Similar to `CAST()`. If the conversion fails, returns `NULL` instead of throwing an error. - -Example: - -```SQL -SELECT * - FROM table1 - WHERE try_cast(time AS DATE) - IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE)); -``` - -### 8.2 Format Function - -This function generates and returns a formatted string based on a specified format string and input arguments. Similar to Java’s `String.format` or C’s `printf`, it allows developers to construct dynamic string templates using placeholder syntax. Predefined format specifiers in the template are replaced precisely with corresponding argument values, producing a complete string that adheres to specific formatting requirements. - -#### 8.2.1 Syntax - -```SQL -format(pattern, ...args) -> STRING -``` - -**Parameters** - -* `pattern`: A format string containing static text and one or more format specifiers (e.g., `%s`, `%d`), or any expression returning a `STRING`/`TEXT` type. -* `args`: Input arguments to replace format specifiers. Constraints: - * Number of arguments ≥ 1. - * Multiple arguments must be comma-separated (e.g., `arg1, arg2`). - * Total arguments can exceed the number of specifiers in `pattern` but cannot be fewer, otherwise an exception is triggered. - -**Return Value** - -* Formatted result string of type `STRING`. - -#### 8.2.2 Usage Examples - -1. Format Floating-Point Numbers - ```SQL - IoTDB:database1> SELECT format('%.5f', humidity) FROM table1 WHERE humidity = 35.4; - +--------+ - | _col0| - +--------+ - |35.40000| - +--------+ - ``` -2. Format Integers - ```SQL - IoTDB:database1> SELECT format('%03d', 8) FROM table1 LIMIT 1; - +-----+ - |_col0| - +-----+ - | 008| - +-----+ - ``` -3. 
Format Dates and Timestamps - -* Locale-Specific Date - -```SQL -IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) FROM table1 LIMIT 1; -+-----------------------+ -| _col0| -+-----------------------+ -|Monday, January 1, 2024| -+-----------------------+ -``` - -* Remove Timezone Information - -```SQL -IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1; -+-----------------------+ -| _col0| -+-----------------------+ -|2024-01-01 00:00:00.000| -+-----------------------+ -``` - -* Second-Level Timestamp Precision - -```SQL -IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1; -+-------------------+ -| _col0| -+-------------------+ -|2024-01-01 00:00:00| -+-------------------+ -``` - -* Date/Time Format Symbols - -| **Symbol** | **Description** | -| ------------------ | --- | -| 'H' | 24-hour format (two digits, zero-padded), i.e. 00 - 23 | -| 'I' | 12-hour format (two digits, zero-padded), i.e. 01 - 12 | -| 'k' | 24-hour format (no padding), i.e. 0 - 23 | -| 'l' | 12-hour format (no padding), i.e. 1 - 12 | -| 'M' | Minute (two digits, zero-padded), i.e. 00 - 59 | -| 'S' | Second (two digits, zero-padded; supports leap seconds), i.e. 00 - 60 | -| 'L' | Millisecond (three digits, zero-padded), i.e. 000 - 999 | -| 'N' | Nanosecond (nine digits, zero-padded), i.e. 000000000 - 999999999 | -| 'p' | Locale-specific lowercase AM/PM marker (e.g., "am", "pm"). Prefix with `T` to force uppercase (e.g., "AM"). | -| 'z' | RFC 822 timezone offset from GMT (e.g., `-0800`). Adjusts for daylight saving. Uses the JVM's default timezone for `long`/`Long`/`Date`. | -| 'Z' | Timezone abbreviation (e.g., "PST"). Adjusts for daylight saving. Uses the JVM's default timezone; the Formatter's timezone overrides the argument's timezone if specified. | -| 's' | Seconds since Unix epoch (1970-01-01 00:00:00 UTC), i.e. Long.MIN\_VALUE/1000 to Long.MAX\_VALUE/1000 | -| 'Q' | Milliseconds since Unix epoch, i.e. Long.MIN\_VALUE to Long.MAX\_VALUE | - -* Common Date/Time Conversion Characters - -| **Symbol** | **Description** | -| ---------------- | --- | -| 'B' | Locale-specific full month name, for example "January", "February" | -| 'b' | Locale-specific abbreviated month name, for example "Jan", "Feb" | -| 'h' | Same as `b` | -| 'A' | Locale-specific full weekday name, for example "Sunday", "Monday" | -| 'a' | Locale-specific short weekday name, for example "Sun", "Mon" | -| 'C' | Year divided by 100 (two digits, zero-padded) | -| 'Y' | Year (minimum 4 digits, zero-padded) | -| 'y' | Last two digits of year (zero-padded) | -| 'j' | Day of year (three digits, zero-padded) | -| 'm' | Month (two digits, zero-padded) | -| 'd' | Day of month (two digits, zero-padded) | -| 'e' | Day of month (no padding) | - -4. Format Strings - ```SQL - IoTDB:database1> SELECT format('The measurement status is: %s', status) FROM table2 LIMIT 1; - +-------------------------------+ - | _col0| - +-------------------------------+ - |The measurement status is: true| - +-------------------------------+ - ``` -5.
Format Percentage Sign
-    ```SQL
-    IoTDB:database1> SELECT format('%s%%', 99.9) FROM table1 LIMIT 1;
-    +-----+
-    |_col0|
-    +-----+
-    |99.9%|
-    +-----+
-    ```
-
-#### 8.2.3 Format Conversion Failure Scenarios
-
-1. Type Mismatch Errors
-
-* Timestamp Type Conflict
-
-  If the format specifier includes time-related tokens (e.g., `%Y-%m-%d`) but the argument:
-
-  * Is a non-`DATE`/`TIMESTAMP` type value.
-  * Requires sub-day precision (e.g., `%H`, `%M`) but the argument is not `TIMESTAMP`.
-
-```SQL
--- Example 1
-IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float)
-
--- Example 2
-IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float)
-```
-
-* Floating-Point Type Conflict
-
-  Using `%f` with non-numeric arguments (e.g., strings or booleans):
-
-```SQL
-IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean)
-```
-
-2. Argument Count Mismatch
-   The number of arguments must equal or exceed the number of format specifiers.
-
-    ```SQL
-    IoTDB:database1> SELECT format('%.5f %03d', humidity) FROM table1 WHERE humidity = 35.4;
-    Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d')
-    ```
-3. Invalid Invocation Errors
-
-   Triggered if:
-
-   * Total arguments < 2 (must include `pattern` and at least one argument).
-   * `pattern` is not of type `STRING`/`TEXT`.
-
-```SQL
--- Example 1
-IoTDB:database1> select format('%s') from table1 limit 1
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
-
--- Example 2
-IoTDB:database1> select format(123, humidity) from table1 limit 1
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
-```
-
-
-## 9. String Functions and Operators
-
-### 9.1 String operators
-
-#### 9.1.1 || Operator
-
-The `||` operator is used for string concatenation and functions the same as the `concat` function.
-
-#### 9.1.2 LIKE Statement
-
-The `LIKE` statement is used for pattern matching. For detailed usage, refer to Pattern Matching: [LIKE](#_10-1-like).
-
-### 9.2 String Functions
-
-| Function Name | Description | Input | Output | Usage |
-| :------------ | :---------- | :---- | :----- | :---- |
-| `length` | Returns the number of characters in a string (not byte length). | `string` (the string whose length is to be calculated) | INT32 | length(string) |
-| `upper` | Converts all letters in a string to uppercase. | string | String | upper(string) |
-| `lower` | Converts all letters in a string to lowercase. | string | String | lower(string) |
-| `trim` | Removes specified leading and/or trailing characters from a string. **Parameters:** - `specification` (optional): Specifies which side to trim: - `BOTH`: Removes characters from both sides (default). - `LEADING`: Removes characters from the beginning. - `TRAILING`: Removes characters from the end. - `trimcharacter` (optional): Character to be removed (default is whitespace). - `string`: The target string. | string | String | trim([ [ specification ] [ trimcharacter ] FROM ] string) Example: `trim('!' FROM '!foo!');` returns `'foo'` |
-| `strpos` | Returns the position of the first occurrence of `subStr` in `sourceStr`. **Notes:** - Position starts at `1`. - Returns `0` if `subStr` is not found. - Positioning is based on characters, not byte arrays. | `sourceStr` (string to be searched), `subStr` (substring to find) | INT32 | strpos(sourceStr, subStr) |
-| `starts_with` | Checks if `sourceStr` starts with the specified `prefix`. | `sourceStr`, `prefix` | Boolean | starts_with(sourceStr, prefix) |
-| `ends_with` | Checks if `sourceStr` ends with the specified `suffix`. | `sourceStr`, `suffix` | Boolean | ends_with(sourceStr, suffix) |
-| `concat` | Concatenates `string1, string2, ..., stringN`. Equivalent to the `\|\|` operator. | `string`, `text` | String | concat(str1, str2, ...) or str1 \|\| str2 ... |
-| `strcmp` | Compares two strings lexicographically. **Returns:** - `-1` if `str1 < str2` - `0` if `str1 = str2` - `1` if `str1 > str2` - `NULL` if either `str1` or `str2` is `NULL` | `string1`, `string2` | INT32 | strcmp(str1, str2) |
-| `replace` | Removes all occurrences of `search` in `string`. | `string`, `search` | String | replace(string, search) |
-| `replace` | Replaces all occurrences of `search` in `string` with `replace`. | `string`, `search`, `replace` | String | replace(string, search, replace) |
-| `substring` | Extracts a substring from `start_index` to the end of the string. **Notes:** - `start_index` starts at `1`. - Returns `NULL` if input is `NULL`. - Throws an error if `start_index` is greater than string length. | `string`, `start_index` | String | substring(string from start_index) or substring(string, start_index) |
-| `substring` | Extracts a substring of `length` characters starting from `start_index`. **Notes:** - `start_index` starts at `1`. - Returns `NULL` if input is `NULL`. - Throws an error if `start_index` is greater than string length. - Throws an error if `length` is negative. - If `start_index + length` exceeds `int.MAX`, an overflow error may occur. | `string`, `start_index`, `length` | String | substring(string from start_index for length) or substring(string, start_index, length) |
-
-## 10. Pattern Matching Functions
-
-### 10.1 LIKE
-
-#### 10.1.1 Usage
-
-The `LIKE` operator is used to compare a value with a pattern. It is commonly used in the `WHERE` clause to match specific patterns within strings.
-
-#### 10.1.2 Syntax
-
-```SQL
-... column [NOT] LIKE 'pattern' ESCAPE 'character';
-```
-
-#### 10.1.3 Match rules
-
-- Matching characters is case-sensitive
-- The pattern supports two wildcard characters:
-  - `_` matches any single character
-  - `%` matches zero or more characters
-
-#### 10.1.4 Notes
-
-- `LIKE` pattern matching applies to the entire string by default. Therefore, to match a sequence anywhere within a string, the pattern must start and end with a percent sign.
-- To match the escape character itself, double it (e.g., use `\\` to match `\`).
-
-#### 10.1.5 Examples
-
-#### **Example 1: Match Strings Starting with a Specific Character**
-
-- **Description:** Find all names that start with the letter `E` (e.g., `Europe`).
-
-```SQL
-SELECT * FROM table1 WHERE continent LIKE 'E%';
-```
-
-#### **Example 2: Exclude a Specific Pattern**
-
-- **Description:** Find all names that do **not** start with the letter `E`.
-
-```SQL
-SELECT * FROM table1 WHERE continent NOT LIKE 'E%';
-```
-
-#### **Example 3: Match Strings of a Specific Length**
-
-- **Description:** Find all names that start with `A`, end with `a`, and have exactly two characters in between (e.g., `Asia`).
-
-```SQL
-SELECT * FROM table1 WHERE continent LIKE 'A__a';
-```
-
-#### **Example 4: Escape Special Characters**
-
-- **Description:** Find all names that start with `South_` (e.g., `South_America`). The underscore (`_`) is a wildcard character, so it needs to be escaped using `\`.
-
-```SQL
-SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\';
-```
-
-#### **Example 5: Match the Escape Character Itself**
-
-- **Description:** Find all names that start with `South\`. Since `\` is the escape character, it must be escaped using `\\`.
-
-```SQL
-SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\';
-```
-
-### 10.2 regexp_like
-
-#### 10.2.1 Usage
-
-Evaluates whether the regular expression pattern is present within the given string.
-
-#### 10.2.2 Syntax
-
-```SQL
-regexp_like(string, pattern);
-```
-
-#### 10.2.3 Notes
-
-- The pattern for `regexp_like` only needs to be contained within the string, and does not need to match the entire string.
-- To match the entire string, use the `^` and `$` anchors.
-- `^` signifies the "start of the string," and `$` signifies the "end of the string."
-- Regular expressions use Java's regular expression syntax, with the following exceptions to be aware of:
-  - Multiline mode
-    1. Enabled by: `(?m)`.
-    2. Recognizes only `\n` as the line terminator.
-    3. Does not support the `(?d)` flag, and its use is prohibited.
-  - Case-insensitive matching
-    1. Enabled by: `(?i)`.
-    2. Based on Unicode rules, it does not support context-dependent and localized matching.
-    3. Does not support the `(?u)` flag, and its use is prohibited.
-  - Character classes
-    1. Within character classes (e.g., `[A-Z123]`), `\Q` and `\E` are not supported and are treated as literals.
-  - Unicode character classes (`\p{prop}`)
-    1. Underscores in names: All underscores in names must be removed (e.g., `OldItalic` instead of `Old_Italic`).
-    2. Scripts: Specify directly, without the need for `Is`, `script=`, or `sc=` prefixes (e.g., `\p{Hiragana}`).
-    3. Blocks: Must use the `In` prefix; `block=` or `blk=` prefixes are not supported (e.g., `\p{InMongolian}`).
-    4. Categories: Specify directly, without the need for `Is`, `general_category=`, or `gc=` prefixes (e.g., `\p{L}`).
-    5. Binary properties: Specify directly, without `Is` (e.g., `\p{NoncharacterCodePoint}`).
-
-#### 10.2.4 Examples
-
-#### Example 1: **Matching strings containing a specific pattern**
-
-```SQL
-SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true
-```
-
-- **Explanation**: Determines whether the string `'1a 2b 14m'` contains a substring that matches the pattern `\d+b`.
-  - `\d+` means "one or more digits". 
-  - `b` represents the letter b.
-  - In `'1a 2b 14m'`, the substring `'2b'` matches this pattern, so it returns `true`.
-
-
-#### **Example 2: Matching the entire string**
-
-```SQL
-SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false
-```
-
-- **Explanation**: Checks if the string `'1a 2b 14m'` matches the pattern `^\\d+b$` exactly.
-  - `\d+` means "one or more digits".
-  - `b` represents the letter b.
-  - The anchors require the entire string to consist of digits followed by `b`; `'1a 2b 14m'` contains other characters and ends with `m`, so it returns `false`.
-
-## 11. Timeseries Windowing Functions
-
-The sample data is as follows:
-
-```SQL
-IoTDB> SELECT * FROM bid;
-+-----------------------------+--------+-----+
-|                         time|stock_id|price|
-+-----------------------------+--------+-----+
-|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
-|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
-|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
-|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
-|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
-|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
-+-----------------------------+--------+-----+
-
--- Create table statement
-CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD);
--- Insert data
-INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0);
-```
-
-### 11.1 HOP
-
-#### 11.1.1 Function Description
-
-The HOP function segments data into overlapping time windows for analysis, assigning each row to all windows that overlap with its timestamp. If windows overlap (when SLIDE < SIZE), data will be duplicated across multiple windows.
-
-#### 11.1.2 Function Definition
-
-```SQL
-HOP(data, timecol, size, slide[, origin])
-```
-
-#### 11.1.3 Parameter Description
-
-| Parameter | Type | Attributes | Description |
-| --------- | ------ | ------------------------------- | ----------------------- |
-| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table |
-| TIMECOL | Scalar | String (default: 'time') | Time column |
-| SIZE | Scalar | Long integer | Window size |
-| SLIDE | Scalar | Long integer | Sliding step |
-| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time |
-
-
-#### 11.1.4 Returned Results
-
-The HOP function returns:
-
-* `window_start`: Window start time (inclusive)
-* `window_end`: Window end time (exclusive)
-* Pass-through columns: All input columns from DATA
-
-#### 11.1.5 Usage Example
-
-```SQL
-IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m);
-+-----------------------------+-----------------------------+-----------------------------+--------+-----+
-|                 window_start|                   window_end|                         time|stock_id|price|
-+-----------------------------+-----------------------------+-----------------------------+--------+-----+
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0| 
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
-|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
-+-----------------------------+-----------------------------+-----------------------------+--------+-----+
-
--- Equivalent to tree model's GROUP BY TIME when combined with GROUP BY
-IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id;
-+-----------------------------+-----------------------------+--------+------------------+
-|                 window_start|                   window_end|stock_id|               avg|
-+-----------------------------+-----------------------------+--------+------------------+
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|             201.0|
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
-|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|    TESL|             195.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|    AAPL|101.66666666666667|
-+-----------------------------+-----------------------------+--------+------------------+
-```
-
-### 11.2 SESSION
-
-#### 11.2.1 Function Description
-
-The SESSION function groups data into sessions based on time intervals. It checks the time gap between consecutive rows: rows whose gap from the previous row does not exceed the threshold (GAP) are grouped into the current session window, while a larger gap starts a new window. 
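-
-As a quick sketch of how the threshold drives the grouping (expected sessions reasoned by hand from the sample `bid` data above, not captured console output), widening GAP merges sessions:
-
-```SQL
--- With GAP => 10m every consecutive gap fits inside the threshold
--- (TESL's largest gap is 8 minutes), so each stock_id collapses into a
--- single session: TESL -> [09:06, 09:15], AAPL -> [09:05, 09:09].
-SELECT window_start, window_end, stock_id, count(*) as cnt
-FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time, TIMECOL => 'time', GAP => 10m)
-GROUP BY window_start, window_end, stock_id;
-```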
- -#### 11.2.2 Function Definition - -```SQL -SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap) -``` -#### 11.2.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | ---------------------------- | -------------------------------------- | -| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | -| TIMECOL | Scalar | String (default: 'time') | Time column name | -| GAP | Scalar | Long integer | Session gap threshold | - -#### 11.2.4 Returned Results - -The SESSION function returns: - -* `window_start`: Time of the first row in the session -* `window_end`: Time of the last row in the session -* Pass-through columns: All input columns from DATA - -#### 11.2.5 Usage Example - -```SQL -IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- Equivalent to tree model's GROUP BY SESSION when combined with GROUP BY -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.3 VARIATION - -#### 11.3.1 Function Description - -The VARIATION function groups data based on value differences. The first row becomes the baseline for the first window. Subsequent rows are compared to the baseline—if the difference is within the threshold (DELTA), they join the current window; otherwise, a new window starts with that row as the new baseline. 
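-
-For intuition about the baseline rule (expected windows worked out by hand from the sample data, not captured output), a larger DELTA keeps more rows attached to the first baseline:
-
-```sql
--- With DELTA => 5.0, AAPL prices (100.0, 103.0, 102.0) all stay within 5.0 of the
--- baseline 100.0, and TESL prices (200.0, 202.0, 195.0) stay within 5.0 of 200.0,
--- so each stock_id keeps a single window_index 0.
-SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time, COL => 'price', DELTA => 5.0);
-```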
- -#### 11.3.2 Function Definition - -```sql -VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta) -``` - -#### 11.3.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | ---------------------------- | -------------------------------------- | -| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | -| COL | Scalar | String | Column for difference calculation | -| DELTA | Scalar | Float | Difference threshold | - -#### 11.3.4 Returned Results - -The VARIATION function returns: - -* `window_index`: Window identifier -* Pass-through columns: All input columns from DATA - -#### 11.3.5 Usage Example - -```sql -IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0); -+------------+-----------------------------+--------+-----+ -|window_index| time|stock_id|price| -+------------+-----------------------------+--------+-----+ -| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| -| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| -| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| -| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+------------+-----------------------------+--------+-----+ - --- Equivalent to tree model's GROUP BY VARIATION when combined with GROUP BY -IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id; -+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5| -+-----------------------------+-----------------------------+--------+-----+ -``` - -### 11.4 CAPACITY - -#### 11.4.1 Function Description - -The CAPACITY function groups data into fixed-size windows, where each window contains up to SIZE rows. 
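-
-A minimal sketch of the row-count rule (expected result reasoned from the sample data, not captured output): once SIZE is at least the partition's row count, the whole partition becomes one window.
-
-```sql
--- Each stock_id has exactly 3 rows, so with SIZE => 3 every row of a stock
--- falls into window_index 0 of its own partition.
-SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 3);
-```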
- -#### 11.4.2 Function Definition - -```sql -CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size) -``` - -#### 11.4.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | ---------------------------- | -------------------------------------- | -| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | -| SIZE | Scalar | Long integer | Window size (row count) | - -#### 11.4.4 Returned Results - -The CAPACITY function returns: - -* `window_index`: Window identifier -* Pass-through columns: All input columns from DATA - -#### 11.4.5 Usage Example - -```sql -IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2); -+------------+-----------------------------+--------+-----+ -|window_index| time|stock_id|price| -+------------+-----------------------------+--------+-----+ -| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| -| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| -| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| -| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+------------+-----------------------------+--------+-----+ - --- Equivalent to tree model's GROUP BY COUNT when combined with GROUP BY -IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id; -+-----------------------------+-----------------------------+--------+-----+ -| start_time| end_time|stock_id| avg| -+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|101.5| -|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+--------+-----+ -``` - -### 11.5 TUMBLE - -#### 11.5.1 Function Description - -The TUMBLE function assigns each row to a non-overlapping, fixed-size time window based on a timestamp attribute. 
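-
-For intuition (window assignment reasoned from the sample data, not captured output): because tumbling windows never overlap, shrinking SIZE only splits the timeline into more bins and never duplicates a row.
-
-```sql
--- With SIZE => 5m the epoch-aligned windows are [09:05, 09:10) and [09:15, 09:20):
--- the three AAPL rows and TESL 09:06/09:07 land in [09:05, 09:10),
--- while TESL 09:15 lands in [09:15, 09:20).
-SELECT * FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 5m);
-```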
- -#### 11.5.2 Function Definition - -```sql -TUMBLE(data, timecol, size[, origin]) -``` -#### 11.5.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | --------------------------------- | ------------------------- | -| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | -| TIMECOL | Scalar | String (default: 'time') | Time column | -| SIZE | Scalar | Long integer (positive) | Window size | -| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | - -#### 11.5.4 Returned Results - -The TUMBLE function returns: - -* `window_start`: Window start time (inclusive) -* `window_end`: Window end time (exclusive) -* Pass-through columns: All input columns from DATA - -#### 11.5.5 Usage Example - -```SQL -IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- Equivalent to tree model's GROUP BY TIME when combined with GROUP BY -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.6 CUMULATE - -#### 11.6.1 Function Description - -The CUMULATE function creates expanding windows from an initial window, maintaining the same start time while incrementally extending the end time by STEP until reaching SIZE. Each window contains all elements within its range. For example, with a 1-hour STEP and 24-hour SIZE, daily windows would be: `[00:00, 01:00)`, `[00:00, 02:00)`, ..., `[00:00, 24:00)`. 
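-
-As a small sketch of the expanding rule against the sample data (window assignment reasoned from the description, not captured output):
-
-```sql
--- With SIZE => 10m and STEP => 5m, each 10-minute span yields the expanding
--- windows [09:00, 09:05) and [09:00, 09:10), then [09:10, 09:15) and [09:10, 09:20);
--- a row at 09:06 therefore appears only in [09:00, 09:10).
-SELECT * FROM CUMULATE(DATA => bid, TIMECOL => 'time', STEP => 5m, SIZE => 10m);
-```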
- -#### 11.6.2 Function Definition - -```sql -CUMULATE(data, timecol, size, step[, origin]) -``` - -#### 11.6.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | --------------------------------- | --------------------------------------------------- | -| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | -| TIMECOL | Scalar | String (default: 'time') | Time column | -| SIZE | Scalar | Long integer (positive) | Window size (must be an integer multiple of STEP) | -| STEP | Scalar | Long integer (positive) | Expansion step | -| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | - -> Note: An error `Cumulative table function requires size must be an integral multiple of step` occurs if SIZE is not divisible by STEP. - -#### 11.6.4 Returned Results - -The CUMULATE function returns: - -* `window_start`: Window start time (inclusive) -* `window_end`: Window end time (exclusive) -* Pass-through columns: All input columns from DATA - -#### 11.6.5 Usage Example - -```sql -IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- Equivalent to tree model's GROUP BY TIME when combined with GROUP BY -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| TESL| 201.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| 
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|    TESL|             195.0|
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|    TESL|             195.0|
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|    AAPL|             100.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|    AAPL|             101.5|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
-+-----------------------------+-----------------------------+--------+------------------+
-```
diff --git a/src/UserGuide/Master/Table/SQL-Manual/Basis-Function_apache.md b/src/UserGuide/Master/Table/SQL-Manual/Basis-Function_apache.md
new file mode 100644
index 000000000..c6e11f5c9
--- /dev/null
+++ b/src/UserGuide/Master/Table/SQL-Manual/Basis-Function_apache.md
@@ -0,0 +1,2036 @@
+
+
+# Basic Functions
+
+## 1. Comparison Functions and Operators
+
+### 1.1 Basic Comparison Operators
+
+Comparison operators are used to compare two values and return the comparison result (`true` or `false`).
+
+| Operators | Description |
+| :-------- | :----------------------- |
+| < | Less than |
+| > | Greater than |
+| <= | Less than or equal to |
+| >= | Greater than or equal to |
+| = | Equal to |
+| <> | Not equal to |
+| != | Not equal to |
+
+#### 1.1.1 Comparison rules:
+
+1. All types can be compared with themselves.
+2. Numeric types (INT32, INT64, FLOAT, DOUBLE, TIMESTAMP) can be compared with each other.
+3. Character types (STRING, TEXT) can also be compared with each other.
+4. Comparisons between types other than those mentioned above will result in an error.
+
+### 1.2 BETWEEN Operator
+
+1. The `BETWEEN` operator is used to determine whether a value falls within a specified range.
+2. The `NOT BETWEEN` operator is used to determine whether a value does not fall within a specified range.
+3. The `BETWEEN` and `NOT BETWEEN` operators can be used to evaluate any sortable type.
+4. The value, minimum, and maximum parameters for `BETWEEN` and `NOT BETWEEN` must be of the same type, otherwise an error will occur.
+
+Syntax:
+
+```SQL
+value BETWEEN min AND max
+value NOT BETWEEN min AND max
+```
+
+Example 1: BETWEEN
+
+```SQL
+-- Query records where temperature is between 85.0 and 90.0
+SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0;
+```
+
+Example 2: NOT BETWEEN
+
+```SQL
+-- Query records where humidity is not between 35.0 and 40.0
+SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0;
+```
+
+### 1.3 IS NULL Operator
+
+The `IS NULL` and `IS NOT NULL` operators apply to all data types.
+
+Example 1: Query records where temperature is NULL
+
+```SQL
+SELECT * FROM table1 WHERE temperature IS NULL;
+```
+
+Example 2: Query records where humidity is not NULL
+
+```SQL
+SELECT * FROM table1 WHERE humidity IS NOT NULL;
+```
+
+### 1.4 IN Operator
+
+1. The `IN` operator can be used in the `WHERE` clause to compare a column with a list of values.
+2. These values can be provided by a static array or scalar expressions.
+
+Syntax:
+
+```SQL
+... 
WHERE column [NOT] IN ('value1','value2', expression1)
+```
+
+Example 1: Static array: Query records where region is 'Beijing' or 'Shanghai'
+
+```SQL
+SELECT * FROM table1 WHERE region IN ('Beijing', 'Shanghai');
+-- Equivalent to
+SELECT * FROM table1 WHERE region = 'Beijing' OR region = 'Shanghai';
+```
+
+Example 2: Scalar expression: Query records where temperature is among specific values
+
+```SQL
+SELECT * FROM table1 WHERE temperature IN (85.0, 90.0);
+```
+
+Example 3: Query records where region is not 'Beijing' or 'Shanghai'
+
+```SQL
+SELECT * FROM table1 WHERE region NOT IN ('Beijing', 'Shanghai');
+```
+
+### 1.5 GREATEST and LEAST
+
+The `GREATEST` function returns the maximum value from a list of arguments, while the `LEAST` function returns the minimum value. The return type matches the input data type.
+
+Key Behaviors:
+1. NULL Handling: Returns NULL if all arguments are NULL.
+2. Parameter Requirements: Requires at least 2 arguments.
+3. Type Constraints: All arguments must have the same data type.
+4. Supported Types: `BOOLEAN`, `FLOAT`, `DOUBLE`, `INT32`, `INT64`, `STRING`, `TEXT`, `TIMESTAMP`, `DATE`
+
+**Syntax:**
+
+```sql
+greatest(value1, value2, ..., valueN)
+least(value1, value2, ..., valueN)
+```
+
+**Examples:**
+
+```sql
+-- Retrieve the maximum value between `temperature` and `humidity` in `table2`
+SELECT GREATEST(temperature,humidity) FROM table2;
+
+-- Retrieve the minimum value between `temperature` and `humidity` in `table2`
+SELECT LEAST(temperature,humidity) FROM table2;
+```
+
+## 2. Aggregate functions
+
+### 2.1 Overview
+
+1. Aggregate functions are many-to-one functions. They perform aggregate calculations on a set of values to obtain a single aggregate result.
+
+2. Except for `COUNT()`, all other aggregate functions ignore null values and return null when there are no input rows or all values are null. For example, `SUM()` returns null instead of zero, and `AVG()` does not include null values in the count.
+
+### 2.2 Supported Aggregate Functions
+
+| Function Name | Description | Allowed Input Types | Output Type |
+|:----------------------|:------------|:--------------------|:------------|
+| COUNT | Counts the number of data points. | All types | INT64 |
+| COUNT_IF | COUNT_IF(exp) counts the number of rows that satisfy a specified boolean expression. | `exp` must be a boolean expression (e.g., `count_if(temperature>20)`) | INT64 |
+| APPROX_COUNT_DISTINCT | The APPROX_COUNT_DISTINCT(x[, maxStandardError]) function provides an approximation of COUNT(DISTINCT x), returning the estimated number of distinct input values. | `x`: The target column to be calculated, supports all data types.<br>`maxStandardError` (optional): Specifies the maximum standard error allowed for the function's result. Valid range is [0.0040625, 0.26]. Defaults to 0.023 if not specified. | INT64 |
+| APPROX_MOST_FREQUENT | The APPROX_MOST_FREQUENT(x, k, capacity) function is used to approximately calculate the top k most frequent elements in a dataset. It returns a JSON-formatted string where the keys are the element values and the values are their corresponding approximate frequencies. (Available since V2.0.5.1) | `x`: The column to be calculated, supporting all existing data types in IoTDB;<br>`k`: The number of top-k most frequent values to return;<br>`capacity`: The number of buckets used for computation, which relates to memory usage: a larger value reduces error but consumes more memory, while a smaller value increases error but uses less memory. | STRING |
+| SUM | Calculates the sum. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| AVG | Calculates the average. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| MAX | Finds the maximum value. | All types | Same as input type |
+| MIN | Finds the minimum value. | All types | Same as input type |
+| FIRST | Finds the value with the smallest timestamp that is not NULL. | All types | Same as input type |
+| LAST | Finds the value with the largest timestamp that is not NULL. | All types | Same as input type |
+| STDDEV | Alias for STDDEV_SAMP, calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| STDDEV_POP | Calculates the population standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| STDDEV_SAMP | Calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| VARIANCE | Alias for VAR_SAMP, calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| VAR_POP | Calculates the population variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| VAR_SAMP | Calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| EXTREME | Finds the value with the largest absolute value. If the largest absolute values of positive and negative values are equal, returns the positive value. | INT32 INT64 FLOAT DOUBLE | Same as input type |
+| MODE | Finds the mode. Note: 1. There is a risk of memory exception when the number of distinct values in the input sequence is too large; 2. If all elements have the same frequency, i.e., there is no mode, a random element is returned; 3. If there are multiple modes, a random mode is returned; 4. NULL values are also counted in frequency, so even if not all values in the input sequence are NULL, the final result may still be NULL. | All types | Same as input type |
+| MAX_BY | MAX_BY(x, y) finds the value of x corresponding to the maximum y in the binary input x and y. MAX_BY(time, x) returns the timestamp when x is at its maximum. | x and y can be of any type | Same as the data type of the first input x |
+| MIN_BY | MIN_BY(x, y) finds the value of x corresponding to the minimum y in the binary input x and y. MIN_BY(time, x) returns the timestamp when x is at its minimum. | x and y can be of any type | Same as the data type of the first input x |
+| FIRST_BY | FIRST_BY(x, y) finds the value of x in the same row when y is the first non-null value. | x and y can be of any type | Same as the data type of the first input x |
+| LAST_BY | LAST_BY(x, y) finds the value of x in the same row when y is the last non-null value. | x and y can be of any type | Same as the data type of the first input x |
+
+
+### 2.3 Examples
+
+#### 2.3.1 Example Data
+
+The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results.
+
+#### 2.3.2 Count
+
+Counts the number of rows in the entire table and the number of non-null values in the `temperature` column.
+
+```SQL
+IoTDB> select count(*), count(temperature) from table1;
+```
+
+The execution result is as follows:
+
+> Note: Only the COUNT function can be used with *, otherwise an error will occur. 
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+|   18|   12|
++-----+-----+
+Total line number = 1
+It costs 0.834s
+```
+
+
+#### 2.3.3 Count_if
+
+Counts the non-null `arrival_time` records in `table2`.
+
+```sql
+select count_if(arrival_time is not null) from table2;
+```
+
+The execution result is as follows:
+
+```sql
++-----+
+|_col0|
++-----+
+|    4|
++-----+
+Total line number = 1
+It costs 0.047s
+```
+
+#### 2.3.4 Approx_count_distinct
+
+Retrieves the number of distinct values in the `temperature` column from `table1`.
+
+```sql
+IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1;
+IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1;
+```
+
+Both statements return the following result:
+
+```sql
++------+------+
+|origin|approx|
++------+------+
+|     3|     3|
++------+------+
+Total line number = 1
+It costs 0.022s
+```
+
+#### 2.3.5 Approx_most_frequent
+
+Queries the top 2 most frequent values in the `temperature` column of `table1`.
+
+```sql
+IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1;
+```
+
+The execution result is as follows:
+
+```sql
++-------------------+
+|               topk|
++-------------------+
+|{"85.0":6,"90.0":5}|
++-------------------+
+Total line number = 1
+It costs 0.064s
+```
+
+
+#### 2.3.6 First
+
+Finds the values with the smallest timestamp that are not NULL in the `temperature` and `humidity` columns.
+
+```SQL
+IoTDB> select first(temperature), first(humidity) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+| 90.0| 35.1|
++-----+-----+
+Total line number = 1
+It costs 0.170s
+```
+
+#### 2.3.7 Last
+
+Finds the values with the largest timestamp that are not NULL in the `temperature` and `humidity` columns.
+
+```SQL
+IoTDB> select last(temperature), last(humidity) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+| 90.0| 34.8|
++-----+-----+
+Total line number = 1
+It costs 0.211s
+```
+
+#### 2.3.8 First_by
+
+Finds the `time` and `humidity` values of the row with the smallest timestamp whose `temperature` is not NULL.
+
+```SQL
+IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-26T13:37:00.000+08:00| 35.1|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.269s
+```
+
+#### 2.3.9 Last_by
+
+Queries the `time` and `humidity` values of the row with the largest timestamp whose `temperature` is not NULL.
+
+```SQL
+IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-30T14:30:00.000+08:00| 34.8|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.070s
+```
+
+#### 2.3.10 Max_by
+
+Queries the `time` and `humidity` values of the row where the `temperature` column reaches its maximum. 
+ +```SQL +IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-30T09:30:00.000+08:00| 35.2| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.172s +``` + +#### 2.3.11 Min_by + +Queries the `time` value of the row where the `temperature` column is at its minimum, and the `humidity` value of the row where the `temperature` column is at its minimum. + +```SQL +select min_by(time, temperature), min_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-29T10:00:00.000+08:00| null| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.244s +``` + + +## 3. Logical operators + +### 3.1 Overview + +Logical operators are used to combine conditions or negate conditions, returning a Boolean result (`true` or `false`). + +Below are the commonly used logical operators along with their descriptions: + +| Operator | Description | Example | +| :------- | :-------------------------------- | :------ | +| AND | True only if both values are true | a AND b | +| OR | True if either value is true | a OR b | +| NOT | True when the value is false | NOT a | + +### 3.2 Impact of NULL on Logical Operators + +#### 3.2.1 AND Operator + +- If one or both sides of the expression are `NULL`, the result may be `NULL`. +- If one side of the `AND` operator is `FALSE`, the expression result is `FALSE`. + +Examples: + +```SQL +NULL AND true -- null +NULL AND false -- false +NULL AND NULL -- null +``` + +#### 3.2.2 OR Operator + +- If one or both sides of the expression are `NULL`, the result may be `NULL`. +- If one side of the `OR` operator is `TRUE`, the expression result is `TRUE`. + +Examples: + +```SQL +NULL OR NULL -- null +NULL OR false -- null +NULL OR true -- true +``` + +##### 3.2.2.1 Truth Table + +The following truth table illustrates how `NULL` is handled in `AND` and `OR` operators: + +| a | b | a AND b | a OR b | +| :---- | :---- | :------ | :----- | +| TRUE | TRUE | TRUE | TRUE | +| TRUE | FALSE | FALSE | TRUE | +| TRUE | NULL | NULL | TRUE | +| FALSE | TRUE | FALSE | TRUE | +| FALSE | FALSE | FALSE | FALSE | +| FALSE | NULL | FALSE | NULL | +| NULL | TRUE | NULL | TRUE | +| NULL | FALSE | FALSE | NULL | +| NULL | NULL | NULL | NULL | + +#### 3.2.3 NOT Operator + +The logical negation of `NULL` remains `NULL`. + +Example: + +```SQL +NOT NULL -- null +``` + +##### 3.2.3.1 Truth Table + +The following truth table illustrates how `NULL` is handled in the `NOT` operator: + +| a | NOT a | +| :---- | :---- | +| TRUE | FALSE | +| FALSE | TRUE | +| NULL | NULL | + +## 4. Date and Time Functions and Operators + +### 4.1 now() -> Timestamp + +Returns the current timestamp. + +### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp + +The `date_bin` function is used for handling time data by rounding a timestamp (`Timestamp`) to the boundary of a specified time interval (`interval`). + +#### **Syntax:** + +```SQL +-- Calculates the time interval starting from timestamp 0 and returns the nearest interval boundary to the specified timestamp. +date_bin(interval,source) + +-- Calculates the time interval starting from the origin timestamp and returns the nearest interval boundary to the specified timestamp. 
+date_bin(interval,source,origin)
+
+-- Supported time units for interval:
+-- Years (y), months (mo), weeks (week), days (d), hours (h), minutes (M), seconds (s), milliseconds (ms), microseconds (µs), nanoseconds (ns).
+-- source: Must be of timestamp type.
+```
+
+#### **Parameters**:
+
+| Parameter | Description |
+| :-------- | :----------------------------------------------------------- |
+| interval | 1. Time interval 2. Supported units: `y`, `mo`, `week`, `d`, `h`, `M`, `s`, `ms`, `µs`, `ns`. |
+| source | 1. The timestamp column or expression to be calculated. 2. Must be of timestamp type. |
+| origin | The reference timestamp. |
+
+#### 4.2.1 Syntax Rules
+
+1. If `origin` is not specified, the default reference timestamp is `1970-01-01T00:00:00Z` (Beijing time: `1970-01-01 08:00:00`).
+2. `interval` must be a non-negative number with a time unit. If `interval` is `0ms`, the function returns `source` directly without calculation.
+3. If `origin` or `source` is negative, it represents a time point before the epoch. `date_bin` will calculate and return the relevant time period.
+4. If `source` is `null`, the function returns `null`.
+5. Mixing months and non-month time units (e.g., `1 MONTH 1 DAY`) is not supported due to ambiguity.
+
+> For example, if the starting point is **April 30, 2000**, calculating `1 DAY` first and then `1 MONTH` results in **June 1, 2000**, whereas calculating `1 MONTH` first and then `1 DAY` results in **May 31, 2000**. The resulting dates are different.
+
+#### 4.2.2 Examples
+
+##### Example Data
+
+The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results. 
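+
+Conceptually, `date_bin(interval, source, origin)` returns the largest boundary `origin + k * interval` (for integer `k`) that does not exceed `source`. A minimal sketch of that arithmetic, filtering the sample data down to a single row (the filter value is taken from table1's sample rows):
+
+```SQL
+-- 2024-11-27T16:38 falls in the hourly bucket that begins at 2024-11-27T16:00,
+-- so time_bin below evaluates to 2024-11-27T16:00:00.
+SELECT time, date_bin(1h, time) AS time_bin
+FROM table1 WHERE time = 2024-11-27T16:38:00.000;
+```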
+ +#### Example 1: Without Specifying the Origin Timestamp + +```SQL +SELECT + time, + date_bin(1h,time) as time_bin +FROM + table1; +``` + +Result**:** + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.683s +``` + +#### Example 2: Specifying the Origin Timestamp + +```SQL +SELECT + time, + date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.056s +``` + +#### Example 3: Negative Origin + +```SQL +SELECT + time, + date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| 
+|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.203s +``` + +#### Example 4: Interval of 0 + +```SQL +SELECT + time, + date_bin(0ms, time) as time_bin +FROM + table1; +``` + +Result**:** + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.107s +``` + +#### Example 5: Source is NULL + +```SQL +SELECT + arrival_time, + date_bin(1h,arrival_time) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| arrival_time| time_bin| ++-----------------------------+-----------------------------+ +| null| null| +|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +| null| null| +|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| +| null| null| 
+|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00|
+|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00|
+|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00|
+|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00|
++-----------------------------+-----------------------------+
+Total line number = 18
+It costs 0.319s
+```
+
+### 4.3 Extract Function
+
+This function is used to extract the value of a specific part of a date. (Supported from version V2.0.6)
+
+#### 4.3.1 Syntax Definition
+
+```SQL
+EXTRACT (identifier FROM expression)
+```
+
+* Parameter Description
+  * **expression**: A `TIMESTAMP` value or a time constant
+  * **identifier**: The supported identifiers and their corresponding return types and value ranges are shown in the table below.
+
+    | Identifier | Return Type | Return Range |
+    | :------------------- | :------ | :------ |
+    | `YEAR` | `INT64` | `/` |
+    | `QUARTER` | `INT64` | `1-4` |
+    | `MONTH` | `INT64` | `1-12` |
+    | `WEEK` | `INT64` | `1-53` |
+    | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` |
+    | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` |
+    | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` |
+    | `HOUR` | `INT64` | `0-23` |
+    | `MINUTE` | `INT64` | `0-59` |
+    | `SECOND` | `INT64` | `0-59` |
+    | `MS` | `INT64` | `0-999` |
+    | `US` | `INT64` | `0-999` |
+    | `NS` | `INT64` | `0-999` |
+
+#### 4.3.2 Usage Example
+
+Using table1 from the [Sample Data](../Reference/Sample-Data.md) as the source data, query the average temperature for the first 12 hours of each day within a certain period.
+
+```SQL
+IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time)
++----------+-----+
+|   fmtdate|avgtp|
++----------+-----+
+|2024-11-28| 86.0|
+|2024-11-29| 85.0|
+|2024-11-30| 90.0|
++----------+-----+
+Total line number = 3
+It costs 0.041s
+```
+
+Introduction to the `Format` function: [Format Function](../SQL-Manual/Basis-Function_apache.md#_8-2-format-function)
+
+Introduction to the `Date_bin` function: [Date_bin Function](../SQL-Manual/Basis-Function_apache.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp)
+
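+A minimal standalone sketch of `EXTRACT` itself (a hypothetical query against table1; output omitted):
+
+```SQL
+-- Extract the hour and the day of month from each timestamp
+SELECT
+  time,
+  extract(HOUR FROM time) AS hour_of_day,
+  extract(DAY FROM time) AS day_of_month
+FROM
+  table1;
+```
+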
+## 5. Mathematical Functions and Operators
+
+### 5.1 Mathematical Operators
+
+| **Operator** | **Description** |
+| :----------- | :---------------------------------------------- |
+| + | Addition |
+| - | Subtraction |
+| * | Multiplication |
+| / | Division (integer division performs truncation) |
+| % | Modulus (remainder) |
+| - (unary) | Negation |
+
+### 5.2 Mathematical Functions
+
+| Function Name | Description | Input | Output | Usage |
+| :------------ | :---------- | :---- | :----- | :---- |
+| sin | Sine | double, float, INT64, INT32 | double | sin(x) |
+| cos | Cosine | double, float, INT64, INT32 | double | cos(x) |
+| tan | Tangent | double, float, INT64, INT32 | double | tan(x) |
+| asin | Inverse Sine | double, float, INT64, INT32 | double | asin(x) |
+| acos | Inverse Cosine | double, float, INT64, INT32 | double | acos(x) |
+| atan | Inverse Tangent | double, float, INT64, INT32 | double | atan(x) |
+| sinh | Hyperbolic Sine | double, float, INT64, INT32 | double | sinh(x) |
+| cosh | Hyperbolic Cosine | double, float, INT64, INT32 | double | cosh(x) |
+| tanh | Hyperbolic Tangent | double, float, INT64, INT32 | double | tanh(x) |
+| degrees | Converts angle `x` in radians to degrees | double, float, INT64, INT32 | double | degrees(x) |
+| radians | Converts angle `x` in degrees to radians | double, float, INT64, INT32 | double | radians(x) |
+| abs | Absolute Value | double, float, INT64, INT32 | Same as input type | abs(x) |
+| sign | Returns the sign of `x`: `0` if `x = 0`, `1` if `x > 0`, `-1` if `x < 0`. For `double`/`float` inputs: returns `NaN` if `x = NaN`, `1.0` if `x = +Infinity`, `-1.0` if `x = -Infinity` | double, float, INT64, INT32 | Same as input type | sign(x) |
+| ceil | Rounds `x` up to the nearest integer | double, float, INT64, INT32 | double | ceil(x) |
+| floor | Rounds `x` down to the nearest integer | double, float, INT64, INT32 | double | floor(x) |
+| exp | Returns `e^x` (Euler's number raised to the power of `x`) | double, float, INT64, INT32 | double | exp(x) |
+| ln | Returns the natural logarithm of `x` | double, float, INT64, INT32 | double | ln(x) |
+| log10 | Returns the base 10 logarithm of `x` | double, float, INT64, INT32 | double | log10(x) |
+| round | Rounds `x` to the nearest integer | double, float, INT64, INT32 | double | round(x) |
+| round | Rounds `x` to `d` decimal places | double, float, INT64, INT32 | double | round(x, d) |
+| sqrt | Returns the square root of `x` | double, float, INT64, INT32 | double | sqrt(x) |
+| e | Returns Euler's number `e` | | double | e() |
+| pi | Returns π | | double | pi() |
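+
+As a quick sketch of how these functions compose (a hypothetical query against `table1` from the [Sample Data](../Reference/Sample-Data.md); output omitted):
+
+```SQL
+-- Truncate each temperature, round it to one decimal place,
+-- and compute its square root rounded to three decimal places
+SELECT
+  time,
+  temperature,
+  floor(temperature) AS floor_temp,
+  round(temperature, 1) AS round_temp,
+  round(sqrt(temperature), 3) AS sqrt_temp
+FROM
+  table1;
+```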
+
+## 6. Bitwise Functions
+
+> Supported from version V2.0.6
+
+Example raw data is as follows:
+
+```
+IoTDB:database1> select * from bit_table
++-----------------------------+---------+------+-----+
+|                         time|device_id|length|width|
++-----------------------------+---------+------+-----+
+|2025-10-29T15:59:42.957+08:00|       d1|    14|   12|
+|2025-10-29T15:58:59.399+08:00|       d3|    15|   10|
+|2025-10-29T15:59:32.769+08:00|       d2|    13|   12|
++-----------------------------+---------+------+-----+
+
+-- Table creation statement
+CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD);
+
+-- Write data
+INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12);
+```
+
+### 6.1 bit\_count(num, bits)
+
+The `bit_count(num, bits)` function counts the number of 1s in the binary representation of the integer `num` under the specified bit width `bits`.
+
+#### 6.1.1 Syntax Definition
+
+```
+bit_count(num, bits) -> INT64 -- The return type is INT64
+```
+
+* Parameter Description
+
+  * **num**: Any integer value (INT32 or INT64)
+  * **bits**: Integer value, with a valid range of 2 to 64
+
+Note: An error will be raised if the number of `bits` is insufficient to represent `num` (using **two's complement signed representation**): `Argument exception, the scalar function num must be representable with the bits specified. [num] cannot be represented with [bits] bits.`
+
+* Usage Methods
+
+  * Two specific numbers: `bit_count(9, 64)`
+  * Column and a number: `bit_count(column1, 64)`
+  * Between two columns: `bit_count(column1, column2)`
+
+#### 6.1.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bit_count(2,8) from bit_table
++-----+
+|_col0|
++-----+
+|    1|
++-----+
+-- Two specific numbers
+IoTDB:database1> select distinct bit_count(-5,8) from bit_table
++-----+
+|_col0|
++-----+
+|    7|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bit_count(length,8) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    3|
+|    15|    4|
+|    13|    3|
++------+-----+
+-- Insufficient bits
+IoTDB:database1> select length,bit_count(length,2) from bit_table
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits.
+```
+
+### 6.2 bitwise\_and(x, y)
+
+The `bitwise_and(x, y)` function performs a logical AND operation on each bit of two integers x and y, based on their two's complement representation, and returns the bitwise AND result.
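+
+As a worked illustration of the `bitwise_and(19, 25)` call used in the examples below:
+
+```
+  19 = 10011 (binary)
+  25 = 11001 (binary)
+ AND = 10001 (binary) = 17
+```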
+
+#### 6.2.1 Syntax Definition
+
+```
+bitwise_and(x, y) -> INT64 -- The return type is INT64
+```
+
+* Parameter Description
+
+  * **x, y**: Must be integer values of data type INT32 or INT64
+* Usage Methods
+
+  * Two specific numbers: `bitwise_and(19, 25)`
+  * Column and a number: `bitwise_and(column1, 25)`
+  * Between two columns: `bitwise_and(column1, column2)`
+
+#### 6.2.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_and(19,25) from bit_table
++-----+
+|_col0|
++-----+
+|   17|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_and(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    8|
+|    15|    9|
+|    13|    9|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|   12|
+|    15|   10|   10|
+|    13|   12|   12|
++------+-----+-----+
+```
+
+### 6.3 bitwise\_not(x)
+
+The `bitwise_not(x)` function performs a logical NOT operation on each bit of the integer x, based on its two's complement representation, and returns the bitwise NOT result.
+
+#### 6.3.1 Syntax Definition
+
+```
+bitwise_not(x) -> INT64 -- The return type is INT64
+```
+
+* Parameter Description
+
+  * **x**: Must be an integer value of data type INT32 or INT64
+* Usage Methods
+
+  * Specific number: `bitwise_not(5)`
+  * Single column operation: `bitwise_not(column1)`
+
+#### 6.3.2 Usage Examples
+
+```
+-- Specific number
+IoTDB:database1> select distinct bitwise_not(5) from bit_table
++-----+
+|_col0|
++-----+
+|   -6|
++-----+
+-- Single column
+IoTDB:database1> select length, bitwise_not(length) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|  -15|
+|    15|  -16|
+|    13|  -14|
++------+-----+
+```
+
+### 6.4 bitwise\_or(x, y)
+
+The `bitwise_or(x, y)` function performs a logical OR operation on each bit of two integers x and y, based on their two's complement representation, and returns the bitwise OR result.
+
+#### 6.4.1 Syntax Definition
+
+```
+bitwise_or(x, y) -> INT64 -- The return type is INT64
+```
+
+* Parameter Description
+
+  * **x, y**: Must be integer values of data type INT32 or INT64
+* Usage Methods
+
+  * Two specific numbers: `bitwise_or(19, 25)`
+  * Column and a number: `bitwise_or(column1, 25)`
+  * Between two columns: `bitwise_or(column1, column2)`
+
+#### 6.4.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_or(19,25) from bit_table
++-----+
+|_col0|
++-----+
+|   27|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bitwise_or(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|   31|
+|    15|   31|
+|    13|   29|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|   14|
+|    15|   10|   15|
+|    13|   12|   13|
++------+-----+-----+
+```
+
+### 6.5 bitwise\_xor(x, y)
+
+The `bitwise_xor(x, y)` function performs a logical XOR (exclusive OR) operation on each bit of two integers x and y, based on their two's complement representation, and returns the bitwise XOR result. XOR rule: same bits result in 0, different bits result in 1.
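+
+As a worked illustration of the `bitwise_xor(19, 25)` call used in the examples below:
+
+```
+  19 = 10011 (binary)
+  25 = 11001 (binary)
+ XOR = 01010 (binary) = 10
+```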
+
+#### 6.5.1 Syntax Definition
+
+```
+bitwise_xor(x, y) -> INT64 -- The return type is INT64
+```
+
+* Parameter Description
+
+  * **x, y**: Must be integer values of data type INT32 or INT64
+* Usage Methods
+
+  * Two specific numbers: `bitwise_xor(19, 25)`
+  * Column and a number: `bitwise_xor(column1, 25)`
+  * Between two columns: `bitwise_xor(column1, column2)`
+
+#### 6.5.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table
++-----+
+|_col0|
++-----+
+|   10|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bitwise_xor(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|   23|
+|    15|   22|
+|    13|   20|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    2|
+|    15|   10|    5|
+|    13|   12|    1|
++------+-----+-----+
+```
+
+### 6.6 bitwise\_left\_shift(value, shift)
+
+The `bitwise_left_shift(value, shift)` function returns the result of shifting the binary representation of integer `value` left by `shift` bits. The left shift operation moves bits towards the higher-order direction, filling the vacated lower-order bits with 0s and discarding the higher-order bits that overflow. Equivalent to `value << shift`.
+
+#### 6.6.1 Syntax Definition
+
+```
+bitwise_left_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value
+```
+
+* Parameter Description
+
+  * **value**: The integer value to shift left. Must be of data type INT32 or INT64.
+  * **shift**: The number of bits to shift. Must be of data type INT32 or INT64.
+* Usage Methods
+
+  * Two specific numbers: `bitwise_left_shift(1, 2)`
+  * Column and a number: `bitwise_left_shift(column1, 2)`
+  * Between two columns: `bitwise_left_shift(column1, column2)`
+
+#### 6.6.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table
++-----+
+|_col0|
++-----+
+|    4|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|   56|
+|    15|   60|
+|    13|   52|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    0|
+|    15|   10|    0|
+|    13|   12|    0|
++------+-----+-----+
+```
+
+### 6.7 bitwise\_right\_shift(value, shift)
+
+The `bitwise_right_shift(value, shift)` function returns the result of logically (unsigned) right-shifting the binary representation of integer `value` by `shift` bits. The logical right shift moves bits towards the lower-order direction, filling the vacated higher-order bits with 0s and discarding the lower-order bits that overflow.
+
+#### 6.7.1 Syntax Definition
+
+```
+bitwise_right_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value
+```
+
+* Parameter Description
+
+  * **value**: The integer value to shift right. Must be of data type INT32 or INT64.
+  * **shift**: The number of bits to shift. Must be of data type INT32 or INT64.
+* Usage Methods
+
+  * Two specific numbers: `bitwise_right_shift(8, 3)`
+  * Column and a number: `bitwise_right_shift(column1, 3)`
+  * Between two columns: `bitwise_right_shift(column1, column2)`
+
+#### 6.7.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table
++-----+
+|_col0|
++-----+
+|    1|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    1|
+|    15|    1|
+|    13|    1|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    0|
+|    15|   10|    0|
+|    13|   12|    0|
++------+-----+-----+
+```
+
+### 6.8 bitwise\_right\_shift\_arithmetic(value, shift)
+
+The `bitwise_right_shift_arithmetic(value, shift)` function returns the result of arithmetically right-shifting the binary representation of integer `value` by `shift` bits. The arithmetic right shift moves bits towards the lower-order direction, discarding the lower-order bits that overflow and filling the vacated higher-order bits with the sign bit (0 for positive numbers, 1 for negative numbers), so the sign of the number is preserved.
+
+#### 6.8.1 Syntax Definition
+
+```
+bitwise_right_shift_arithmetic(value, shift) -> [same as value] -- The return type is the same as the data type of value
+```
+
+* Parameter Description
+
+  * **value**: The integer value to shift right. Must be of data type INT32 or INT64.
+  * **shift**: The number of bits to shift. Must be of data type INT32 or INT64.
+* Usage Methods
+
+  * Two specific numbers: `bitwise_right_shift_arithmetic(12, 2)`
+  * Column and a number: `bitwise_right_shift_arithmetic(column1, 64)`
+  * Between two columns: `bitwise_right_shift_arithmetic(column1, column2)`
+
+#### 6.8.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_right_shift_arithmetic(12,2) from bit_table
++-----+
+|_col0|
++-----+
+|    3|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    1|
+|    15|    1|
+|    13|    1|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    0|
+|    15|   10|    0|
+|    13|   12|    0|
++------+-----+-----+
+```
+
+
+## 7. Conditional Expressions
+
+### 7.1 CASE
+
+CASE expressions come in two forms: **Simple CASE** and **Searched CASE**.
+
+#### 7.1.1 Simple CASE
+
+The simple form evaluates each value expression from left to right until it finds a match with the given expression:
+
+```SQL
+CASE expression
+    WHEN value THEN result
+    [ WHEN ... ]
+    [ ELSE result ]
+END
+```
+
+If a matching value is found, the corresponding result is returned. If no match is found, the result from the `ELSE` clause (if provided) is returned; otherwise, `NULL` is returned.
+
+Example:
+
+```SQL
+SELECT a,
+    CASE a
+        WHEN 1 THEN 'one'
+        WHEN 2 THEN 'two'
+        ELSE 'many'
+    END
+```
+
+#### 7.1.2 Searched CASE
+
+The searched form evaluates each Boolean condition from left to right until a `TRUE` condition is found, then returns the corresponding result:
+
+```SQL
+CASE
+    WHEN condition THEN result
+    [ WHEN ... ]
+    [ ELSE result ]
+END
+```
+
+If no condition evaluates to `TRUE`, the `ELSE` clause result (if provided) is returned; otherwise, `NULL` is returned.
+
+Example:
+
+```SQL
+SELECT a, b,
+    CASE
+        WHEN a = 1 THEN 'aaa'
+        WHEN b = 2 THEN 'bbb'
+        ELSE 'ccc'
+    END
+```
+
+### 7.2 COALESCE
+
+Returns the first non-null value from the given list of parameters. For example, `coalesce(temperature, 0.0)` returns `0.0` for rows where `temperature` is `NULL`.
+
+```SQL
+coalesce(value1, value2[, ...])
+```
+
+## 8. Conversion Functions
+
+### 8.1 Conversion Functions
+
+#### 8.1.1 cast(value AS type) → type
+
+Explicitly converts a value to the specified type. This can be used to convert strings (`VARCHAR`) to numeric types, or numeric values to string types. Starting from V2.0.8-beta, the OBJECT type can be explicitly cast to the STRING type.
+
+If the conversion fails, a runtime error is thrown.
+
+Example:
+
+```SQL
+SELECT *
+  FROM table1
+  WHERE CAST(time AS DATE)
+  IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE));
+```
+
+#### 8.1.2 try_cast(value AS type) → type
+
+Similar to `CAST()`, but if the conversion fails, it returns `NULL` instead of throwing an error.
+
+Example:
+
+```SQL
+SELECT *
+  FROM table1
+  WHERE try_cast(time AS DATE)
+  IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE));
+```
+
+### 8.2 Format Function
+
+This function generates and returns a formatted string based on a specified format string and input arguments. Similar to Java's `String.format` or C's `printf`, it allows developers to construct dynamic string templates using placeholder syntax. Predefined format specifiers in the template are replaced with the corresponding argument values, producing a complete string that adheres to specific formatting requirements.
+
+#### 8.2.1 Syntax
+
+```SQL
+format(pattern, ...args) -> STRING
+```
+
+**Parameters**
+
+* `pattern`: A format string containing static text and one or more format specifiers (e.g., `%s`, `%d`), or any expression returning a `STRING`/`TEXT` type.
+* `args`: Input arguments to replace format specifiers. Constraints:
+  * Number of arguments ≥ 1.
+  * Multiple arguments must be comma-separated (e.g., `arg1, arg2`).
+  * Total arguments can exceed the number of specifiers in `pattern` but cannot be fewer; otherwise an exception is triggered.
+
+**Return Value**
+
+* Formatted result string of type `STRING`.
+
+#### 8.2.2 Usage Examples
+
+1. Format Floating-Point Numbers
+   ```SQL
+   IoTDB:database1> SELECT format('%.5f', humidity) FROM table1 WHERE humidity = 35.4;
+   +--------+
+   |   _col0|
+   +--------+
+   |35.40000|
+   +--------+
+   ```
+2. Format Integers
+   ```SQL
+   IoTDB:database1> SELECT format('%03d', 8) FROM table1 LIMIT 1;
+   +-----+
+   |_col0|
+   +-----+
+   |  008|
+   +-----+
+   ```
+3. Format Dates and Timestamps
+
+* Locale-Specific Date
+
+```SQL
+IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) FROM table1 LIMIT 1;
++-----------------------+
+|                  _col0|
++-----------------------+
+|Monday, January 1, 2024|
++-----------------------+
+```
+
+* Remove Timezone Information
+
+```SQL
+IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1;
++-----------------------+
+|                  _col0|
++-----------------------+
+|2024-01-01 00:00:00.000|
++-----------------------+
+```
+
+* Second-Level Timestamp Precision
+
+```SQL
+IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1;
++-------------------+
+|              _col0|
++-------------------+
+|2024-01-01 00:00:00|
++-------------------+
+```
+
+* Date/Time Format Symbols
+
+| **Symbol** | **Description** |
+| ---------- | --------------- |
+| 'H' | 24-hour format (two digits, zero-padded), i.e. 00 - 23 |
+| 'I' | 12-hour format (two digits, zero-padded), i.e. 01 - 12 |
+| 'k' | 24-hour format (no padding), i.e. 0 - 23 |
+| 'l' | 12-hour format (no padding), i.e. 1 - 12 |
+| 'M' | Minute (two digits, zero-padded), i.e. 00 - 59 |
+| 'S' | Second (two digits, zero-padded; supports leap seconds), i.e. 00 - 60 |
+| 'L' | Millisecond (three digits, zero-padded), i.e. 000 - 999 |
+| 'N' | Nanosecond (nine digits, zero-padded), i.e. 000000000 - 999999999 |
+| 'p' | Locale-specific lowercase AM/PM marker (e.g., "am", "pm"). Prefix with `T` to force uppercase (e.g., "AM"). |
+| 'z' | RFC 822 timezone offset from GMT (e.g., `-0800`). Adjusts for daylight saving. Uses the JVM's default timezone for `long`/`Long`/`Date` arguments. |
+| 'Z' | Timezone abbreviation (e.g., "PST"). Adjusts for daylight saving. Uses the JVM's default timezone; the Formatter's timezone overrides the argument's timezone if specified. |
+| 's' | Seconds since the Unix epoch (1970-01-01 00:00:00 UTC), i.e. Long.MIN\_VALUE/1000 to Long.MAX\_VALUE/1000 |
+| 'Q' | Milliseconds since the Unix epoch, i.e. Long.MIN\_VALUE to Long.MAX\_VALUE |
+
+* Common Date/Time Conversion Characters
+
+| **Symbol** | **Description** |
+| ---------- | --------------- |
+| 'B' | Locale-specific full month name, for example "January", "February" |
+| 'b' | Locale-specific abbreviated month name, for example "Jan", "Feb" |
+| 'h' | Same as `b` |
+| 'A' | Locale-specific full weekday name, for example "Sunday", "Monday" |
+| 'a' | Locale-specific short weekday name, for example "Sun", "Mon" |
+| 'C' | Year divided by 100 (two digits, zero-padded) |
+| 'Y' | Year (minimum 4 digits, zero-padded) |
+| 'y' | Last two digits of year (zero-padded) |
+| 'j' | Day of year (three digits, zero-padded) |
+| 'm' | Month (two digits, zero-padded) |
+| 'd' | Day of month (two digits, zero-padded) |
+| 'e' | Day of month (no padding) |
+
+4. Format Strings
+   ```SQL
+   IoTDB:database1> SELECT format('The measurement status is: %s', status) FROM table2 LIMIT 1;
+   +-------------------------------+
+   |                          _col0|
+   +-------------------------------+
+   |The measurement status is: true|
+   +-------------------------------+
+   ```
+5. Format Percentage Sign
+   ```SQL
+   IoTDB:database1> SELECT format('%s%%', 99.9) FROM table1 LIMIT 1;
+   +-----+
+   |_col0|
+   +-----+
+   |99.9%|
+   +-----+
+   ```
+
+#### 8.2.3 Format Conversion Failure Scenarios
+
+1. Type Mismatch Errors
+
+* Timestamp Type Conflict
+
+  Occurs if the format specifier includes time-related tokens (e.g., `%Y-%m-%d`) but the argument:
+
+  * Is not a `DATE`/`TIMESTAMP` type value.
+  * Requires sub-day precision (e.g., `%H`, `%M`) while the argument is not `TIMESTAMP`.
+
+```SQL
+-- Example 1
+IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float)
+
+-- Example 2
+IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float)
+```
+
+* Floating-Point Type Conflict
+
+  Occurs when using `%f` with non-numeric arguments (e.g., strings or booleans):
+
+```SQL
+IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean)
+```
+
+2. Argument Count Mismatch
+
+   The number of arguments must equal or exceed the number of format specifiers.
+
+   ```SQL
+   IoTDB:database1> SELECT format('%.5f %03d', humidity) FROM table1 WHERE humidity = 35.4;
+   Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d')
+   ```
+3. Invalid Invocation Errors
+
+   Triggered if:
+
+   * Total arguments < 2 (a call must include `pattern` and at least one argument).
+   * `pattern` is not of type `STRING`/`TEXT`.
+
+```SQL
+-- Example 1
+IoTDB:database1> select format('%s') from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+
+-- Example 2
+IoTDB:database1> select format(123, humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+```
+
+
+## 9. String Functions and Operators
+
+### 9.1 String Operators
+
+#### 9.1.1 || Operator
+
+The `||` operator is used for string concatenation and functions the same as the `concat` function.
+
+#### 9.1.2 LIKE Statement
+
+The `LIKE` statement is used for pattern matching. For detailed usage, refer to Pattern Matching: [LIKE](#_10-1-like).
+
+### 9.2 String Functions
+
+| Function Name | Description | Input | Output | Usage |
+| :------------ | :---------- | :---- | :----- | :---- |
+| `length` | Returns the number of characters in a string (not byte length). | `string` (the string whose length is to be calculated) | INT32 | length(string) |
+| `upper` | Converts all letters in a string to uppercase. | string | String | upper(string) |
+| `lower` | Converts all letters in a string to lowercase. | string | String | lower(string) |
+| `trim` | Removes specified leading and/or trailing characters from a string. **Parameters:**<br>- `specification` (optional): Specifies which side to trim:<br>- `BOTH`: Removes characters from both sides (default).<br>- `LEADING`: Removes characters from the beginning.<br>- `TRAILING`: Removes characters from the end.<br>- `trimcharacter` (optional): Character to be removed (default is whitespace).<br>- `string`: The target string. | string | String | trim([ [ specification ] [ trimcharacter ] FROM ] string)<br>Example: `trim('!' FROM '!foo!')` returns `'foo'` |
+| `strpos` | Returns the position of the first occurrence of `subStr` in `sourceStr`. **Notes:**<br>- Position starts at `1`.<br>- Returns `0` if `subStr` is not found.<br>- Positioning is based on characters, not byte arrays. | `sourceStr` (string to be searched), `subStr` (substring to find) | INT32 | strpos(sourceStr, subStr) |
+| `starts_with` | Checks if `sourceStr` starts with the specified `prefix`. | `sourceStr`, `prefix` | Boolean | starts_with(sourceStr, prefix) |
+| `ends_with` | Checks if `sourceStr` ends with the specified `suffix`. | `sourceStr`, `suffix` | Boolean | ends_with(sourceStr, suffix) |
+| `concat` | Concatenates `string1, string2, ..., stringN`. Equivalent to the `\|\|` operator. | `string`, `text` | String | concat(str1, str2, ...) or str1 \|\| str2 ... |
+| `strcmp` | Compares two strings lexicographically. **Returns:**<br>- `-1` if `str1 < str2`<br>- `0` if `str1 = str2`<br>- `1` if `str1 > str2`<br>- `NULL` if either `str1` or `str2` is `NULL` | `string1`, `string2` | INT32 | strcmp(str1, str2) |
+| `replace` | Removes all occurrences of `search` in `string`. | `string`, `search` | String | replace(string, search) |
+| `replace` | Replaces all occurrences of `search` in `string` with `replace`. | `string`, `search`, `replace` | String | replace(string, search, replace) |
+| `substring` | Extracts a substring from `start_index` to the end of the string. **Notes:**<br>- `start_index` starts at `1`.<br>- Returns `NULL` if the input is `NULL`.<br>- Throws an error if `start_index` is greater than the string length. | `string`, `start_index` | String | substring(string from start_index) or substring(string, start_index) |
+| `substring` | Extracts a substring of `length` characters starting from `start_index`. **Notes:**<br>- `start_index` starts at `1`.<br>- Returns `NULL` if the input is `NULL`.<br>- Throws an error if `start_index` is greater than the string length.<br>- Throws an error if `length` is negative.<br>- If `start_index + length` exceeds `int.MAX`, an overflow error may occur. | `string`, `start_index`, `length` | String | substring(string from start_index for length) or substring(string, start_index, length) |
+
+## 10. Pattern Matching Functions
+
+### 10.1 LIKE
+
+#### 10.1.1 Usage
+
+The `LIKE` operator is used to compare a value with a pattern. It is commonly used in the `WHERE` clause to match specific patterns within strings.
+
+#### 10.1.2 Syntax
+
+```SQL
+... column [NOT] LIKE 'pattern' ESCAPE 'character';
+```
+
+#### 10.1.3 Matching Rules
+
+- Matching is case-sensitive.
+- The pattern supports two wildcard characters:
+  - `_` matches any single character
+  - `%` matches zero or more characters
+
+#### 10.1.4 Notes
+
+- `LIKE` pattern matching applies to the entire string by default. Therefore, to match a sequence anywhere within a string, the pattern must start and end with a percent sign.
+- To match the escape character itself, double it: for example, use `\\` to match `\`.
+
+#### 10.1.5 Examples
+
+#### **Example 1: Match Strings Starting with a Specific Character**
+
+- **Description:** Find all names that start with the letter `E` (e.g., `Europe`).
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'E%';
+```
+
+#### **Example 2: Exclude a Specific Pattern**
+
+- **Description:** Find all names that do **not** start with the letter `E`.
+
+```SQL
+SELECT * FROM table1 WHERE continent NOT LIKE 'E%';
+```
+
+#### **Example 3: Match Strings of a Specific Length**
+
+- **Description:** Find all names that start with `A`, end with `a`, and have exactly two characters in between (e.g., `Asia`).
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'A__a';
+```
+
+#### **Example 4: Escape Special Characters**
+
+- **Description:** Find all names that start with `South_` (e.g., `South_America`). The underscore (`_`) is a wildcard character, so it needs to be escaped using `\`.
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\';
+```
+
+#### **Example 5: Match the Escape Character Itself**
+
+- **Description:** Find all names that start with `South\`. Since `\` is the escape character, it must itself be escaped as `\\`.
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\';
+```
+
+### 10.2 regexp_like
+
+#### 10.2.1 Usage
+
+Evaluates whether the regular expression pattern is present within the given string.
+
+#### 10.2.2 Syntax
+
+```SQL
+regexp_like(string, pattern);
+```
+
+#### 10.2.3 Notes
+
+- The pattern for `regexp_like` only needs to be contained within the string, and does not need to match the entire string.
+- To match the entire string, use the `^` and `$` anchors: `^` signifies the start of the string, and `$` signifies the end of the string.
+- Regular expressions use the Java-defined regular syntax, but there are the following exceptions to be aware of:
+  - Multiline mode
+    1. Enabled by: `(?m)`.
+    2. Recognizes only `\n` as the line terminator.
+    3. Does not support the `(?d)` flag, and its use is prohibited.
+  - Case-insensitive matching
+    1. Enabled by: `(?i)`.
+    2. Based on Unicode rules; does not support context-dependent and localized matching.
+    3. Does not support the `(?u)` flag, and its use is prohibited.
+  - Character classes
+    1. Within character classes (e.g., `[A-Z123]`), `\Q` and `\E` are not supported and are treated as literals.
+  - Unicode character classes (`\p{prop}`)
+    1. Underscores in names: all underscores in names must be removed (e.g., `OldItalic` instead of `Old_Italic`).
+    2. Scripts: specify directly, without the `Is`, `script=`, or `sc=` prefixes (e.g., `\p{Hiragana}`).
+    3. Blocks: must use the `In` prefix; the `block=` and `blk=` prefixes are not supported (e.g., `\p{InMongolian}`).
+    4. Categories: specify directly, without the `Is`, `general_category=`, or `gc=` prefixes (e.g., `\p{L}`).
+    5. Binary properties: specify directly, without `Is` (e.g., `\p{NoncharacterCodePoint}`).
+
+#### 10.2.4 Examples
+
+#### **Example 1: Matching strings containing a specific pattern**
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true
+```
+
+- **Explanation**: Determines whether the string `'1a 2b 14m'` contains a substring that matches the pattern `\d+b`.
+  - `\d+` means "one or more digits".
+ - `b` represents the letter b. + - In `'1a 2b 14m'`, the substring `'2b'` matches this pattern, so it returns `true`. + + +#### **Example 2: Matching the entire string** + +```SQL +SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false +``` + +- **Explanation**: Checks if the string `'1a 2b 14m'` matches the pattern `^\\d+b$` exactly. + - `\d+` means "one or more digits". + - `b` represents the letter b. + - `'1a 2b 14m'` does not match this pattern because it does not start with digits and does not end with `b`, so it returns `false`. + +## 11. Timeseries Windowing Functions + +The sample data is as follows: + +```SQL +IoTDB> SELECT * FROM bid; ++-----------------------------+--------+-----+ +| time|stock_id|price| ++-----------------------------+--------+-----+ +|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:15:00.000+08:00| TESL|195.0| ++-----------------------------+--------+-----+ + +-- Create table statement +CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD); +-- Insert data +INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0); +``` + +### 11.1 HOP + +#### 11.1.1 Function Description + +The HOP function segments data into overlapping time windows for analysis, assigning each row to all windows that overlap with its timestamp. If windows overlap (when SLIDE < SIZE), data will be duplicated across multiple windows. + +#### 11.1.2 Function Definition + +```SQL +HOP(data, timecol, size, slide[, origin]) +``` + +#### 11.1.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | --------------------------------- | ------------------------- | +| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | +| TIMECOL | Scalar | String (default: 'time') | Time column | +| SIZE | Scalar | Long integer | Window size | +| SLIDE | Scalar | Long integer | Sliding step | +| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | + + +#### 11.1.4 Returned Results + +The HOP function returns: + +* `window_start`: Window start time (inclusive) +* `window_end`: Window end time (exclusive) +* Pass-through columns: All input columns from DATA + +#### 11.1.5 Usage Example + +```SQL +IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| 
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 201.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.2 SESSION + +#### 11.2.1 Function Description + +The SESSION function groups data into sessions based on time intervals. It checks the time gap between consecutive rows—rows with gaps smaller than the threshold (GAP) are grouped into the current window, while larger gaps trigger a new window. 
+ +#### 11.2.2 Function Definition + +```SQL +SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap) +``` +#### 11.2.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| TIMECOL | Scalar | String (default: 'time') | Time column name | +| GAP | Scalar | Long integer | Session gap threshold | + +#### 11.2.4 Returned Results + +The SESSION function returns: + +* `window_start`: Time of the first row in the session +* `window_end`: Time of the last row in the session +* Pass-through columns: All input columns from DATA + +#### 11.2.5 Usage Example + +```SQL +IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY SESSION when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.3 VARIATION + +#### 11.3.1 Function Description + +The VARIATION function groups data based on value differences. The first row becomes the baseline for the first window. Subsequent rows are compared to the baseline—if the difference is within the threshold (DELTA), they join the current window; otherwise, a new window starts with that row as the new baseline. 
+ +#### 11.3.2 Function Definition + +```sql +VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta) +``` + +#### 11.3.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| COL | Scalar | String | Column for difference calculation | +| DELTA | Scalar | Float | Difference threshold | + +#### 11.3.4 Returned Results + +The VARIATION function returns: + +* `window_index`: Window identifier +* Pass-through columns: All input columns from DATA + +#### 11.3.5 Usage Example + +```sql +IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY VARIATION when combined with GROUP BY +IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id; ++-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.4 CAPACITY + +#### 11.4.1 Function Description + +The CAPACITY function groups data into fixed-size windows, where each window contains up to SIZE rows. 
+ +#### 11.4.2 Function Definition + +```sql +CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size) +``` + +#### 11.4.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| SIZE | Scalar | Long integer | Window size (row count) | + +#### 11.4.4 Returned Results + +The CAPACITY function returns: + +* `window_index`: Window identifier +* Pass-through columns: All input columns from DATA + +#### 11.4.5 Usage Example + +```sql +IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY COUNT when combined with GROUP BY +IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id; ++-----------------------------+-----------------------------+--------+-----+ +| start_time| end_time|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|101.5| +|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.5 TUMBLE + +#### 11.5.1 Function Description + +The TUMBLE function assigns each row to a non-overlapping, fixed-size time window based on a timestamp attribute. 
+ +#### 11.5.2 Function Definition + +```sql +TUMBLE(data, timecol, size[, origin]) +``` +#### 11.5.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | --------------------------------- | ------------------------- | +| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | +| TIMECOL | Scalar | String (default: 'time') | Time column | +| SIZE | Scalar | Long integer (positive) | Window size | +| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | + +#### 11.5.4 Returned Results + +The TUMBLE function returns: + +* `window_start`: Window start time (inclusive) +* `window_end`: Window end time (exclusive) +* Pass-through columns: All input columns from DATA + +#### 11.5.5 Usage Example + +```SQL +IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.6 CUMULATE + +#### 11.6.1 Function Description + +The CUMULATE function creates expanding windows from an initial window, maintaining the same start time while incrementally extending the end time by STEP until reaching SIZE. Each window contains all elements within its range. For example, with a 1-hour STEP and 24-hour SIZE, daily windows would be: `[00:00, 01:00)`, `[00:00, 02:00)`, ..., `[00:00, 24:00)`. 
+ +#### 11.6.2 Function Definition + +```sql +CUMULATE(data, timecol, size, step[, origin]) +``` + +#### 11.6.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | --------------------------------- | --------------------------------------------------- | +| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | +| TIMECOL | Scalar | String (default: 'time') | Time column | +| SIZE | Scalar | Long integer (positive) | Window size (must be an integer multiple of STEP) | +| STEP | Scalar | Long integer (positive) | Expansion step | +| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | + +> Note: An error `Cumulative table function requires size must be an integral multiple of step` occurs if SIZE is not divisible by STEP. + +#### 11.6.4 Returned Results + +The CUMULATE function returns: + +* `window_start`: Window start time (inclusive) +* `window_end`: Window end time (exclusive) +* Pass-through columns: All input columns from DATA + +#### 11.6.5 Usage Example + +```sql +IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| TESL| 201.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| 
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|    AAPL|             100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|    AAPL|             101.5|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
diff --git a/src/UserGuide/Master/Table/SQL-Manual/Basis-Function_timecho.md b/src/UserGuide/Master/Table/SQL-Manual/Basis-Function_timecho.md
new file mode 100644
index 000000000..7f354dc89
--- /dev/null
+++ b/src/UserGuide/Master/Table/SQL-Manual/Basis-Function_timecho.md
@@ -0,0 +1,2037 @@
+
+
+
+# Basic Functions
+
+## 1. Comparison Functions and Operators
+
+### 1.1 Basic Comparison Operators
+
+Comparison operators are used to compare two values and return the comparison result (`true` or `false`).
+
+| Operators | Description |
+| :-------- | :----------------------- |
+| < | Less than |
+| > | Greater than |
+| <= | Less than or equal to |
+| >= | Greater than or equal to |
+| = | Equal to |
+| <> | Not equal to |
+| != | Not equal to |
+
+#### 1.1.1 Comparison Rules
+
+1. All types can be compared with themselves.
+2. Numeric types (INT32, INT64, FLOAT, DOUBLE, TIMESTAMP) can be compared with each other.
+3. Character types (STRING, TEXT) can also be compared with each other.
+4. Comparisons between types other than those mentioned above will result in an error.
+
+### 1.2 BETWEEN Operator
+
+1. The `BETWEEN` operator is used to determine whether a value falls within a specified range.
+2. The `NOT BETWEEN` operator is used to determine whether a value does not fall within a specified range.
+3. The `BETWEEN` and `NOT BETWEEN` operators can be used to evaluate any sortable type.
+4. The value, minimum, and maximum parameters for `BETWEEN` and `NOT BETWEEN` must be of the same type; otherwise an error will occur.
+
+Syntax:
+
+```SQL
+value BETWEEN min AND max
+value NOT BETWEEN min AND max
+```
+
+Example 1: BETWEEN
+
+```SQL
+-- Query records where temperature is between 85.0 and 90.0
+SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0;
+```
+
+Example 2: NOT BETWEEN
+
+```SQL
+-- Query records where humidity is not between 35.0 and 40.0
+SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0;
+```
+
+### 1.3 IS NULL Operator
+
+1. The `IS NULL` and `IS NOT NULL` operators apply to all data types.
+
+Example 1: Query records where temperature is NULL
+
+```SQL
+SELECT * FROM table1 WHERE temperature IS NULL;
+```
+
+Example 2: Query records where humidity is not NULL
+
+```SQL
+SELECT * FROM table1 WHERE humidity IS NOT NULL;
+```
+
+### 1.4 IN Operator
+
+1. The `IN` operator can be used in the `WHERE` clause to compare a column with a list of values.
+2. These values can be provided by a static array or scalar expressions.
+
+Syntax:
+
+```SQL
WHERE column [NOT] IN ('value1','value2', expression1) +``` + +Example 1: Static array: Query records where region is 'Beijing' or 'Shanghai' + +```SQL +SELECT * FROM table1 WHERE region IN ('Beijing', 'Shanghai'); +--Equivalent to +SELECT * FROM region WHERE name = 'Beijing' OR name = 'Shanghai'; +``` + +Example 2: Scalar expression: Query records where temperature is among specific values + +```SQL +SELECT * FROM table1 WHERE temperature IN (85.0, 90.0); +``` + +Example 3: Query records where region is not 'Beijing' or 'Shanghai' + +```SQL +SELECT * FROM table1 WHERE region NOT IN ('Beijing', 'Shanghai'); +``` + +### 1.5 GREATEST and LEAST + +The `GREATEST` function returns the maximum value from a list of arguments, while the `LEAST` function returns the minimum value. The return type matches the input data type. + +Key Behaviors: +1. NULL Handling: Returns NULL if all arguments are NULL. +2. Parameter Requirements: Requires at least 2 arguments. +3. Type Constraints: All arguments must have the same data type. +4. Supported Types: `BOOLEAN`、`FLOAT`、`DOUBLE`、`INT32`、`INT64`、`STRING`、`TEXT`、`TIMESTAMP`、`DATE` + +**Syntax:** + +```sql + greatest(value1, value2, ..., valueN) + least(value1, value2, ..., valueN) +``` + +**Examples:** + +```sql +-- Retrieve the maximum value between `temperature` and `humidity` in `table2` +SELECT GREATEST(temperature,humidity) FROM table2; + +-- Retrieve the minimum value between `temperature` and `humidity` in `table2` +SELECT LEAST(temperature,humidity) FROM table2; +``` + +## 2. Aggregate functions + +### 2.1 Overview + +1. Aggregate functions are many-to-one functions. They perform aggregate calculations on a set of values to obtain a single aggregate result. + +2. Except for `COUNT()`, all other aggregate functions ignore null values and return null when there are no input rows or all values are null. For example, `SUM()` returns null instead of zero, and `AVG()` does not include null values in the count. + +### 2.2 Supported Aggregate Functions + +| Function Name | Description | Allowed Input Types | Output Type | +|:-----------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------| +| COUNT | Counts the number of data points. | All types | INT64 | +| COUNT_IF | COUNT_IF(exp) counts the number of rows that satisfy a specified boolean expression. | `exp` must be a boolean expression,(e.g. `count_if(temperature>20)`) | INT64 | +| APPROX_COUNT_DISTINCT | The APPROX_COUNT_DISTINCT(x[, maxStandardError]) function provides an approximation of COUNT(DISTINCT x), returning the estimated number of distinct input values. | `x`: The target column to be calculated, supports all data types.
`maxStandardError` (optional): Specifies the maximum standard error allowed for the function's result. Valid range is [0.0040625, 0.26]. Defaults to 0.023 if not specified. | INT64 | +| APPROX_MOST_FREQUENT | The APPROX_MOST_FREQUENT(x, k, capacity) function is used to approximately calculate the top k most frequent elements in a dataset. It returns a JSON-formatted string where the keys are the element values and the values are their corresponding approximate frequencies. (Available since V2.0.5.1) | `x` : The column to be calculated, supporting all existing data types in IoTDB;
`k`: The number of top-k most frequent values to return;
`capacity`: The number of buckets used for computation, which relates to memory usage—a larger value reduces error but consumes more memory, while a smaller value increases error but uses less memory. | STRING | +| SUM | Calculates the sum. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| AVG | Calculates the average. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| MAX | Finds the maximum value. | All types | Same as input type | +| MIN | Finds the minimum value. | All types | Same as input type | +| FIRST | Finds the value with the smallest timestamp that is not NULL. | All types | Same as input type | +| LAST | Finds the value with the largest timestamp that is not NULL. | All types | Same as input type | +| STDDEV | Alias for STDDEV_SAMP, calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_POP | Calculates the population standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_SAMP | Calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VARIANCE | Alias for VAR_SAMP, calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_POP | Calculates the population variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_SAMP | Calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| EXTREME | Finds the value with the largest absolute value. If the largest absolute values of positive and negative values are equal, returns the positive value. | INT32 INT64 FLOAT DOUBLE | Same as input type | +| MODE | Finds the mode. Note: 1. There is a risk of memory exception when the number of distinct values in the input sequence is too large; 2. If all elements have the same frequency, i.e., there is no mode, a random element is returned; 3. If there are multiple modes, a random mode is returned; 4. NULL values are also counted in frequency, so even if not all values in the input sequence are NULL, the final result may still be NULL. | All types | Same as input type | +| MAX_BY | MAX_BY(x, y) finds the value of x corresponding to the maximum y in the binary input x and y. MAX_BY(time, x) returns the timestamp when x is at its maximum. | x and y can be of any type | Same as the data type of the first input x | +| MIN_BY | MIN_BY(x, y) finds the value of x corresponding to the minimum y in the binary input x and y. MIN_BY(time, x) returns the timestamp when x is at its minimum. | x and y can be of any type | Same as the data type of the first input x | +| FIRST_BY | FIRST_BY(x, y) finds the value of x in the same row when y is the first non-null value. | x and y can be of any type | Same as the data type of the first input x | +| LAST_BY | LAST_BY(x, y) finds the value of x in the same row when y is the last non-null value. | x and y can be of any type | Same as the data type of the first input x | + + +### 2.3 Examples + +#### 2.3.1 Example Data + +The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results. + +#### 2.3.2 Count + +Counts the number of rows in the entire table and the number of non-null values in the `temperature` column. + +```SQL +IoTDB> select count(*), count(temperature) from table1; +``` + +The execution result is as follows: + +> Note: Only the COUNT function can be used with *, otherwise an error will occur. 
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+|   18|   12|
++-----+-----+
+Total line number = 1
+It costs 0.834s
+```
+
+
+#### 2.3.3 Count_if
+
+Counts the non-NULL `arrival_time` records in `table2`.
+
+```sql
+select count_if(arrival_time is not null) from table2;
+```
+
+The execution result is as follows:
+
+```sql
++-----+
+|_col0|
++-----+
+|    4|
++-----+
+Total line number = 1
+It costs 0.047s
+```
+
+#### 2.3.4 Approx_count_distinct
+
+Retrieve the number of distinct values in the `temperature` column from `table1`.
+
+```sql
+IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1;
+IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1;
+```
+
+Both statements return the same result:
+
+```sql
++------+------+
+|origin|approx|
++------+------+
+|     3|     3|
++------+------+
+Total line number = 1
+It costs 0.022s
+```
+
+#### 2.3.5 Approx_most_frequent
+
+Query the top 2 most frequent values in the `temperature` column of `table1`.
+
+```sql
+IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1;
+```
+
+The execution result is as follows:
+
+```sql
++-------------------+
+|               topk|
++-------------------+
+|{"85.0":6,"90.0":5}|
++-------------------+
+Total line number = 1
+It costs 0.064s
+```
+
+
+#### 2.3.6 First
+
+Finds the values with the smallest timestamp that are not NULL in the `temperature` and `humidity` columns.
+
+```SQL
+IoTDB> select first(temperature), first(humidity) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+| 90.0| 35.1|
++-----+-----+
+Total line number = 1
+It costs 0.170s
+```
+
+#### 2.3.7 Last
+
+Finds the values with the largest timestamp that are not NULL in the `temperature` and `humidity` columns.
+
+```SQL
+IoTDB> select last(temperature), last(humidity) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+| 90.0| 34.8|
++-----+-----+
+Total line number = 1
+It costs 0.211s
+```
+
+#### 2.3.8 First_by
+
+Finds the `time` value of the row with the smallest timestamp that is not NULL in the `temperature` column, and the `humidity` value of the row with the smallest timestamp that is not NULL in the `temperature` column.
+
+```SQL
+IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-26T13:37:00.000+08:00| 35.1|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.269s
+```
+
+#### 2.3.9 Last_by
+
+Queries the `time` value of the row with the largest timestamp that is not NULL in the `temperature` column, and the `humidity` value of the row with the largest timestamp that is not NULL in the `temperature` column.
+
+```SQL
+IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-30T14:30:00.000+08:00| 34.8|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.070s
+```
+
+#### 2.3.10 Max_by
+
+Queries the `time` value of the row where the `temperature` column is at its maximum, and the `humidity` value of the row where the `temperature` column is at its maximum.
+ +```SQL +IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-30T09:30:00.000+08:00| 35.2| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.172s +``` + +#### 2.3.11 Min_by + +Queries the `time` value of the row where the `temperature` column is at its minimum, and the `humidity` value of the row where the `temperature` column is at its minimum. + +```SQL +select min_by(time, temperature), min_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-29T10:00:00.000+08:00| null| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.244s +``` + + +## 3. Logical operators + +### 3.1 Overview + +Logical operators are used to combine conditions or negate conditions, returning a Boolean result (`true` or `false`). + +Below are the commonly used logical operators along with their descriptions: + +| Operator | Description | Example | +| :------- | :-------------------------------- | :------ | +| AND | True only if both values are true | a AND b | +| OR | True if either value is true | a OR b | +| NOT | True when the value is false | NOT a | + +### 3.2 Impact of NULL on Logical Operators + +#### 3.2.1 AND Operator + +- If one or both sides of the expression are `NULL`, the result may be `NULL`. +- If one side of the `AND` operator is `FALSE`, the expression result is `FALSE`. + +Examples: + +```SQL +NULL AND true -- null +NULL AND false -- false +NULL AND NULL -- null +``` + +#### 3.2.2 OR Operator + +- If one or both sides of the expression are `NULL`, the result may be `NULL`. +- If one side of the `OR` operator is `TRUE`, the expression result is `TRUE`. + +Examples: + +```SQL +NULL OR NULL -- null +NULL OR false -- null +NULL OR true -- true +``` + +##### 3.2.2.1 Truth Table + +The following truth table illustrates how `NULL` is handled in `AND` and `OR` operators: + +| a | b | a AND b | a OR b | +| :---- | :---- | :------ | :----- | +| TRUE | TRUE | TRUE | TRUE | +| TRUE | FALSE | FALSE | TRUE | +| TRUE | NULL | NULL | TRUE | +| FALSE | TRUE | FALSE | TRUE | +| FALSE | FALSE | FALSE | FALSE | +| FALSE | NULL | FALSE | NULL | +| NULL | TRUE | NULL | TRUE | +| NULL | FALSE | FALSE | NULL | +| NULL | NULL | NULL | NULL | + +#### 3.2.3 NOT Operator + +The logical negation of `NULL` remains `NULL`. + +Example: + +```SQL +NOT NULL -- null +``` + +##### 3.2.3.1 Truth Table + +The following truth table illustrates how `NULL` is handled in the `NOT` operator: + +| a | NOT a | +| :---- | :---- | +| TRUE | FALSE | +| FALSE | TRUE | +| NULL | NULL | + +## 4. Date and Time Functions and Operators + +### 4.1 now() -> Timestamp + +Returns the current timestamp. + +### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp + +The `date_bin` function is used for handling time data by rounding a timestamp (`Timestamp`) to the boundary of a specified time interval (`interval`). + +#### **Syntax:** + +```SQL +-- Calculates the time interval starting from timestamp 0 and returns the nearest interval boundary to the specified timestamp. +date_bin(interval,source) + +-- Calculates the time interval starting from the origin timestamp and returns the nearest interval boundary to the specified timestamp. 
+date_bin(interval,source,origin)
+
+-- Supported time units for interval:
+-- Years (y), months (mo), weeks (week), days (d), hours (h), minutes (M), seconds (s), milliseconds (ms), microseconds (µs), nanoseconds (ns).
+-- source: Must be of timestamp type.
+```
+
+#### **Parameters**:
+
+| Parameter | Description |
+| :-------- | :----------------------------------------------------------- |
+| interval  | 1. Time interval 2. Supported units: `y`, `mo`, `week`, `d`, `h`, `M`, `s`, `ms`, `µs`, `ns`. |
+| source    | 1. The timestamp column or expression to be calculated. 2. Must be of timestamp type. |
+| origin    | The reference timestamp. |
+
+#### 4.2.1 Syntax Rules
+
+1. If `origin` is not specified, the default reference timestamp is `1970-01-01T00:00:00Z` (Beijing time: `1970-01-01 08:00:00`).
+2. `interval` must be a non-negative number with a time unit. If `interval` is `0ms`, the function returns `source` directly without calculation.
+3. If `origin` or `source` is negative, it represents a time point before the epoch. `date_bin` will calculate and return the relevant time period.
+4. If `source` is `null`, the function returns `null`.
+5. Mixing months and non-month time units (e.g., `1 MONTH 1 DAY`) is not supported due to ambiguity.
+
+> For example, if the starting point is **April 30, 2000**, calculating `1 DAY` first and then `1 MONTH` results in **June 1, 2000**, whereas calculating `1 MONTH` first and then `1 DAY` results in **May 31, 2000**. The resulting dates are different.
+
+#### 4.2.2 Examples
+
+##### Example Data
+
+The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results.
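+
+The rules above boil down to one piece of boundary arithmetic. The sketch below states it informally (it describes the observable behavior, not the engine's internal implementation); the query at the end can be run against `table1` from the sample data to check it against Example 1 below:
+
+```SQL
+-- Informal rule: window_start = origin + floor((source - origin) / interval) * interval
+-- floor() rounds toward negative infinity, which is why a source earlier than
+-- the origin (rule 3 above) still lands on a boundary at or before the source.
+SELECT time, date_bin(1h, time) AS time_bin FROM table1 LIMIT 1;
+```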
+ +#### Example 1: Without Specifying the Origin Timestamp + +```SQL +SELECT + time, + date_bin(1h,time) as time_bin +FROM + table1; +``` + +Result**:** + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.683s +``` + +#### Example 2: Specifying the Origin Timestamp + +```SQL +SELECT + time, + date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.056s +``` + +#### Example 3: Negative Origin + +```SQL +SELECT + time, + date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| 
+|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.203s +``` + +#### Example 4: Interval of 0 + +```SQL +SELECT + time, + date_bin(0ms, time) as time_bin +FROM + table1; +``` + +Result**:** + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.107s +``` + +#### Example 5: Source is NULL + +```SQL +SELECT + arrival_time, + date_bin(1h,arrival_time) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| arrival_time| time_bin| ++-----------------------------+-----------------------------+ +| null| null| +|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +| null| null| +|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| +| null| null| 
+|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00|
+|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00|
+|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00|
+|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00|
++-----------------------------+-----------------------------+
+Total line number = 18
+It costs 0.319s
+```
+
+### 4.3 Extract Function
+
+This function is used to extract the value of a specific part of a date. (Supported from version V2.0.6)
+
+#### 4.3.1 Syntax Definition
+
+```SQL
+EXTRACT (identifier FROM expression)
+```
+
+* Parameter Description
+  * **expression**: `TIMESTAMP` type or a time constant
+  * **identifier**: The valid identifiers and corresponding return value types are shown in the table below.
+
+  | Identifier | Return Type | Return Range |
+  |----------------------|---------------|--------------------|
+  | `YEAR` | `INT64` | `/` |
+  | `QUARTER` | `INT64` | `1-4` |
+  | `MONTH` | `INT64` | `1-12` |
+  | `WEEK` | `INT64` | `1-53` |
+  | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` |
+  | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` |
+  | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` |
+  | `HOUR` | `INT64` | `0-23` |
+  | `MINUTE` | `INT64` | `0-59` |
+  | `SECOND` | `INT64` | `0-59` |
+  | `MS` | `INT64` | `0-999` |
+  | `US` | `INT64` | `0-999` |
+  | `NS` | `INT64` | `0-999` |
+
+
+#### 4.3.2 Usage Example
+
+Using table1 from the [Sample Data](../Reference/Sample-Data.md) as the source data, query the average temperature for the first 12 hours of each day within a certain period.
+
+```SQL
+IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time)
++----------+-----+
+|   fmtdate|avgtp|
++----------+-----+
+|2024-11-28| 86.0|
+|2024-11-29| 85.0|
+|2024-11-30| 90.0|
++----------+-----+
+Total line number = 3
+It costs 0.041s
+```
+
+Introduction to the `Format` function: [Format Function](../SQL-Manual/Basis-Function_timecho.md#_8-2-format-function)
+
+Introduction to the `Date_bin` function: [Date_bin Function](../SQL-Manual/Basis-Function_timecho.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp)
+
+
+## 5. 
Mathematical Functions and Operators + +### 5.1 Mathematical Operators + +| **Operator** | **Description** | +| :----------- | :---------------------------------------------- | +| + | Addition | +| - | Subtraction | +| * | Multiplication | +| / | Division (integer division performs truncation) | +| % | Modulus (remainder) | +| - | Negation | + +### 5.2 Mathematical functions + +| Function Name | Description | Input | Output | Usage | +|:--------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------------------------|:-------------------| :--------- | +| sin | Sine | double, float, INT64, INT32 | double | sin(x) | +| cos | Cosine | double, float, INT64, INT32 | double | cos(x) | +| tan | Tangent | double, float, INT64, INT32 | double | tan(x) | +| asin | Inverse Sine | double, float, INT64, INT32 | double | asin(x) | +| acos | Inverse Cosine | double, float, INT64, INT32 | double | acos(x) | +| atan | Inverse Tangent | double, float, INT64, INT32 | double | atan(x) | +| sinh | Hyperbolic Sine | double, float, INT64, INT32 | double | sinh(x) | +| cosh | Hyperbolic Cosine | double, float, INT64, INT32 | double | cosh(x) | +| tanh | Hyperbolic Tangent | double, float, INT64, INT32 | double | tanh(x) | +| degrees | Converts angle `x` in radians to degrees | double, float, INT64, INT32 | double | degrees(x) | +| radians | Radian Conversion from Degrees | double, float, INT64, INT32 | double | radians(x) | +| abs | Absolute Value | double, float, INT64, INT32 | Same as input type | abs(x) | +| sign | Returns the sign of `x`: - If `x = 0`, returns `0` - If `x > 0`, returns `1` - If `x < 0`, returns `-1` For `double/float` inputs: - If `x = NaN`, returns `NaN` - If `x = +Infinity`, returns `1.0` - If `x = -Infinity`, returns `-1.0` | double, float, INT64, INT32 | Same as input type | sign(x) | +| ceil | Rounds `x` up to the nearest integer | double, float, INT64, INT32 | double | ceil(x) | +| floor | Rounds `x` down to the nearest integer | double, float, INT64, INT32 | double | floor(x) | +| exp | Returns `e^x` (Euler's number raised to the power of `x`) | double, float, INT64, INT32 | double | exp(x) | +| ln | Returns the natural logarithm of `x` | double, float, INT64, INT32 | double | ln(x) | +| log10 | Returns the base 10 logarithm of `x` | double, float, INT64, INT32 | double | log10(x) | +| round | Rounds `x` to the nearest integer | double, float, INT64, INT32 | double | round(x) | +| round | Rounds `x` to `d` decimal places | double, float, INT64, INT32 | double | round(x, d) | +| sqrt | Returns the square root of `x`. | double, float, INT64, INT32 | double | sqrt(x) | +| e | Returns Euler’s number `e`. | | double | e() | +| pi | Pi (π) | | double | pi() | + +## 6. 
Bitwise Functions
+
+> Supported from version V2.0.6
+
+Example raw data is as follows:
+
+```
+IoTDB:database1> select * from bit_table
++-----------------------------+---------+------+-----+
+|                         time|device_id|length|width|
++-----------------------------+---------+------+-----+
+|2025-10-29T15:59:42.957+08:00|       d1|    14|   12|
+|2025-10-29T15:58:59.399+08:00|       d3|    15|   10|
+|2025-10-29T15:59:32.769+08:00|       d2|    13|   12|
++-----------------------------+---------+------+-----+
+
+-- Table creation statement
+CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD);
+
+-- Write data
+INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12);
+```
+
+### 6.1 bit\_count(num, bits)
+
+The `bit_count(num, bits)` function is used to count the number of 1s in the binary representation of the integer `num` under the specified bit width `bits`.
+
+#### 6.1.1 Syntax Definition
+
+```
+bit_count(num, bits) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+
+  * **num**: Any integer value (Int32 or Int64)
+  * **bits**: Integer value, with a valid range of 2~64
+
+Note: An error will be raised if the number of `bits` is insufficient to represent `num` (using **two's complement signed representation**): `Argument exception, the scalar function num must be representable with the bits specified. [num] cannot be represented with [bits] bits.`
+
+* Usage Methods
+
+  * Two specific numbers: `bit_count(9, 64)`
+  * Column and a number: `bit_count(column1, 64)`
+  * Between two columns: `bit_count(column1, column2)`
+
+#### 6.1.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bit_count(2,8) from bit_table
++-----+
+|_col0|
++-----+
+|    1|
++-----+
+-- Two specific numbers
+IoTDB:database1> select distinct bit_count(-5,8) from bit_table
++-----+
+|_col0|
++-----+
+|    7|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bit_count(length,8) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    3|
+|    15|    4|
+|    13|    3|
++------+-----+
+-- Insufficient bits
+IoTDB:database1> select length,bit_count(length,2) from bit_table
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits.
+```
+
+### 6.2 bitwise\_and(x, y)
+
+The `bitwise_and(x, y)` function performs a logical AND operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise AND operation result.
+ +#### 6.2.1 Syntax Definition + +``` +bitwise_and(x, y) -> INT64 -- The return type is Int64 +``` + +* Parameter Description + + * ​**x, y**​: Must be integer values of data type Int32 or Int64 +* Usage Methods + + * Two specific numbers: `bitwise_and(19, 25)` + * Column and a number: `bitwise_and(column1, 25)` + * Between two columns: `bitwise_and(column1, column2)` + +#### 6.2.2 Usage Examples + +``` +--Two specific numbers +IoTDB:database1> select distinct bitwise_and(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 17| ++-----+ +--Column and a number +IoTDB:database1> select length, bitwise_and(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 8| +| 15| 9| +| 13| 9| ++------+-----+ +--Between two columns +IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 12| +| 15| 10| 10| +| 13| 12| 12| ++------+-----+-----+ +``` + +### 6.3 bitwise\_not(x) + +The `bitwise_not(x)`function performs a logical NOT operation on each bit of the integer x based on its two's complement representation, and returns the bitwise NOT operation result. + +#### 6.3.1 Syntax Definition + +``` +bitwise_not(x) -> INT64 -- The return type is Int64 +``` + +* Parameter Description + + * ​**x**​: Must be an integer value of data type Int32 or Int64 +* Usage Methods + + * Specific number: `bitwise_not(5)` + * Single column operation: `bitwise_not(column1)` + +#### 6.3.2 Usage Examples + +``` +-- Specific number +IoTDB:database1> select distinct bitwise_not(5) from bit_table ++-----+ +|_col0| ++-----+ +| -6| ++-----+ +-- Single column +IoTDB:database1> select length, bitwise_not(length) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| -15| +| 15| -16| +| 13| -14| ++------+-----+ +``` + +### 6.4 bitwise\_or(x, y) + +The `bitwise_or(x,y)`function performs a logical OR operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise OR operation result. + +#### 6.4.1 Syntax Definition + +``` +bitwise_or(x, y) -> INT64 -- The return type is Int64 +``` + +* Parameter Description + + * ​**x, y**​: Must be integer values of data type Int32 or Int64 +* Usage Methods + + * Two specific numbers: `bitwise_or(19, 25)` + * Column and a number: `bitwise_or(column1, 25)` + * Between two columns: `bitwise_or(column1, column2)` + +#### 6.4.2 Usage Examples + +``` +-- Two specific numbers +IoTDB:database1> select distinct bitwise_or(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 27| ++-----+ +-- Column and a number +IoTDB:database1> select length,bitwise_or(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 31| +| 15| 31| +| 13| 29| ++------+-----+ +-- Between two columns +IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 14| +| 15| 10| 15| +| 13| 12| 13| ++------+-----+-----+ +``` + +### 6.5 bitwise\_xor(x, y) + +The `bitwise_xor(x,y)`function performs a logical XOR (exclusive OR) operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise XOR operation result. XOR rule: same bits result in 0, different bits result in 1. 
+ +#### 6.5.1 Syntax Definition + +``` +bitwise_xor(x, y) -> INT64 -- The return type is Int64 +``` + +* Parameter Description + + * ​**x, y**​: Must be integer values of data type Int32 or Int64 +* Usage Methods + + * Two specific numbers: `bitwise_xor(19, 25)` + * Column and a number: `bitwise_xor(column1, 25)` + * Between two columns: `bitwise_xor(column1, column2)` + +#### 6.5.2 Usage Examples + +``` +-- Two specific numbers +IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 10| ++-----+ +-- Column and a number +IoTDB:database1> select length,bitwise_xor(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 23| +| 15| 22| +| 13| 20| ++------+-----+ +-- Between two columns +IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 2| +| 15| 10| 5| +| 13| 12| 1| ++------+-----+-----+ +``` + +### 6.6 bitwise\_left\_shift(value, shift) + +The `bitwise_left_shift(value, shift)`function returns the result of shifting the binary representation of integer `value`left by `shift`bits. The left shift operation moves bits towards the higher-order direction, filling the vacated lower-order bits with 0s, and discarding the higher-order bits that overflow. Equivalent to: `value << shift`. + +#### 6.6.1 Syntax Definition + +``` +bitwise_left_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value +``` + +* Parameter Description + + * ​**value**​: The integer value to shift left. Must be of data type Int32 or Int64. + * ​**shift**​: The number of bits to shift. Must be of data type Int32 or Int64. +* Usage Methods + + * Two specific numbers: `bitwise_left_shift(1, 2)` + * Column and a number: `bitwise_left_shift(column1, 2)` + * Between two columns: `bitwise_left_shift(column1, column2)` + +#### 6.6.2 Usage Examples + +``` +--Two specific numbers +IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table ++-----+ +|_col0| ++-----+ +| 4| ++-----+ +-- Column and a number +IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 56| +| 15| 60| +| 13| 52| ++------+-----+ +-- Between two columns +IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| ++------+-----+-----+ +``` + +### 6.7 bitwise\_right\_shift(value, shift) + +The `bitwise_right_shift(value, shift)`function returns the result of logically (unsigned) right shifting the binary representation of integer `value`by `shift`bits. The logical right shift operation moves bits towards the lower-order direction, filling the vacated higher-order bits with 0s, and discarding the lower-order bits that overflow. + +#### 6.7.1 Syntax Definition + +``` +bitwise_right_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value +``` + +* Parameter Description + + * ​**value**​: The integer value to shift right. Must be of data type Int32 or Int64. + * ​**shift**​: The number of bits to shift. Must be of data type Int32 or Int64. 
+* Usage Methods + + * Two specific numbers: `bitwise_right_shift(8, 3)` + * Column and a number: `bitwise_right_shift(column1, 3)` + * Between two columns: `bitwise_right_shift(column1, column2)` + +#### 6.7.2 Usage Examples + +``` +--Two specific numbers +IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table ++-----+ +|_col0| ++-----+ +| 1| ++-----+ +--Column and a number +IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--Between two columns +IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| +``` + +### 6.8 bitwise\_right\_shift\_arithmetic(value, shift) + +The `bitwise_right_shift_arithmetic(value, shift)`function returns the result of arithmetically right shifting the binary representation of integer `value`by `shift`bits. The arithmetic right shift operation moves bits towards the lower-order direction, discarding the lower-order bits that overflow, and filling the vacated higher-order bits with the sign bit (0 for positive numbers, 1 for negative numbers) to preserve the sign of the number. + +#### 6.8.1 Syntax Definition + +``` +bitwise_right_shift_arithmetic(value, shift) -> [same as value]-- The return type is the same as the data type of value +``` + +* Parameter Description + + * ​**value**​: The integer value to shift right. Must be of data type Int32 or Int64. + * ​**shift**​: The number of bits to shift. Must be of data type Int32 or Int64. +* Usage Methods: + + * Two specific numbers: `bitwise_right_shift_arithmetic(12, 2)` + * Column and a number: `bitwise_right_shift_arithmetic(column1, 64)` + * Between two columns: `bitwise_right_shift_arithmetic(column1, column2)` + +#### 6.8.2 Usage Examples + +``` +--Two specific numbers +IoTDB:database1> select distinct bitwise_right_shift_arithmetic(12,2) from bit_table ++-----+ +|_col0| ++-----+ +| 3| ++-----+ +-- Column and a number +IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--Between two columns +IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| ++------+-----+-----+ +``` + + +## 7. Conditional Expressions + +### 7.1 CASE + +CASE expressions come in two forms: **Simple CASE** and **Searched CASE**. + +#### 7.1.1 Simple CASE + +The simple form evaluates each value expression from left to right until it finds a match with the given expression: + +```SQL +CASE expression + WHEN value THEN result + [ WHEN ... ] + [ ELSE result ] +END +``` + +If a matching value is found, the corresponding result is returned. If no match is found, the result from the `ELSE` clause (if provided) is returned; otherwise, `NULL` is returned. + +Example: + +```SQL +SELECT a, + CASE a + WHEN 1 THEN 'one' + WHEN 2 THEN 'two' + ELSE 'many' + END +``` + +#### 7.1.2 Searched CASE + +The searched form evaluates each Boolean condition from left to right until a `TRUE` condition is found, then returns the corresponding result: + +```SQL +CASE + WHEN condition THEN result + [ WHEN ... 
] + [ ELSE result ] +END +``` + +If no condition evaluates to `TRUE`, the `ELSE` clause result (if provided) is returned; otherwise, `NULL` is returned. + +Example: + +```SQL +SELECT a, b, + CASE + WHEN a = 1 THEN 'aaa' + WHEN b = 2 THEN 'bbb' + ELSE 'ccc' + END +``` + +### 7.2 COALESCE + +Returns the first non-null value from the given list of parameters. + +```SQL +coalesce(value1, value2[, ...]) +``` + +## 8. Conversion Functions + +### 8.1 Conversion Functions + +#### 8.1.1 cast(value AS type) → type + +Explicitly converts a value to the specified type. This can be used to convert strings (`VARCHAR`) to numeric types or numeric values to string types. Starting from V2.0.8, OBJECT type can be explicitly cast to STRING type. + +If the conversion fails, a runtime error is thrown. + +Example: + +```SQL +SELECT * + FROM table1 + WHERE CAST(time AS DATE) + IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE)); +``` + +#### 8.1.2 try_cast(value AS type) → type + +Similar to `CAST()`. If the conversion fails, returns `NULL` instead of throwing an error. + +Example: + +```SQL +SELECT * + FROM table1 + WHERE try_cast(time AS DATE) + IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE)); +``` + +### 8.2 Format Function + +This function generates and returns a formatted string based on a specified format string and input arguments. Similar to Java’s `String.format` or C’s `printf`, it allows developers to construct dynamic string templates using placeholder syntax. Predefined format specifiers in the template are replaced precisely with corresponding argument values, producing a complete string that adheres to specific formatting requirements. + +#### 8.2.1 Syntax + +```SQL +format(pattern, ...args) -> STRING +``` + +**Parameters** + +* `pattern`: A format string containing static text and one or more format specifiers (e.g., `%s`, `%d`), or any expression returning a `STRING`/`TEXT` type. +* `args`: Input arguments to replace format specifiers. Constraints: + * Number of arguments ≥ 1. + * Multiple arguments must be comma-separated (e.g., `arg1, arg2`). + * Total arguments can exceed the number of specifiers in `pattern` but cannot be fewer, otherwise an exception is triggered. + +**Return Value** + +* Formatted result string of type `STRING`. + +#### 8.2.2 Usage Examples + +1. Format Floating-Point Numbers + ```SQL + IoTDB:database1> SELECT format('%.5f', humidity) FROM table1 WHERE humidity = 35.4; + +--------+ + | _col0| + +--------+ + |35.40000| + +--------+ + ``` +2. Format Integers + ```SQL + IoTDB:database1> SELECT format('%03d', 8) FROM table1 LIMIT 1; + +-----+ + |_col0| + +-----+ + | 008| + +-----+ + ``` +3. 
Format Dates and Timestamps
+
+* Locale-Specific Date
+
+```SQL
+IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) FROM table1 LIMIT 1;
++-----------------------+
+|                  _col0|
++-----------------------+
+|Monday, January 1, 2024|
++-----------------------+
+```
+
+* Remove Timezone Information
+
+```SQL
+IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1;
++-----------------------+
+|                  _col0|
++-----------------------+
+|2024-01-01 00:00:00.000|
++-----------------------+
+```
+
+* Second-Level Timestamp Precision
+
+```SQL
+IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1;
++-------------------+
+|              _col0|
++-------------------+
+|2024-01-01 00:00:00|
++-------------------+
+```
+
+* Date/Time Format Symbols
+
+| **Symbol** | **Description** |
+| ---------- | --------------- |
+| 'H' | 24-hour format (two digits, zero-padded), i.e. 00 - 23 |
+| 'I' | 12-hour format (two digits, zero-padded), i.e. 01 - 12 |
+| 'k' | 24-hour format (no padding), i.e. 0 - 23 |
+| 'l' | 12-hour format (no padding), i.e. 1 - 12 |
+| 'M' | Minute (two digits, zero-padded), i.e. 00 - 59 |
+| 'S' | Second (two digits, zero-padded; supports leap seconds), i.e. 00 - 60 |
+| 'L' | Millisecond (three digits, zero-padded), i.e. 000 - 999 |
+| 'N' | Nanosecond (nine digits, zero-padded), i.e. 000000000 - 999999999 |
+| 'p' | Locale-specific lowercase AM/PM marker (e.g., "am", "pm"). Prefix with `T` to force uppercase (e.g., "AM"). |
+| 'z' | RFC 822 timezone offset from GMT (e.g., `-0800`). Adjusts for daylight saving. Uses the JVM's default timezone for `long`/`Long`/`Date`. |
+| 'Z' | Timezone abbreviation (e.g., "PST"). Adjusts for daylight saving. Uses the JVM's default timezone; the Formatter's timezone overrides the argument's timezone if specified. |
+| 's' | Seconds since Unix epoch (1970-01-01 00:00:00 UTC), i.e. Long.MIN\_VALUE/1000 to Long.MAX\_VALUE/1000 |
+| 'Q' | Milliseconds since Unix epoch, i.e. Long.MIN\_VALUE to Long.MAX\_VALUE |
+
+* Common Date/Time Conversion Characters
+
+| **Symbol** | **Description** |
+| ---------- | --------------- |
+| 'B' | Locale-specific full month name, for example "January", "February" |
+| 'b' | Locale-specific abbreviated month name, for example "Jan", "Feb" |
+| 'h' | Same as `b` |
+| 'A' | Locale-specific full weekday name, for example "Sunday", "Monday" |
+| 'a' | Locale-specific short weekday name, for example "Sun", "Mon" |
+| 'C' | Year divided by 100 (two digits, zero-padded) |
+| 'Y' | Year (minimum 4 digits, zero-padded) |
+| 'y' | Last two digits of year (zero-padded) |
+| 'j' | Day of year (three digits, zero-padded) |
+| 'm' | Month (two digits, zero-padded) |
+| 'd' | Day of month (two digits, zero-padded) |
+| 'e' | Day of month (no padding) |
+
+4. Format Strings
+   ```SQL
+   IoTDB:database1> SELECT format('The measurement status is: %s', status) FROM table2 LIMIT 1;
+   +-------------------------------+
+   |                          _col0|
+   +-------------------------------+
+   |The measurement status is: true|
+   +-------------------------------+
+   ```
+5. Format Percentage Sign
+   ```SQL
+   IoTDB:database1> SELECT format('%s%%', 99.9) FROM table1 LIMIT 1;
+   +-----+
+   |_col0|
+   +-----+
+   |99.9%|
+   +-----+
+   ```
+
+#### 8.2.3 Format Conversion Failure Scenarios
+
+1. Type Mismatch Errors
+
+* Timestamp Type Conflict
+
+  If the format specifier includes time-related tokens (e.g., `%Y-%m-%d`) but the argument:
+
+  * Is a non-`DATE`/`TIMESTAMP` type value.
+  * Requires sub-day precision (e.g., `%H`, `%M`) but the argument is not `TIMESTAMP`.
+
+```SQL
+-- Example 1
+IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float)
+
+-- Example 2
+IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float)
+```
+
+* Floating-Point Type Conflict
+
+  Using `%f` with non-numeric arguments (e.g., strings or booleans):
+
+```SQL
+IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean)
+```
+
+2. Argument Count Mismatch
+   The number of arguments must equal or exceed the number of format specifiers.
+
+   ```SQL
+   IoTDB:database1> SELECT format('%.5f %03d', humidity) FROM table1 WHERE humidity = 35.4;
+   Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d')
+   ```
+3. Invalid Invocation Errors
+
+   Triggered if:
+
+   * Total arguments < 2 (must include `pattern` and at least one argument).
+   * `pattern` is not of type `STRING`/`TEXT`.
+
+```SQL
+-- Example 1
+IoTDB:database1> select format('%s') from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+
+-- Example 2
+IoTDB:database1> select format(123, humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+```
+
+
+## 9. String Functions and Operators
+
+### 9.1 String operators
+
+#### 9.1.1 || Operator
+
+The `||` operator is used for string concatenation and functions the same as the `concat` function.
+
+#### 9.1.2 LIKE Statement
+
+The `LIKE` statement is used for pattern matching. For detailed usage, refer to Pattern Matching: [LIKE](#_10-1-like).
+
+### 9.2 String Functions
+
+| Function Name | Description | Input | Output | Usage |
+| :------------ | :---------- | :---- | :----- | :---- |
+| `length` | Returns the number of characters in a string (not byte length).
| `string` (the string whose length is to be calculated) | INT32 | length(string) | +| `upper` | Converts all letters in a string to uppercase. | string | String | upper(string) | +| `lower` | Converts all letters in a string to lowercase. | string | String | lower(string) | +| `trim` | Removes specified leading and/or trailing characters from a string. **Parameters:** - `specification` (optional): Specifies which side to trim: - `BOTH`: Removes characters from both sides (default). - `LEADING`: Removes characters from the beginning. - `TRAILING`: Removes characters from the end. - `trimcharacter` (optional): Character to be removed (default is whitespace). - `string`: The target string. | string | String | trim([ [ specification ] [ trimcharacter ] FROM ] string) Example:`trim('!' FROM '!foo!');` —— `'foo'` | +| `strpos` | Returns the position of the first occurrence of `subStr` in `sourceStr`. **Notes:** - Position starts at `1`. - Returns `0` if `subStr` is not found. - Positioning is based on characters, not byte arrays. | `sourceStr` (string to be searched), `subStr` (substring to find) | INT32 | strpos(sourceStr, subStr) | +| `starts_with` | Checks if `sourceStr` starts with the specified `prefix`. | `sourceStr`, `prefix` | Boolean | starts_with(sourceStr, prefix) | +| `ends_with` | Checks if `sourceStr` ends with the specified `suffix`. | `sourceStr`, `suffix` | Boolean | ends_with(sourceStr, suffix) | +| `concat` | Concatenates `string1, string2, ..., stringN`. Equivalent to the `\|\|` operator. | `string`, `text` | String | concat(str1, str2, ...) or str1 \|\| str2 ... | +| `strcmp` | Compares two strings lexicographically. **Returns:** - `-1` if `str1 < str2` - `0` if `str1 = str2` - `1` if `str1 > str2` - `NULL` if either `str1` or `str2` is `NULL` | `string1`, `string2` | INT32 | strcmp(str1, str2) | +| `replace` | Removes all occurrences of `search` in `string`. | `string`, `search` | String | replace(string, search) | +| `replace` | Replaces all occurrences of `search` in `string` with `replace`. | `string`, `search`, `replace` | String | replace(string, search, replace) | +| `substring` | Extracts a substring from `start_index` to the end of the string. **Notes:** - `start_index` starts at `1`. - Returns `NULL` if input is `NULL`. - Throws an error if `start_index` is greater than string length. | `string`, `start_index` | String | substring(string from start_index)or substring(string, start_index) | +| `substring` | Extracts a substring of `length` characters starting from `start_index`. **Notes:** - `start_index` starts at `1`. - Returns `NULL` if input is `NULL`. - Throws an error if `start_index` is greater than string length. - Throws an error if `length` is negative. - If `start_index + length` exceeds `int.MAX`, an overflow error may occur. | `string`, `start_index`, `length` | String | substring(string from start_index for length) or substring(string, start_index, length) | + +## 10. Pattern Matching Functions + +### 10.1 LIKE + +#### 10.1.1 Usage + +The `LIKE `operator is used to compare a value with a pattern. It is commonly used in the `WHERE `clause to match specific patterns within strings. + +#### 10.1.2 Syntax + +```SQL +... column [NOT] LIKE 'pattern' ESCAPE 'character'; +``` + +#### 10.1.3 Match rules + +- Matching characters is case-sensitive +- The pattern supports two wildcard characters: + - `_` matches any single character + - `%` matches zero or more characters + +#### 10.1.4 Notes + +- `LIKE` pattern matching applies to the entire string by default. 
Therefore, to match a sequence anywhere within a string, the pattern must start and end with a percent sign.
+- To match the escape character itself, double it (e.g., `\\` to match `\`).
+
+#### 10.1.5 Examples
+
+#### **Example 1: Match Strings Starting with a Specific Character**
+
+- **Description:** Find all names that start with the letter `E` (e.g., `Europe`).
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'E%';
+```
+
+#### **Example 2: Exclude a Specific Pattern**
+
+- **Description:** Find all names that do **not** start with the letter `E`.
+
+```SQL
+SELECT * FROM table1 WHERE continent NOT LIKE 'E%';
+```
+
+#### **Example 3: Match Strings of a Specific Length**
+
+- **Description:** Find all names that start with `A`, end with `a`, and have exactly two characters in between (e.g., `Asia`).
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'A__a';
+```
+
+#### **Example 4: Escape Special Characters**
+
+- **Description:** Find all names that start with `South_` (e.g., `South_America`). The underscore (`_`) is a wildcard character, so it needs to be escaped using `\`.
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\';
+```
+
+#### **Example 5: Match the Escape Character Itself**
+
+- **Description:** Find all names that start with `South\`. Since `\` is the escape character, it must be escaped using `\\`.
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\';
+```
+
+### 10.2 regexp_like
+
+#### 10.2.1 Usage
+
+Evaluates whether the regular expression pattern is present within the given string.
+
+#### 10.2.2 Syntax
+
+```SQL
+regexp_like(string, pattern);
+```
+
+#### 10.2.3 Notes
+
+- The pattern for `regexp_like` only needs to be contained within the string, and does not need to match the entire string.
+- To match the entire string, use the `^` and `$` anchors.
+- `^` signifies the "start of the string," and `$` signifies the "end of the string."
+- Regular expressions use the Java-defined regex syntax, with the following exceptions to be aware of:
+  - Multiline mode
+    1. Enabled by: `(?m)`.
+    2. Recognizes only `\n` as the line terminator.
+    3. Does not support the `(?d)` flag, and its use is prohibited.
+  - Case-insensitive matching
+    1. Enabled by: `(?i)`.
+    2. Based on Unicode rules, it does not support context-dependent and localized matching.
+    3. Does not support the `(?u)` flag, and its use is prohibited.
+  - Character classes
+    1. Within character classes (e.g., `[A-Z123]`), `\Q` and `\E` are not supported and are treated as literals.
+  - Unicode character classes (`\p{prop}`)
+    1. Underscores in names: All underscores in names must be removed (e.g., `OldItalic` instead of `Old_Italic`).
+    2. Scripts: Specify directly, without the need for `Is`, `script=`, or `sc=` prefixes (e.g., `\p{Hiragana}`).
+    3. Blocks: Must use the `In` prefix; `block=` or `blk=` prefixes are not supported (e.g., `\p{InMongolian}`).
+    4. Categories: Specify directly, without the need for `Is`, `general_category=`, or `gc=` prefixes (e.g., `\p{L}`).
+    5. Binary properties: Specify directly, without `Is` (e.g., `\p{NoncharacterCodePoint}`).
+
+#### 10.2.4 Examples
+
+#### Example 1: **Matching strings containing a specific pattern**
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true
+```
+
+- **Explanation**: Determines whether the string '1a 2b 14m' contains a substring that matches the pattern `\d+b`.
+  - `\d+` means "one or more digits".
+
+#### 10.2.4 Examples
+
+#### **Example 1: Matching strings containing a specific pattern**
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true
+```
+
+- **Explanation**: Determines whether the string `'1a 2b 14m'` contains a substring that matches the pattern `\d+b`.
+  - `\d+` means "one or more digits".
+  - `b` represents the letter b.
+  - In `'1a 2b 14m'`, the substring `'2b'` matches this pattern, so it returns `true`.
+
+
+#### **Example 2: Matching the entire string**
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false
+```
+
+- **Explanation**: Checks if the string `'1a 2b 14m'` matches the pattern `^\\d+b$` exactly.
+  - `\d+` means "one or more digits".
+  - `b` represents the letter b.
+  - `'1a 2b 14m'` does not match this pattern: the `^` and `$` anchors require the entire string to be one or more digits followed by `b`, which `'1a 2b 14m'` is not, so it returns `false`.
+
+## 11. Timeseries Windowing Functions
+
+The sample data is as follows:
+
+```SQL
+IoTDB> SELECT * FROM bid;
++-----------------------------+--------+-----+
+|                         time|stock_id|price|
++-----------------------------+--------+-----+
+|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
++-----------------------------+--------+-----+
+
+-- Create table statement
+CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD);
+-- Insert data
+INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0);
+```
+
+### 11.1 HOP
+
+#### 11.1.1 Function Description
+
+The HOP function segments data into overlapping time windows for analysis, assigning each row to all windows that overlap with its timestamp. If windows overlap (when SLIDE < SIZE), data will be duplicated across multiple windows.
+
+#### 11.1.2 Function Definition
+
+```SQL
+HOP(data, timecol, size, slide[, origin])
+```
+
+#### 11.1.3 Parameter Description
+
+| Parameter | Type | Attributes | Description |
+| --------- | ------ | ------------------------------- | ----------------------- |
+| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table |
+| TIMECOL | Scalar | String (default: 'time') | Time column |
+| SIZE | Scalar | Long integer | Window size |
+| SLIDE | Scalar | Long integer | Sliding step |
+| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time |
+
+
+#### 11.1.4 Returned Results
+
+The HOP function returns:
+
+* `window_start`: Window start time (inclusive)
+* `window_end`: Window end time (exclusive)
+* Pass-through columns: All input columns from DATA
+
+#### 11.1.5 Usage Example
+
+```SQL
+IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
+
+### 11.2 SESSION
+
+#### 11.2.1 Function Description
+
+The SESSION function groups data into sessions based on time intervals. It checks the time gap between consecutive rows: rows with gaps smaller than the threshold (GAP) are grouped into the current window, while larger gaps trigger a new window.
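+
+As a quick sketch of how the gap rule plays out on the sample data (the expected grouping below is derived by hand from the rule above, not captured output): AAPL's rows at 09:05, 09:07, and 09:09 are each at most 2 minutes apart and form a single 3-row session, while TESL's 09:15 row arrives 8 minutes after 09:07 and opens a new session.
+
+```SQL
+-- Same windowing as the usage example in 11.2.5, aggregating with count(*):
+-- expected: one 3-row session for AAPL; a 2-row and a 1-row session for TESL.
+SELECT window_start, window_end, stock_id, count(*) AS rows_in_session
+FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time, TIMECOL => 'time', GAP => 2m)
+GROUP BY window_start, window_end, stock_id;
+```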
+ +#### 11.2.2 Function Definition + +```SQL +SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap) +``` +#### 11.2.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| TIMECOL | Scalar | String (default: 'time') | Time column name | +| GAP | Scalar | Long integer | Session gap threshold | + +#### 11.2.4 Returned Results + +The SESSION function returns: + +* `window_start`: Time of the first row in the session +* `window_end`: Time of the last row in the session +* Pass-through columns: All input columns from DATA + +#### 11.2.5 Usage Example + +```SQL +IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY SESSION when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.3 VARIATION + +#### 11.3.1 Function Description + +The VARIATION function groups data based on value differences. The first row becomes the baseline for the first window. Subsequent rows are compared to the baseline—if the difference is within the threshold (DELTA), they join the current window; otherwise, a new window starts with that row as the new baseline. 
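+
+The choice of DELTA directly controls how readily new windows open. A hand-derived sketch on the sample data (not captured output): with `DELTA => 4.0`, every AAPL price (100.0, 103.0, 102.0) stays within 4.0 of the 100.0 baseline, so AAPL keeps a single window 0, while TESL's 195.0 differs from its 200.0 baseline by 5.0 and still opens window 1.
+
+```SQL
+-- Loosened threshold compared with the usage example in 11.3.5:
+SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time, COL => 'price', DELTA => 4.0);
+```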
+ +#### 11.3.2 Function Definition + +```sql +VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta) +``` + +#### 11.3.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| COL | Scalar | String | Column for difference calculation | +| DELTA | Scalar | Float | Difference threshold | + +#### 11.3.4 Returned Results + +The VARIATION function returns: + +* `window_index`: Window identifier +* Pass-through columns: All input columns from DATA + +#### 11.3.5 Usage Example + +```sql +IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY VARIATION when combined with GROUP BY +IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id; ++-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.4 CAPACITY + +#### 11.4.1 Function Description + +The CAPACITY function groups data into fixed-size windows, where each window contains up to SIZE rows. 
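+
+Since windows are filled purely by row count within each partition, how the sample data splits depends only on SIZE. A hand-derived sketch (not captured output): with `SIZE => 3`, each stock has exactly three rows, so every partition produces a single window with `window_index` 0.
+
+```SQL
+SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 3);
+```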
+ +#### 11.4.2 Function Definition + +```sql +CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size) +``` + +#### 11.4.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| SIZE | Scalar | Long integer | Window size (row count) | + +#### 11.4.4 Returned Results + +The CAPACITY function returns: + +* `window_index`: Window identifier +* Pass-through columns: All input columns from DATA + +#### 11.4.5 Usage Example + +```sql +IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY COUNT when combined with GROUP BY +IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id; ++-----------------------------+-----------------------------+--------+-----+ +| start_time| end_time|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|101.5| +|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.5 TUMBLE + +#### 11.5.1 Function Description + +The TUMBLE function assigns each row to a non-overlapping, fixed-size time window based on a timestamp attribute. 
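+
+Window boundaries align to ORIGIN, so shifting ORIGIN shifts every window. A sketch (hand-derived, not captured output; the bare timestamp literal for ORIGIN is an assumption here): with the origin moved to 09:05, the sample rows fall into [09:05, 09:15) and [09:15, 09:25) instead of windows aligned to 09:00.
+
+```SQL
+SELECT * FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m, ORIGIN => 2021-01-01T09:05:00);
+```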
+ +#### 11.5.2 Function Definition + +```sql +TUMBLE(data, timecol, size[, origin]) +``` +#### 11.5.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | --------------------------------- | ------------------------- | +| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | +| TIMECOL | Scalar | String (default: 'time') | Time column | +| SIZE | Scalar | Long integer (positive) | Window size | +| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | + +#### 11.5.4 Returned Results + +The TUMBLE function returns: + +* `window_start`: Window start time (inclusive) +* `window_end`: Window end time (exclusive) +* Pass-through columns: All input columns from DATA + +#### 11.5.5 Usage Example + +```SQL +IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.6 CUMULATE + +#### 11.6.1 Function Description + +The CUMULATE function creates expanding windows from an initial window, maintaining the same start time while incrementally extending the end time by STEP until reaching SIZE. Each window contains all elements within its range. For example, with a 1-hour STEP and 24-hour SIZE, daily windows would be: `[00:00, 01:00)`, `[00:00, 02:00)`, ..., `[00:00, 24:00)`. 
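+
+Because every expanded window shares the same start time, one input row is emitted once for each expanded window that covers it. A hand-derived sketch against the sample data: with `STEP => 2m` and `SIZE => 10m`, the AAPL row at 09:05 lands in [09:00, 09:06), [09:00, 09:08), and [09:00, 09:10), so it appears three times in the output of the usage example in 11.6.5.
+
+```SQL
+-- Counting total emitted rows for the sample data (13, per the 11.6.5 output):
+SELECT count(*) AS emitted_rows FROM CUMULATE(DATA => bid, TIMECOL => 'time', STEP => 2m, SIZE => 10m);
+```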
+ +#### 11.6.2 Function Definition + +```sql +CUMULATE(data, timecol, size, step[, origin]) +``` + +#### 11.6.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | --------------------------------- | --------------------------------------------------- | +| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | +| TIMECOL | Scalar | String (default: 'time') | Time column | +| SIZE | Scalar | Long integer (positive) | Window size (must be an integer multiple of STEP) | +| STEP | Scalar | Long integer (positive) | Expansion step | +| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | + +> Note: An error `Cumulative table function requires size must be an integral multiple of step` occurs if SIZE is not divisible by STEP. + +#### 11.6.4 Returned Results + +The CUMULATE function returns: + +* `window_start`: Window start time (inclusive) +* `window_end`: Window end time (exclusive) +* Pass-through columns: All input columns from DATA + +#### 11.6.5 Usage Example + +```sql +IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| TESL| 201.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| 
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00| TESL| 195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00| TESL| 195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00| AAPL| 100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| AAPL| 101.5| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` diff --git a/src/UserGuide/Master/Table/SQL-Manual/Featured-Functions_timecho.md b/src/UserGuide/Master/Table/SQL-Manual/Featured-Functions_timecho.md index 715fcba68..97cefa7a3 100644 --- a/src/UserGuide/Master/Table/SQL-Manual/Featured-Functions_timecho.md +++ b/src/UserGuide/Master/Table/SQL-Manual/Featured-Functions_timecho.md @@ -823,7 +823,7 @@ Result: **Description**: Reads binary content from an `OBJECT` type column and returns a `BLOB` type (raw binary data of the object). -> Supported since V2.0.8-beta +> Supported since V2.0.8 **Syntax:** ```SQL diff --git a/src/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md b/src/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md index fa7a7f49f..ed6c776bb 100644 --- a/src/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md +++ b/src/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md @@ -67,7 +67,7 @@ intervalField IoTDB supports the following three methods to fill NULL values: -1. **PREVIOUS Fill:** Uses the most recent non-NULL value from the same column to fill NULL values. Starting from V2.0.8-beta, only this method supports the OBJECT type. +1. **PREVIOUS Fill:** Uses the most recent non-NULL value from the same column to fill NULL values. Starting from V2.0.8, only this method supports the OBJECT type. 2. **LINEAR Fill:** Applies linear interpolation using the nearest previous and next non-NULL values in the same column. 3. **CONSTANT Fill:** Fills NULL values with a specified constant. diff --git a/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements.md b/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements.md index 0500f17f0..35fee992c 100644 --- a/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements.md +++ b/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements.md @@ -1,3 +1,6 @@ +--- +redirectTo: SQL-Maintenance-Statements_apache.html +--- - -# Management Statements - -## 1. 
Status Inspection - -### 1.1 View Current Tree/Table Model - -**Syntax:** - -```SQL -showCurrentSqlDialectStatement - : SHOW CURRENT_SQL_DIALECT - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CURRENT_SQL_DIALECT -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TABLE| -+-----------------+ -``` - -### 1.2 View Current User - -**Syntax:** - -```SQL -showCurrentUserStatement - : SHOW CURRENT_USER - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CURRENT_USER -+-----------+ -|CurrentUser| -+-----------+ -| root| -+-----------+ -``` - -### 1.3 View Connected Database - -**Syntax:** - -```SQL -showCurrentDatabaseStatement - : SHOW CURRENT_DATABASE - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CURRENT_DATABASE; -+---------------+ -|CurrentDatabase| -+---------------+ -| null| -+---------------+ - -IoTDB> USE test; - -IoTDB> SHOW CURRENT_DATABASE; -+---------------+ -|CurrentDatabase| -+---------------+ -| test| -+---------------+ -``` - -### 1.4 View Cluster Version - -**Syntax:** - -```SQL -showVersionStatement - : SHOW VERSION - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW VERSION -+-------+---------+ -|Version|BuildInfo| -+-------+---------+ -|2.0.1.2| 1ca4008| -+-------+---------+ -``` - -### 1.5 View Key Cluster Parameters - -**Syntax:** - -```SQL -showVariablesStatement - : SHOW VARIABLES - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW VARIABLES -+----------------------------------+-----------------------------------------------------------------+ -| Variable| Value| -+----------------------------------+-----------------------------------------------------------------+ -| ClusterName| defaultCluster| -| DataReplicationFactor| 1| -| SchemaReplicationFactor| 1| -| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| -|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| TimePartitionOrigin| 0| -| TimePartitionInterval| 604800000| -| ReadConsistencyLevel| strong| -| SchemaRegionPerDataNode| 1| -| DataRegionPerDataNode| 0| -| SeriesSlotNum| 1000| -| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| -| DiskSpaceWarningThreshold| 0.05| -| TimestampPrecision| ms| -+----------------------------------+-----------------------------------------------------------------+ -``` - -### 1.6 View Cluster ID - -**Syntax:** - -```SQL -showClusterIdStatement - : SHOW (CLUSTERID | CLUSTER_ID) - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CLUSTER_ID -+------------------------------------+ -| ClusterId| -+------------------------------------+ -|40163007-9ec1-4455-aa36-8055d740fcda| -``` - -### 1.7 View Server Time - -Shows time of the DataNode server directly connected to client - -**Syntax:** - -```SQL -showCurrentTimestampStatement - : SHOW CURRENT_TIMESTAMP - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CURRENT_TIMESTAMP -+-----------------------------+ -| CurrentTimestamp| -+-----------------------------+ -|2025-02-17T11:11:52.987+08:00| -+-----------------------------+ -``` - - -### 1.8 View Region Information - -**Description**: Displays regions' information of the current cluster. 
- -**Syntax**: - -```SQL -showRegionsStatement - : SHOW REGIONS - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW REGIONS -``` - -**Result**: - -```SQL -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | -| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| -| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -``` - -### 1.9 View Available Nodes - -**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state. - -> This feature is supported starting from v2.0.8. - -**Syntax**: - -```SQL -showAvailableUrlsStatement - : SHOW AVAILABLE URLS - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW AVAILABLE URLS -``` - -**Result**: - -```SQL -+----------+-------+ -|RpcAddress|RpcPort| -+----------+-------+ -| 0.0.0.0| 6667| -+----------+-------+ -``` - - -## 2. Status Configuration - -### 2.1 Set Connection Tree/Table Model - -**Syntax:** - -```SQL -SET SQL_DIALECT EQ (TABLE | TREE) -``` - -**Example:** - -```SQL -IoTDB> SET SQL_DIALECT=TABLE -IoTDB> SHOW CURRENT_SQL_DIALECT -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TABLE| -+-----------------+ -``` - -### 2.2 Update Configuration Items - -**Syntax:** - -```SQL -setConfigurationStatement - : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? - ; - -propertyAssignments - : property (',' property)* - ; - -property - : identifier EQ propertyValue - ; - -propertyValue - : DEFAULT - | expression - ; -``` - -**Example:** - -```SQL -IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; -``` - -### 2.3 Load Manually Modified Configuration - -**Syntax:** - -```SQL -loadConfigurationStatement - : LOAD CONFIGURATION localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> LOAD CONFIGURATION ON LOCAL; -``` - -### 2.4 Set System Status - -**Syntax:** - -```SQL -setSystemStatusStatement - : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> SET SYSTEM TO READONLY ON CLUSTER; -``` - -## 3. Data Management - -### 3.1 Flush Memory Table to Disk - -**Syntax:** - -```SQL -flushStatement - : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? - ; - -booleanValue - : TRUE | FALSE - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> FLUSH test_db TRUE ON LOCAL; -``` - -### 3.2 Clear DataNode Cache - -**Syntax:** - -```SQL -clearCacheStatement - : CLEAR clearCacheOptions? CACHE localOrClusterMode? 
- ; - -clearCacheOptions - : ATTRIBUTE - | QUERY - | ALL - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> CLEAR ALL CACHE ON LOCAL; -``` - -## 4. Data Repair - -### 4.1 Start Background TsFile Repair - -**Syntax:** - -```SQL -startRepairDataStatement - : START REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> START REPAIR DATA ON CLUSTER; -``` - -### 4.2 Pause TsFile Repair - -**Syntax:** - -```SQL -stopRepairDataStatement - : STOP REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> STOP REPAIR DATA ON CLUSTER; -``` - -## 5. Query Operations - -### 5.1 View Active Queries - -**Syntax:** - -```SQL -showQueriesStatement - : SHOW (QUERIES | QUERY PROCESSLIST) - (WHERE where=booleanExpression)? - (ORDER BY sortItem (',' sortItem)*)? - limitOffsetClause - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW QUERIES WHERE elapsed_time > 30 -+-----------------------+-----------------------------+-----------+------------+------------+----+ -| query_id| start_time|datanode_id|elapsed_time| statement|user| -+-----------------------+-----------------------------+-----------+------------+------------+----+ -|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00| 1| 32.283|show queries|root| -+-----------------------+-----------------------------+-----------+------------+------------+----+ -``` - -### 5.2 Terminate Queries - -**Syntax:** - -```SQL -killQueryStatement - : KILL (QUERY queryId=string | ALL QUERIES) - ; -``` - -**Example:** - -```SQL -IoTDB> KILL QUERY 20250108_101015_00000_1; -- teminate specific query -IoTDB> KILL ALL QUERIES; -- teminate all query -``` - -### 5.3 Query Performance Analysis - -#### 5.3.1 View Execution Plan - -**Syntax:** - -```SQL -EXPLAIN -``` - -Detailed syntax reference: [EXPLAIN STATEMENT](../User-Manual/Query-Performance-Analysis.md#_1-explain-statement) - -**Example:** - -```SQL -IoTDB> explain select * from t1 -+-----------------------------------------------------------------------------------------------+ -| distribution plan| -+-----------------------------------------------------------------------------------------------+ -| ┌─────────────────────────────────────────────┐ | -| │OutputNode-4 │ | -| │OutputColumns-[time, device_id, type, speed] │ | -| │OutputSymbols: [time, device_id, type, speed]│ | -| └─────────────────────────────────────────────┘ | -| │ | -| │ | -| ┌─────────────────────────────────────────────┐ | -| │Collect-21 │ | -| │OutputSymbols: [time, device_id, type, speed]│ | -| └─────────────────────────────────────────────┘ | -| ┌───────────────────────┴───────────────────────┐ | -| │ │ | -|┌─────────────────────────────────────────────┐ ┌───────────┐ | -|│TableScan-19 │ │Exchange-28│ | -|│QualifiedTableName: test.t1 │ └───────────┘ | -|│OutputSymbols: [time, device_id, type, speed]│ │ | -|│DeviceNumber: 1 │ │ | -|│ScanOrder: ASC │ ┌─────────────────────────────────────────────┐| -|│PushDownOffset: 0 │ │TableScan-20 │| -|│PushDownLimit: 0 │ │QualifiedTableName: test.t1 │| -|│PushDownLimitToEachDevice: false │ │OutputSymbols: [time, device_id, type, speed]│| -|│RegionId: 2 │ │DeviceNumber: 1 │| -|└─────────────────────────────────────────────┘ │ScanOrder: ASC │| -| │PushDownOffset: 0 │| -| │PushDownLimit: 0 │| -| │PushDownLimitToEachDevice: false │| -| │RegionId: 1 │| -| └─────────────────────────────────────────────┘| 
-+-----------------------------------------------------------------------------------------------+ -``` - -#### 5.3.2 Analyze Query Performance - -**Syntax:** - -```SQL -EXPLAIN ANALYZE [VERBOSE] -``` - -Detailed syntax reference: [EXPLAIN ANALYZE STATEMENT](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-statement) - -**Example:** - -```SQL -IoTDB> explain analyze verbose select * from t1 -+-----------------------------------------------------------------------------------------------+ -| Explain Analyze| -+-----------------------------------------------------------------------------------------------+ -|Analyze Cost: 38.860 ms | -|Fetch Partition Cost: 9.888 ms | -|Fetch Schema Cost: 54.046 ms | -|Logical Plan Cost: 10.102 ms | -|Logical Optimization Cost: 17.396 ms | -|Distribution Plan Cost: 2.508 ms | -|Dispatch Cost: 22.126 ms | -|Fragment Instances Count: 2 | -| | -|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| -| Total Wall Time: 18 ms | -| Cost of initDataQuerySource: 6.153 ms | -| Seq File(unclosed): 1, Seq File(closed): 0 | -| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | -| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | -| Query Statistics: | -| loadBloomFilterFromCacheCount: 0 | -| loadBloomFilterFromDiskCount: 0 | -| loadBloomFilterActualIOSize: 0 | -| loadBloomFilterTime: 0.000 | -| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | -| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | -| loadTimeSeriesMetadataFromCacheCount: 0 | -| loadTimeSeriesMetadataFromDiskCount: 0 | -| loadTimeSeriesMetadataActualIOSize: 0 | -| constructAlignedChunkReadersMemCount: 1 | -| constructAlignedChunkReadersMemTime: 0.294 | -| loadChunkFromCacheCount: 0 | -| loadChunkFromDiskCount: 0 | -| loadChunkActualIOSize: 0 | -| pageReadersDecodeAlignedMemCount: 1 | -| pageReadersDecodeAlignedMemTime: 0.047 | -| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | -| CPU Time: 5.523 ms | -| output: 2 rows | -| HasNext() Called Count: 6 | -| Next() Called Count: 5 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 31]: CollectNode(CollectOperator) | -| CPU Time: 5.512 ms | -| output: 2 rows | -| HasNext() Called Count: 6 | -| Next() Called Count: 5 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 29]: TableScanNode(TableScanOperator) | -| CPU Time: 5.439 ms | -| output: 1 rows | -| HasNext() Called Count: 3 -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| DeviceNumber: 1 | -| CurrentDeviceIndex: 0 | -| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | -| CPU Time: 0.053 ms | -| output: 1 rows | -| HasNext() Called Count: 2 | -| Next() Called Count: 1 | -| Estimated Memory Size: : 131072 | -| | -|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| -| Total Wall Time: 13 ms | -| Cost of initDataQuerySource: 5.725 ms | -| Seq File(unclosed): 1, Seq File(closed): 0 | -| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | -| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | -| Query Statistics: | -| loadBloomFilterFromCacheCount: 0 | -| loadBloomFilterFromDiskCount: 0 | -| loadBloomFilterActualIOSize: 0 | -| loadBloomFilterTime: 0.000 | -| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | -| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | -| loadTimeSeriesMetadataFromCacheCount: 0 | -| loadTimeSeriesMetadataFromDiskCount: 0 | -| loadTimeSeriesMetadataActualIOSize: 0 | -| constructAlignedChunkReadersMemCount: 1 | -| constructAlignedChunkReadersMemTime: 
0.001 | -| loadChunkFromCacheCount: 0 | -| loadChunkFromDiskCount: 0 | -| loadChunkActualIOSize: 0 | -| pageReadersDecodeAlignedMemCount: 1 | -| pageReadersDecodeAlignedMemTime: 0.007 | -| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | -| CPU Time: 0.270 ms | -| output: 1 rows | -| HasNext() Called Count: 3 | -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 30]: TableScanNode(TableScanOperator) | -| CPU Time: 0.250 ms | -| output: 1 rows | -| HasNext() Called Count: 3 | -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| DeviceNumber: 1 | -| CurrentDeviceIndex: 0 | -+-----------------------------------------------------------------------------------------------+ -``` diff --git a/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_apache.md b/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_apache.md new file mode 100644 index 000000000..36808b379 --- /dev/null +++ b/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_apache.md @@ -0,0 +1,652 @@ + + +# Management Statements + +## 1. Status Inspection + +### 1.1 View Current Tree/Table Mode + +**Syntax:** + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 1.2 View Current User + +**Syntax:** + +```SQL +showCurrentUserStatement + : SHOW CURRENT_USER + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_USER ++-----------+ +|CurrentUser| ++-----------+ +| root| ++-----------+ +``` + +### 1.3 View Connected Database + +**Syntax:** + +```SQL +showCurrentDatabaseStatement + : SHOW CURRENT_DATABASE + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| null| ++---------------+ + +IoTDB> USE test; + +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| test| ++---------------+ +``` + +### 1.4 View Cluster Version + +**Syntax:** + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW VERSION ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.5 View Key Cluster Parameters + +**Syntax:** + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW VARIABLES ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.6 View Cluster ID + +**Syntax:** + 
+```SQL +showClusterIdStatement + : SHOW (CLUSTERID | CLUSTER_ID) + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CLUSTER_ID ++------------------------------------+ +| ClusterId| ++------------------------------------+ +|40163007-9ec1-4455-aa36-8055d740fcda| ++------------------------------------+ +``` + +### 1.7 View Server Time + +Shows the time of the DataNode server that the client is directly connected to. + +**Syntax:** + +```SQL +showCurrentTimestampStatement + : SHOW CURRENT_TIMESTAMP + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_TIMESTAMP ++-----------------------------+ +| CurrentTimestamp| ++-----------------------------+ +|2025-02-17T11:11:52.987+08:00| ++-----------------------------+ +``` + + +### 1.8 View Region Information + +**Description**: Displays region information for the current cluster. + +**Syntax**: + +```SQL +showRegionsStatement + : SHOW REGIONS + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW REGIONS +``` + +**Result**: + +```SQL ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | +| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| +| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +``` + +### 1.9 View Available Nodes + +**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state. + +> This feature is supported starting from v2.0.8-beta. + +**Syntax**: + +```SQL +showAvailableUrlsStatement + : SHOW AVAILABLE URLS + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW AVAILABLE URLS +``` + +**Result**: + +```SQL ++----------+-------+ +|RpcAddress|RpcPort| ++----------+-------+ +| 0.0.0.0| 6667| ++----------+-------+ +``` + + +## 2. Status Configuration + +### 2.1 Set Connection Tree/Table Mode + +**Syntax:** + +```SQL +SET SQL_DIALECT EQ (TABLE | TREE) +``` + +**Example:** + +```SQL +IoTDB> SET SQL_DIALECT=TABLE +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 2.2 Update Configuration Items + +**Syntax:** + +```SQL +setConfigurationStatement + : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? + ; + +propertyAssignments + : property (',' property)* + ; + +property + : identifier EQ propertyValue + ; + +propertyValue + : DEFAULT + | expression + ; +``` + +**Example:** + +```SQL +IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; +```
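+
+Per the `propertyValue` grammar above, a configuration item can also be reset to its default by assigning `DEFAULT`. A sketch (reusing the parameter from the example above):
+
+```SQL
+IoTDB> SET CONFIGURATION disk_space_warning_threshold=DEFAULT ON 1;
+```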
+ +### 2.3 Load Manually Modified Configuration + +**Syntax:** + +```SQL +loadConfigurationStatement + : LOAD CONFIGURATION localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> LOAD CONFIGURATION ON LOCAL; +``` + +### 2.4 Set System Status + +**Syntax:** + +```SQL +setSystemStatusStatement + : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> SET SYSTEM TO READONLY ON CLUSTER; +``` + +## 3. Data Management + +### 3.1 Flush Memory Table to Disk + +**Syntax:** + +```SQL +flushStatement + : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? + ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> FLUSH test_db TRUE ON LOCAL; +``` + +### 3.2 Clear DataNode Cache + +**Syntax:** + +```SQL +clearCacheStatement + : CLEAR clearCacheOptions? CACHE localOrClusterMode? + ; + +clearCacheOptions + : ATTRIBUTE + | QUERY + | ALL + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> CLEAR ALL CACHE ON LOCAL; +``` + +## 4. Data Repair + +### 4.1 Start Background TsFile Repair + +**Syntax:** + +```SQL +startRepairDataStatement + : START REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> START REPAIR DATA ON CLUSTER; +``` + +### 4.2 Pause TsFile Repair + +**Syntax:** + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. Query Operations + +### 5.1 View Active Queries + +**Syntax:** + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)?
+ limitOffsetClause + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW QUERIES WHERE elapsed_time > 30 ++-----------------------+-----------------------------+-----------+------------+------------+----+ +| query_id| start_time|datanode_id|elapsed_time| statement|user| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00| 1| 32.283|show queries|root| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +``` + +### 5.2 Terminate Queries + +**Syntax:** + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**Example:** + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; -- terminate a specific query +IoTDB> KILL ALL QUERIES; -- terminate all queries +``` + +### 5.3 Query Performance Analysis + +#### 5.3.1 View Execution Plan + +**Syntax:** + +```SQL +EXPLAIN +``` + +Detailed syntax reference: [EXPLAIN STATEMENT](../User-Manual/Query-Performance-Analysis.md#_1-explain-statement) + +**Example:** + +```SQL +IoTDB> explain select * from t1 ++-----------------------------------------------------------------------------------------------+ +| distribution plan| ++-----------------------------------------------------------------------------------------------+ +| ┌─────────────────────────────────────────────┐ | +| │OutputNode-4 │ | +| │OutputColumns-[time, device_id, type, speed] │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| │ | +| │ | +| ┌─────────────────────────────────────────────┐ | +| │Collect-21 │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| ┌───────────────────────┴───────────────────────┐ | +| │ │ | +|┌─────────────────────────────────────────────┐ ┌───────────┐ | +|│TableScan-19 │ │Exchange-28│ | +|│QualifiedTableName: test.t1 │ └───────────┘ | +|│OutputSymbols: [time, device_id, type, speed]│ │ | +|│DeviceNumber: 1 │ │ | +|│ScanOrder: ASC │ ┌─────────────────────────────────────────────┐| +|│PushDownOffset: 0 │ │TableScan-20 │| +|│PushDownLimit: 0 │ │QualifiedTableName: test.t1 │| +|│PushDownLimitToEachDevice: false │ │OutputSymbols: [time, device_id, type, speed]│| +|│RegionId: 2 │ │DeviceNumber: 1 │| +|└─────────────────────────────────────────────┘ │ScanOrder: ASC │| +| │PushDownOffset: 0 │| +| │PushDownLimit: 0 │| +| │PushDownLimitToEachDevice: false │| +| │RegionId: 1 │| +| └─────────────────────────────────────────────┘| ++-----------------------------------------------------------------------------------------------+ +``` + +#### 5.3.2 Analyze Query Performance + +**Syntax:** + +```SQL +EXPLAIN ANALYZE [VERBOSE] +``` + +Detailed syntax reference: [EXPLAIN ANALYZE STATEMENT](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-statement) + +**Example:** + +```SQL +IoTDB> explain analyze verbose select * from t1 ++-----------------------------------------------------------------------------------------------+ +| Explain Analyze| ++-----------------------------------------------------------------------------------------------+ +|Analyze Cost: 38.860 ms | +|Fetch Partition Cost: 9.888 ms | +|Fetch Schema Cost: 54.046 ms | +|Logical Plan Cost: 10.102 ms | +|Logical Optimization Cost: 17.396 ms | +|Distribution Plan Cost: 2.508 ms | +|Dispatch Cost: 22.126 ms | +|Fragment Instances Count: 2 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 
2][State: FINISHED]| +| Total Wall Time: 18 ms | +| Cost of initDataQuerySource: 6.153 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.294 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.047 | +| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 5.523 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 31]: CollectNode(CollectOperator) | +| CPU Time: 5.512 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 29]: TableScanNode(TableScanOperator) | +| CPU Time: 5.439 ms | +| output: 1 rows | +| HasNext() Called Count: 3 +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | +| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | +| CPU Time: 0.053 ms | +| output: 1 rows | +| HasNext() Called Count: 2 | +| Next() Called Count: 1 | +| Estimated Memory Size: : 131072 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| +| Total Wall Time: 13 ms | +| Cost of initDataQuerySource: 5.725 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.001 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.007 | +| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 0.270 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 30]: TableScanNode(TableScanOperator) | +| CPU Time: 0.250 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | ++-----------------------------------------------------------------------------------------------+ +``` diff --git a/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md b/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md new file mode 100644 index 000000000..8b975d660 --- /dev/null +++ 
b/src/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md @@ -0,0 +1,652 @@ + + +# Management Statements + +## 1. Status Inspection + +### 1.1 View Current Tree/Table Mode + +**Syntax:** + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 1.2 View Current User + +**Syntax:** + +```SQL +showCurrentUserStatement + : SHOW CURRENT_USER + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_USER ++-----------+ +|CurrentUser| ++-----------+ +| root| ++-----------+ +``` + +### 1.3 View Connected Database + +**Syntax:** + +```SQL +showCurrentDatabaseStatement + : SHOW CURRENT_DATABASE + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| null| ++---------------+ + +IoTDB> USE test; + +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| test| ++---------------+ +``` + +### 1.4 View Cluster Version + +**Syntax:** + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW VERSION ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.5 View Key Cluster Parameters + +**Syntax:** + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW VARIABLES ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.6 View Cluster ID + +**Syntax:** + +```SQL +showClusterIdStatement + : SHOW (CLUSTERID | CLUSTER_ID) + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CLUSTER_ID ++------------------------------------+ +| ClusterId| ++------------------------------------+ +|40163007-9ec1-4455-aa36-8055d740fcda| ++------------------------------------+ +``` + +### 1.7 View Server Time + +Shows the time of the DataNode server that the client is directly connected to. + +**Syntax:** + +```SQL +showCurrentTimestampStatement + : SHOW CURRENT_TIMESTAMP + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_TIMESTAMP ++-----------------------------+ +| CurrentTimestamp| ++-----------------------------+ +|2025-02-17T11:11:52.987+08:00| ++-----------------------------+ +``` + + +### 1.8 View Region Information + +**Description**: Displays region information for the current cluster.
+ +**Syntax**: + +```SQL +showRegionsStatement + : SHOW REGIONS + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW REGIONS +``` + +**Result**: + +```SQL ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | +| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| +| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +``` + +### 1.9 View Available Nodes + +**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state. + +> This feature is supported starting from v2.0.8. + +**Syntax**: + +```SQL +showAvailableUrlsStatement + : SHOW AVAILABLE URLS + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW AVAILABLE URLS +``` + +**Result**: + +```SQL ++----------+-------+ +|RpcAddress|RpcPort| ++----------+-------+ +| 0.0.0.0| 6667| ++----------+-------+ +``` + + +## 2. Status Configuration + +### 2.1 Set Connection Tree/Table Mode + +**Syntax:** + +```SQL +SET SQL_DIALECT EQ (TABLE | TREE) +``` + +**Example:** + +```SQL +IoTDB> SET SQL_DIALECT=TABLE +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 2.2 Update Configuration Items + +**Syntax:** + +```SQL +setConfigurationStatement + : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? + ; + +propertyAssignments + : property (',' property)* + ; + +property + : identifier EQ propertyValue + ; + +propertyValue + : DEFAULT + | expression + ; +``` + +**Example:** + +```SQL +IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; +``` + +### 2.3 Load Manually Modified Configuration + +**Syntax:** + +```SQL +loadConfigurationStatement + : LOAD CONFIGURATION localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> LOAD CONFIGURATION ON LOCAL; +``` + +### 2.4 Set System Status + +**Syntax:** + +```SQL +setSystemStatusStatement + : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> SET SYSTEM TO READONLY ON CLUSTER; +``` + +## 3. Data Management + +### 3.1 Flush Memory Table to Disk + +**Syntax:** + +```SQL +flushStatement + : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? + ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> FLUSH test_db TRUE ON LOCAL; +``` + +### 3.2 Clear DataNode Cache + +**Syntax:** + +```SQL +clearCacheStatement + : CLEAR clearCacheOptions? CACHE localOrClusterMode? 
+    ;
+
+clearCacheOptions
+    : ATTRIBUTE
+    | QUERY
+    | ALL
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> CLEAR ALL CACHE ON LOCAL;
+```
+
+## 4. Data Repair
+
+### 4.1 Start Background TsFile Repair
+
+**Syntax:**
+
+```SQL
+startRepairDataStatement
+    : START REPAIR DATA localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> START REPAIR DATA ON CLUSTER;
+```
+
+### 4.2 Pause TsFile Repair
+
+**Syntax:**
+
+```SQL
+stopRepairDataStatement
+    : STOP REPAIR DATA localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> STOP REPAIR DATA ON CLUSTER;
+```
+
+## 5. Query Operations
+
+### 5.1 View Active Queries
+
+**Syntax:**
+
+```SQL
+showQueriesStatement
+    : SHOW (QUERIES | QUERY PROCESSLIST)
+        (WHERE where=booleanExpression)?
+        (ORDER BY sortItem (',' sortItem)*)?
+        limitOffsetClause
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW QUERIES WHERE elapsed_time > 30
++-----------------------+-----------------------------+-----------+------------+------------+----+
+|               query_id|                   start_time|datanode_id|elapsed_time|   statement|user|
++-----------------------+-----------------------------+-----------+------------+------------+----+
+|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00|          1|      32.283|show queries|root|
++-----------------------+-----------------------------+-----------+------------+------------+----+
+```
+
+### 5.2 Terminate Queries
+
+**Syntax:**
+
+```SQL
+killQueryStatement
+    : KILL (QUERY queryId=string | ALL QUERIES)
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> KILL QUERY 20250108_101015_00000_1;  -- terminate the specified query
+IoTDB> KILL ALL QUERIES;                    -- terminate all queries
+```
+
+### 5.3 Query Performance Analysis
+
+#### 5.3.1 View Execution Plan
+
+**Syntax:**
+
+```SQL
+EXPLAIN
+```
+
+Detailed syntax reference: [EXPLAIN STATEMENT](../User-Manual/Query-Performance-Analysis.md#_1-explain-statement)
+
+**Example:**
+
+```SQL
+IoTDB> explain select * from t1
++-----------------------------------------------------------------------------------------------+
+|                                                                              distribution plan|
++-----------------------------------------------------------------------------------------------+
+|                 ┌─────────────────────────────────────────────┐                               |
+|                 │OutputNode-4                                 │                               |
+|                 │OutputColumns-[time, device_id, type, speed] │                               |
+|                 │OutputSymbols: [time, device_id, type, speed]│                               |
+|                 └─────────────────────────────────────────────┘                               |
+|                                        │                                                      |
+|                                        │                                                      |
+|                 ┌─────────────────────────────────────────────┐                               |
+|                 │Collect-21                                   │                               |
+|                 │OutputSymbols: [time, device_id, type, speed]│                               |
+|                 └─────────────────────────────────────────────┘                               |
+|                       ┌───────────────────────┴───────────────────────┐                       |
+|                       │                                               │                       |
+|┌─────────────────────────────────────────────┐ ┌───────────┐                                  |
+|│TableScan-19                                 │ │Exchange-28│                                  |
+|│QualifiedTableName: test.t1                  │ └───────────┘                                  |
+|│OutputSymbols: [time, device_id, type, speed]│       │                                        |
+|│DeviceNumber: 1                              │       │                                        |
+|│ScanOrder: ASC                               │ ┌─────────────────────────────────────────────┐|
+|│PushDownOffset: 0                            │ │TableScan-20                                 │|
+|│PushDownLimit: 0                             │ │QualifiedTableName: test.t1                  │|
+|│PushDownLimitToEachDevice: false             │ │OutputSymbols: [time, device_id, type, speed]│|
+|│RegionId: 2                                  │ │DeviceNumber: 1                              │|
+|└─────────────────────────────────────────────┘ │ScanOrder: ASC                               │|
+|                                                │PushDownOffset: 0                            │|
+|                                                │PushDownLimit: 0                             │|
+|                                                │PushDownLimitToEachDevice: false             │|
+|                                                │RegionId: 1                                  │|
+|                                                └─────────────────────────────────────────────┘|
++-----------------------------------------------------------------------------------------------+ +``` + +#### 5.3.2 Analyze Query Performance + +**Syntax:** + +```SQL +EXPLAIN ANALYZE [VERBOSE] +``` + +Detailed syntax reference: [EXPLAIN ANALYZE STATEMENT](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-statement) + +**Example:** + +```SQL +IoTDB> explain analyze verbose select * from t1 ++-----------------------------------------------------------------------------------------------+ +| Explain Analyze| ++-----------------------------------------------------------------------------------------------+ +|Analyze Cost: 38.860 ms | +|Fetch Partition Cost: 9.888 ms | +|Fetch Schema Cost: 54.046 ms | +|Logical Plan Cost: 10.102 ms | +|Logical Optimization Cost: 17.396 ms | +|Distribution Plan Cost: 2.508 ms | +|Dispatch Cost: 22.126 ms | +|Fragment Instances Count: 2 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| +| Total Wall Time: 18 ms | +| Cost of initDataQuerySource: 6.153 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.294 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.047 | +| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 5.523 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 31]: CollectNode(CollectOperator) | +| CPU Time: 5.512 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 29]: TableScanNode(TableScanOperator) | +| CPU Time: 5.439 ms | +| output: 1 rows | +| HasNext() Called Count: 3 +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | +| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | +| CPU Time: 0.053 ms | +| output: 1 rows | +| HasNext() Called Count: 2 | +| Next() Called Count: 1 | +| Estimated Memory Size: : 131072 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| +| Total Wall Time: 13 ms | +| Cost of initDataQuerySource: 5.725 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 
0.001 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.007 | +| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 0.270 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 30]: TableScanNode(TableScanOperator) | +| CPU Time: 0.250 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | ++-----------------------------------------------------------------------------------------------+ +``` diff --git a/src/UserGuide/Master/Table/SQL-Manual/Select-Clause.md b/src/UserGuide/Master/Table/SQL-Manual/Select-Clause.md index 57abbe5de..1a6da4ae1 100644 --- a/src/UserGuide/Master/Table/SQL-Manual/Select-Clause.md +++ b/src/UserGuide/Master/Table/SQL-Manual/Select-Clause.md @@ -1,3 +1,6 @@ +--- +redirectTo: Select-Clause_apache.html +--- - -# SELECT Clauses - -**SELECT Clause** specifies the columns included in the query results. - -## 1. Syntax Overview - -```sql -SELECT setQuantifier? selectItem (',' selectItem)* - -selectItem - : expression (AS? identifier)? #selectSingle - | tableName '.' ASTERISK (AS columnAliases)? #selectAll - | ASTERISK #selectAll - ; -setQuantifier - : DISTINCT - | ALL - ; -``` - -- It supports aggregate functions (e.g., `SUM`, `AVG`, `COUNT`) and window functions, logically executed last in the query process. -- DISTINCT Keyword: `SELECT DISTINCT column_name` ensures that the values in the query results are unique, removing duplicates. -- COLUMNS Function: The COLUMNS function is supported in the SELECT clause for column filtering. It can be combined with expressions, allowing the expression's logic to apply to all columns selected by the function. - -## 2. Detailed Syntax: - -Each `selectItem` can take one of the following forms: - -1. **Expression**: `expression [[AS] column_alias]` defines a single output column and optionally assigns an alias. -2. **All Columns from a Relation**: `relation.*` selects all columns from a specified relation. Column aliases are not allowed in this case. -3. **All Columns in the Result Set**: `*` selects all columns returned by the query. Column aliases are not allowed. - -Usage scenarios for DISTINCT: - -1. **SELECT Statement**: Use DISTINCT in the SELECT statement to remove duplicate items from the query results. - -2. **Aggregate Functions**: When used with aggregate functions, DISTINCT only processes non-duplicate rows in the input dataset. - -3. **AGROUP BY Clause**: Use ALL and DISTINCT quantifiers in the GROUP BY clause to determine whether each duplicate grouping set produces distinct output rows. - -`COLUMNS` Function: - -1. **`COLUMNS(*)`**: Matches all columns and supports combining with expressions. -2. **`COLUMNS(regexStr) ? AS identifier`**: Regular expression matching - - Selects columns whose names match the specified regular expression `(regexStr)` and supports combining with expressions. - - Allows renaming columns by referencing groups captured by the regular expression. If `AS` is omitted, the original column name is displayed in the format `_coln_original_name` (where `n` is the column’s position in the result table). - - Renaming Syntax: - - Use parentheses () in regexStr to define capture groups. - - Reference captured groups in identifier using `'$index'`. 
- - Note: The identifier must be enclosed in double quotes if it contains special characters like `$`. - -## 3. Example Data - - -The [Example Data page](../Reference/Sample-Data.md)page provides SQL statements to construct table schemas and insert data. By downloading and executing these statements in the IoTDB CLI, you can import the data into IoTDB. This data can be used to test and run the example SQL queries included in this documentation, allowing you to reproduce the described results. - -### 3.1 Selection List - -#### 3.1.1 Star Expression - -The asterisk (`*`) selects all columns in a table. Note that it cannot be used with most functions, except for cases like `COUNT(*)`. - -**Example**: Selecting all columns from a table. - - -```sql -SELECT * FROM table1; -``` - -Results: - -```sql -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| -|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| -|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| -|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| -|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -Total line number = 18 -It costs 0.653s -``` - -#### 3.1.2 Aggregate Functions - -Aggregate functions summarize multiple rows into a single value. When aggregate functions are present in the `SELECT` clause, the query is treated as an **aggregate query**. All expressions in the query must either be part of an aggregate function or specified in the [GROUP BY clause](../SQL-Manual/GroupBy-Clause.md). - -**Example 1**: Total number of rows in a table. 
- -```sql -SELECT count(*) FROM table1; -``` - -Results: - -```sql -+-----+ -|_col0| -+-----+ -| 18| -+-----+ -Total line number = 1 -It costs 0.091s -``` - -**Example 2**: Total rows grouped by region. - -```sql -SELECT region, count(*) - FROM table1 - GROUP BY region; -``` - -Results: - -```sql -+------+-----+ -|region|_col1| -+------+-----+ -| 上海| 9| -| 北京| 9| -+------+-----+ -Total line number = 2 -It costs 0.071s -``` - -#### 3.1.3 Aliases - -The `AS` keyword assigns an alias to selected columns, improving readability by overriding existing column names. - -**Example 1**: Original table. - - -```sql -IoTDB> SELECT * FROM table1; -``` - -Results: - -```sql -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| -|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| -|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| -|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| -|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -Total line number = 18 -It costs 0.653s -``` - -**Example 2**: Assigning an alias to a single column. - -```sql -IoTDB> SELECT device_id - AS device - FROM table1; -``` - -Results: - -```sql -+------+ -|device| -+------+ -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -+------+ -Total line number = 18 -It costs 0.053s -``` - -**Example 3:** Assigning aliases to all columns. 
- -```sql -IoTDB> SELECT table1.* - AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) - FROM table1; -``` - -Results: - -```sql -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| -|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| -|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| -|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| -|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| -|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| -|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -Total line number = 18 -It costs 0.189s -``` - -#### 3.1.4 Object Type Query - -> Supported since V2.0.8-beta - -**Example 1: Directly querying Object type data** - -```sql -IoTDB:database1> SELECT s1 FROM table1 WHERE device_id = 'tag1' -+------------+ -| s1| -+------------+ -|(Object) 5 B| -+------------+ -Total line number = 1 -It costs 0.428s -``` - -**Example 2: Retrieving raw content of Object type data using `read_object` function** - -```sql -IoTDB:database1> SELECT read_object(s1) FROM table1 WHERE device_id = 'tag1' -+------------+ -| _col0| -+------------+ -|0x696f746462| -+------------+ -Total line number = 1 -It costs 0.188s -``` - - -### 3.2 Columns Function - -1. 
Without combining expressions -```sql --- Query data from columns whose names start with 'm' -IoTDB:database1> select columns('^m.*') from table1 limit 5 -+--------+-----------+ -|model_id|maintenance| -+--------+-----------+ -| E| 180| -| E| 180| -| C| 90| -| C| 90| -| C| 90| -+--------+-----------+ - - --- Query columns whose names start with 'o' - throw an exception if no columns match -IoTDB:database1> select columns('^o.*') from table1 limit 5 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' - - --- Query data from columns whose names start with 'm' and rename them with 'series_' prefix -IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 -+---------------+------------------+ -|series_model_id|series_maintenance| -+---------------+------------------+ -| E| 180| -| E| 180| -| C| 90| -| C| 90| -| C| 90| -+---------------+------------------+ -``` - -2. With Expression Combination - -- Single COLUMNS Function -```sql --- Query the minimum value of all columns -IoTDB:database1> select min(columns(*)) from table1 -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -``` - -- Multiple COLUMNS Functions in Same Expression - -> Usage Restriction: When multiple COLUMNS functions appear in the same expression, their parameters must be identical. - -```sql --- Query the sum of minimum and maximum values for columns starting with 'h' -IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 -+--------------+ -|_col0_humidity| -+--------------+ -| 79.899994| -+--------------+ - --- Error Case: Non-Identical COLUMNS Functions -IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported -``` - -- Multiple COLUMNS Functions in Different Expressions - -```sql --- Query minimum of 'h'-columns and maximum of 'h'-columns separately -IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 -+--------------+--------------+ -|_col0_humidity|_col1_humidity| -+--------------+--------------+ -| 34.8| 45.1| -+--------------+--------------+ - --- Query minimum of 'h'-columns and maximum of 'te'-columns -IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 -+--------------+-----------------+ -|_col0_humidity|_col1_temperature| -+--------------+-----------------+ -| 34.8| 90.0| -+--------------+-----------------+ -``` - -3. 
In Where Clause - -```sql --- Query data where all 'h'-columns must be > 40 (equivalent to) -IoTDB:database1> select * from table1 where columns('^h.*') > 40 -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ - ---Alternative syntax -IoTDB:database1> select * from table1 where humidity > 40 -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -``` - -## 4. Column Order in the Result Set - -- **Column Order**: The order of columns in the result set matches the order specified in the `SELECT` clause. -- **Multi-column Expressions**: If a selection expression produces multiple columns, their order follows the order in the source relation.p. \ No newline at end of file diff --git a/src/UserGuide/Master/Table/SQL-Manual/Select-Clause_apache.md b/src/UserGuide/Master/Table/SQL-Manual/Select-Clause_apache.md new file mode 100644 index 000000000..57abbe5de --- /dev/null +++ b/src/UserGuide/Master/Table/SQL-Manual/Select-Clause_apache.md @@ -0,0 +1,421 @@ + + +# SELECT Clauses + +**SELECT Clause** specifies the columns included in the query results. + +## 1. Syntax Overview + +```sql +SELECT setQuantifier? selectItem (',' selectItem)* + +selectItem + : expression (AS? identifier)? #selectSingle + | tableName '.' ASTERISK (AS columnAliases)? #selectAll + | ASTERISK #selectAll + ; +setQuantifier + : DISTINCT + | ALL + ; +``` + +- It supports aggregate functions (e.g., `SUM`, `AVG`, `COUNT`) and window functions, logically executed last in the query process. +- DISTINCT Keyword: `SELECT DISTINCT column_name` ensures that the values in the query results are unique, removing duplicates. +- COLUMNS Function: The COLUMNS function is supported in the SELECT clause for column filtering. It can be combined with expressions, allowing the expression's logic to apply to all columns selected by the function. + +## 2. Detailed Syntax: + +Each `selectItem` can take one of the following forms: + +1. **Expression**: `expression [[AS] column_alias]` defines a single output column and optionally assigns an alias. +2. 
**All Columns from a Relation**: `relation.*` selects all columns from a specified relation. Column aliases are not allowed in this case.
+3. **All Columns in the Result Set**: `*` selects all columns returned by the query. Column aliases are not allowed.
+
+Usage scenarios for DISTINCT:
+
+1. **SELECT Statement**: Use DISTINCT in the SELECT statement to remove duplicate items from the query results.
+
+2. **Aggregate Functions**: When used with aggregate functions, DISTINCT only processes non-duplicate rows in the input dataset.
+
+3. **GROUP BY Clause**: Use ALL and DISTINCT quantifiers in the GROUP BY clause to determine whether each duplicate grouping set produces distinct output rows.
+
+`COLUMNS` Function:
+
+1. **`COLUMNS(*)`**: Matches all columns and supports combining with expressions.
+2. **`COLUMNS(regexStr) (AS identifier)?`**: Regular expression matching.
+    - Selects columns whose names match the specified regular expression `(regexStr)` and supports combining with expressions.
+    - Allows renaming columns by referencing groups captured by the regular expression. If `AS` is omitted, the original column name is displayed in the format `_coln_original_name` (where `n` is the column’s position in the result table).
+    - Renaming Syntax:
+        - Use parentheses `()` in `regexStr` to define capture groups.
+        - Reference captured groups in `identifier` using `'$index'`.
+        - Note: The identifier must be enclosed in double quotes if it contains special characters like `$`.
+
+## 3. Example Data
+
+
+The [Example Data page](../Reference/Sample-Data.md) provides SQL statements to construct table schemas and insert data. By downloading and executing these statements in the IoTDB CLI, you can import the data into IoTDB. This data can be used to test and run the example SQL queries included in this documentation, allowing you to reproduce the described results.
+
+### 3.1 Selection List
+
+#### 3.1.1 Star Expression
+
+The asterisk (`*`) selects all columns in a table. Note that it cannot be used with most functions, except for cases like `COUNT(*)`.
+
+**Example**: Selecting all columns from a table.
+ + +```sql +SELECT * FROM table1; +``` + +Results: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +#### 3.1.2 Aggregate Functions + +Aggregate functions summarize multiple rows into a single value. When aggregate functions are present in the `SELECT` clause, the query is treated as an **aggregate query**. All expressions in the query must either be part of an aggregate function or specified in the [GROUP BY clause](../SQL-Manual/GroupBy-Clause.md). + +**Example 1**: Total number of rows in a table. + +```sql +SELECT count(*) FROM table1; +``` + +Results: + +```sql ++-----+ +|_col0| ++-----+ +| 18| ++-----+ +Total line number = 1 +It costs 0.091s +``` + +**Example 2**: Total rows grouped by region. + +```sql +SELECT region, count(*) + FROM table1 + GROUP BY region; +``` + +Results: + +```sql ++------+-----+ +|region|_col1| ++------+-----+ +| 上海| 9| +| 北京| 9| ++------+-----+ +Total line number = 2 +It costs 0.071s +``` + +#### 3.1.3 Aliases + +The `AS` keyword assigns an alias to selected columns, improving readability by overriding existing column names. + +**Example 1**: Original table. 
+ + +```sql +IoTDB> SELECT * FROM table1; +``` + +Results: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +**Example 2**: Assigning an alias to a single column. + +```sql +IoTDB> SELECT device_id + AS device + FROM table1; +``` + +Results: + +```sql ++------+ +|device| ++------+ +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| ++------+ +Total line number = 18 +It costs 0.053s +``` + +**Example 3:** Assigning aliases to all columns. 
+ +```sql +IoTDB> SELECT table1.* + AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) + FROM table1; +``` + +Results: + +```sql ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| +|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| +|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| +|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| +|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| +|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| +|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +Total line number = 18 +It costs 0.189s +``` + +#### 3.1.4 Object Type Query + +> Supported since V2.0.8-beta + +**Example 1: Directly querying Object type data** + +```sql +IoTDB:database1> SELECT s1 FROM table1 WHERE device_id = 'tag1' ++------------+ +| s1| ++------------+ +|(Object) 5 B| ++------------+ +Total line number = 1 +It costs 0.428s +``` + +**Example 2: Retrieving raw content of Object type data using `read_object` function** + +```sql +IoTDB:database1> SELECT read_object(s1) FROM table1 WHERE device_id = 'tag1' ++------------+ +| _col0| ++------------+ +|0x696f746462| ++------------+ +Total line number = 1 +It costs 0.188s +``` + + +### 3.2 Columns Function + +1. 
Without combining expressions +```sql +-- Query data from columns whose names start with 'm' +IoTDB:database1> select columns('^m.*') from table1 limit 5 ++--------+-----------+ +|model_id|maintenance| ++--------+-----------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++--------+-----------+ + + +-- Query columns whose names start with 'o' - throw an exception if no columns match +IoTDB:database1> select columns('^o.*') from table1 limit 5 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' + + +-- Query data from columns whose names start with 'm' and rename them with 'series_' prefix +IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 ++---------------+------------------+ +|series_model_id|series_maintenance| ++---------------+------------------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++---------------+------------------+ +``` + +2. With Expression Combination + +- Single COLUMNS Function +```sql +-- Query the minimum value of all columns +IoTDB:database1> select min(columns(*)) from table1 ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +``` + +- Multiple COLUMNS Functions in Same Expression + +> Usage Restriction: When multiple COLUMNS functions appear in the same expression, their parameters must be identical. + +```sql +-- Query the sum of minimum and maximum values for columns starting with 'h' +IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 ++--------------+ +|_col0_humidity| ++--------------+ +| 79.899994| ++--------------+ + +-- Error Case: Non-Identical COLUMNS Functions +IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported +``` + +- Multiple COLUMNS Functions in Different Expressions + +```sql +-- Query minimum of 'h'-columns and maximum of 'h'-columns separately +IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 ++--------------+--------------+ +|_col0_humidity|_col1_humidity| ++--------------+--------------+ +| 34.8| 45.1| ++--------------+--------------+ + +-- Query minimum of 'h'-columns and maximum of 'te'-columns +IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 ++--------------+-----------------+ +|_col0_humidity|_col1_temperature| ++--------------+-----------------+ +| 34.8| 90.0| ++--------------+-----------------+ +``` + +3. 
In Where Clause
+
+```sql
+-- Query data where all 'h'-columns must be > 40 (equivalent to the query below)
+IoTDB:database1> select * from table1 where columns('^h.*') > 40
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time|
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+|2024-11-29T11:00:00.000+08:00|  上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null|
+|2024-11-28T09:00:00.000+08:00|  上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null|
+|2024-11-28T11:00:00.000+08:00|  上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00|
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+
+-- Alternative syntax
+IoTDB:database1> select * from table1 where humidity > 40
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time|
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+|2024-11-29T11:00:00.000+08:00|  上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null|
+|2024-11-28T09:00:00.000+08:00|  上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null|
+|2024-11-28T11:00:00.000+08:00|  上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00|
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+```
+
+## 4. Column Order in the Result Set
+
+- **Column Order**: The order of columns in the result set matches the order specified in the `SELECT` clause.
+- **Multi-column Expressions**: If a selection expression produces multiple columns, their order follows the order in the source relation.
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/SQL-Manual/Select-Clause_timecho.md b/src/UserGuide/Master/Table/SQL-Manual/Select-Clause_timecho.md
new file mode 100644
index 000000000..75c5cd5c1
--- /dev/null
+++ b/src/UserGuide/Master/Table/SQL-Manual/Select-Clause_timecho.md
@@ -0,0 +1,421 @@
+
+
+# SELECT Clauses
+
+**SELECT Clause** specifies the columns included in the query results.
+
+## 1. Syntax Overview
+
+```sql
+SELECT setQuantifier? selectItem (',' selectItem)*
+
+selectItem
+    : expression (AS? identifier)?                #selectSingle
+    | tableName '.' ASTERISK (AS columnAliases)?  #selectAll
+    | ASTERISK                                    #selectAll
+    ;
+setQuantifier
+    : DISTINCT
+    | ALL
+    ;
+```
+
+- It supports aggregate functions (e.g., `SUM`, `AVG`, `COUNT`) and window functions, logically executed last in the query process.
+- DISTINCT Keyword: `SELECT DISTINCT column_name` ensures that the values in the query results are unique, removing duplicates.
+- COLUMNS Function: The COLUMNS function is supported in the SELECT clause for column filtering. It can be combined with expressions, allowing the expression's logic to apply to all columns selected by the function.
+
+## 2. Detailed Syntax:
+
+Each `selectItem` can take one of the following forms:
+
+1. **Expression**: `expression [[AS] column_alias]` defines a single output column and optionally assigns an alias.
+2.
**All Columns from a Relation**: `relation.*` selects all columns from a specified relation. Column aliases are not allowed in this case.
+3. **All Columns in the Result Set**: `*` selects all columns returned by the query. Column aliases are not allowed.
+
+Usage scenarios for DISTINCT:
+
+1. **SELECT Statement**: Use DISTINCT in the SELECT statement to remove duplicate items from the query results.
+
+2. **Aggregate Functions**: When used with aggregate functions, DISTINCT only processes non-duplicate rows in the input dataset.
+
+3. **GROUP BY Clause**: Use ALL and DISTINCT quantifiers in the GROUP BY clause to determine whether each duplicate grouping set produces distinct output rows.
+
+`COLUMNS` Function:
+
+1. **`COLUMNS(*)`**: Matches all columns and supports combining with expressions.
+2. **`COLUMNS(regexStr) (AS identifier)?`**: Regular expression matching.
+    - Selects columns whose names match the specified regular expression `(regexStr)` and supports combining with expressions.
+    - Allows renaming columns by referencing groups captured by the regular expression. If `AS` is omitted, the original column name is displayed in the format `_coln_original_name` (where `n` is the column’s position in the result table).
+    - Renaming Syntax:
+        - Use parentheses `()` in `regexStr` to define capture groups.
+        - Reference captured groups in `identifier` using `'$index'`.
+        - Note: The identifier must be enclosed in double quotes if it contains special characters like `$`.
+
+## 3. Example Data
+
+
+The [Example Data page](../Reference/Sample-Data.md) provides SQL statements to construct table schemas and insert data. By downloading and executing these statements in the IoTDB CLI, you can import the data into IoTDB. This data can be used to test and run the example SQL queries included in this documentation, allowing you to reproduce the described results.
+
+### 3.1 Selection List
+
+#### 3.1.1 Star Expression
+
+The asterisk (`*`) selects all columns in a table. Note that it cannot be used with most functions, except for cases like `COUNT(*)`.
+
+**Example**: Selecting all columns from a table.
+ + +```sql +SELECT * FROM table1; +``` + +Results: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +#### 3.1.2 Aggregate Functions + +Aggregate functions summarize multiple rows into a single value. When aggregate functions are present in the `SELECT` clause, the query is treated as an **aggregate query**. All expressions in the query must either be part of an aggregate function or specified in the [GROUP BY clause](../SQL-Manual/GroupBy-Clause.md). + +**Example 1**: Total number of rows in a table. + +```sql +SELECT count(*) FROM table1; +``` + +Results: + +```sql ++-----+ +|_col0| ++-----+ +| 18| ++-----+ +Total line number = 1 +It costs 0.091s +``` + +**Example 2**: Total rows grouped by region. + +```sql +SELECT region, count(*) + FROM table1 + GROUP BY region; +``` + +Results: + +```sql ++------+-----+ +|region|_col1| ++------+-----+ +| 上海| 9| +| 北京| 9| ++------+-----+ +Total line number = 2 +It costs 0.071s +``` + +#### 3.1.3 Aliases + +The `AS` keyword assigns an alias to selected columns, improving readability by overriding existing column names. + +**Example 1**: Original table. 
+ + +```sql +IoTDB> SELECT * FROM table1; +``` + +Results: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +**Example 2**: Assigning an alias to a single column. + +```sql +IoTDB> SELECT device_id + AS device + FROM table1; +``` + +Results: + +```sql ++------+ +|device| ++------+ +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| ++------+ +Total line number = 18 +It costs 0.053s +``` + +**Example 3:** Assigning aliases to all columns. 
+ +```sql +IoTDB> SELECT table1.* + AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) + FROM table1; +``` + +Results: + +```sql ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| +|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| +|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| +|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| +|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| +|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| +|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +Total line number = 18 +It costs 0.189s +``` + +#### 3.1.4 Object Type Query + +> Supported since V2.0.8 + +**Example 1: Directly querying Object type data** + +```sql +IoTDB:database1> SELECT s1 FROM table1 WHERE device_id = 'tag1' ++------------+ +| s1| ++------------+ +|(Object) 5 B| ++------------+ +Total line number = 1 +It costs 0.428s +``` + +**Example 2: Retrieving raw content of Object type data using `read_object` function** + +```sql +IoTDB:database1> SELECT read_object(s1) FROM table1 WHERE device_id = 'tag1' ++------------+ +| _col0| ++------------+ +|0x696f746462| ++------------+ +Total line number = 1 +It costs 0.188s +``` + + +### 3.2 Columns Function + +1. 
Without combining expressions +```sql +-- Query data from columns whose names start with 'm' +IoTDB:database1> select columns('^m.*') from table1 limit 5 ++--------+-----------+ +|model_id|maintenance| ++--------+-----------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++--------+-----------+ + + +-- Query columns whose names start with 'o' - throw an exception if no columns match +IoTDB:database1> select columns('^o.*') from table1 limit 5 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' + + +-- Query data from columns whose names start with 'm' and rename them with 'series_' prefix +IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 ++---------------+------------------+ +|series_model_id|series_maintenance| ++---------------+------------------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++---------------+------------------+ +``` + +2. With Expression Combination + +- Single COLUMNS Function +```sql +-- Query the minimum value of all columns +IoTDB:database1> select min(columns(*)) from table1 ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +``` + +- Multiple COLUMNS Functions in Same Expression + +> Usage Restriction: When multiple COLUMNS functions appear in the same expression, their parameters must be identical. + +```sql +-- Query the sum of minimum and maximum values for columns starting with 'h' +IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 ++--------------+ +|_col0_humidity| ++--------------+ +| 79.899994| ++--------------+ + +-- Error Case: Non-Identical COLUMNS Functions +IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported +``` + +- Multiple COLUMNS Functions in Different Expressions + +```sql +-- Query minimum of 'h'-columns and maximum of 'h'-columns separately +IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 ++--------------+--------------+ +|_col0_humidity|_col1_humidity| ++--------------+--------------+ +| 34.8| 45.1| ++--------------+--------------+ + +-- Query minimum of 'h'-columns and maximum of 'te'-columns +IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 ++--------------+-----------------+ +|_col0_humidity|_col1_temperature| ++--------------+-----------------+ +| 34.8| 90.0| ++--------------+-----------------+ +``` + +3. 
+
+```sql
+-- Query data where all 'h'-columns must be > 40 (equivalent to the alternative syntax below)
+IoTDB:database1> select * from table1 where columns('^h.*') > 40
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time|
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+|2024-11-29T11:00:00.000+08:00|  上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null|
+|2024-11-28T09:00:00.000+08:00|  上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null|
+|2024-11-28T11:00:00.000+08:00|  上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00|
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+
+-- Alternative syntax
+IoTDB:database1> select * from table1 where humidity > 40
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time|
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+|2024-11-29T11:00:00.000+08:00|  上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null|
+|2024-11-28T09:00:00.000+08:00|  上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null|
+|2024-11-28T11:00:00.000+08:00|  上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00|
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+```
+
+## 4. Column Order in the Result Set
+
+- **Column Order**: The order of columns in the result set matches the order specified in the `SELECT` clause.
+- **Multi-column Expressions**: If a selection expression produces multiple columns, their order follows the order in the source relation.
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/SQL-Manual/overview_apache.md b/src/UserGuide/Master/Table/SQL-Manual/overview_apache.md
index 0ef5cbc2e..e6b98ca16 100644
--- a/src/UserGuide/Master/Table/SQL-Manual/overview_apache.md
+++ b/src/UserGuide/Master/Table/SQL-Manual/overview_apache.md
@@ -37,7 +37,7 @@ SELECT ⟨select_list⟩
 
 The IoTDB table model query syntax supports the following clauses:
 
-- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause.md)
+- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause_apache.md)
 - **FROM Clause**: Indicates the data source for the query, which can be a single table, multiple tables joined using the `JOIN` clause, or a subquery. Details: [FROM & JOIN Clause](../SQL-Manual/From-Join-Clause.md)
 - **WHERE Clause**: Filters rows based on specific conditions. Logically executed immediately after the `FROM` clause. Details: [WHERE Clause](../SQL-Manual/Where-Clause.md)
 - **GROUP BY Clause**: Used for aggregating data, specifying the columns for grouping. Details: [GROUP BY Clause](../SQL-Manual/GroupBy-Clause.md)
diff --git a/src/UserGuide/Master/Table/SQL-Manual/overview_timecho.md b/src/UserGuide/Master/Table/SQL-Manual/overview_timecho.md
index 19afdc1b8..d564f44c6 100644
--- a/src/UserGuide/Master/Table/SQL-Manual/overview_timecho.md
+++ b/src/UserGuide/Master/Table/SQL-Manual/overview_timecho.md
@@ -38,7 +38,7 @@ SELECT ⟨select_list⟩
 
 The IoTDB table model query syntax supports the following clauses:
 
-- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause.md)
+- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause_timecho.md)
 - **FROM Clause**: Indicates the data source for the query, which can be a single table, multiple tables joined using the `JOIN` clause, or a subquery. Details: [FROM & JOIN Clause](../SQL-Manual/From-Join-Clause.md)
 - **WHERE Clause**: Filters rows based on specific conditions. Logically executed immediately after the `FROM` clause. Details: [WHERE Clause](../SQL-Manual/Where-Clause.md)
 - **GROUP BY Clause**: Used for aggregating data, specifying the columns for grouping. Details: [GROUP BY Clause](../SQL-Manual/GroupBy-Clause.md)
diff --git a/src/UserGuide/Master/Table/User-Manual/Black-White-List_timecho.md b/src/UserGuide/Master/Table/User-Manual/Black-White-List_timecho.md
index dceab5681..3aa3cb94a 100644
--- a/src/UserGuide/Master/Table/User-Manual/Black-White-List_timecho.md
+++ b/src/UserGuide/Master/Table/User-Manual/Black-White-List_timecho.md
@@ -39,7 +39,7 @@ Administrators can enable/disable the whitelist function and add, modify, or del
 
 * Edit the configuration file `iotdb‑system.properties`.
 * Use the `set configuration` statement.
-  * Table model reference: [set configuration](../SQL-Manual/SQL-Maintenance-Statements.md#_2-2-update-configuration-items)
+  * Table model reference: [set configuration](../SQL-Manual/SQL-Maintenance-Statements_timecho.md#_2-2-update-configuration-items)
 
 Related parameters are as follows:
@@ -60,7 +60,7 @@ Administrators can enable/disable the blacklist function and add, modify, or del
 
 * Edit the configuration file `iotdb‑system.properties`.
 * Use the `set configuration`statement.
-  * Table model reference:[set configuration](../SQL-Manual/SQL-Maintenance-Statements.md#_2-2-update-configuration-items)
+  * Table model reference: [set configuration](../SQL-Manual/SQL-Maintenance-Statements_timecho.md#_2-2-update-configuration-items)
 
 Related parameters are as follows:
diff --git a/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md b/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md
index 106795a1d..f55a597b3 100644
--- a/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md
+++ b/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md
@@ -454,7 +454,7 @@ The supported operators are as follows:
 
 ### 3.1 Time Filter
 
-Use time filters to filter data for a specific time range. For supported formats of timestamps, please refer to [Timestamp](../Background-knowledge/Data-Type.md) .
+Use time filters to filter data for a specific time range. For supported formats of timestamps, please refer to [Timestamp](../Background-knowledge/Data-Type_apache.md).
 
 An example is as follows:
 
@@ -2948,7 +2948,7 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root.
 
 #### Other points to note
 
 - For general aggregation queries, the timestamp is meaningless, and the convention is to use 0 to store.
-- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type.md#Data Type Compatibility).
+- When the target time-series exists, the data type of the source column and the target time-series must be compatible. For details on data type compatibility, see [Data Type](../Background-knowledge/Data-Type_apache.md).
 - When the target time series does not exist, the system automatically creates it (including the database).
 - When the queried time series does not exist, or the queried sequence does not have data, the target time series will not be created automatically.
diff --git a/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md b/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md
index 3f51a3045..72e365ab4 100644
--- a/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md
+++ b/src/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md
@@ -454,7 +454,7 @@ The supported operators are as follows:
 
 ### 3.1 Time Filter
 
-Use time filters to filter data for a specific time range. For supported formats of timestamps, please refer to [Timestamp](../Background-knowledge/Data-Type.md) .
+Use time filters to filter data for a specific time range. For supported formats of timestamps, please refer to [Timestamp](../Background-knowledge/Data-Type_timecho.md).
 
 An example is as follows:
 
@@ -2948,7 +2948,7 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root.
 
 #### Other points to note
 
 - For general aggregation queries, the timestamp is meaningless, and the convention is to use 0 to store.
-- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type.md#Data Type Compatibility).
+- When the target time-series exists, the data type of the source column and the target time-series must be compatible. For details on data type compatibility, see [Data Type](../Background-knowledge/Data-Type_timecho.md).
 - When the target time series does not exist, the system automatically creates it (including the database).
 - When the queried time series does not exist, or the queried sequence does not have data, the target time series will not be created automatically.
diff --git a/src/UserGuide/Master/Tree/Basic-Concept/Write-Data_apache.md b/src/UserGuide/Master/Tree/Basic-Concept/Write-Data_apache.md
index 35ebef8b5..d89fe30d3 100644
--- a/src/UserGuide/Master/Tree/Basic-Concept/Write-Data_apache.md
+++ b/src/UserGuide/Master/Tree/Basic-Concept/Write-Data_apache.md
@@ -31,7 +31,7 @@ Writing a repeat timestamp covers the original timestamp data, which can be rega
 
 ### 1.1 Use of INSERT Statements
 
-The [INSERT SQL statement](../SQL-Manual/SQL-Manual_apache#insert-data) statement is used to insert data into one or more specified timeseries created. For each point of data inserted, it consists of a [timestamp](../Basic-Concept/Operate-Metadata.md) and a sensor acquisition value (see [Data Type](../Background-knowledge/Data-Type.md)).
+The [INSERT SQL statement](../SQL-Manual/SQL-Manual_apache#insert-data) is used to insert data into one or more previously created timeseries. Each data point inserted consists of a [timestamp](../Basic-Concept/Operate-Metadata.md) and a sensor acquisition value (see [Data Type](../Background-knowledge/Data-Type_apache.md)).
 
 In the scenario of this section, take two timeseries `root.ln.wf02.wt02.status` and `root.ln.wf02.wt02.hardware` as an example, and their data types are BOOLEAN and TEXT, respectively.
diff --git a/src/UserGuide/Master/Tree/Basic-Concept/Write-Data_timecho.md b/src/UserGuide/Master/Tree/Basic-Concept/Write-Data_timecho.md
index 380ab5037..ac2585dbd 100644
--- a/src/UserGuide/Master/Tree/Basic-Concept/Write-Data_timecho.md
+++ b/src/UserGuide/Master/Tree/Basic-Concept/Write-Data_timecho.md
@@ -31,7 +31,7 @@ Writing a repeat timestamp covers the original timestamp data, which can be rega
 
 ### 1.1 Use of INSERT Statements
 
-The [INSERT SQL statement](../SQL-Manual/SQL-Manual_timecho#insert-data) statement is used to insert data into one or more specified timeseries created. For each point of data inserted, it consists of a [timestamp](../Basic-Concept/Operate-Metadata.md) and a sensor acquisition value (see [Data Type](../Background-knowledge/Data-Type.md)).
+The [INSERT SQL statement](../SQL-Manual/SQL-Manual_timecho#insert-data) is used to insert data into one or more previously created timeseries. Each data point inserted consists of a [timestamp](../Basic-Concept/Operate-Metadata.md) and a sensor acquisition value (see [Data Type](../Background-knowledge/Data-Type_timecho.md)).
 
 In the scenario of this section, take two timeseries `root.ln.wf02.wt02.status` and `root.ln.wf02.wt02.hardware` as an example, and their data types are BOOLEAN and TEXT, respectively.
diff --git a/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands.md b/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands.md
index b6d0988bd..fc3ebe560 100644
--- a/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands.md
+++ b/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands.md
@@ -1,3 +1,6 @@
+---
+redirectTo: Maintenance-commands_apache.html
+---
-# Maintenance Statement
-
-## 1. Status Checking
-
-### 1.1 Viewing the Connected Model
-
-**Description**: Returns the current SQL dialect model (`Tree` or `Table`).
-
-**Syntax**:
-
-```SQL
-showCurrentSqlDialectStatement
-    : SHOW CURRENT_SQL_DIALECT
-    ;
-```
-
-**Example**:
-
-```SQL
-IoTDB> SHOW CURRENT_SQL_DIALECT;
-```
-
-**Result:**
-
-```SQL
-+-----------------+
-|CurrentSqlDialect|
-+-----------------+
-|             TREE|
-+-----------------+
-```
-
-### 1.2 Viewing the Cluster Version
-
-**Description**: Returns the current cluster version.
-
-**Syntax**:
-
-```SQL
-showVersionStatement
-    : SHOW VERSION
-    ;
-```
-
-**Example**:
-
-```SQL
-IoTDB> SHOW VERSION;
-```
-
-**Result**:
-
-```Plain
-+-------+---------+
-|Version|BuildInfo|
-+-------+---------+
-|2.0.1.2|  1ca4008|
-+-------+---------+
-```
-
-### 1.3 Viewing Cluster Key Parameters
-
-**Description**: Returns key parameters of the current cluster.
-
-**Syntax**:
-
-```SQL
-showVariablesStatement
-    : SHOW VARIABLES
-    ;
-```
-
-Key Parameters:
-
-1. **ClusterName**: The name of the current cluster.
-2. **DataReplicationFactor**: Number of data replicas per DataRegion.
-3. **SchemaReplicationFactor**: Number of schema replicas per SchemaRegion.
-4. **DataRegionConsensusProtocolClass**: Consensus protocol class for DataRegions.
-5. **SchemaRegionConsensusProtocolClass**: Consensus protocol class for SchemaRegions.
-6. **ConfigNodeConsensusProtocolClass**: Consensus protocol class for ConfigNodes.
-7. **TimePartitionOrigin**: The starting timestamp of database time partitions. -8. **TimePartitionInterval**: The interval of database time partitions (in milliseconds). -9. **ReadConsistencyLevel**: The consistency level for read operations. -10. **SchemaRegionPerDataNode**: Number of SchemaRegions per DataNode. -11. **DataRegionPerDataNode**: Number of DataRegions per DataNode. -12. **SeriesSlotNum**: Number of SeriesSlots per DataRegion. -13. **SeriesSlotExecutorClass**: Implementation class for SeriesSlots. -14. **DiskSpaceWarningThreshold**: Disk space warning threshold (in percentage). -15. **TimestampPrecision**: Timestamp precision. - -**Example**: - -```SQL -IoTDB> SHOW VARIABLES; -``` - -**Result**: - -```Plain -+----------------------------------+-----------------------------------------------------------------+ -| Variable| Value| -+----------------------------------+-----------------------------------------------------------------+ -| ClusterName| defaultCluster| -| DataReplicationFactor| 1| -| SchemaReplicationFactor| 1| -| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| -|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| TimePartitionOrigin| 0| -| TimePartitionInterval| 604800000| -| ReadConsistencyLevel| strong| -| SchemaRegionPerDataNode| 1| -| DataRegionPerDataNode| 0| -| SeriesSlotNum| 1000| -| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| -| DiskSpaceWarningThreshold| 0.05| -| TimestampPrecision| ms| -+----------------------------------+-----------------------------------------------------------------+ -``` - -### 1.4 Viewing the Current Timestamp of Database - -**Description**: Returns the current timestamp of the database. - -**Syntax**: - -```SQL -showCurrentTimestampStatement - : SHOW CURRENT_TIMESTAMP - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW CURRENT_TIMESTAMP; -``` - -**Result**: - -```Plain -+-----------------------------+ -| CurrentTimestamp| -+-----------------------------+ -|2025-02-17T11:11:52.987+08:00| -+-----------------------------+ -``` - -### 1.5 Viewing Executing Queries - -**Description**: Displays information about all currently executing queries. - -**Syntax**: - -```SQL -showQueriesStatement - : SHOW (QUERIES | QUERY PROCESSLIST) - (WHERE where=booleanExpression)? - (ORDER BY sortItem (',' sortItem)*)? - limitOffsetClause - ; -``` - -**Parameters**: - -1. **WHERE Clause**: Filters the result set based on specified conditions. -2. **ORDER BY Clause**: Sorts the result set based on specified columns. -3. **limitOffsetClause**: Limits the number of rows returned. - 1. Format: `LIMIT , `. - -**Columns in QUERIES Table**: - -- **time**: Timestamp when the query started. -- **queryid**: Unique ID of the query. -- **datanodeid**: ID of the DataNode executing the query. -- **elapsedtime**: Time elapsed since the query started (in seconds). -- **statement**: The SQL statement being executed. 
- -**Example**: - -```SQL -IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003 -``` - -**Result**: - -```SQL -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -| Time| QueryId|DataNodeId|ElapsedTime| Statement| -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1| 1| 0.006|SHOW QUERIES WHERE elapsedtime > 0.003| -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -``` - -### 1.6 Viewing Region Information - -**Description**: Displays regions' information of the current cluster. - -**Syntax**: - -```SQL -showRegionsStatement - : SHOW REGIONS - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW REGIONS -``` - -**Result**: - -```SQL -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -| 9|SchemaRegion|Running|root.__system| 21| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.555| | -| 10| DataRegion|Running|root.__system| 21| 21| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.556| 8.27 KB| -| 65|SchemaRegion|Running| root.ln| 1| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.113| | -| 66| DataRegion|Running| root.ln| 1| 1| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.425| 524 B| -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -``` - -### 1.7 Viewing Available Nodes - -**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state. - -> This feature is supported starting from v2.0.8. - -**Syntax**: - -```SQL -showAvailableUrlsStatement - : SHOW AVAILABLE URLS - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW AVAILABLE URLS -``` - -**Result**: - -```SQL -+----------+-------+ -|RpcAddress|RpcPort| -+----------+-------+ -| 0.0.0.0| 6667| -+----------+-------+ -``` - - -## 2. Status Setting - -### 2.1 Setting the Connected Model - -**Description**: Sets the current SQL dialect model to `Tree` or `Table` which can be used in both tree and table models. - -**Syntax**: - -```SQL -SET SQL_DIALECT = (TABLE | TREE); -``` - -**Example**: - -```SQL -IoTDB> SET SQL_DIALECT=TREE; -IoTDB> SHOW CURRENT_SQL_DIALECT; -``` - -**Result**: - -```SQL -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TREE| -+-----------------+ -``` - -### 2.2 Updating Configuration Items - -**Description**: Updates configuration items. Changes take effect immediately without restarting if the items support hot modification. - -**Syntax**: - -```SQL -setConfigurationStatement - : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? - ; - -propertyAssignments - : property (',' property)* - ; - -property - : identifier EQ propertyValue - ; - -propertyValue - : DEFAULT - | expression - ; -``` - -**Parameters**: - -1. **propertyAssignments**: A list of properties to update. - 1. 
Format: `property (',' property)*`. - 2. Values: - - `DEFAULT`: Resets the configuration to its default value. - - `expression`: A specific value (must be a string). -2. **ON INTEGER_VALUE** **(Optional):** Specifies the node ID to update. - 1. If not specified or set to a negative value, updates all ConfigNodes and DataNodes. - -**Example**: - -```SQL -IoTDB> SET CONFIGURATION ‘disk_space_warning_threshold’='0.05',‘heartbeat_interval_in_ms’='1000' ON 1; -``` - -### 2.3 Loading Manually Modified Configuration Files - -**Description**: Loads manually modified configuration files and hot-loads the changes. Configuration items that support hot modification take effect immediately. - -**Syntax**: - -```SQL -loadConfigurationStatement - : LOAD CONFIGURATION localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **localOrClusterMode** **(Optional):** - 1. Specifies the scope of configuration loading. - 2. Default: `CLUSTER`. - 3. Values: - - `LOCAL`: Loads configuration only on the DataNode directly connected to the client. - - `CLUSTER`: Loads configuration on all DataNodes in the cluster. - -**Example**: - -```SQL -IoTDB> LOAD CONFIGURATION ON LOCAL; -``` - -### 2.4 Setting the System Status - -**Description**: Sets the system status to either `READONLY` or `RUNNING`. - -**Syntax**: - -```SQL -setSystemStatusStatement - : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **RUNNING |** **READONLY**: - 1. **RUNNING**: Sets the system to running mode, allowing both read and write operations. - 2. **READONLY**: Sets the system to read-only mode, allowing only read operations and prohibiting writes. -2. **localOrClusterMode** **(Optional):** - 1. **LOCAL**: Applies the status change only to the DataNode directly connected to the client. - 2. **CLUSTER**: Applies the status change to all DataNodes in the cluster. - 3. **Default**: `ON CLUSTER`. - -**Example**: - -```SQL -IoTDB> SET SYSTEM TO READONLY ON CLUSTER; -``` - -## 3. Data Management - -### 3.1 Flushing Data from Memory to Disk - -**Description**: Flushes data from the memory table to disk. - -**Syntax**: - -```SQL -flushStatement - : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? - ; - -booleanValue - : TRUE | FALSE - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **identifier** **(Optional):** - 1. Specifies the name of the path to flush. - 2. If not specified, all paths are flushed. - 3. **Multiple Paths**: Multiple path names can be specified, separated by commas (e.g., `FLUSH root.ln, root.lnm.**`). -2. **booleanValue** **(****Optional****)**: - 1. Specifies the type of data to flush. - 2. **TRUE**: Flushes only the sequential memory table. - 3. **FALSE**: Flushes only the unsequential MemTable. - 4. **Default**: Flushes both sequential and unsequential memory tables. -3. **localOrClusterMode** **(****Optional****)**: - 1. **ON LOCAL**: Flushes only the memory tables on the DataNode directly connected to the client. - 2. **ON CLUSTER**: Flushes memory tables on all DataNodes in the cluster. - 3. **Default:** `ON CLUSTER`. - -**Example**: - -```SQL -IoTDB> FLUSH root.ln TRUE ON LOCAL; -``` - -## 4. Data Repair - -### 4.1 Starting Background Scan and Repair of TsFiles - -**Description**: Starts a background task to scan and repair TsFiles, fixing issues such as timestamp disorder within data files. 
- -**Syntax**: - -```SQL -startRepairDataStatement - : START REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **localOrClusterMode(Optional)**: - 1. **ON LOCAL**: Executes the repair task only on the DataNode directly connected to the client. - 2. **ON CLUSTER**: Executes the repair task on all DataNodes in the cluster. - 3. **Default:** `ON CLUSTER`. - -**Example**: - -```SQL -IoTDB> START REPAIR DATA ON CLUSTER; -``` - -### 4.2 Pausing Background TsFile Repair Task - -**Description**: Pauses the background repair task. The paused task can be resumed by executing the `START REPAIR DATA` command again. - -**Syntax**: - -```SQL -stopRepairDataStatement - : STOP REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **localOrClusterMode** **(Optional):** - 1. **ON LOCAL**: Executes the pause command only on the DataNode directly connected to the client. - 2. **ON CLUSTER**: Executes the pause command on all DataNodes in the cluster. - 3. **Default:** `ON CLUSTER`. - -**Example**: - -```SQL -IoTDB> STOP REPAIR DATA ON CLUSTER; -``` - -## 5. Query Termination - -### 5.1 Terminating Queries - -**Description**: Terminates one or more running queries. - -**Syntax**: - -```SQL -killQueryStatement - : KILL (QUERY queryId=string | ALL QUERIES) - ; -``` - -**Parameters**: - -1. **QUERY** **queryId:** Specifies the ID of the query to terminate. - -- To obtain the `queryId`, use the `SHOW QUERIES` command. - -2. **ALL QUERIES:** Terminates all currently running queries. - -**Example**: - -Terminate a specific query: - -```SQL -IoTDB> KILL QUERY 20250108_101015_00000_1; -``` - -Terminate all queries: - -```SQL -IoTDB> KILL ALL QUERIES; -``` \ No newline at end of file +--> \ No newline at end of file diff --git a/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands_apache.md b/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands_apache.md new file mode 100644 index 000000000..e72ce52fe --- /dev/null +++ b/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands_apache.md @@ -0,0 +1,548 @@ + +# Maintenance Statement + +## 1. Status Checking + +### 1.1 Viewing the Connected Model + +**Description**: Returns the current SQL dialect mode (`Tree` or `Table`). + +**Syntax**: + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT; +``` + +**Result:** + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 1.2 Viewing the Cluster Version + +**Description**: Returns the current cluster version. + +**Syntax**: + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW VERSION; +``` + +**Result**: + +```Plain ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.3 Viewing Cluster Key Parameters + +**Description**: Returns key parameters of the current cluster. + +**Syntax**: + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +Key Parameters: + +1. **ClusterName**: The name of the current cluster. +2. **DataReplicationFactor**: Number of data replicas per DataRegion. +3. **SchemaReplicationFactor**: Number of schema replicas per SchemaRegion. +4. **DataRegionConsensusProtocolClass**: Consensus protocol class for DataRegions. +5. **SchemaRegionConsensusProtocolClass**: Consensus protocol class for SchemaRegions. +6. 
**ConfigNodeConsensusProtocolClass**: Consensus protocol class for ConfigNodes.
+7. **TimePartitionOrigin**: The starting timestamp of database time partitions.
+8. **TimePartitionInterval**: The interval of database time partitions (in milliseconds).
+9. **ReadConsistencyLevel**: The consistency level for read operations.
+10. **SchemaRegionPerDataNode**: Number of SchemaRegions per DataNode.
+11. **DataRegionPerDataNode**: Number of DataRegions per DataNode.
+12. **SeriesSlotNum**: Number of SeriesSlots per DataRegion.
+13. **SeriesSlotExecutorClass**: Implementation class for SeriesSlots.
+14. **DiskSpaceWarningThreshold**: Disk space warning threshold (in percentage).
+15. **TimestampPrecision**: Timestamp precision.
+
+**Example**:
+
+```SQL
+IoTDB> SHOW VARIABLES;
+```
+
+**Result**:
+
+```Plain
++----------------------------------+-----------------------------------------------------------------+
+|                          Variable|                                                            Value|
++----------------------------------+-----------------------------------------------------------------+
+|                       ClusterName|                                                   defaultCluster|
+|             DataReplicationFactor|                                                                1|
+|           SchemaReplicationFactor|                                                                1|
+|  DataRegionConsensusProtocolClass|                      org.apache.iotdb.consensus.iot.IoTConsensus|
+|SchemaRegionConsensusProtocolClass|                  org.apache.iotdb.consensus.ratis.RatisConsensus|
+|  ConfigNodeConsensusProtocolClass|                  org.apache.iotdb.consensus.ratis.RatisConsensus|
+|               TimePartitionOrigin|                                                                0|
+|             TimePartitionInterval|                                                        604800000|
+|              ReadConsistencyLevel|                                                           strong|
+|           SchemaRegionPerDataNode|                                                                1|
+|             DataRegionPerDataNode|                                                                0|
+|                     SeriesSlotNum|                                                             1000|
+|           SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor|
+|         DiskSpaceWarningThreshold|                                                             0.05|
+|                TimestampPrecision|                                                               ms|
++----------------------------------+-----------------------------------------------------------------+
+```
+
+### 1.4 Viewing the Current Timestamp of the Database
+
+**Description**: Returns the current timestamp of the database.
+
+**Syntax**:
+
+```SQL
+showCurrentTimestampStatement
+    : SHOW CURRENT_TIMESTAMP
+    ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW CURRENT_TIMESTAMP;
+```
+
+**Result**:
+
+```Plain
++-----------------------------+
+|             CurrentTimestamp|
++-----------------------------+
+|2025-02-17T11:11:52.987+08:00|
++-----------------------------+
+```
+
+### 1.5 Viewing Executing Queries
+
+**Description**: Displays information about all currently executing queries.
+
+**Syntax**:
+
+```SQL
+showQueriesStatement
+    : SHOW (QUERIES | QUERY PROCESSLIST)
+        (WHERE where=booleanExpression)?
+        (ORDER BY sortItem (',' sortItem)*)?
+        limitOffsetClause
+    ;
+```
+
+**Parameters**:
+
+1. **WHERE Clause**: Filters the result set based on specified conditions.
+2. **ORDER BY Clause**: Sorts the result set based on specified columns.
+3. **limitOffsetClause**: Limits the number of rows returned.
+    1. Format: `LIMIT , `.
+
+**Columns in QUERIES Table**:
+
+- **time**: Timestamp when the query started.
+- **queryid**: Unique ID of the query.
+- **datanodeid**: ID of the DataNode executing the query.
+- **elapsedtime**: Time elapsed since the query started (in seconds).
+- **statement**: The SQL statement being executed.
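+
+The `WHERE`, `ORDER BY`, and `LIMIT` clauses in the grammar above can be combined. As an illustrative sketch (the actual output depends on the queries running at that moment), the following lists the two longest-running queries first:
+
+```SQL
+IoTDB> SHOW QUERIES ORDER BY elapsedtime DESC LIMIT 2
+```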
+
+**Example**:
+
+```SQL
+IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003
+```
+
+**Result**:
+
+```SQL
++-----------------------------+-----------------------+----------+-----------+--------------------------------------+
+|                         Time|                QueryId|DataNodeId|ElapsedTime|                             Statement|
++-----------------------------+-----------------------+----------+-----------+--------------------------------------+
+|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1|         1|      0.006|SHOW QUERIES WHERE elapsedtime > 0.003|
++-----------------------------+-----------------------+----------+-----------+--------------------------------------+
+```
+
+### 1.6 Viewing Region Information
+
+**Description**: Displays information about the regions of the current cluster.
+
+**Syntax**:
+
+```SQL
+showRegionsStatement
+    : SHOW REGIONS
+    ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW REGIONS
+```
+
+**Result**:
+
+```SQL
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|RegionId|        Type| Status|     Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress|  Role|             CreateTime|TsFileSize|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|       9|SchemaRegion|Running|root.__system|           21|          0|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.555|          |
+|      10|  DataRegion|Running|root.__system|           21|         21|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.556|   8.27 KB|
+|      65|SchemaRegion|Running|      root.ln|            1|          0|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-25T14:46:50.113|          |
+|      66|  DataRegion|Running|      root.ln|            1|          1|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-25T14:46:50.425|     524 B|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+```
+
+### 1.7 Viewing Available Nodes
+
+**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state.
+
+> This feature is supported starting from v2.0.8-beta.
+
+**Syntax**:
+
+```SQL
+showAvailableUrlsStatement
+    : SHOW AVAILABLE URLS
+    ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW AVAILABLE URLS
+```
+
+**Result**:
+
+```SQL
++----------+-------+
+|RpcAddress|RpcPort|
++----------+-------+
+|   0.0.0.0|   6667|
++----------+-------+
+```
+
+
+## 2. Status Setting
+
+### 2.1 Setting the Connected Model
+
+**Description**: Sets the current SQL dialect mode to `Tree` or `Table`. This statement can be executed in both tree and table modes.
+
+**Syntax**:
+
+```SQL
+SET SQL_DIALECT = (TABLE | TREE);
+```
+
+**Example**:
+
+```SQL
+IoTDB> SET SQL_DIALECT=TREE;
+IoTDB> SHOW CURRENT_SQL_DIALECT;
+```
+
+**Result**:
+
+```SQL
++-----------------+
+|CurrentSqlDialect|
++-----------------+
+|             TREE|
++-----------------+
+```
+
+### 2.2 Updating Configuration Items
+
+**Description**: Updates configuration items. Changes take effect immediately without restarting if the items support hot modification.
+
+**Syntax**:
+
+```SQL
+setConfigurationStatement
+    : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)?
+    ;
+
+propertyAssignments
+    : property (',' property)*
+    ;
+
+property
+    : identifier EQ propertyValue
+    ;
+
+propertyValue
+    : DEFAULT
+    | expression
+    ;
+```
+
+**Parameters**:
+
+1. **propertyAssignments**: A list of properties to update.
+    1. Format: `property (',' property)*`.
+    2. Values:
+        - `DEFAULT`: Resets the configuration to its default value.
+        - `expression`: A specific value (must be a string).
+2. **ON INTEGER_VALUE** **(Optional):** Specifies the node ID to update.
+    1. If not specified or set to a negative value, updates all ConfigNodes and DataNodes.
+
+**Example**:
+
+```SQL
+IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1;
+```
+
+### 2.3 Loading Manually Modified Configuration Files
+
+**Description**: Loads manually modified configuration files and hot-loads the changes. Configuration items that support hot modification take effect immediately.
+
+**Syntax**:
+
+```SQL
+loadConfigurationStatement
+    : LOAD CONFIGURATION localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Parameters**:
+
+1. **localOrClusterMode** **(Optional):**
+    1. Specifies the scope of configuration loading.
+    2. Default: `CLUSTER`.
+    3. Values:
+        - `LOCAL`: Loads configuration only on the DataNode directly connected to the client.
+        - `CLUSTER`: Loads configuration on all DataNodes in the cluster.
+
+**Example**:
+
+```SQL
+IoTDB> LOAD CONFIGURATION ON LOCAL;
+```
+
+### 2.4 Setting the System Status
+
+**Description**: Sets the system status to either `READONLY` or `RUNNING`.
+
+**Syntax**:
+
+```SQL
+setSystemStatusStatement
+    : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Parameters**:
+
+1. **RUNNING |** **READONLY**:
+    1. **RUNNING**: Sets the system to running mode, allowing both read and write operations.
+    2. **READONLY**: Sets the system to read-only mode, allowing only read operations and prohibiting writes.
+2. **localOrClusterMode** **(Optional):**
+    1. **LOCAL**: Applies the status change only to the DataNode directly connected to the client.
+    2. **CLUSTER**: Applies the status change to all DataNodes in the cluster.
+    3. **Default**: `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> SET SYSTEM TO READONLY ON CLUSTER;
+```
+
+## 3. Data Management
+
+### 3.1 Flushing Data from Memory to Disk
+
+**Description**: Flushes data from the memory table to disk.
+
+**Syntax**:
+
+```SQL
+flushStatement
+    : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode?
+    ;
+
+booleanValue
+    : TRUE | FALSE
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Parameters**:
+
+1. **identifier** **(Optional):**
+    1. Specifies the name of the path to flush.
+    2. If not specified, all paths are flushed.
+    3. **Multiple Paths**: Multiple path names can be specified, separated by commas (e.g., `FLUSH root.ln, root.lnm.**`).
+2. **booleanValue** **(Optional)**:
+    1. Specifies the type of data to flush.
+    2. **TRUE**: Flushes only the sequential memory table.
+    3. **FALSE**: Flushes only the unsequential memory table.
+    4. **Default**: Flushes both sequential and unsequential memory tables.
+3. **localOrClusterMode** **(Optional)**:
+    1. **ON LOCAL**: Flushes only the memory tables on the DataNode directly connected to the client.
+    2. **ON CLUSTER**: Flushes memory tables on all DataNodes in the cluster.
+    3. **Default:** `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> FLUSH root.ln TRUE ON LOCAL;
+```
+
+## 4. Data Repair
+
+### 4.1 Starting Background Scan and Repair of TsFiles
+
+**Description**: Starts a background task to scan and repair TsFiles, fixing issues such as timestamp disorder within data files.
+ +**Syntax**: + +```SQL +startRepairDataStatement + : START REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Parameters**: + +1. **localOrClusterMode(Optional)**: + 1. **ON LOCAL**: Executes the repair task only on the DataNode directly connected to the client. + 2. **ON CLUSTER**: Executes the repair task on all DataNodes in the cluster. + 3. **Default:** `ON CLUSTER`. + +**Example**: + +```SQL +IoTDB> START REPAIR DATA ON CLUSTER; +``` + +### 4.2 Pausing Background TsFile Repair Task + +**Description**: Pauses the background repair task. The paused task can be resumed by executing the `START REPAIR DATA` command again. + +**Syntax**: + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Parameters**: + +1. **localOrClusterMode** **(Optional):** + 1. **ON LOCAL**: Executes the pause command only on the DataNode directly connected to the client. + 2. **ON CLUSTER**: Executes the pause command on all DataNodes in the cluster. + 3. **Default:** `ON CLUSTER`. + +**Example**: + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. Query Termination + +### 5.1 Terminating Queries + +**Description**: Terminates one or more running queries. + +**Syntax**: + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**Parameters**: + +1. **QUERY** **queryId:** Specifies the ID of the query to terminate. + +- To obtain the `queryId`, use the `SHOW QUERIES` command. + +2. **ALL QUERIES:** Terminates all currently running queries. + +**Example**: + +Terminate a specific query: + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; +``` + +Terminate all queries: + +```SQL +IoTDB> KILL ALL QUERIES; +``` \ No newline at end of file diff --git a/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands_timecho.md b/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands_timecho.md new file mode 100644 index 000000000..78ab71b7f --- /dev/null +++ b/src/UserGuide/Master/Tree/User-Manual/Maintenance-commands_timecho.md @@ -0,0 +1,548 @@ + +# Maintenance Statement + +## 1. Status Checking + +### 1.1 Viewing the Connected Model + +**Description**: Returns the current SQL dialect mode (`Tree` or `Table`). + +**Syntax**: + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT; +``` + +**Result:** + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 1.2 Viewing the Cluster Version + +**Description**: Returns the current cluster version. + +**Syntax**: + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW VERSION; +``` + +**Result**: + +```Plain ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.3 Viewing Cluster Key Parameters + +**Description**: Returns key parameters of the current cluster. + +**Syntax**: + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +Key Parameters: + +1. **ClusterName**: The name of the current cluster. +2. **DataReplicationFactor**: Number of data replicas per DataRegion. +3. **SchemaReplicationFactor**: Number of schema replicas per SchemaRegion. +4. **DataRegionConsensusProtocolClass**: Consensus protocol class for DataRegions. +5. **SchemaRegionConsensusProtocolClass**: Consensus protocol class for SchemaRegions. +6. 
**ConfigNodeConsensusProtocolClass**: Consensus protocol class for ConfigNodes.
+7. **TimePartitionOrigin**: The starting timestamp of database time partitions.
+8. **TimePartitionInterval**: The interval of database time partitions (in milliseconds).
+9. **ReadConsistencyLevel**: The consistency level for read operations.
+10. **SchemaRegionPerDataNode**: Number of SchemaRegions per DataNode.
+11. **DataRegionPerDataNode**: Number of DataRegions per DataNode.
+12. **SeriesSlotNum**: Number of SeriesSlots per DataRegion.
+13. **SeriesSlotExecutorClass**: Implementation class for SeriesSlots.
+14. **DiskSpaceWarningThreshold**: Disk space warning threshold (in percentage).
+15. **TimestampPrecision**: Timestamp precision.
+
+**Example**:
+
+```SQL
+IoTDB> SHOW VARIABLES;
+```
+
+**Result**:
+
+```Plain
++----------------------------------+-----------------------------------------------------------------+
+|                          Variable|                                                            Value|
++----------------------------------+-----------------------------------------------------------------+
+|                       ClusterName|                                                   defaultCluster|
+|             DataReplicationFactor|                                                                1|
+|           SchemaReplicationFactor|                                                                1|
+|  DataRegionConsensusProtocolClass|                      org.apache.iotdb.consensus.iot.IoTConsensus|
+|SchemaRegionConsensusProtocolClass|                  org.apache.iotdb.consensus.ratis.RatisConsensus|
+|  ConfigNodeConsensusProtocolClass|                  org.apache.iotdb.consensus.ratis.RatisConsensus|
+|               TimePartitionOrigin|                                                                0|
+|             TimePartitionInterval|                                                        604800000|
+|              ReadConsistencyLevel|                                                           strong|
+|           SchemaRegionPerDataNode|                                                                1|
+|             DataRegionPerDataNode|                                                                0|
+|                     SeriesSlotNum|                                                             1000|
+|           SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor|
+|         DiskSpaceWarningThreshold|                                                             0.05|
+|                TimestampPrecision|                                                               ms|
++----------------------------------+-----------------------------------------------------------------+
+```
+
+### 1.4 Viewing the Current Timestamp of the Database
+
+**Description**: Returns the current timestamp of the database.
+
+**Syntax**:
+
+```SQL
+showCurrentTimestampStatement
+    : SHOW CURRENT_TIMESTAMP
+    ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW CURRENT_TIMESTAMP;
+```
+
+**Result**:
+
+```Plain
++-----------------------------+
+|             CurrentTimestamp|
++-----------------------------+
+|2025-02-17T11:11:52.987+08:00|
++-----------------------------+
+```
+
+### 1.5 Viewing Executing Queries
+
+**Description**: Displays information about all currently executing queries.
+
+**Syntax**:
+
+```SQL
+showQueriesStatement
+    : SHOW (QUERIES | QUERY PROCESSLIST)
+        (WHERE where=booleanExpression)?
+        (ORDER BY sortItem (',' sortItem)*)?
+        limitOffsetClause
+    ;
+```
+
+**Parameters**:
+
+1. **WHERE Clause**: Filters the result set based on specified conditions.
+2. **ORDER BY Clause**: Sorts the result set based on specified columns.
+3. **limitOffsetClause**: Limits the number of rows returned.
+    1. Format: `LIMIT , `.
+
+**Columns in QUERIES Table**:
+
+- **time**: Timestamp when the query started.
+- **queryid**: Unique ID of the query.
+- **datanodeid**: ID of the DataNode executing the query.
+- **elapsedtime**: Time elapsed since the query started (in seconds).
+- **statement**: The SQL statement being executed.
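+
+The `WHERE`, `ORDER BY`, and `LIMIT` clauses in the grammar above can be combined. As an illustrative sketch (the actual output depends on the queries running at that moment), the following lists the two longest-running queries first:
+
+```SQL
+IoTDB> SHOW QUERIES ORDER BY elapsedtime DESC LIMIT 2
+```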
+
+**Example**:
+
+```SQL
+IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003
+```
+
+**Result**:
+
+```SQL
++-----------------------------+-----------------------+----------+-----------+--------------------------------------+
+|                         Time|                QueryId|DataNodeId|ElapsedTime|                             Statement|
++-----------------------------+-----------------------+----------+-----------+--------------------------------------+
+|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1|         1|      0.006|SHOW QUERIES WHERE elapsedtime > 0.003|
++-----------------------------+-----------------------+----------+-----------+--------------------------------------+
+```
+
+### 1.6 Viewing Region Information
+
+**Description**: Displays information about the regions of the current cluster.
+
+**Syntax**:
+
+```SQL
+showRegionsStatement
+    : SHOW REGIONS
+    ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW REGIONS
+```
+
+**Result**:
+
+```SQL
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|RegionId|        Type| Status|     Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress|  Role|             CreateTime|TsFileSize|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|       9|SchemaRegion|Running|root.__system|           21|          0|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.555|          |
+|      10|  DataRegion|Running|root.__system|           21|         21|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.556|   8.27 KB|
+|      65|SchemaRegion|Running|      root.ln|            1|          0|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-25T14:46:50.113|          |
+|      66|  DataRegion|Running|      root.ln|            1|          1|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-25T14:46:50.425|     524 B|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+```
+
+### 1.7 Viewing Available Nodes
+
+**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state.
+
+> This feature is supported starting from v2.0.8.
+
+**Syntax**:
+
+```SQL
+showAvailableUrlsStatement
+    : SHOW AVAILABLE URLS
+    ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW AVAILABLE URLS
+```
+
+**Result**:
+
+```SQL
++----------+-------+
+|RpcAddress|RpcPort|
++----------+-------+
+|   0.0.0.0|   6667|
++----------+-------+
+```
+
+
+## 2. Status Setting
+
+### 2.1 Setting the Connected Model
+
+**Description**: Sets the current SQL dialect mode to `Tree` or `Table`. This statement can be executed in both tree and table modes.
+
+**Syntax**:
+
+```SQL
+SET SQL_DIALECT = (TABLE | TREE);
+```
+
+**Example**:
+
+```SQL
+IoTDB> SET SQL_DIALECT=TREE;
+IoTDB> SHOW CURRENT_SQL_DIALECT;
+```
+
+**Result**:
+
+```SQL
++-----------------+
+|CurrentSqlDialect|
++-----------------+
+|             TREE|
++-----------------+
+```
+
+### 2.2 Updating Configuration Items
+
+**Description**: Updates configuration items. Changes take effect immediately without restarting if the items support hot modification.
+
+**Syntax**:
+
+```SQL
+setConfigurationStatement
+    : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)?
+    ;
+
+propertyAssignments
+    : property (',' property)*
+    ;
+
+property
+    : identifier EQ propertyValue
+    ;
+
+propertyValue
+    : DEFAULT
+    | expression
+    ;
+```
+
+**Parameters**:
+
+1. **propertyAssignments**: A list of properties to update.
+    1. Format: `property (',' property)*`.
+    2. Values:
+        - `DEFAULT`: Resets the configuration to its default value.
+        - `expression`: A specific value (must be a string).
+2. **ON INTEGER_VALUE** **(Optional):** Specifies the node ID to update.
+    1. If not specified or set to a negative value, updates all ConfigNodes and DataNodes.
+
+**Example**:
+
+```SQL
+IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1;
+```
+
+### 2.3 Loading Manually Modified Configuration Files
+
+**Description**: Loads manually modified configuration files and hot-loads the changes. Configuration items that support hot modification take effect immediately.
+
+**Syntax**:
+
+```SQL
+loadConfigurationStatement
+    : LOAD CONFIGURATION localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Parameters**:
+
+1. **localOrClusterMode** **(Optional):**
+    1. Specifies the scope of configuration loading.
+    2. Default: `CLUSTER`.
+    3. Values:
+        - `LOCAL`: Loads configuration only on the DataNode directly connected to the client.
+        - `CLUSTER`: Loads configuration on all DataNodes in the cluster.
+
+**Example**:
+
+```SQL
+IoTDB> LOAD CONFIGURATION ON LOCAL;
+```
+
+### 2.4 Setting the System Status
+
+**Description**: Sets the system status to either `READONLY` or `RUNNING`.
+
+**Syntax**:
+
+```SQL
+setSystemStatusStatement
+    : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Parameters**:
+
+1. **RUNNING |** **READONLY**:
+    1. **RUNNING**: Sets the system to running mode, allowing both read and write operations.
+    2. **READONLY**: Sets the system to read-only mode, allowing only read operations and prohibiting writes.
+2. **localOrClusterMode** **(Optional):**
+    1. **LOCAL**: Applies the status change only to the DataNode directly connected to the client.
+    2. **CLUSTER**: Applies the status change to all DataNodes in the cluster.
+    3. **Default**: `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> SET SYSTEM TO READONLY ON CLUSTER;
+```
+
+## 3. Data Management
+
+### 3.1 Flushing Data from Memory to Disk
+
+**Description**: Flushes data from the memory table to disk.
+
+**Syntax**:
+
+```SQL
+flushStatement
+    : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode?
+    ;
+
+booleanValue
+    : TRUE | FALSE
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Parameters**:
+
+1. **identifier** **(Optional):**
+    1. Specifies the name of the path to flush.
+    2. If not specified, all paths are flushed.
+    3. **Multiple Paths**: Multiple path names can be specified, separated by commas (e.g., `FLUSH root.ln, root.lnm.**`).
+2. **booleanValue** **(Optional)**:
+    1. Specifies the type of data to flush.
+    2. **TRUE**: Flushes only the sequential memory table.
+    3. **FALSE**: Flushes only the unsequential memory table.
+    4. **Default**: Flushes both sequential and unsequential memory tables.
+3. **localOrClusterMode** **(Optional)**:
+    1. **ON LOCAL**: Flushes only the memory tables on the DataNode directly connected to the client.
+    2. **ON CLUSTER**: Flushes memory tables on all DataNodes in the cluster.
+    3. **Default:** `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> FLUSH root.ln TRUE ON LOCAL;
+```
+
+## 4. Data Repair
+
+### 4.1 Starting Background Scan and Repair of TsFiles
+
+**Description**: Starts a background task to scan and repair TsFiles, fixing issues such as timestamp disorder within data files.
+
+**Syntax**:
+
+```SQL
+startRepairDataStatement
+    : START REPAIR DATA localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Parameters**:
+
+1. **localOrClusterMode(Optional)**:
+    1. **ON LOCAL**: Executes the repair task only on the DataNode directly connected to the client.
+    2. **ON CLUSTER**: Executes the repair task on all DataNodes in the cluster.
+    3. **Default:** `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> START REPAIR DATA ON CLUSTER;
+```
+
+### 4.2 Pausing Background TsFile Repair Task
+
+**Description**: Pauses the background repair task. The paused task can be resumed by executing the `START REPAIR DATA` command again.
+
+**Syntax**:
+
+```SQL
+stopRepairDataStatement
+    : STOP REPAIR DATA localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Parameters**:
+
+1. **localOrClusterMode** **(Optional):**
+    1. **ON LOCAL**: Executes the pause command only on the DataNode directly connected to the client.
+    2. **ON CLUSTER**: Executes the pause command on all DataNodes in the cluster.
+    3. **Default:** `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> STOP REPAIR DATA ON CLUSTER;
+```
+
+## 5. Query Termination
+
+### 5.1 Terminating Queries
+
+**Description**: Terminates one or more running queries.
+
+**Syntax**:
+
+```SQL
+killQueryStatement
+    : KILL (QUERY queryId=string | ALL QUERIES)
+    ;
+```
+
+**Parameters**:
+
+1. **QUERY** **queryId:** Specifies the ID of the query to terminate.
+
+- To obtain the `queryId`, use the `SHOW QUERIES` command.
+
+2. **ALL QUERIES:** Terminates all currently running queries.
+
+**Example**:
+
+Terminate a specific query:
+
+```SQL
+IoTDB> KILL QUERY 20250108_101015_00000_1;
+```
+
+Terminate all queries:
+
+```SQL
+IoTDB> KILL ALL QUERIES;
+```
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/API/Programming-Java-Native-API_timecho.md b/src/UserGuide/latest-Table/API/Programming-Java-Native-API_timecho.md
index eff3f0974..89805d8fd 100644
--- a/src/UserGuide/latest-Table/API/Programming-Java-Native-API_timecho.md
+++ b/src/UserGuide/latest-Table/API/Programming-Java-Native-API_timecho.md
@@ -65,7 +65,7 @@ The `ITableSession` interface defines basic operations for interacting with IoTD
 
 **Description of Object Data Type:**
 
-Since V2.0.8-beta, the `iTableSession.insert(Tablet tablet)` interface supports splitting a single Object-class file into multiple segments and writing them sequentially in order. When the column data type in the Tablet data structure is **`TSDataType.Object`**, you need to use the following method to populate the Tablet:
+Since V2.0.8, the `ITableSession.insert(Tablet tablet)` interface supports splitting a single Object-type file into multiple segments and writing them sequentially. When the column data type in the Tablet data structure is **`TSDataType.Object`**, you need to use the following method to populate the Tablet:
 
 ```Java
 /*
diff --git a/src/UserGuide/latest-Table/Background-knowledge/Data-Type.md b/src/UserGuide/latest-Table/Background-knowledge/Data-Type.md
index 3ebd7d16f..49bc408ab 100644
--- a/src/UserGuide/latest-Table/Background-knowledge/Data-Type.md
+++ b/src/UserGuide/latest-Table/Background-knowledge/Data-Type.md
@@ -1,3 +1,6 @@
+---
+redirectTo: Data-Type_apache.html
+---
-
-# Data Type
-
-## 1. 
Basic Data Types - -IoTDB supports the following ten data types: - -- **BOOLEAN** (Boolean value) -- **INT32** (32-bit integer) -- **INT64** (64-bit integer) -- **FLOAT** (Single-precision floating-point number) -- **DOUBLE** (Double-precision floating-point number) -- **TEXT** (Text data, suitable for long strings) -- **STRING** (String data with additional statistical information for optimized queries) -- **BLOB** (Large binary object) -- **OBJECT** (Large Binary Object) - > Supported since V2.0.8-beta -- **TIMESTAMP** (Timestamp, representing precise moments in time) -- **DATE** (Date, storing only calendar date information) - -The difference between **STRING** and **TEXT**: - -- **STRING** stores text data and includes additional statistical information to optimize value-filtering queries. -- **TEXT** is suitable for storing long text strings without additional query optimization. - -The differences between **OBJECT** and **BLOB** types are as follows: - -| | **OBJECT** | **BLOB** | -|----------------------|-------------------------------------------------------------------------------------------------------------------------|--------------------------------------| -| **Write Amplification** (Lower is better) | Low (Write amplification factor is always 1) | High (Write amplification factor = 2 + number of merges) | -| **Space Amplification** (Lower is better) | Low (Merge & release on write) | High (Merge on read and release on compact) | -| **Query Results** | When querying an OBJECT column by default, returns metadata like: `(Object) XX.XX KB`. Actual OBJECT data storage path: `${data_dir}/object_data`. Use `READ_OBJECT` function to retrieve raw content | Directly returns raw binary content | - -### 1.1 Floating-Point Precision Configuration - -For **FLOAT** and **DOUBLE** series using **RLE** or **TS_2DIFF** encoding, the number of decimal places can be set via the **MAX_POINT_NUMBER** attribute during series creation. - -For example: - -```SQL -CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; -``` - -If not specified, the system will use the configuration in the `iotdb-system.properties` file under the `float_precision` item (default is 2 decimal places). - -### 1.2 Data Type Compatibility - -If the written data type does not match the registered data type of a series: - -- **Incompatible types** → The system will issue an error. -- **Compatible types** → The system will automatically convert the written data type to match the registered type. - -The compatibility of data types is shown in the table below: - -| Registered Data Type | Compatible Write Data Types | -|:---------------------|:---------------------------------------| -| BOOLEAN | BOOLEAN | -| INT32 | INT32 | -| INT64 | INT32, INT64, TIMESTAMP | -| FLOAT | INT32, FLOAT | -| DOUBLE | INT32, INT64, FLOAT, DOUBLE, TIMESTAMP | -| TEXT | TEXT, STRING | -| STRING | TEXT, STRING | -| BLOB | TEXT, STRING, BLOB | -| OBJECT | OBJECT | -| TIMESTAMP | INT32, INT64, TIMESTAMP | -| DATE | DATE | - -## 2. Timestamp Types - -A timestamp represents the moment when data is recorded. IoTDB supports two types: - -- **Absolute timestamps**: Directly specify a point in time. -- **Relative timestamps**: Define time offsets from a reference point (e.g., `now()`). - -### 2.1 Absolute Timestamp - -IoTDB supports timestamps in two formats: - -1. **LONG**: Milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). -2. **DATETIME**: Human-readable date-time strings. 
(including **DATETIME-INPUT** and **DATETIME-DISPLAY** subcategories). - -When entering a timestamp, users can use either a LONG value or a DATETIME string. Supported input formats include: - -
- -**DATETIME-INPUT Type Supports Format** - - -| format | -| :--------------------------- | -| yyyy-MM-dd HH:mm:ss | -| yyyy/MM/dd HH:mm:ss | -| yyyy.MM.dd HH:mm:ss | -| yyyy-MM-dd HH:mm:ssZZ | -| yyyy/MM/dd HH:mm:ssZZ | -| yyyy.MM.dd HH:mm:ssZZ | -| yyyy/MM/dd HH:mm:ss.SSS | -| yyyy-MM-dd HH:mm:ss.SSS | -| yyyy.MM.dd HH:mm:ss.SSS | -| yyyy-MM-dd HH:mm:ss.SSSZZ | -| yyyy/MM/dd HH:mm:ss.SSSZZ | -| yyyy.MM.dd HH:mm:ss.SSSZZ | -| ISO8601 standard time format | - - -
- -> **Note:** `ZZ` represents a time zone offset (e.g., `+0800` for Beijing Time, `-0500` for Eastern Standard Time). - -IoTDB supports timestamp display in **LONG** format or **DATETIME-DISPLAY** format, allowing users to customize time output. - -
- -**Syntax for Custom Time Formats in DATETIME-DISPLAY** - - -| Symbol | Meaning | Presentation | Examples | -| :----: | :-------------------------: | :----------: | :--------------------------------: | -| G | era | era | era | -| C | century of era (>=0) | number | 20 | -| Y | year of era (>=0) | year | 1996 | -| | | | | -| x | weekyear | year | 1996 | -| w | week of weekyear | number | 27 | -| e | day of week | number | 2 | -| E | day of week | text | Tuesday; Tue | -| | | | | -| y | year | year | 1996 | -| D | day of year | number | 189 | -| M | month of year | month | July; Jul; 07 | -| d | day of month | number | 10 | -| | | | | -| a | halfday of day | text | PM | -| K | hour of halfday (0~11) | number | 0 | -| h | clockhour of halfday (1~12) | number | 12 | -| | | | | -| H | hour of day (0~23) | number | 0 | -| k | clockhour of day (1~24) | number | 24 | -| m | minute of hour | number | 30 | -| s | second of minute | number | 55 | -| S | fraction of second | millis | 978 | -| | | | | -| z | time zone | text | Pacific Standard Time; PST | -| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | -| | | | | -| ' | escape for text | delimiter | | -| '' | single quote | literal | ' | - -
- -### 2.2 Relative Timestamp - -Relative timestamps allow specifying time offsets from **now()** or a **DATETIME** reference. - -The formal definition is: - -```Plain -Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ -RelativeTime = (now() | DATETIME) ((+|-) Duration)+ -``` - -
- - **The syntax of the duration unit** - - - | Symbol | Meaning | Presentation | Examples | - | :----: | :---------: | :----------------------: | :------: | - | y | year | 1y=365 days | 1y | - | mo | month | 1mo=30 days | 1mo | - | w | week | 1w=7 days | 1w | - | d | day | 1d=1 day | 1d | - | | | | | - | h | hour | 1h=3600 seconds | 1h | - | m | minute | 1m=60 seconds | 1m | - | s | second | 1s=1 second | 1s | - | | | | | - | ms | millisecond | 1ms=1000_000 nanoseconds | 1ms | - | us | microsecond | 1us=1000 nanoseconds | 1us | - | ns | nanosecond | 1ns=1 nanosecond | 1ns | - -

 - -**Examples:** - -```Plain -now() - 1d2h // A time 1 day and 2 hours earlier than the server time -now() - 1w // A time 1 week earlier than the server time -``` - -> **Note:** There must be spaces on both sides of `+` and `-` operators. \ No newline at end of file diff --git a/src/UserGuide/latest-Table/Background-knowledge/Data-Type_apache.md b/src/UserGuide/latest-Table/Background-knowledge/Data-Type_apache.md new file mode 100644 index 000000000..3ebd7d16f --- /dev/null +++ b/src/UserGuide/latest-Table/Background-knowledge/Data-Type_apache.md @@ -0,0 +1,212 @@ + + +# Data Type + +## 1. Basic Data Types + +IoTDB supports the following eleven data types: + +- **BOOLEAN** (Boolean value) +- **INT32** (32-bit integer) +- **INT64** (64-bit integer) +- **FLOAT** (Single-precision floating-point number) +- **DOUBLE** (Double-precision floating-point number) +- **TEXT** (Text data, suitable for long strings) +- **STRING** (String data with additional statistical information for optimized queries) +- **BLOB** (Large binary object) +- **OBJECT** (Large object, stored separately from TsFiles; see the comparison with **BLOB** below) + > Supported since V2.0.8-beta +- **TIMESTAMP** (Timestamp, representing precise moments in time) +- **DATE** (Date, storing only calendar date information) + +The difference between **STRING** and **TEXT**: + +- **STRING** stores text data and includes additional statistical information to optimize value-filtering queries. +- **TEXT** is suitable for storing long text strings without additional query optimization. + +The differences between **OBJECT** and **BLOB** types are as follows: + +| | **OBJECT** | **BLOB** | +|----------------------|-------------------------------------------------------------------------------------------------------------------------|--------------------------------------| +| **Write Amplification** (Lower is better) | Low (Write amplification factor is always 1) | High (Write amplification factor = 2 + number of merges) | +| **Space Amplification** (Lower is better) | Low (Merge & release on write) | High (Merge on read and release on compact) | +| **Query Results** | When querying an OBJECT column by default, returns metadata like: `(Object) XX.XX KB`. Actual OBJECT data storage path: `${data_dir}/object_data`. Use `READ_OBJECT` function to retrieve raw content | Directly returns raw binary content | + +### 1.1 Floating-Point Precision Configuration + +For **FLOAT** and **DOUBLE** series using **RLE** or **TS_2DIFF** encoding, the number of decimal places can be set via the **MAX_POINT_NUMBER** attribute during series creation. + +For example: + +```SQL +CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; +``` + +If not specified, the system will use the configuration in the `iotdb-system.properties` file under the `float_precision` item (default is 2 decimal places). + +### 1.2 Data Type Compatibility + +If the written data type does not match the registered data type of a series: + +- **Incompatible types** → The system will issue an error. +- **Compatible types** → The system will automatically convert the written data type to match the registered type. 
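+ +For example, under these rules an INT32 value written to an INT64 column is converted automatically, while a BOOLEAN value is rejected. A minimal illustrative sketch (the table name `table1` and its INT64 field column `status` are hypothetical): + +```SQL +-- Hypothetical schema: table1 has a field column `status` registered as INT64. +-- Compatible: the INT32 literal 42 is automatically converted to INT64. +INSERT INTO table1(time, status) VALUES (1704067200000, 42); +-- Incompatible: writing a BOOLEAN into an INT64 column results in an error. +INSERT INTO table1(time, status) VALUES (1704067201000, true); +```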
+ +The compatibility of data types is shown in the table below: + +| Registered Data Type | Compatible Write Data Types | +|:---------------------|:---------------------------------------| +| BOOLEAN | BOOLEAN | +| INT32 | INT32 | +| INT64 | INT32, INT64, TIMESTAMP | +| FLOAT | INT32, FLOAT | +| DOUBLE | INT32, INT64, FLOAT, DOUBLE, TIMESTAMP | +| TEXT | TEXT, STRING | +| STRING | TEXT, STRING | +| BLOB | TEXT, STRING, BLOB | +| OBJECT | OBJECT | +| TIMESTAMP | INT32, INT64, TIMESTAMP | +| DATE | DATE | + +## 2. Timestamp Types + +A timestamp represents the moment when data is recorded. IoTDB supports two types: + +- **Absolute timestamps**: Directly specify a point in time. +- **Relative timestamps**: Define time offsets from a reference point (e.g., `now()`). + +### 2.1 Absolute Timestamp + +IoTDB supports timestamps in two formats: + +1. **LONG**: Milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). +2. **DATETIME**: Human-readable date-time strings, including the **DATETIME-INPUT** and **DATETIME-DISPLAY** subcategories. + +When entering a timestamp, users can use either a LONG value or a DATETIME string. Supported input formats include: + +
+ +**DATETIME-INPUT Type Supports Format** + + +| format | +| :--------------------------- | +| yyyy-MM-dd HH:mm:ss | +| yyyy/MM/dd HH:mm:ss | +| yyyy.MM.dd HH:mm:ss | +| yyyy-MM-dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ssZZ | +| yyyy.MM.dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSS | +| yyyy.MM.dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSSZZ | +| yyyy/MM/dd HH:mm:ss.SSSZZ | +| yyyy.MM.dd HH:mm:ss.SSSZZ | +| ISO8601 standard time format | + + +
+ +> **Note:** `ZZ` represents a time zone offset (e.g., `+0800` for Beijing Time, `-0500` for Eastern Standard Time). + +IoTDB supports timestamp display in **LONG** format or **DATETIME-DISPLAY** format, allowing users to customize time output. + +
+ +**Syntax for Custom Time Formats in DATETIME-DISPLAY** + + +| Symbol | Meaning | Presentation | Examples | +| :----: | :-------------------------: | :----------: | :--------------------------------: | +| G | era | era | era | +| C | century of era (>=0) | number | 20 | +| Y | year of era (>=0) | year | 1996 | +| | | | | +| x | weekyear | year | 1996 | +| w | week of weekyear | number | 27 | +| e | day of week | number | 2 | +| E | day of week | text | Tuesday; Tue | +| | | | | +| y | year | year | 1996 | +| D | day of year | number | 189 | +| M | month of year | month | July; Jul; 07 | +| d | day of month | number | 10 | +| | | | | +| a | halfday of day | text | PM | +| K | hour of halfday (0~11) | number | 0 | +| h | clockhour of halfday (1~12) | number | 12 | +| | | | | +| H | hour of day (0~23) | number | 0 | +| k | clockhour of day (1~24) | number | 24 | +| m | minute of hour | number | 30 | +| s | second of minute | number | 55 | +| S | fraction of second | millis | 978 | +| | | | | +| z | time zone | text | Pacific Standard Time; PST | +| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | +| | | | | +| ' | escape for text | delimiter | | +| '' | single quote | literal | ' | + +
+ +### 2.2 Relative Timestamp + +Relative timestamps allow specifying time offsets from **now()** or a **DATETIME** reference. + +The formal definition is: + +```Plain +Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ +RelativeTime = (now() | DATETIME) ((+|-) Duration)+ +``` + +

 + + **The syntax of the duration unit** + + + | Symbol | Meaning | Presentation | Examples | + | :----: | :---------: | :----------------------: | :------: | + | y | year | 1y=365 days | 1y | + | mo | month | 1mo=30 days | 1mo | + | w | week | 1w=7 days | 1w | + | d | day | 1d=1 day | 1d | + | | | | | + | h | hour | 1h=3600 seconds | 1h | + | m | minute | 1m=60 seconds | 1m | + | s | second | 1s=1 second | 1s | + | | | | | + | ms | millisecond | 1ms=1,000,000 nanoseconds | 1ms | + | us | microsecond | 1us=1000 nanoseconds | 1us | + | ns | nanosecond | 1ns=1 nanosecond | 1ns | +

 + +**Examples:** + +```Plain +now() - 1d2h // A time 1 day and 2 hours earlier than the server time +now() - 1w // A time 1 week earlier than the server time +``` + +> **Note:** There must be spaces on both sides of `+` and `-` operators. \ No newline at end of file diff --git a/src/UserGuide/latest-Table/Background-knowledge/Data-Type_timecho.md b/src/UserGuide/latest-Table/Background-knowledge/Data-Type_timecho.md new file mode 100644 index 000000000..e455155e6 --- /dev/null +++ b/src/UserGuide/latest-Table/Background-knowledge/Data-Type_timecho.md @@ -0,0 +1,212 @@ + + +# Data Type + +## 1. Basic Data Types + +IoTDB supports the following eleven data types: + +- **BOOLEAN** (Boolean value) +- **INT32** (32-bit integer) +- **INT64** (64-bit integer) +- **FLOAT** (Single-precision floating-point number) +- **DOUBLE** (Double-precision floating-point number) +- **TEXT** (Text data, suitable for long strings) +- **STRING** (String data with additional statistical information for optimized queries) +- **BLOB** (Large binary object) +- **OBJECT** (Large object, stored separately from TsFiles; see the comparison with **BLOB** below) + > Supported since V2.0.8 +- **TIMESTAMP** (Timestamp, representing precise moments in time) +- **DATE** (Date, storing only calendar date information) + +The difference between **STRING** and **TEXT**: + +- **STRING** stores text data and includes additional statistical information to optimize value-filtering queries. +- **TEXT** is suitable for storing long text strings without additional query optimization. + +The differences between **OBJECT** and **BLOB** types are as follows: + +| | **OBJECT** | **BLOB** | +|----------------------|-------------------------------------------------------------------------------------------------------------------------|--------------------------------------| +| **Write Amplification** (Lower is better) | Low (Write amplification factor is always 1) | High (Write amplification factor = 2 + number of merges) | +| **Space Amplification** (Lower is better) | Low (Merge & release on write) | High (Merge on read and release on compact) | +| **Query Results** | When querying an OBJECT column by default, returns metadata like: `(Object) XX.XX KB`. Actual OBJECT data storage path: `${data_dir}/object_data`. Use `READ_OBJECT` function to retrieve raw content | Directly returns raw binary content | + +### 1.1 Floating-Point Precision Configuration + +For **FLOAT** and **DOUBLE** series using **RLE** or **TS_2DIFF** encoding, the number of decimal places can be set via the **MAX_POINT_NUMBER** attribute during series creation. + +For example: + +```SQL +CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; +``` + +If not specified, the system will use the configuration in the `iotdb-system.properties` file under the `float_precision` item (default is 2 decimal places). + +### 1.2 Data Type Compatibility + +If the written data type does not match the registered data type of a series: + +- **Incompatible types** → The system will issue an error. +- **Compatible types** → The system will automatically convert the written data type to match the registered type. 
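+ +For example, under these rules an INT32 value written to an INT64 column is converted automatically, while a BOOLEAN value is rejected. A minimal illustrative sketch (the table name `table1` and its INT64 field column `status` are hypothetical): + +```SQL +-- Hypothetical schema: table1 has a field column `status` registered as INT64. +-- Compatible: the INT32 literal 42 is automatically converted to INT64. +INSERT INTO table1(time, status) VALUES (1704067200000, 42); +-- Incompatible: writing a BOOLEAN into an INT64 column results in an error. +INSERT INTO table1(time, status) VALUES (1704067201000, true); +```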
+ +The compatibility of data types is shown in the table below: + +| Registered Data Type | Compatible Write Data Types | +|:---------------------|:---------------------------------------| +| BOOLEAN | BOOLEAN | +| INT32 | INT32 | +| INT64 | INT32, INT64, TIMESTAMP | +| FLOAT | INT32, FLOAT | +| DOUBLE | INT32, INT64, FLOAT, DOUBLE, TIMESTAMP | +| TEXT | TEXT, STRING | +| STRING | TEXT, STRING | +| BLOB | TEXT, STRING, BLOB | +| OBJECT | OBJECT | +| TIMESTAMP | INT32, INT64, TIMESTAMP | +| DATE | DATE | + +## 2. Timestamp Types + +A timestamp represents the moment when data is recorded. IoTDB supports two types: + +- **Absolute timestamps**: Directly specify a point in time. +- **Relative timestamps**: Define time offsets from a reference point (e.g., `now()`). + +### 2.1 Absolute Timestamp + +IoTDB supports timestamps in two formats: + +1. **LONG**: Milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). +2. **DATETIME**: Human-readable date-time strings, including the **DATETIME-INPUT** and **DATETIME-DISPLAY** subcategories. + +When entering a timestamp, users can use either a LONG value or a DATETIME string. Supported input formats include: + +
+ +**DATETIME-INPUT Type Supports Format** + + +| format | +| :--------------------------- | +| yyyy-MM-dd HH:mm:ss | +| yyyy/MM/dd HH:mm:ss | +| yyyy.MM.dd HH:mm:ss | +| yyyy-MM-dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ssZZ | +| yyyy.MM.dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSS | +| yyyy.MM.dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSSZZ | +| yyyy/MM/dd HH:mm:ss.SSSZZ | +| yyyy.MM.dd HH:mm:ss.SSSZZ | +| ISO8601 standard time format | + + +
+ +> **Note:** `ZZ` represents a time zone offset (e.g., `+0800` for Beijing Time, `-0500` for Eastern Standard Time). + +IoTDB supports timestamp display in **LONG** format or **DATETIME-DISPLAY** format, allowing users to customize time output. + +
+ +**Syntax for Custom Time Formats in DATETIME-DISPLAY** + + +| Symbol | Meaning | Presentation | Examples | +| :----: | :-------------------------: | :----------: | :--------------------------------: | +| G | era | era | era | +| C | century of era (>=0) | number | 20 | +| Y | year of era (>=0) | year | 1996 | +| | | | | +| x | weekyear | year | 1996 | +| w | week of weekyear | number | 27 | +| e | day of week | number | 2 | +| E | day of week | text | Tuesday; Tue | +| | | | | +| y | year | year | 1996 | +| D | day of year | number | 189 | +| M | month of year | month | July; Jul; 07 | +| d | day of month | number | 10 | +| | | | | +| a | halfday of day | text | PM | +| K | hour of halfday (0~11) | number | 0 | +| h | clockhour of halfday (1~12) | number | 12 | +| | | | | +| H | hour of day (0~23) | number | 0 | +| k | clockhour of day (1~24) | number | 24 | +| m | minute of hour | number | 30 | +| s | second of minute | number | 55 | +| S | fraction of second | millis | 978 | +| | | | | +| z | time zone | text | Pacific Standard Time; PST | +| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | +| | | | | +| ' | escape for text | delimiter | | +| '' | single quote | literal | ' | + +
+ +### 2.2 Relative Timestamp + +Relative timestamps allow specifying time offsets from **now()** or a **DATETIME** reference. + +The formal definition is: + +```Plain +Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ +RelativeTime = (now() | DATETIME) ((+|-) Duration)+ +``` + +

 + + **The syntax of the duration unit** + + + | Symbol | Meaning | Presentation | Examples | + | :----: | :---------: | :----------------------: | :------: | + | y | year | 1y=365 days | 1y | + | mo | month | 1mo=30 days | 1mo | + | w | week | 1w=7 days | 1w | + | d | day | 1d=1 day | 1d | + | | | | | + | h | hour | 1h=3600 seconds | 1h | + | m | minute | 1m=60 seconds | 1m | + | s | second | 1s=1 second | 1s | + | | | | | + | ms | millisecond | 1ms=1,000,000 nanoseconds | 1ms | + | us | microsecond | 1us=1000 nanoseconds | 1us | + | ns | nanosecond | 1ns=1 nanosecond | 1ns | +
+ +**Examples:** + +```Plain +now() - 1d2h // A time 1 day and 2 hours earlier than the server time +now() - 1w // A time 1 week earlier than the server time +``` + +> **Note:** There must be spaces on both sides of `+` and `-` operators. \ No newline at end of file diff --git a/src/UserGuide/latest-Table/Basic-Concept/Query-Data_apache.md b/src/UserGuide/latest-Table/Basic-Concept/Query-Data_apache.md index b3018d2f5..3f45018e6 100644 --- a/src/UserGuide/latest-Table/Basic-Concept/Query-Data_apache.md +++ b/src/UserGuide/latest-Table/Basic-Concept/Query-Data_apache.md @@ -37,7 +37,7 @@ SELECT ⟨select_list⟩ The IoTDB table model query syntax supports the following clauses: -- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause.md) +- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause_apache.md) - **FROM Clause**: Indicates the data source for the query, which can be a single table, multiple tables joined using the `JOIN` clause, or a subquery. Details: [FROM & JOIN Clause](../SQL-Manual/From-Join-Clause.md) - **WHERE Clause**: Filters rows based on specific conditions. Logically executed immediately after the `FROM` clause. Details: [WHERE Clause](../SQL-Manual/Where-Clause.md) - **GROUP BY Clause**: Used for aggregating data, specifying the columns for grouping. Details: [GROUP BY Clause](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/UserGuide/latest-Table/Basic-Concept/Query-Data_timecho.md b/src/UserGuide/latest-Table/Basic-Concept/Query-Data_timecho.md index 0a0fdb1f3..47976fb45 100644 --- a/src/UserGuide/latest-Table/Basic-Concept/Query-Data_timecho.md +++ b/src/UserGuide/latest-Table/Basic-Concept/Query-Data_timecho.md @@ -38,7 +38,7 @@ SELECT ⟨select_list⟩ The IoTDB table model query syntax supports the following clauses: -- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause.md) +- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause_timecho.md) - **FROM Clause**: Indicates the data source for the query, which can be a single table, multiple tables joined using the `JOIN` clause, or a subquery. Details: [FROM & JOIN Clause](../SQL-Manual/From-Join-Clause.md) - **WHERE Clause**: Filters rows based on specific conditions. Logically executed immediately after the `FROM` clause. Details: [WHERE Clause](../SQL-Manual/Where-Clause.md) - **GROUP BY Clause**: Used for aggregating data, specifying the columns for grouping. Details: [GROUP BY Clause](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md b/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md index d869ad08f..ef146a655 100644 --- a/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md +++ b/src/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md @@ -306,7 +306,7 @@ It costs 0.014s To avoid oversized Object write requests, values of **Object** type can be split into segments and written sequentially. In SQL, the `to_object(isEOF, offset, content)` function must be used for value insertion. 
-> Supported since V2.0.8-beta +> Supported since V2.0.8 **Syntax:** diff --git a/src/UserGuide/latest-Table/Reference/System-Config-Manual.md b/src/UserGuide/latest-Table/Reference/System-Config-Manual.md index c7006be42..660b55b42 100644 --- a/src/UserGuide/latest-Table/Reference/System-Config-Manual.md +++ b/src/UserGuide/latest-Table/Reference/System-Config-Manual.md @@ -1,3 +1,6 @@ +--- +redirectTo: System-Config-Manual_apache.html +--- -# Config Manual - -## 1. IoTDB Configuration Files - -The configuration files for IoTDB are located in the `conf` folder under the IoTDB installation directory. Key configuration files include: - -1. `confignode-env.sh` **/** `confignode-env.bat`: - 1. Environment configuration file for ConfigNode. - 2. Used to configure memory size and other environment settings for ConfigNode. -2. `datanode-env.sh` **/** `datanode-env.bat`: - 1. Environment configuration file for DataNode. - 2. Used to configure memory size and other environment settings for DataNode. -3. `iotdb-system.properties`: - 1. Main configuration file for IoTDB. - 2. Contains configurable parameters for IoTDB. -4. `iotdb-system.properties.template`: - 1. Template for the `iotdb-system.properties` file. - 2. Provides a reference for all available configuration parameters. - -## 2. Modify Configurations - -### 2.1 **Modify Existing Parameters**: - -- Parameters already present in the `iotdb-system.properties` file can be directly modified. - -### 2.2 **Adding New Parameters**: - -- For parameters not listed in `iotdb-system.properties`, you can find them in the `iotdb-system.properties.template` file. -- Copy the desired parameter from the template file to `iotdb-system.properties` and modify its value. - -### 2.3 Configuration Update Methods - -Different configuration parameters have different update methods, categorized as follows: - -1. **Modify before the first startup.**: - 1. These parameters can only be modified before the first startup of ConfigNode/DataNode. - 2. Modifying them after the first startup will prevent ConfigNode/DataNode from starting. -2. **Restart Required for Changes to Take Effect**: - 1. These parameters can be modified after ConfigNode/DataNode has started. - 2. However, a restart of ConfigNode/DataNode is required for the changes to take effect. -3. **Hot Reload**: - 1. These parameters can be modified while ConfigNode/DataNode is running. - 2. After modification, use the following SQL commands to apply the changes: - - `load configuration`: Reloads the configuration. - - `set configuration key1 = 'value1'`: Updates specific configuration parameters. - -## 3. Environment Parameters - -The environment configuration files (`confignode-env.sh/bat` and `datanode-env.sh/bat`) are used to configure Java environment parameters for ConfigNode and DataNode, such as JVM settings. These configurations are passed to the JVM when ConfigNode or DataNode starts. - -### 3.1 **confignode-env.sh/bat** - -- MEMORY_SIZE - -| Name | MEMORY_SIZE | -| ----------- | ------------------------------------------------------------ | -| Description | Memory size allocated when IoTDB ConfigNode starts. | -| Type | String | -| Default | Depends on the operating system and machine configuration. Defaults to 3/10 of the machine's memory, capped at 16G. | -| Effective | Restart required | - -- ON_HEAP_MEMORY - -| Name | ON_HEAP_MEMORY | -| ----------- | ------------------------------------------------------------ | -| Description | On-heap memory size available for IoTDB ConfigNode. 
Previously named `MAX_HEAP_SIZE`. | -| Type | String | -| Default | Depends on the `MEMORY_SIZE` configuration. | -| Effective | Restart required | - -- OFF_HEAP_MEMORY - -| Name | OFF_HEAP_MEMORY | -| ----------- | ------------------------------------------------------------ | -| Description | Off-heap memory size available for IoTDB ConfigNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. | -| Type | String | -| Default | Depends on the `MEMORY_SIZE` configuration. | -| Effective | Restart required | - -### 3.2 **datanode-env.sh/bat** - -- MEMORY_SIZE - -| Name | MEMORY_SIZE | -| ----------- | ------------------------------------------------------------ | -| Description | Memory size allocated when IoTDB DataNode starts. | -| Type | String | -| Default | Depends on the operating system and machine configuration. Defaults to 1/2 of the machine's memory. | -| Effective | Restart required | - -- ON_HEAP_MEMORY - -| Name | ON_HEAP_MEMORY | -| ----------- | ------------------------------------------------------------ | -| Description | On-heap memory size available for IoTDB DataNode. Previously named `MAX_HEAP_SIZE`. | -| Type | String | -| Default | Depends on the `MEMORY_SIZE` configuration. | -| Effective | Restart required | - -- OFF_HEAP_MEMORY - -| Name | OFF_HEAP_MEMORY | -| ----------- | ------------------------------------------------------------ | -| Description | Off-heap memory size available for IoTDB DataNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. | -| Type | String | -| Default | Depends on the `MEMORY_SIZE` configuration. | -| Effective | Restart required | - -## 4. System Parameters (`iotdb-system.properties.template`) - -The `iotdb-system.properties` file contains various configurations for managing IoTDB clusters, nodes, replication, directories, monitoring, SSL, connections, object storage, tier management, and REST services. Below is a detailed breakdown of the parameters: - -### 4.1 Cluster Configuration - -- cluster_name - -| Name | cluster_name | -| ----------- | --------------------------------------------------------- | -| Description | Name of the cluster. | -| Type | String | -| Default | default_cluster | -| Effective | Use CLI: `set configuration cluster_name='xxx'`. | -| Note | Changes are distributed across nodes. Changes may not propagate to all nodes in case of network issues or node failures. Nodes that fail to update must manually modify `cluster_name` in their configuration files and restart. Under normal circumstances, it is not recommended to modify `cluster_name` by manually modifying configuration files or to perform hot-loading via `load configuration` method. | - -### 4.2 Seed ConfigNode - -- cn_seed_config_node - -| Name | cn_seed_config_node | -| ----------- | ------------------------------------------------------------ | -| Description | Address of the seed ConfigNode for Confignode to join the cluster. | -| Type | String | -| Default | 127.0.0.1:10710 | -| Effective | Modify before the first startup. | - -- dn_seed_config_node - -| Name | dn_seed_config_node | -| ----------- | ------------------------------------------------------------ | -| Description | Address of the seed ConfigNode for Datanode to join the cluster. | -| Type | String | -| Default | 127.0.0.1:10710 | -| Effective | Modify before the first startup. | - -### 4.3 Node RPC Configuration - -- cn_internal_address - -| Name | cn_internal_address | -| ----------- | ---------------------------------------------- | -| Description | Internal address for ConfigNode communication. 
| -| Type | String | -| Default | 127.0.0.1 | -| Effective | Modify before the first startup. | - -- cn_internal_port - -| Name | cn_internal_port | -| ----------- | ------------------------------------------- | -| Description | Port for ConfigNode internal communication. | -| Type | Short Int : [0,65535] | -| Default | 10710 | -| Effective | Modify before the first startup. | - -- cn_consensus_port - -| Name | cn_consensus_port | -| ----------- | ----------------------------------------------------- | -| Description | Port for ConfigNode consensus protocol communication. | -| Type | Short Int : [0,65535] | -| Default | 10720 | -| Effective | Modify before the first startup. | - -- dn_rpc_address - -| Name | dn_rpc_address | -| ----------- |---------------------------------| -| Description | Address for client RPC service. | -| Type | String | -| Default | 127.0.0.1 | -| Effective | Restart required. | - -- dn_rpc_port - -| Name | dn_rpc_port | -| ----------- | ---------------------------- | -| Description | Port for client RPC service. | -| Type | Short Int : [0,65535] | -| Default | 6667 | -| Effective | Restart required. | - -- dn_internal_address - -| Name | dn_internal_address | -| ----------- | -------------------------------------------- | -| Description | Internal address for DataNode communication. | -| Type | string | -| Default | 127.0.0.1 | -| Effective | Modify before the first startup. | - -- dn_internal_port - -| Name | dn_internal_port | -| ----------- | ----------------------------------------- | -| Description | Port for DataNode internal communication. | -| Type | int | -| Default | 10730 | -| Effective | Modify before the first startup. | - -- dn_mpp_data_exchange_port - -| Name | dn_mpp_data_exchange_port | -| ----------- | -------------------------------- | -| Description | Port for MPP data exchange. | -| Type | int | -| Default | 10740 | -| Effective | Modify before the first startup. | - -- dn_schema_region_consensus_port - -| Name | dn_schema_region_consensus_port | -| ----------- | ------------------------------------------------------------ | -| Description | Port for Datanode SchemaRegion consensus protocol communication. | -| Type | int | -| Default | 10750 | -| Effective | Modify before the first startup. | - -- dn_data_region_consensus_port - -| Name | dn_data_region_consensus_port | -| ----------- | ------------------------------------------------------------ | -| Description | Port for Datanode DataRegion consensus protocol communication. | -| Type | int | -| Default | 10760 | -| Effective | Modify before the first startup. | - -- dn_join_cluster_retry_interval_ms - -| Name | dn_join_cluster_retry_interval_ms | -| ----------- | --------------------------------------------------- | -| Description | Interval for DataNode to retry joining the cluster. | -| Type | long | -| Default | 5000 | -| Effective | Restart required. | - -### 4.4 Replication configuration - -- config_node_consensus_protocol_class - -| Name | config_node_consensus_protocol_class | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus protocol for ConfigNode replication, only supports RatisConsensus | -| Type | String | -| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | -| Effective | Modify before the first startup. 
| - -- schema_replication_factor - -| Name | schema_replication_factor | -| ----------- | ------------------------------------------------------------ | -| Description | Default schema replication factor for databases. | -| Type | int32 | -| Default | 1 | -| Effective | Restart required. Takes effect on the new database after restarting. | - -- schema_region_consensus_protocol_class - -| Name | schema_region_consensus_protocol_class | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus protocol for schema region replication. Only supports RatisConsensus when multi-replications. | -| Type | String | -| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | -| Effective | Modify before the first startup. | - -- data_replication_factor - -| Name | data_replication_factor | -| ----------- | ------------------------------------------------------------ | -| Description | Default data replication factor for databases. | -| Type | int32 | -| Default | 1 | -| Effective | Restart required. Takes effect on the new database after restarting. | - -- data_region_consensus_protocol_class - -| Name | data_region_consensus_protocol_class | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus protocol for data region replication. Supports IoTConsensus or RatisConsensus when multi-replications. | -| Type | String | -| Default | org.apache.iotdb.consensus.iot.IoTConsensus | -| Effective | Modify before the first startup. | - -### 4.5 Directory configuration - -- cn_system_dir - -| Name | cn_system_dir | -| ----------- | ----------------------------------------------------------- | -| Description | System data storage path for ConfigNode. | -| Type | String | -| Default | data/confignode/system(Windows:data\\configndoe\\system) | -| Effective | Restart required | - -- cn_consensus_dir - -| Name | cn_consensus_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus protocol data storage path for ConfigNode. | -| Type | String | -| Default | data/confignode/consensus(Windows:data\\configndoe\\consensus) | -| Effective | Restart required | - -- cn_pipe_receiver_file_dir - -| Name | cn_pipe_receiver_file_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Directory for pipe receiver files in ConfigNode. | -| Type | String | -| Default | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) | -| Effective | Restart required | - -- dn_system_dir - -| Name | dn_system_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Schema storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | data/datanode/system(Windows:data\\datanode\\system) | -| Effective | Restart required | - -- dn_data_dirs - -| Name | dn_data_dirs | -| ----------- | ------------------------------------------------------------ | -| Description | Data storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. 
| -| Type | String | -| Default | data/datanode/data(Windows:data\\datanode\\data) | -| Effective | Restart required | - -- dn_multi_dir_strategy - -| Name | dn_multi_dir_strategy | -| ----------- | ------------------------------------------------------------ | -| Description | The strategy used by IoTDB to select directories in `data_dirs` for TsFiles. You can use either the simple class name or the fully qualified class name. The system provides the following two strategies: 1. SequenceStrategy: IoTDB selects directories sequentially, iterating through all directories in `data_dirs` in a round-robin manner. 2. MaxDiskUsableSpaceFirstStrategy IoTDB prioritizes the directory in `data_dirs` with the largest disk free space. To implement a custom strategy: 1. Inherit the `org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy `class and implement your own strategy method. 2. Fill in the configuration item with the fully qualified class name of your implementation (package name + class name, e.g., `UserDefineStrategyPackage`). 3. Add the JAR file containing your custom class to the project. | -| Type | String | -| Default | SequenceStrategy | -| Effective | Hot reload. | - -- dn_consensus_dir - -| Name | dn_consensus_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Consensus log storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | data/datanode/consensus(Windows:data\\datanode\\consensus) | -| Effective | Restart required | - -- dn_wal_dirs - -| Name | dn_wal_dirs | -| ----------- | ------------------------------------------------------------ | -| Description | Write-ahead log (WAL) storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | data/datanode/wal(Windows:data\\datanode\\wal) | -| Effective | Restart required | - -- dn_tracing_dir - -| Name | dn_tracing_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Tracing root directory for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | datanode/tracing(Windows:datanode\\tracing) | -| Effective | Restart required | - -- dn_sync_dir - -| Name | dn_sync_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Sync storage path for DataNode.By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | -| Type | String | -| Default | data/datanode/sync(Windows:data\\datanode\\sync) | -| Effective | Restart required | - -- sort_tmp_dir - -| Name | sort_tmp_dir | -| ----------- | ------------------------------------------------- | -| Description | Temporary directory for sorting operations. 
| -| Type | String | -| Default | data/datanode/tmp(Windows:data\\datanode\\tmp) | -| Effective | Restart required | - -- dn_pipe_receiver_file_dirs - -| Name | dn_pipe_receiver_file_dirs | -| ----------- | ------------------------------------------------------------ | -| Description | Directory for pipe receiver files in DataNode. | -| Type | String | -| Default | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | -| Effective | Restart required | - -- iot_consensus_v2_receiver_file_dirs - -| Name | iot_consensus_v2_receiver_file_dirs | -| ----------- | ------------------------------------------------------------ | -| Description | Directory for IoTConsensus V2 receiver files. | -| Type | String | -| Default | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | -| Effective | Restart required | - -- iot_consensus_v2_deletion_file_dir - -| Name | iot_consensus_v2_deletion_file_dir | -| ----------- | ------------------------------------------------------------ | -| Description | Directory for IoTConsensus V2 deletion files. | -| Type | String | -| Default | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | -| Effective | Restart required | - -### 4.6 Metric Configuration - -- cn_metric_reporter_list - -| Name | cn_metric_reporter_list | -| ----------- | ----------------------------------------- | -| Description | Systems for reporting ConfigNode metrics. | -| Type | String | -| Default | None | -| Effective | Restart required. | - -- cn_metric_level - -| Name | cn_metric_level | -| ----------- | --------------------------------------- | -| Description | Level of detail for ConfigNode metrics. | -| Type | String | -| Default | IMPORTANT | -| Effective | Restart required. | - -- cn_metric_async_collect_period - -| Name | cn_metric_async_collect_period | -| ----------- | ------------------------------------------------------------ | -| Description | Period for asynchronous metric collection in ConfigNode (in seconds). | -| Type | int | -| Default | 5 | -| Effective | Restart required. | - -- cn_metric_prometheus_reporter_port - -| Name | cn_metric_prometheus_reporter_port | -| ----------- | --------------------------------------------------- | -| Description | Port for Prometheus metric reporting in ConfigNode. | -| Type | int | -| Default | 9091 | -| Effective | Restart required. | - -- dn_metric_reporter_list - -| Name | dn_metric_reporter_list | -| ----------- | --------------------------------------- | -| Description | Systems for reporting DataNode metrics. | -| Type | String | -| Default | None | -| Effective | Restart required. | - -- dn_metric_level - -| Name | dn_metric_level | -| ----------- | ------------------------------------- | -| Description | Level of detail for DataNode metrics. | -| Type | String | -| Default | IMPORTANT | -| Effective | Restart required. | - -- dn_metric_async_collect_period - -| Name | dn_metric_async_collect_period | -| ----------- | ------------------------------------------------------------ | -| Description | Period for asynchronous metric collection in DataNode (in seconds). | -| Type | int | -| Default | 5 | -| Effective | Restart required. | - -- dn_metric_prometheus_reporter_port - -| Name | dn_metric_prometheus_reporter_port | -| ----------- | ------------------------------------------------- | -| Description | Port for Prometheus metric reporting in DataNode. 
| -| Type | int | -| Default | 9092 | -| Effective | Restart required. | - -- dn_metric_internal_reporter_type - -| Name | dn_metric_internal_reporter_type | -| ----------- | ------------------------------------------------------------ | -| Description | Internal reporter types for DataNode metrics. For internal monitoring and checking that the data has been successfully written and refreshed. | -| Type | String | -| Default | IOTDB | -| Effective | Restart required. | - -### 4.7 SSL Configuration - -- enable_thrift_ssl - -| Name | enable_thrift_ssl | -| ----------- | --------------------------------------------- | -| Description | Enables SSL encryption for RPC communication. | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- enable_https - -| Name | enable_https | -| ----------- | ------------------------------ | -| Description | Enables SSL for REST services. | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- key_store_path - -| Name | key_store_path | -| ----------- | ---------------------------- | -| Description | Path to the SSL certificate. | -| Type | String | -| Default | None | -| Effective | Restart required. | - -- key_store_pwd - -| Name | key_store_pwd | -| ----------- | --------------------------------- | -| Description | Password for the SSL certificate. | -| Type | String | -| Default | None | -| Effective | Restart required. | - -### 4.8 Connection Configuration - -- cn_rpc_thrift_compression_enable - -| Name | cn_rpc_thrift_compression_enable | -| ----------- | ----------------------------------- | -| Description | Enables Thrift compression for RPC. | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- cn_rpc_max_concurrent_client_num - -| Name | cn_rpc_max_concurrent_client_num | -| ----------- |-------------------------------------------| -| Description | Maximum number of concurrent RPC clients. | -| Type | int | -| Default | 3000 | -| Effective | Restart required. | - -- cn_connection_timeout_ms - -| Name | cn_connection_timeout_ms | -| ----------- | ---------------------------------------------------- | -| Description | Connection timeout for ConfigNode (in milliseconds). | -| Type | int | -| Default | 60000 | -| Effective | Restart required. | - -- cn_selector_thread_nums_of_client_manager - -| Name | cn_selector_thread_nums_of_client_manager | -| ----------- | ------------------------------------------------------------ | -| Description | Number of selector threads for client management in ConfigNode. | -| Type | int | -| Default | 1 | -| Effective | Restart required. | - -- cn_max_client_count_for_each_node_in_client_manager - -| Name | cn_max_client_count_for_each_node_in_client_manager | -| ----------- | ------------------------------------------------------ | -| Description | Maximum clients per node in ConfigNode client manager. | -| Type | int | -| Default | 300 | -| Effective | Restart required. | - -- dn_session_timeout_threshold - -| Name | dn_session_timeout_threshold | -| ----------- | ---------------------------------------- | -| Description | Maximum idle time for DataNode sessions. | -| Type | int | -| Default | 0 | -| Effective | Restart required.t required. | - -- dn_rpc_thrift_compression_enable - -| Name | dn_rpc_thrift_compression_enable | -| ----------- | -------------------------------------------- | -| Description | Enables Thrift compression for DataNode RPC. | -| Type | Boolean | -| Default | false | -| Effective | Restart required. 
| - -- dn_rpc_advanced_compression_enable - -| Name | dn_rpc_advanced_compression_enable | -| ----------- | ----------------------------------------------------- | -| Description | Enables advanced Thrift compression for DataNode RPC. | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- dn_rpc_selector_thread_count - -| Name | rpc_selector_thread_count | -| ----------- | -------------------------------------------- | -| Description | Number of selector threads for DataNode RPC. | -| Type | int | -| Default | 1 | -| Effective | Restart required.t required. | - -- dn_rpc_min_concurrent_client_num - -| Name | rpc_min_concurrent_client_num | -| ----------- | ------------------------------------------------------ | -| Description | Minimum number of concurrent RPC clients for DataNode. | -| Type | Short Int : [0,65535] | -| Default | 1 | -| Effective | Restart required. | - -- dn_rpc_max_concurrent_client_num - -| Name | dn_rpc_max_concurrent_client_num | -| ----------- |--------------------------------------------------------| -| Description | Maximum number of concurrent RPC clients for DataNode. | -| Type | Short Int : [0,65535] | -| Default | 1000 | -| Effective | Restart required. | - -- dn_thrift_max_frame_size - -| Name | dn_thrift_max_frame_size | -| ----------- |------------------------------------------------| -| Description | Maximum frame size for RPC requests/responses. | -| Type | long | -| Default | 536870912 (Default 512MB) | -| Effective | Restart required. | - -- dn_thrift_init_buffer_size - -| Name | dn_thrift_init_buffer_size | -| ----------- | ----------------------------------- | -| Description | Initial buffer size for Thrift RPC. | -| Type | long | -| Default | 1024 | -| Effective | Restart required. | - -- dn_connection_timeout_ms - -| Name | dn_connection_timeout_ms | -| ----------- | -------------------------------------------------- | -| Description | Connection timeout for DataNode (in milliseconds). | -| Type | int | -| Default | 60000 | -| Effective | Restart required. | - -- dn_selector_thread_count_of_client_manager - -| Name | dn_selector_thread_count_of_client_manager | -| ----------- | ------------------------------------------------------------ | -| Description | selector thread (TAsyncClientManager) nums for async thread in a clientManager | -| Type | int | -| Default | 1 | -| Effective | Restart required.t required. | - -- dn_max_client_count_for_each_node_in_client_manager - -| Name | dn_max_client_count_for_each_node_in_client_manager | -| ----------- | --------------------------------------------------- | -| Description | Maximum clients per node in DataNode clientmanager. | -| Type | int | -| Default | 300 | -| Effective | Restart required. | - -### 4.9 Object storage management - -- remote_tsfile_cache_dirs - -| Name | remote_tsfile_cache_dirs | -| ----------- | ---------------------------------------- | -| Description | Local cache directory for cloud storage. | -| Type | String | -| Default | data/datanode/data/cache | -| Effective | Restart required. | - -- remote_tsfile_cache_page_size_in_kb - -| Name | remote_tsfile_cache_page_size_in_kb | -| ----------- | --------------------------------------------- | -| Description | Block size for cached files in cloud storage. | -| Type | int | -| Default | 20480 | -| Effective | Restart required. 
| - -- remote_tsfile_cache_max_disk_usage_in_mb - -| Name | remote_tsfile_cache_max_disk_usage_in_mb | -| ----------- | ------------------------------------------- | -| Description | Maximum disk usage for cloud storage cache. | -| Type | long | -| Default | 51200 | -| Effective | Restart required. | - -- object_storage_type - -| Name | object_storage_type | -| ----------- | ---------------------- | -| Description | Type of cloud storage. | -| Type | String | -| Default | AWS_S3 | -| Effective | Restart required. | - -- object_storage_endpoint - -| Name | object_storage_endpoint | -| ----------- | --------------------------- | -| Description | Endpoint for cloud storage. | -| Type | String | -| Default | None | -| Effective | Restart required. | - -- object_storage_bucket - -| Name | object_storage_bucket | -| ----------- | ------------------------------ | -| Description | Bucket name for cloud storage. | -| Type | String | -| Default | iotdb_data | -| Effective | Restart required. | - -- object_storage_access_key - -| Name | object_storage_access_key | -| ----------- | ----------------------------- | -| Description | Access key for cloud storage. | -| Type | String | -| Default | None | -| Effective | Restart required. | - -- object_storage_access_secret - -| Name | object_storage_access_secret | -| ----------- | -------------------------------- | -| Description | Access secret for cloud storage. | -| Type | String | -| Default | None | -| Effective | Restart required. | - -### 4.10 Tier management - -- dn_default_space_usage_thresholds - -| Name | dn_default_space_usage_thresholds | -| ----------- | ------------------------------------------------------------ | -| Description | Disk usage threshold, data will be moved to the next tier when the usage of the tier is higher than this threshold.If tiered storage is enabled, please separate thresholds of different tiers by semicolons ";". | -| Type | double | -| Default | 0.85 | -| Effective | Hot reload. | - -- dn_tier_full_policy - -| Name | dn_tier_full_policy | -| ----------- | ------------------------------------------------------------ | -| Description | How to deal with the last tier's data when its used space has been higher than its dn_default_space_usage_thresholds. | -| Type | String | -| Default | NULL | -| Effective | Hot reload. | - -- migrate_thread_count - -| Name | migrate_thread_count | -| ----------- | ------------------------------------------------------------ | -| Description | thread pool size for migrate operation in the DataNode's data directories. | -| Type | int | -| Default | 1 | -| Effective | Hot reload. | - -- tiered_storage_migrate_speed_limit_bytes_per_sec - -| Name | tiered_storage_migrate_speed_limit_bytes_per_sec | -| ----------- | ------------------------------------------------------------ | -| Description | The migrate speed limit of different tiers can reach per second | -| Type | int | -| Default | 10485760 | -| Effective | Hot reload. | - -### 4.11 REST Service Configuration - -- enable_rest_service - -| Name | enable_rest_service | -| ----------- | --------------------------- | -| Description | Is the REST service enabled | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- rest_service_port - -| Name | rest_service_port | -| ----------- | ------------------------------------ | -| Description | the binding port of the REST service | -| Type | int32 | -| Default | 18080 | -| Effective | Restart required. 
| - -- enable_swagger - -| Name | enable_swagger | -| ----------- | ------------------------------------------------------------ | -| Description | Whether to display rest service interface information through swagger. eg: http://ip:port/swagger.json | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- rest_query_default_row_size_limit - -| Name | rest_query_default_row_size_limit | -| ----------- | ------------------------------------------------------------ | -| Description | the default row limit to a REST query response when the rowSize parameter is not given in request | -| Type | int32 | -| Default | 10000 | -| Effective | Restart required. | - -- cache_expire_in_seconds - -| Name | cache_expire_in_seconds | -| ----------- | ------------------------------------------------------------ | -| Description | The expiration time of the user login information cache (in seconds) | -| Type | int32 | -| Default | 28800 | -| Effective | Restart required. | - -- cache_max_num - -| Name | cache_max_num | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum number of users can be stored in the user login cache. | -| Type | int32 | -| Default | 100 | -| Effective | Restart required. | - -- cache_init_num - -| Name | cache_init_num | -| ----------- | ------------------------------------------------------------ | -| Description | The initial capacity of users can be stored in the user login cache. | -| Type | int32 | -| Default | 10 | -| Effective | Restart required. | - -- client_auth - -| Name | client_auth | -| ----------- | --------------------------------- | -| Description | Is client authentication required | -| Type | boolean | -| Default | false | -| Effective | Restart required. | - -- trust_store_path - -| Name | trust_store_path | -| ----------- | -------------------- | -| Description | SSL trust store path | -| Type | String | -| Default | "" | -| Effective | Restart required. | - -- trust_store_pwd - -| Name | trust_store_pwd | -| ----------- | ------------------------- | -| Description | SSL trust store password. | -| Type | String | -| Default | "" | -| Effective | Restart required. | - -- idle_timeout_in_seconds - -| Name | idle_timeout_in_seconds | -| ----------- | ------------------------ | -| Description | SSL timeout (in seconds) | -| Type | int32 | -| Default | 5000 | -| Effective | Restart required. | - -### 4.12 Load balancing configuration - -- series_slot_num - -| Name | series_slot_num | -| ----------- | ------------------------------------------- | -| Description | Number of SeriesPartitionSlots per Database | -| Type | int32 | -| Default | 10000 | -| Effective | Modify before the first startup. | - -- series_partition_executor_class - -| Name | series_partition_executor_class | -| ----------- | ------------------------------------------------------------ | -| Description | SeriesPartitionSlot executor class | -| Type | String | -| Default | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor | -| Effective | Modify before the first startup. | - -- schema_region_group_extension_policy - -| Name | schema_region_group_extension_policy | -| ----------- | ------------------------------------------------------------ | -| Description | The policy of extension SchemaRegionGroup for each Database. | -| Type | string | -| Default | AUTO | -| Effective | Restart required. 
| - -- default_schema_region_group_num_per_database - -| Name | default_schema_region_group_num_per_database | -| ----------- | ------------------------------------------------------------ | -| Description | When set schema_region_group_extension_policy=CUSTOM, this parameter is the default number of SchemaRegionGroups for each Database.When set schema_region_group_extension_policy=AUTO, this parameter is the default minimal number of SchemaRegionGroups for each Database. | -| Type | int | -| Default | 1 | -| Effective | Restart required. | - -- schema_region_per_data_node - -| Name | schema_region_per_data_node | -| ----------- | ------------------------------------------------------------ | -| Description | It only takes effect when set schema_region_group_extension_policy=AUTO.This parameter is the maximum number of SchemaRegions expected to be managed by each DataNode. | -| Type | double | -| Default | 1.0 | -| Effective | Restart required. | - -- data_region_group_extension_policy - -| Name | data_region_group_extension_policy | -| ----------- | ---------------------------------------------------------- | -| Description | The policy of extension DataRegionGroup for each Database. | -| Type | string | -| Default | AUTO | -| Effective | Restart required. | - -- default_data_region_group_num_per_database - -| Name | default_data_region_group_per_database | -| ----------- | ------------------------------------------------------------ | -| Description | When set data_region_group_extension_policy=CUSTOM, this parameter is the default number of DataRegionGroups for each Database.When set data_region_group_extension_policy=AUTO, this parameter is the default minimal number of DataRegionGroups for each Database. | -| Type | int | -| Default | 2 | -| Effective | Restart required. | - -- data_region_per_data_node - -| Name | data_region_per_data_node | -| ----------- | ------------------------------------------------------------ | -| Description | It only takes effect when set data_region_group_extension_policy=AUTO.This parameter is the maximum number of DataRegions expected to be managed by each DataNode. | -| Type | double | -| Default | 5.0 | -| Effective | Restart required. | - -- enable_auto_leader_balance_for_ratis_consensus - -| Name | enable_auto_leader_balance_for_ratis_consensus | -| ----------- | ------------------------------------------------------------ | -| Description | Whether to enable auto leader balance for Ratis consensus protocol. | -| Type | Boolean | -| Default | true | -| Effective | Restart required. | - -- enable_auto_leader_balance_for_iot_consensus - -| Name | enable_auto_leader_balance_for_iot_consensus | -| ----------- | ------------------------------------------------------------ | -| Description | Whether to enable auto leader balance for IoTConsensus protocol. | -| Type | Boolean | -| Default | true | -| Effective | Restart required. | - -### 4.13 Cluster management - -- time_partition_origin - -| Name | time_partition_origin | -| ----------- | ------------------------------------------------------------ | -| Description | Time partition origin in milliseconds, default is equal to zero. | -| Type | Long | -| Unit | ms | -| Default | 0 | -| Effective | Modify before the first startup. 
| - -- time_partition_interval - -| Name | time_partition_interval | -| ----------- | ------------------------------------------------------------ | -| Description | Time partition interval in milliseconds, and partitioning data inside each data region, default is equal to one week | -| Type | Long | -| Unit | ms | -| Default | 604800000 | -| Effective | Modify before the first startup. | - -- heartbeat_interval_in_ms - -| Name | heartbeat_interval_in_ms | -| ----------- | -------------------------------------- | -| Description | The heartbeat interval in milliseconds | -| Type | Long | -| Unit | ms | -| Default | 1000 | -| Effective | Restart required. | - -- disk_space_warning_threshold - -| Name | disk_space_warning_threshold | -| ----------- | ------------------------------------------------------------ | -| Description | Disk remaining threshold at which DataNode is set to ReadOnly status | -| Type | double(percentage) | -| Default | 0.05 | -| Effective | Restart required. | - -### 4.14 Memory Control Configuration - -- datanode_memory_proportion - -| Name | datanode_memory_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | Memory Allocation Ratio: StorageEngine, QueryEngine, SchemaEngine, Consensus, StreamingEngine and Free Memory. | -| Type | Ratio | -| Default | 3:3:1:1:1:1 | -| Effective | Restart required. | - -- schema_memory_proportion - -| Name | schema_memory_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | Schema Memory Allocation Ratio: SchemaRegion, SchemaCache, and PartitionCache. | -| Type | Ratio | -| Default | 5:4:1 | -| Effective | Restart required. | - -- storage_engine_memory_proportion - -| Name | storage_engine_memory_proportion | -| ----------- | ----------------------------------------------------------- | -| Description | Memory allocation ratio in StorageEngine: Write, Compaction | -| Type | Ratio | -| Default | 8:2 | -| Effective | Restart required. | - -- write_memory_proportion - -| Name | write_memory_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | Memory allocation ratio in writing: Memtable, TimePartitionInfo | -| Type | Ratio | -| Default | 19:1 | -| Effective | Restart required. | - -- primitive_array_size - -| Name | primitive_array_size | -| ----------- | --------------------------------------------------------- | -| Description | primitive array size (length of each array) in array pool | -| Type | int32 | -| Default | 64 | -| Effective | Restart required. | - -- chunk_metadata_size_proportion - -| Name | chunk_metadata_size_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | Ratio of compaction memory for chunk metadata maintains in memory when doing compaction | -| Type | Double | -| Default | 0.1 | -| Effective | Restart required. | - -- flush_proportion - -| Name | flush_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | Ratio of memtable memory for invoking flush disk, 0.4 by defaultIf you have extremely high write load (like batch=1000), it can be set lower than the default value like 0.2 | -| Type | Double | -| Default | 0.4 | -| Effective | Restart required. 
| - -- buffered_arrays_memory_proportion - -| Name | buffered_arrays_memory_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | Ratio of memtable memory allocated for buffered arrays, 0.6 by default | -| Type | Double | -| Default | 0.6 | -| Effective | Restart required. | - -- reject_proportion - -| Name | reject_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | Ratio of memtable memory for rejecting insertion, 0.8 by defaultIf you have extremely high write load (like batch=1000) and the physical memory size is large enough, it can be set higher than the default value like 0.9 | -| Type | Double | -| Default | 0.8 | -| Effective | Restart required. | - -- device_path_cache_proportion - -| Name | device_path_cache_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | Ratio of memtable memory for the DevicePathCache. DevicePathCache is the deviceId cache, keeping only one copy of the same deviceId in memory | -| Type | Double | -| Default | 0.05 | -| Effective | Restart required. | - -- write_memory_variation_report_proportion - -| Name | write_memory_variation_report_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | If memory cost of data region increased more than proportion of allocated memory for writing, report to system. The default value is 0.001 | -| Type | Double | -| Default | 0.001 | -| Effective | Restart required. | - -- check_period_when_insert_blocked - -| Name | check_period_when_insert_blocked | -| ----------- | ------------------------------------------------------------ | -| Description | When an insertion is rejected, the waiting period (in ms) to check system again, 50 by default.If the insertion has been rejected and the read load is low, it can be set larger. | -| Type | int32 | -| Default | 50 | -| Effective | Restart required. | - -- io_task_queue_size_for_flushing - -| Name | io_task_queue_size_for_flushing | -| ----------- | -------------------------------------------- | -| Description | size of ioTaskQueue. The default value is 10 | -| Type | int32 | -| Default | 10 | -| Effective | Restart required. | - -- enable_query_memory_estimation - -| Name | enable_query_memory_estimation | -| ----------- | ------------------------------------------------------------ | -| Description | If true, we will estimate each query's possible memory footprint before executing it and deny it if its estimated memory exceeds current free memory | -| Type | bool | -| Default | true | -| Effective | Hot reload. | - -### 4.15 Schema Engine Configuration - -- schema_engine_mode - -| Name | schema_engine_mode | -| ----------- | ------------------------------------------------------------ | -| Description | The schema management mode of schema engine. Currently, support Memory and PBTree.This config of all DataNodes in one cluster must keep same. | -| Type | string | -| Default | Memory | -| Effective | Modify before the first startup. | - -- partition_cache_size - -| Name | partition_cache_size | -| ----------- | ------------------------- | -| Description | cache size for partition. | -| Type | Int32 | -| Default | 1000 | -| Effective | Restart required. 
| - -- sync_mlog_period_in_ms - -| Name | sync_mlog_period_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | The cycle when metadata log is periodically forced to be written to disk(in milliseconds)If sync_mlog_period_in_ms=0 it means force metadata log to be written to disk after each refreshmentSetting this parameter to 0 may slow down the operation on slow disk. | -| Type | Int64 | -| Default | 100 | -| Effective | Restart required. | - -- tag_attribute_flush_interval - -| Name | tag_attribute_flush_interval | -| ----------- | ------------------------------------------------------------ | -| Description | interval num for tag and attribute records when force flushing to disk | -| Type | int32 | -| Default | 1000 | -| Effective | Modify before the first startup. | - -- tag_attribute_total_size - -| Name | tag_attribute_total_size | -| ----------- | ------------------------------------------------------------ | -| Description | max size for a storage block for tags and attributes of a one-time series | -| Type | int32 | -| Default | 700 | -| Effective | Modify before the first startup. | - -- max_measurement_num_of_internal_request - -| Name | max_measurement_num_of_internal_request | -| ----------- | ------------------------------------------------------------ | -| Description | max measurement num of internal requestWhen creating timeseries with Session.createMultiTimeseries, the user input plan, the timeseries num ofwhich exceeds this num, will be split to several plans with timeseries no more than this num. | -| Type | Int32 | -| Default | 10000 | -| Effective | Restart required. | - -- datanode_schema_cache_eviction_policy - -| Name | datanode_schema_cache_eviction_policy | -| ----------- | --------------------------------------- | -| Description | Policy of DataNodeSchemaCache eviction. | -| Type | String | -| Default | FIFO | -| Effective | Restart required. | - -- cluster_timeseries_limit_threshold - -| Name | cluster_timeseries_limit_threshold | -| ----------- | ------------------------------------------------------------ | -| Description | This configuration parameter sets the maximum number of time series allowed in the cluster. | -| Type | Int32 | -| Default | -1 | -| Effective | Restart required. | - -- cluster_device_limit_threshold - -| Name | cluster_device_limit_threshold | -| ----------- | ------------------------------------------------------------ | -| Description | This configuration parameter sets the maximum number of devices allowed in the cluster. | -| Type | Int32 | -| Default | -1 | -| Effective | Restart required. | - -- database_limit_threshold - -| Name | database_limit_threshold | -| ----------- | ------------------------------------------------------------ | -| Description | This configuration parameter sets the maximum number of Cluster Databases allowed. | -| Type | Int32 | -| Default | -1 | -| Effective | Restart required. | - -### 4.16 Configurations for creating schema automatically - -- enable_auto_create_schema - -| Name | enable_auto_create_schema | -| ----------- | ------------------------------------------------ | -| Description | Whether creating schema automatically is enabled | -| Value | true or false | -| Default | true | -| Effective | Restart required. 
| - -- default_storage_group_level - -| Name | default_storage_group_level | -| ----------- | ------------------------------------------------------------ | -| Description | Database level when creating schema automatically is enabled e.g. root.sg0.d1.s2We will set root.sg0 as the database if database level is 1If the incoming path is shorter than this value, the creation/insertion will fail. | -| Value | int32 | -| Default | 1 | -| Effective | Restart required. | - -- boolean_string_infer_type - -| Name | boolean_string_infer_type | -| ----------- |------------------------------------------------------------------------------------| -| Description | register time series as which type when receiving boolean string "true" or "false" | -| Value | BOOLEAN or TEXT | -| Default | BOOLEAN | -| Effective | Hot_reload | - -- integer_string_infer_type - -| Name | integer_string_infer_type | -| ----------- |------------------------------------------------------------------------------------------------------------------| -| Description | register time series as which type when receiving an integer string and using float or double may lose precision | -| Value | INT32, INT64, FLOAT, DOUBLE, TEXT | -| Default | DOUBLE | -| Effective | Hot_reload | - -- floating_string_infer_type - -| Name | floating_string_infer_type | -| ----------- |----------------------------------------------------------------------------------| -| Description | register time series as which type when receiving a floating number string "6.7" | -| Value | DOUBLE, FLOAT or TEXT | -| Default | DOUBLE | -| Effective | Hot_reload | - -- nan_string_infer_type - -| Name | nan_string_infer_type | -| ----------- |--------------------------------------------------------------------| -| Description | register time series as which type when receiving the Literal NaN. 
| -| Value | DOUBLE, FLOAT or TEXT | -| Default | DOUBLE | -| Effective | Hot_reload | - -- default_boolean_encoding - -| Name | default_boolean_encoding | -| ----------- |----------------------------------------------------------------| -| Description | BOOLEAN encoding when creating schema automatically is enabled | -| Value | PLAIN, RLE | -| Default | RLE | -| Effective | Hot_reload | - -- default_int32_encoding - -| Name | default_int32_encoding | -| ----------- |--------------------------------------------------------------| -| Description | INT32 encoding when creating schema automatically is enabled | -| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | -| Default | TS_2DIFF | -| Effective | Hot_reload | - -- default_int64_encoding - -| Name | default_int64_encoding | -| ----------- |--------------------------------------------------------------| -| Description | INT64 encoding when creating schema automatically is enabled | -| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | -| Default | TS_2DIFF | -| Effective | Hot_reload | - -- default_float_encoding - -| Name | default_float_encoding | -| ----------- |--------------------------------------------------------------| -| Description | FLOAT encoding when creating schema automatically is enabled | -| Value | PLAIN, RLE, TS_2DIFF, GORILLA | -| Default | GORILLA | -| Effective | Hot_reload | - -- default_double_encoding - -| Name | default_double_encoding | -| ----------- |---------------------------------------------------------------| -| Description | DOUBLE encoding when creating schema automatically is enabled | -| Value | PLAIN, RLE, TS_2DIFF, GORILLA | -| Default | GORILLA | -| Effective | Hot_reload | - -- default_text_encoding - -| Name | default_text_encoding | -| ----------- |-------------------------------------------------------------| -| Description | TEXT encoding when creating schema automatically is enabled | -| Value | PLAIN | -| Default | PLAIN | -| Effective | Hot_reload | - - -* boolean_compressor - -| Name | boolean_compressor | -|------------------|-----------------------------------------------------------------------------------------| -| Description | BOOLEAN compression when creating schema automatically is enabled (Supports from V2.0.6) | -| Type | String | -| Default | LZ4 | -| Effective | Hot_reload | - -* int32_compressor - -| Name | int32_compressor | -|----------------------|--------------------------------------------------------------------------------------------| -| Description | INT32/DATE compression when creating schema automatically is enabled(Supports from V2.0.6) | -| Type | String | -| Default | LZ4 | -| Effective | Hot_reload | - -* int64_compressor - -| Name | int64_compressor | -|--------------------|-------------------------------------------------------------------------------------------------| -| Description | INT64/TIMESTAMP compression when creating schema automatically is enabled (Supports from V2.0.6) | -| Type | String | -| Default | LZ4 | -| Effective | Hot_reload | - -* float_compressor - -| Name | float_compressor | -|-----------------------|---------------------------------------------------------------------------------------| -| Description | FLOAT compression when creating schema automatically is enabled (Supports from V2.0.6) | -| Type | String | -| Default | LZ4 | -| Effective | Hot_reload | - -* double_compressor - -| Name | double_compressor | -|-------------------|----------------------------------------------------------------------------------------| -| Description | 
DOUBLE compression when creating schema automatically is enabled (Supports from V2.0.6) | -| Type | String | -| Default | LZ4 | -| Effective | Hot_reload | - -* text_compressor - -| Name | text_compressor | -|--------------------|--------------------------------------------------------------------------------------------------| -| Description | TEXT/BINARY/BLOB compression when creating schema automatically is enabled (Supports from V2.0.6) | -| Type | String | -| Default | LZ4 | -| Effective | Hot_reload | - - -### 4.17 Query Configurations - -- read_consistency_level - -| Name | read_consistency_level | -| ----------- | ------------------------------------------------------------ | -| Description | The read consistency levelThese consistency levels are currently supported:strong(Default, read from the leader replica)weak(Read from a random replica) | -| Type | String | -| Default | strong | -| Effective | Restart required. | - -- meta_data_cache_enable - -| Name | meta_data_cache_enable | -| ----------- | ------------------------------------------------------------ | -| Description | Whether to cache meta data (BloomFilter, ChunkMetadata and TimeSeriesMetadata) or not. | -| Type | Boolean | -| Default | true | -| Effective | Restart required. | - -- chunk_timeseriesmeta_free_memory_proportion - -| Name | chunk_timeseriesmeta_free_memory_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | Read memory Allocation Ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others.The parameter form is a:b:c:d:e:f:g:h, where a, b, c, d, e, f, g and h are integers. for example: 1:1:1:1:1:1:1:1 , 1:100:200:50:200:200:200:50 | -| Type | String | -| Default | 1 : 100 : 200 : 300 : 400 | -| Effective | Restart required. | - -- enable_last_cache - -| Name | enable_last_cache | -| ----------- | ---------------------------- | -| Description | Whether to enable LAST cache | -| Type | Boolean | -| Default | true | -| Effective | Restart required. | - -- mpp_data_exchange_core_pool_size - -| Name | mpp_data_exchange_core_pool_size | -| ----------- | -------------------------------------------- | -| Description | Core size of ThreadPool of MPP data exchange | -| Type | int32 | -| Default | 10 | -| Effective | Restart required. | - -- mpp_data_exchange_max_pool_size - -| Name | mpp_data_exchange_max_pool_size | -| ----------- | ------------------------------------------- | -| Description | Max size of ThreadPool of MPP data exchange | -| Type | int32 | -| Default | 10 | -| Effective | Restart required. | - -- mpp_data_exchange_keep_alive_time_in_ms - -| Name | mpp_data_exchange_keep_alive_time_in_ms | -| ----------- | --------------------------------------- | -| Description | Max waiting time for MPP data exchange | -| Type | int32 | -| Default | 1000 | -| Effective | Restart required. | - -- driver_task_execution_time_slice_in_ms - -| Name | driver_task_execution_time_slice_in_ms | -| ----------- | -------------------------------------- | -| Description | The max execution time of a DriverTask | -| Type | int32 | -| Default | 200 | -| Effective | Restart required. | - -- max_tsblock_size_in_bytes - -| Name | max_tsblock_size_in_bytes | -| ----------- | ----------------------------- | -| Description | The max capacity of a TsBlock | -| Type | int32 | -| Default | 131072 | -| Effective | Restart required. 
| - -- max_tsblock_line_numbers - -| Name | max_tsblock_line_numbers | -| ----------- | ------------------------------------------- | -| Description | The max number of lines in a single TsBlock | -| Type | int32 | -| Default | 1000 | -| Effective | Restart required. | - -- slow_query_threshold - -| Name | slow_query_threshold | -| ----------- | -------------------------------------- | -| Description | Time cost(ms) threshold for slow query | -| Type | long | -| Default | 10000 | -| Effective | Hot reload | - -- query_cost_stat_window - -| Name | query_cost_stat_window | -|-------------|--------------------| -| Description | Time window threshold(min) for record of history queries. | -| Type | Int32 | -| Default | 0 | -| Effective | Hot reload | - -- query_timeout_threshold - -| Name | query_timeout_threshold | -| ----------- | ----------------------------------------- | -| Description | The max executing time of query. unit: ms | -| Type | Int32 | -| Default | 60000 | -| Effective | Restart required. | - -- max_allowed_concurrent_queries - -| Name | max_allowed_concurrent_queries | -| ----------- | -------------------------------------------------- | -| Description | The maximum allowed concurrently executing queries | -| Type | Int32 | -| Default | 1000 | -| Effective | Restart required. | - -- query_thread_count - -| Name | query_thread_count | -| ----------- | ------------------------------------------------------------ | -| Description | How many threads can concurrently execute query statement. When <= 0, use CPU core number. | -| Type | Int32 | -| Default | 0 | -| Effective | Restart required. | - -- degree_of_query_parallelism - -| Name | degree_of_query_parallelism | -| ----------- | ------------------------------------------------------------ | -| Description | How many pipeline drivers will be created for one fragment instance. When <= 0, use CPU core number / 2. | -| Type | Int32 | -| Default | 0 | -| Effective | Restart required. | - -- mode_map_size_threshold - -| Name | mode_map_size_threshold | -| ----------- | ------------------------------------------------------------ | -| Description | The threshold of count map size when calculating the MODE aggregation function | -| Type | Int32 | -| Default | 10000 | -| Effective | Restart required. | - -- batch_size - -| Name | batch_size | -| ----------- | ------------------------------------------------------------ | -| Description | The amount of data iterate each time in server (the number of data strips, that is, the number of different timestamps.) | -| Type | Int32 | -| Default | 100000 | -| Effective | Restart required. | - -- sort_buffer_size_in_bytes - -| Name | sort_buffer_size_in_bytes | -| ----------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Description | The memory for external sort in sort operator, when the data size is smaller than sort_buffer_size_in_bytes, the sort operator will use in-memory sort. | -| Type | long | -| Default | 1048576(Before V2.0.6)
0(Supports from V2.0.6), if `sort_buffer_size_in_bytes <= 0`, default value will be used, `default value = min(32MB, memory for query operators / query_thread_count / 2)`, if `sort_buffer_size_in_bytes > 0`, the specified value will be used. | -| Effective | Hot_reload | - -- merge_threshold_of_explain_analyze - -| Name | merge_threshold_of_explain_analyze | -| ----------- | ------------------------------------------------------------ | -| Description | The threshold of operator count in the result set of EXPLAIN ANALYZE, if the number of operator in the result set is larger than this threshold, operator will be merged. | -| Type | int | -| Default | 10 | -| Effective | Hot reload | - -### 4.18 TTL Configuration - -- ttl_check_interval - -| Name | ttl_check_interval | -| ----------- | ------------------------------------------------------------ | -| Description | The interval of TTL check task in each database. The TTL check task will inspect and select files with a higher volume of expired data for compaction. Default is 2 hours. | -| Type | int | -| Default | 7200000 | -| Effective | Restart required. | - -- max_expired_time - -| Name | max_expired_time | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum expiring time of device which has a ttl. Default is 1 month.If the data elapsed time (current timestamp minus the maximum data timestamp of the device in the file) of such devices exceeds this value, then the file will be cleaned by compaction. | -| Type | int | -| Default | 2592000000 | -| Effective | Restart required. | - -- expired_data_ratio - -| Name | expired_data_ratio | -| ----------- | ------------------------------------------------------------ | -| Description | The expired device ratio. If the ratio of expired devices in one file exceeds this value, then expired data of this file will be cleaned by compaction. | -| Type | float | -| Default | 0.3 | -| Effective | Restart required. | - -### 4.19 Storage Engine Configuration - -- timestamp_precision - -| Name | timestamp_precision | -| ----------- | ------------------------------------------------------------ | -| Description | Use this value to set timestamp precision as "ms", "us" or "ns". | -| Type | String | -| Default | ms | -| Effective | Modify before the first startup. | - -- timestamp_precision_check_enabled - -| Name | timestamp_precision_check_enabled | -| ----------- | ------------------------------------------------------------ | -| Description | When the timestamp precision check is enabled, the timestamps those are over 13 digits for ms precision, or over 16 digits for us precision are not allowed to be inserted. | -| Type | Boolean | -| Default | true | -| Effective | Modify before the first startup. | - -- max_waiting_time_when_insert_blocked - -| Name | max_waiting_time_when_insert_blocked | -| ----------- | ------------------------------------------------------------ | -| Description | When the waiting time (in ms) of an inserting exceeds this, throw an exception. 10000 by default. | -| Type | Int32 | -| Default | 10000 | -| Effective | Restart required. | - -- handle_system_error - -| Name | handle_system_error | -| ----------- | -------------------------------------------------------- | -| Description | What will the system do when unrecoverable error occurs. | -| Type | String | -| Default | CHANGE_TO_READ_ONLY | -| Effective | Restart required. 
| - -- enable_timed_flush_seq_memtable - -| Name | enable_timed_flush_seq_memtable | -| ----------- | --------------------------------------------------- | -| Description | Whether to timed flush sequence tsfiles' memtables. | -| Type | Boolean | -| Default | true | -| Effective | Hot reload | - -- seq_memtable_flush_interval_in_ms - -| Name | seq_memtable_flush_interval_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | If a memTable's last update time is older than current time minus this, the memtable will be flushed to disk. | -| Type | long | -| Default | 600000 | -| Effective | Hot reload | - -- seq_memtable_flush_check_interval_in_ms - -| Name | seq_memtable_flush_check_interval_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | The interval to check whether sequence memtables need flushing. | -| Type | long | -| Default | 30000 | -| Effective | Hot reload | - -- enable_timed_flush_unseq_memtable - -| Name | enable_timed_flush_unseq_memtable | -| ----------- | ----------------------------------------------------- | -| Description | Whether to timed flush unsequence tsfiles' memtables. | -| Type | Boolean | -| Default | true | -| Effective | Hot reload | - -- unseq_memtable_flush_interval_in_ms - -| Name | unseq_memtable_flush_interval_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | If a memTable's last update time is older than current time minus this, the memtable will be flushed to disk. | -| Type | long | -| Default | 600000 | -| Effective | Hot reload | - -- unseq_memtable_flush_check_interval_in_ms - -| Name | unseq_memtable_flush_check_interval_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | The interval to check whether unsequence memtables need flushing. | -| Type | long | -| Default | 30000 | -| Effective | Hot reload | - -- tvlist_sort_algorithm - -| Name | tvlist_sort_algorithm | -| ----------- | ------------------------------------------------- | -| Description | The sort algorithms used in the memtable's TVList | -| Type | String | -| Default | TIM | -| Effective | Restart required. | - -- avg_series_point_number_threshold - -| Name | avg_series_point_number_threshold | -| ----------- | ------------------------------------------------------------ | -| Description | When the average point number of timeseries in memtable exceeds this, the memtable is flushed to disk. | -| Type | int32 | -| Default | 100000 | -| Effective | Restart required. | - -- flush_thread_count - -| Name | flush_thread_count | -| ----------- | ------------------------------------------------------------ | -| Description | How many threads can concurrently flush. When <= 0, use CPU core number. | -| Type | int32 | -| Default | 0 | -| Effective | Restart required. | - -- enable_partial_insert - -| Name | enable_partial_insert | -| ----------- | ------------------------------------------------------------ | -| Description | In one insert (one device, one timestamp, multiple measurements), if enable partial insert, one measurement failure will not impact other measurements | -| Type | Boolean | -| Default | true | -| Effective | Restart required. 
| - -- recovery_log_interval_in_ms - -| Name | recovery_log_interval_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | the interval to log recover progress of each vsg when starting iotdb | -| Type | Int32 | -| Default | 5000 | -| Effective | Restart required. | - -- 0.13_data_insert_adapt - -| Name | 0.13_data_insert_adapt | -| ----------- | ------------------------------------------------------------ | -| Description | If using a v0.13 client to insert data, please set this configuration to true. | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- enable_tsfile_validation - -| Name | enable_tsfile_validation | -| ----------- | ------------------------------------------------------------ | -| Description | Verify that TSfiles generated by Flush, Load, and Compaction are correct. | -| Type | boolean | -| Default | false | -| Effective | Hot reload | - -- tier_ttl_in_ms - -| Name | tier_ttl_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | Default tier TTL. When the survival time of the data exceeds the threshold, it will be migrated to the next tier. | -| Type | long | -| Default | -1 | -| Effective | Restart required. | - -- max_object_file_size_in_byte - -| Name | max_object_file_size_in_byte | -|-------------|--------------------------------------------------------------------------| -| Description | Maximum size limit for a single object file (supported since V2.0.8-beta). | -| Type | long | -| Default | 4294967296 (4 GB in bytes) | -| Effective | Hot reload | - -- restrict_object_limit - -| Name | restrict_object_limit | -|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Description | No special restrictions on table names, column names, or device identifiers for `OBJECT` type (supported since V2.0.8-beta). When set to `true` and the table contains `OBJECT` columns, the following restrictions apply:
1. Naming Rules: Values in TAG columns, table names, and field names must not use `.` or `..`; Prohibited characters include `./` or `.\`, otherwise metadata creation will fail; Names containing filesystem-unsupported characters will cause write errors.
2. Case Sensitivity: If the underlying filesystem is case-insensitive, device identifiers like `'d1'` and `'D1'` are treated as identical; Creating similar identifiers may overwrite `OBJECT` data files, leading to data corruption.
3. Storage Path: Actual storage path format: `${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin` | -| Type | boolean | -| Default | false | -| Effective | Can only be modified before the first service startup. | - -### 4.20 Compaction Configurations - -- enable_seq_space_compaction - -| Name | enable_seq_space_compaction | -| ----------- | ---------------------------------------------------------- | -| Description | sequence space compaction: only compact the sequence files | -| Type | Boolean | -| Default | true | -| Effective | Hot reload | - -- enable_unseq_space_compaction - -| Name | enable_unseq_space_compaction | -| ----------- | ------------------------------------------------------------ | -| Description | unsequence space compaction: only compact the unsequence files | -| Type | Boolean | -| Default | true | -| Effective | Hot reload | - -- enable_cross_space_compaction - -| Name | enable_cross_space_compaction | -| ----------- | ------------------------------------------------------------ | -| Description | cross space compaction: compact the unsequence files into the overlapped sequence files | -| Type | Boolean | -| Default | true | -| Effective | Hot reload | - -- enable_auto_repair_compaction - -| Name | enable_auto_repair_compaction | -| ----------- | ---------------------------------------------- | -| Description | enable auto repair unsorted file by compaction | -| Type | Boolean | -| Default | true | -| Effective | Hot reload | - -- cross_selector - -| Name | cross_selector | -| ----------- | ------------------------------------------- | -| Description | the selector of cross space compaction task | -| Type | String | -| Default | rewrite | -| Effective | Restart required. | - -- cross_performer - -| Name | cross_performer | -| ----------- |-----------------------------------------------------------| -| Description | the compaction performer of cross space compaction task, Options: read_point, fast | -| Type | String | -| Default | fast | -| Effective | Hot reload . 
| - -- inner_seq_selector - -| Name | inner_seq_selector | -| ----------- |--------------------------------------------------------| -| Description | the selector of inner sequence space compaction task, Options: size_tiered_single_target,size_tiered_multi_target | -| Type | String | -| Default | size_tiered_multi_target | -| Effective | Hot reload | - -- inner_seq_performer - -| Name | inner_seq_performer | -| ----------- |---------------------------------------------------------| -| Description | the performer of inner sequence space compaction task, Options: read_chunk, fast | -| Type | String | -| Default | read_chunk | -| Effective | Hot reload | - -- inner_unseq_selector - -| Name | inner_unseq_selector | -| ----------- |----------------------------------------------------------| -| Description | the selector of inner unsequence space compaction task, Options: size_tiered_single_target,size_tiered_multi_target | -| Type | String | -| Default | size_tiered_multi_target | -| Effective | Hot reload | - -- inner_unseq_performer - -| Name | inner_unseq_performer | -| ----------- |-----------------------------------------------------------| -| Description | the performer of inner unsequence space compaction task, Options: read_point, fast | -| Type | String | -| Default | fast | -| Effective | Hot reload | - -- compaction_priority - -| Name | compaction_priority | -| ----------- | ------------------------------------------------------------ | -| Description | The priority of compaction executionINNER_CROSS: prioritize inner space compaction, reduce the number of files firstCROSS_INNER: prioritize cross space compaction, eliminate the unsequence files firstBALANCE: alternate two compaction types | -| Type | String | -| Default | INNER_CROSS | -| Effective | Restart required. | - -- candidate_compaction_task_queue_size - -| Name | candidate_compaction_task_queue_size | -| ----------- | -------------------------------------------- | -| Description | The size of candidate compaction task queue. | -| Type | int32 | -| Default | 50 | -| Effective | Restart required. | - -- target_compaction_file_size - -| Name | target_compaction_file_size | -| ----------- | ------------------------------------------------------------ | -| Description | This parameter is used in two places:The target tsfile size of inner space compaction.The candidate size of seq tsfile in cross space compaction will be smaller than target_compaction_file_size * 1.5.In most cases, the target file size of cross compaction won't exceed this threshold, and if it does, it will not be much larger than it. | -| Type | Int64 | -| Default | 2147483648 | -| Effective | Hot reload | - -- inner_compaction_total_file_size_threshold - -| Name | inner_compaction_total_file_size_threshold | -| ----------- | ---------------------------------------------------- | -| Description | The total file size limit in inner space compaction. | -| Type | int64 | -| Default | 10737418240 | -| Effective | Hot reload | - -- inner_compaction_total_file_num_threshold - -| Name | inner_compaction_total_file_num_threshold | -| ----------- | --------------------------------------------------- | -| Description | The total file num limit in inner space compaction. 
| -| Type | int32 | -| Default | 100 | -| Effective | Hot reload | - -- max_level_gap_in_inner_compaction - -| Name | max_level_gap_in_inner_compaction | -| ----------- | ----------------------------------------------- | -| Description | The max level gap in inner compaction selection | -| Type | int32 | -| Default | 2 | -| Effective | Hot reload | - -- target_chunk_size - -| Name | target_chunk_size | -| ----------- | ------------------------------------------------------------ | -| Description | The target chunk size in flushing and compaction. If the size of a timeseries in memtable exceeds this, the data will be flushed to multiple chunks.| -| Type | Int64 | -| Default | 1600000 | -| Effective | Restart required. | - -- target_chunk_point_num - -| Name | target_chunk_point_num | -| ----------- |-----------------------------------------------------------------| -| Description | The target point nums in one chunk in flushing and compaction. If the point number of a timeseries in memtable exceeds this, the data will be flushed to multiple chunks. | -| Type | Int64 | -| Default | 100000 | -| Effective | Restart required. | - -- chunk_size_lower_bound_in_compaction - -| Name | chunk_size_lower_bound_in_compaction | -| ----------- | ------------------------------------------------------------ | -| Description | If the chunk size is lower than this threshold, it will be deserialized into points | -| Type | Int64 | -| Default | 128 | -| Effective | Restart required. | - -- chunk_point_num_lower_bound_in_compaction - -| Name | chunk_point_num_lower_bound_in_compaction | -| ----------- |------------------------------------------------------------------------------------------| -| Description | If the chunk point num is lower than this threshold, it will be deserialized into points | -| Type | Int64 | -| Default | 100 | -| Effective | Restart required. | - -- inner_compaction_candidate_file_num - -| Name | inner_compaction_candidate_file_num | -| ----------- | ------------------------------------------------------------ | -| Description | The file num requirement when selecting inner space compaction candidate files | -| Type | int32 | -| Default | 30 | -| Effective | Hot reload | - -- max_cross_compaction_candidate_file_num - -| Name | max_cross_compaction_candidate_file_num | -| ----------- | ------------------------------------------------------------ | -| Description | The max file when selecting cross space compaction candidate files | -| Type | int32 | -| Default | 500 | -| Effective | Hot reload | - -- max_cross_compaction_candidate_file_size - -| Name | max_cross_compaction_candidate_file_size | -| ----------- | ------------------------------------------------------------ | -| Description | The max total size when selecting cross space compaction candidate files | -| Type | Int64 | -| Default | 5368709120 | -| Effective | Hot reload | - -- min_cross_compaction_unseq_file_level - -| Name | min_cross_compaction_unseq_file_level | -| ----------- | ------------------------------------------------------------ | -| Description | The min inner compaction level of unsequence file which can be selected as candidate | -| Type | int32 | -| Default | 1 | -| Effective | Hot reload | - -- compaction_thread_count - -| Name | compaction_thread_count | -| ----------- | ------------------------------------------------------------ | -| Description | How many threads will be set up to perform compaction, 10 by default. 
| -| Type | int32 | -| Default | 10 | -| Effective | Hot reload | - -- compaction_max_aligned_series_num_in_one_batch - -| Name | compaction_max_aligned_series_num_in_one_batch | -| ----------- | ------------------------------------------------------------ | -| Description | How many chunk will be compacted in aligned series compaction, 10 by default. | -| Type | int32 | -| Default | 10 | -| Effective | Hot reload | - -- compaction_schedule_interval_in_ms - -| Name | compaction_schedule_interval_in_ms | -| ----------- | ---------------------------------------- | -| Description | The interval of compaction task schedule | -| Type | Int64 | -| Default | 60000 | -| Effective | Restart required. | - -- compaction_write_throughput_mb_per_sec - -| Name | compaction_write_throughput_mb_per_sec | -| ----------- | -------------------------------------------------------- | -| Description | The limit of write throughput merge can reach per second | -| Type | int32 | -| Default | 16 | -| Effective | Restart required. | - -- compaction_read_throughput_mb_per_sec - -| Name | compaction_read_throughput_mb_per_sec | -| ----------- | ------------------------------------------------------- | -| Description | The limit of read throughput merge can reach per second | -| Type | int32 | -| Default | 0 | -| Effective | Hot reload | - -- compaction_read_operation_per_sec - -| Name | compaction_read_operation_per_sec | -| ----------- | ------------------------------------------------------ | -| Description | The limit of read operation merge can reach per second | -| Type | int32 | -| Default | 0 | -| Effective | Hot reload | - -- sub_compaction_thread_count - -| Name | sub_compaction_thread_count | -| ----------- | ------------------------------------------------------------ | -| Description | The number of sub compaction threads to be set up to perform compaction. | -| Type | int32 | -| Default | 4 | -| Effective | Hot reload | - -- inner_compaction_task_selection_disk_redundancy - -| Name | inner_compaction_task_selection_disk_redundancy | -| ----------- | ------------------------------------------------------------ | -| Description | Redundancy value of disk availability, only use for inner compaction. | -| Type | double | -| Default | 0.05 | -| Effective | Hot reload | - -- inner_compaction_task_selection_mods_file_threshold - -| Name | inner_compaction_task_selection_mods_file_threshold | -| ----------- | -------------------------------------------------------- | -| Description | Mods file size threshold, only use for inner compaction. | -| Type | long | -| Default | 131072 | -| Effective | Hot reload | - -- compaction_schedule_thread_num - -| Name | compaction_schedule_thread_num | -| ----------- | ------------------------------------------------------------ | -| Description | The number of threads to be set up to select compaction task. | -| Type | int32 | -| Default | 4 | -| Effective | Hot reload | - -### 4.21 Write Ahead Log Configuration - -- wal_mode - -| Name | wal_mode | -| ----------- | ------------------------------------------------------------ | -| Description | The details of these three modes are as follows:DISABLE: the system will disable wal.SYNC: the system will submit wal synchronously, write request will not return until its wal is fsynced to the disk successfully.ASYNC: the system will submit wal asynchronously, write request will return immediately no matter its wal is fsynced to the disk successfully. | -| Type | String | -| Default | ASYNC | -| Effective | Restart required. 
| - -- max_wal_nodes_num - -| Name | max_wal_nodes_num | -| ----------- | ------------------------------------------------------------ | -| Description | each node corresponds to one wal directory The default value 0 means the number is determined by the system, the number is in the range of [data region num / 2, data region num]. | -| Type | int32 | -| Default | 0 | -| Effective | Restart required. | - -- wal_async_mode_fsync_delay_in_ms - -| Name | wal_async_mode_fsync_delay_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | Duration a wal flush operation will wait before calling fsync in the async mode | -| Type | int32 | -| Default | 1000 | -| Effective | Hot reload | - -- wal_sync_mode_fsync_delay_in_ms - -| Name | wal_sync_mode_fsync_delay_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | Duration a wal flush operation will wait before calling fsync in the sync mode | -| Type | int32 | -| Default | 3 | -| Effective | Hot reload | - -- wal_buffer_size_in_byte - -| Name | wal_buffer_size_in_byte | -| ----------- | ---------------------------- | -| Description | Buffer size of each wal node | -| Type | int32 | -| Default | 33554432 | -| Effective | Restart required. | - -- wal_buffer_queue_capacity - -| Name | wal_buffer_queue_capacity | -| ----------- | --------------------------------- | -| Description | Buffer capacity of each wal queue | -| Type | int32 | -| Default | 500 | -| Effective | Restart required. | - -- wal_file_size_threshold_in_byte - -| Name | wal_file_size_threshold_in_byte | -| ----------- | ------------------------------- | -| Description | Size threshold of each wal file | -| Type | int32 | -| Default | 31457280 | -| Effective | Hot reload | - -- wal_min_effective_info_ratio - -| Name | wal_min_effective_info_ratio | -| ----------- | --------------------------------------------------- | -| Description | Minimum ratio of effective information in wal files | -| Type | double | -| Default | 0.1 | -| Effective | Hot reload | - -- wal_memtable_snapshot_threshold_in_byte - -| Name | wal_memtable_snapshot_threshold_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | MemTable size threshold for triggering MemTable snapshot in wal | -| Type | int64 | -| Default | 8388608 | -| Effective | Hot reload | - -- max_wal_memtable_snapshot_num - -| Name | max_wal_memtable_snapshot_num | -| ----------- | ------------------------------------- | -| Description | MemTable's max snapshot number in wal | -| Type | int32 | -| Default | 1 | -| Effective | Hot reload | - -- delete_wal_files_period_in_ms - -| Name | delete_wal_files_period_in_ms | -| ----------- | ----------------------------------------------------------- | -| Description | The period when outdated wal files are periodically deleted | -| Type | int64 | -| Default | 20000 | -| Effective | Hot reload | - -- wal_throttle_threshold_in_byte - -| Name | wal_throttle_threshold_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | The minimum size of wal files when throttle down in IoTConsensus | -| Type | long | -| Default | 53687091200 | -| Effective | Hot reload | - -- iot_consensus_cache_window_time_in_ms - -| Name | iot_consensus_cache_window_time_in_ms | -| ----------- | ------------------------------------------------ | -| Description | Maximum wait time of write cache in IoTConsensus | -| Type | long | -| Default 
| -1 | -| Effective | Hot reload | - -- enable_wal_compression - -| Name | iot_consensus_cache_window_time_in_ms | -| ----------- | ------------------------------------- | -| Description | Enable Write Ahead Log compression. | -| Type | boolean | -| Default | true | -| Effective | Hot reload | - -### 4.22 **IoTConsensus Configuration** - -- data_region_iot_max_log_entries_num_per_batch - -| Name | data_region_iot_max_log_entries_num_per_batch | -| ----------- | ------------------------------------------------- | -| Description | The maximum log entries num in IoTConsensus Batch | -| Type | int32 | -| Default | 1024 | -| Effective | Restart required. | - -- data_region_iot_max_size_per_batch - -| Name | data_region_iot_max_size_per_batch | -| ----------- | -------------------------------------- | -| Description | The maximum size in IoTConsensus Batch | -| Type | int32 | -| Default | 16777216 | -| Effective | Restart required. | - -- data_region_iot_max_pending_batches_num - -| Name | data_region_iot_max_pending_batches_num | -| ----------- | ----------------------------------------------- | -| Description | The maximum pending batches num in IoTConsensus | -| Type | int32 | -| Default | 5 | -| Effective | Restart required. | - -- data_region_iot_max_memory_ratio_for_queue - -| Name | data_region_iot_max_memory_ratio_for_queue | -| ----------- | -------------------------------------------------- | -| Description | The maximum memory ratio for queue in IoTConsensus | -| Type | double | -| Default | 0.6 | -| Effective | Restart required. | - -- region_migration_speed_limit_bytes_per_second - -| Name | region_migration_speed_limit_bytes_per_second | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum transit size in byte per second for region migration | -| Type | long | -| Default | 33554432 | -| Effective | Restart required. | - -### 4.23 TsFile Configurations - -- group_size_in_byte - -| Name | group_size_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum number of bytes written to disk each time the data in memory is written to disk | -| Type | int32 | -| Default | 134217728 | -| Effective | Hot reload | - -- page_size_in_byte - -| Name | page_size_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | The memory size for each series writer to pack page, default value is 64KB | -| Type | int32 | -| Default | 65536 | -| Effective | Hot reload | - -- max_number_of_points_in_page - -| Name | max_number_of_points_in_page | -| ----------- | ------------------------------------------- | -| Description | The maximum number of data points in a page | -| Type | int32 | -| Default | 10000 | -| Effective | Hot reload | - -- pattern_matching_threshold - -| Name | pattern_matching_threshold | -| ----------- | ------------------------------------------- | -| Description | The threshold for pattern matching in regex | -| Type | int32 | -| Default | 1000000 | -| Effective | Hot reload | - -- float_precision - -| Name | float_precision | -| ----------- | ------------------------------------------------------------ | -| Description | Floating-point precision of query results.Only effective for RLE and TS_2DIFF encodings.Due to the limitation of machine precision, some values may not be interpreted strictly. 
| -| Type | int32 | -| Default | 2 | -| Effective | Hot reload | - -- value_encoder - -| Name | value_encoder | -| ----------- | ------------------------------------------------------------ | -| Description | Encoder of value series. default value is PLAIN. | -| Type | For int, long data type, also supports TS_2DIFF and RLE(run-length encoding), GORILLA and ZIGZAG. | -| Default | PLAIN | -| Effective | Hot reload | - -- compressor - -| Name | compressor | -| ----------- | ------------------------------------------------------------ | -| Description | Compression configuration And it is also used as the default compressor of time column in aligned timeseries. | -| Type | Data compression method, supports UNCOMPRESSED, SNAPPY, ZSTD, LZMA2 or LZ4. Default value is LZ4 | -| Default | LZ4 | -| Effective | Hot reload | - -- encrypt_flag - -| Name | encrypt_flag | -| ----------- | ---------------------- | -| Description | Enable data encryption | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- encrypt_type - -| Name | encrypt_type | -| ----------- |---------------------------------------| -| Description | The method of data encrytion | -| Type | String | -| Default | org.apache.tsfile.encrypt.UNENCRYPTED | -| Effective | Restart required. | - -- encrypt_key_path - -| Name | encrypt_key_path | -| ----------- | ----------------------------------- | -| Description | The path of key for data encryption | -| Type | String | -| Default | None | -| Effective | Restart required. | - -### 4.24 Authorization Configuration - -- authorizer_provider_class - -| Name | authorizer_provider_class | -| ----------- | ------------------------------------------------------------ | -| Description | which class to serve for authorization. | -| Type | String | -| Default | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer | -| Effective | Restart required. | -| 其他可选值 | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer | - -- openID_url - -| Name | openID_url | -| ----------- | ------------------------------------------------------------ | -| Description | The url of openID server If OpenIdAuthorizer is enabled, then openID_url must be set. | -| Type | String(a http link) | -| Default | None | -| Effective | Restart required. | - -- iotdb_server_encrypt_decrypt_provider - -| Name | iotdb_server_encrypt_decrypt_provider | -| ----------- | ------------------------------------------------------------ | -| Description | encryption provider class | -| Type | String | -| Default | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt | -| Effective | Modify before the first startup. | - -- iotdb_server_encrypt_decrypt_provider_parameter - -| Name | iotdb_server_encrypt_decrypt_provider_parameter | -| ----------- | ----------------------------------------------- | -| Description | encryption provided class parameter | -| Type | String | -| Default | None | -| Effective | Modify before the first startup. | - -- author_cache_size - -| Name | author_cache_size | -| ----------- | --------------------------- | -| Description | Cache size of user and role | -| Type | int32 | -| Default | 1000 | -| Effective | Restart required. | - -- author_cache_expire_time - -| Name | author_cache_expire_time | -| ----------- | ---------------------------------- | -| Description | Cache expire time of user and role | -| Type | int32 | -| Default | 30 | -| Effective | Restart required. 
| - -### 4.25 UDF Configuration - -- udf_initial_byte_array_length_for_memory_control - -| Name | udf_initial_byte_array_length_for_memory_control | -| ----------- | ------------------------------------------------------------ | -| Description | Used to estimate the memory usage of text fields in a UDF query.It is recommended to set this value to be slightly larger than the average length of all text records. | -| Type | int32 | -| Default | 48 | -| Effective | Restart required. | - -- udf_memory_budget_in_mb - -| Name | udf_memory_budget_in_mb | -| ----------- | ------------------------------------------------------------ | -| Description | How much memory may be used in ONE UDF query (in MB). The upper limit is 20% of allocated memory for read. | -| Type | Float | -| Default | 30.0 | -| Effective | Restart required. | - -- udf_reader_transformer_collector_memory_proportion - -| Name | udf_reader_transformer_collector_memory_proportion | -| ----------- | ------------------------------------------------------------ | -| Description | UDF memory allocation ratio.The parameter form is a:b:c, where a, b, and c are integers. | -| Type | String | -| Default | 1:1:1 | -| Effective | Restart required. | - -- udf_lib_dir - -| Name | udf_lib_dir | -| ----------- | ---------------------------- | -| Description | the udf lib directory | -| Type | String | -| Default | ext/udf(Windows:ext\\udf) | -| Effective | Restart required. | - -### 4.26 Trigger Configuration - -- trigger_lib_dir - -| Name | trigger_lib_dir | -| ----------- | ------------------------- | -| Description | the trigger lib directory | -| Type | String | -| Default | ext/trigger | -| Effective | Restart required. | - -- stateful_trigger_retry_num_when_not_found - -| Name | stateful_trigger_retry_num_when_not_found | -| ----------- | ------------------------------------------------------------ | -| Description | How many times will we retry to found an instance of stateful trigger on DataNodes | -| Type | Int32 | -| Default | 3 | -| Effective | Restart required. | - -### 4.27 **Select-Into Configuration** - -- into_operation_buffer_size_in_byte - -| Name | into_operation_buffer_size_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum memory occupied by the data to be written when executing select-into statements. | -| Type | long | -| Default | 104857600 | -| Effective | Hot reload | - -- select_into_insert_tablet_plan_row_limit - -| Name | select_into_insert_tablet_plan_row_limit | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum number of rows can be processed in insert-tablet-plan when executing select-into statements. | -| Type | int32 | -| Default | 10000 | -| Effective | Hot reload | - -- into_operation_execution_thread_count - -| Name | into_operation_execution_thread_count | -| ----------- | ------------------------------------------------------------ | -| Description | The number of threads in the thread pool that execute insert-tablet tasks | -| Type | int32 | -| Default | 2 | -| Effective | Restart required. 
- -### 4.28 Continuous Query Configuration - -- continuous_query_submit_thread_count - -| Name | continuous_query_submit_thread_count | -| ----------- | ------------------------------------------------------------ | -| Description | The number of threads in the scheduled thread pool that submit continuous query tasks periodically | -| Type | int32 | -| Default | 2 | -| Effective | Restart required. | - -- continuous_query_min_every_interval_in_ms - -| Name | continuous_query_min_every_interval_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | The minimum value of the continuous query execution time interval | -| Type | long (duration) | -| Default | 1000 | -| Effective | Restart required. | - -### 4.29 Pipe Configuration - -- pipe_lib_dir - -| Name | pipe_lib_dir | -| ----------- | ----------------------- | -| Description | The pipe lib directory. | -| Type | string | -| Default | ext/pipe | -| Effective | Cannot be modified | - -- pipe_subtask_executor_max_thread_num - -| Name | pipe_subtask_executor_max_thread_num | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum number of threads that can be used to execute the pipe subtasks in PipeSubtaskExecutor. The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)). | -| Type | int | -| Default | 5 | -| Effective | Restart required. | - -- pipe_sink_timeout_ms - -| Name | pipe_sink_timeout_ms | -| ----------- | ------------------------------------------------------------ | -| Description | The connection timeout (in milliseconds) for the thrift client. | -| Type | int | -| Default | 900000 | -| Effective | Restart required. | - -- pipe_sink_selector_number - -| Name | pipe_sink_selector_number | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum number of selectors that can be used in the sink. It is recommended to set this value less than or equal to pipe_sink_max_client_number. | -| Type | int | -| Default | 4 | -| Effective | Restart required. | - -- pipe_sink_max_client_number - -| Name | pipe_sink_max_client_number | -| ----------- | ----------------------------------------------------------- | -| Description | The maximum number of clients that can be used in the sink. | -| Type | int | -| Default | 16 | -| Effective | Restart required. | - -- pipe_air_gap_receiver_enabled - -| Name | pipe_air_gap_receiver_enabled | -| ----------- | ------------------------------------------------------------ | -| Description | Whether to enable receiving pipe data through an air gap. The receiver can only return 0 or 1 in TCP mode to indicate whether the data is received successfully. | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- pipe_air_gap_receiver_port - -| Name | pipe_air_gap_receiver_port | -| ----------- | ------------------------------------------------------------ | -| Description | The port for the server to receive pipe data through an air gap. | -| Type | int | -| Default | 9780 | -| Effective | Restart required. | - -- pipe_all_sinks_rate_limit_bytes_per_second - -| Name | pipe_all_sinks_rate_limit_bytes_per_second | -| ----------- | ------------------------------------------------------------ | -| Description | The total bytes that all pipe sinks can transfer per second. A value less than or equal to 0 means no limit; the default is -1 (no limit). | -| Type | double | -| Default | -1 | -| Effective | Hot reload |
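As a worked illustration of the receiver-side options above, the following is a hedged sketch of an air-gap pipe setup; the rate value is illustrative and the port is the documented default.

```properties
# Accept pipe data over an air-gapped (one-way) link; TCP replies are 0/1 only.
pipe_air_gap_receiver_enabled=true
pipe_air_gap_receiver_port=9780
# Cap total sink throughput at ~10 MB/s across all pipes (-1 = unlimited).
pipe_all_sinks_rate_limit_bytes_per_second=10485760
# Keep the selector count at or below the client pool size, as recommended above.
pipe_sink_selector_number=4
pipe_sink_max_client_number=16
```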
- -### 4.30 RatisConsensus Configuration - -- config_node_ratis_log_appender_buffer_size_max - -| Name | config_node_ratis_log_appender_buffer_size_max | -| ----------- | ------------------------------------------------------------ | -| Description | Max payload size for a single log-sync RPC from leader to follower of ConfigNode (in bytes, 16MB by default) | -| Type | int32 | -| Default | 16777216 | -| Effective | Restart required. | - -- schema_region_ratis_log_appender_buffer_size_max - -| Name | schema_region_ratis_log_appender_buffer_size_max | -| ----------- | ------------------------------------------------------------ | -| Description | Max payload size for a single log-sync RPC from leader to follower of SchemaRegion (in bytes, 16MB by default) | -| Type | int32 | -| Default | 16777216 | -| Effective | Restart required. | - -- data_region_ratis_log_appender_buffer_size_max - -| Name | data_region_ratis_log_appender_buffer_size_max | -| ----------- | ------------------------------------------------------------ | -| Description | Max payload size for a single log-sync RPC from leader to follower of DataRegion (in bytes, 16MB by default) | -| Type | int32 | -| Default | 16777216 | -| Effective | Restart required. | - -- config_node_ratis_snapshot_trigger_threshold - -| Name | config_node_ratis_snapshot_trigger_threshold | -| ----------- | ------------------------------------------------------------ | -| Description | Maximum number of logs before triggering a snapshot of ConfigNode | -| Type | int32 | -| Default | 400,000 | -| Effective | Restart required. | - -- schema_region_ratis_snapshot_trigger_threshold - -| Name | schema_region_ratis_snapshot_trigger_threshold | -| ----------- | ------------------------------------------------------------ | -| Description | Maximum number of logs before triggering a snapshot of SchemaRegion | -| Type | int32 | -| Default | 400,000 | -| Effective | Restart required. | - -- data_region_ratis_snapshot_trigger_threshold - -| Name | data_region_ratis_snapshot_trigger_threshold | -| ----------- | ------------------------------------------------------------ | -| Description | Maximum number of logs before triggering a snapshot of DataRegion | -| Type | int32 | -| Default | 400,000 | -| Effective | Restart required. | - -- config_node_ratis_log_unsafe_flush_enable - -| Name | config_node_ratis_log_unsafe_flush_enable | -| ----------- | ------------------------------------------------------ | -| Description | Whether ConfigNode is allowed to flush Raft logs asynchronously | -| Type | boolean | -| Default | false | -| Effective | Restart required. | - -- schema_region_ratis_log_unsafe_flush_enable - -| Name | schema_region_ratis_log_unsafe_flush_enable | -| ----------- | -------------------------------------------------------- | -| Description | Whether SchemaRegion is allowed to flush Raft logs asynchronously | -| Type | boolean | -| Default | false | -| Effective | Restart required. | - -- data_region_ratis_log_unsafe_flush_enable - -| Name | data_region_ratis_log_unsafe_flush_enable | -| ----------- | ------------------------------------------------------ | -| Description | Whether DataRegion is allowed to flush Raft logs asynchronously | -| Type | boolean | -| Default | false | -| Effective | Restart required.
| - -- config_node_ratis_log_segment_size_max_in_byte - -| Name | config_node_ratis_log_segment_size_max_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | Max capacity of a Raft log segment file of ConfigNode (in bytes, 24MB by default) | -| Type | int32 | -| Default | 25165824 | -| Effective | Restart required. | - -- schema_region_ratis_log_segment_size_max_in_byte - -| Name | schema_region_ratis_log_segment_size_max_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | Max capacity of a Raft log segment file of SchemaRegion (in bytes, 24MB by default) | -| Type | int32 | -| Default | 25165824 | -| Effective | Restart required. | - -- data_region_ratis_log_segment_size_max_in_byte - -| Name | data_region_ratis_log_segment_size_max_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | Max capacity of a Raft log segment file of DataRegion (in bytes, 24MB by default) | -| Type | int32 | -| Default | 25165824 | -| Effective | Restart required. | - -- config_node_simple_consensus_log_segment_size_max_in_byte - -| Name | config_node_simple_consensus_log_segment_size_max_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | Max capacity of a simple-consensus log segment file of ConfigNode (in bytes, 24MB by default) | -| Type | int32 | -| Default | 25165824 | -| Effective | Restart required. | - -- config_node_ratis_grpc_flow_control_window - -| Name | config_node_ratis_grpc_flow_control_window | -| ----------- | ---------------------------------------------------------- | -| Description | ConfigNode flow control window for the Ratis gRPC log appender | -| Type | int32 | -| Default | 4194304 | -| Effective | Restart required. | - -- schema_region_ratis_grpc_flow_control_window - -| Name | schema_region_ratis_grpc_flow_control_window | -| ----------- | ------------------------------------------------------------ | -| Description | Schema region flow control window for the Ratis gRPC log appender | -| Type | int32 | -| Default | 4194304 | -| Effective | Restart required. | - -- data_region_ratis_grpc_flow_control_window - -| Name | data_region_ratis_grpc_flow_control_window | -| ----------- | ----------------------------------------------------------- | -| Description | Data region flow control window for the Ratis gRPC log appender | -| Type | int32 | -| Default | 4194304 | -| Effective | Restart required. | - -- config_node_ratis_grpc_leader_outstanding_appends_max - -| Name | config_node_ratis_grpc_leader_outstanding_appends_max | -| ----------- | ----------------------------------------------------- | -| Description | ConfigNode gRPC pipeline concurrency threshold (max outstanding append requests) | -| Type | int32 | -| Default | 128 | -| Effective | Restart required. | - -- schema_region_ratis_grpc_leader_outstanding_appends_max - -| Name | schema_region_ratis_grpc_leader_outstanding_appends_max | -| ----------- | ------------------------------------------------------- | -| Description | Schema region gRPC pipeline concurrency threshold (max outstanding append requests) | -| Type | int32 | -| Default | 128 | -| Effective | Restart required. | - -- data_region_ratis_grpc_leader_outstanding_appends_max - -| Name | data_region_ratis_grpc_leader_outstanding_appends_max | -| ----------- | ----------------------------------------------------- | -| Description | Data region gRPC pipeline concurrency threshold (max outstanding append requests) | -| Type | int32 | -| Default | 128 | -| Effective | Restart required.
| - -- config_node_ratis_log_force_sync_num - -| Name | config_node_ratis_log_force_sync_num | -| ----------- | ------------------------------------ | -| Description | ConfigNode Raft log fsync threshold | -| Type | int32 | -| Default | 128 | -| Effective | Restart required. | - -- schema_region_ratis_log_force_sync_num - -| Name | schema_region_ratis_log_force_sync_num | -| ----------- | -------------------------------------- | -| Description | Schema region Raft log fsync threshold | -| Type | int32 | -| Default | 128 | -| Effective | Restart required. | - -- data_region_ratis_log_force_sync_num - -| Name | data_region_ratis_log_force_sync_num | -| ----------- | ------------------------------------ | -| Description | Data region Raft log fsync threshold | -| Type | int32 | -| Default | 128 | -| Effective | Restart required. | - -- config_node_ratis_rpc_leader_election_timeout_min_ms - -| Name | config_node_ratis_rpc_leader_election_timeout_min_ms | -| ----------- | ---------------------------------------------------- | -| Description | ConfigNode leader min election timeout | -| Type | int32 | -| Default | 2000 (ms) | -| Effective | Restart required. | - -- schema_region_ratis_rpc_leader_election_timeout_min_ms - -| Name | schema_region_ratis_rpc_leader_election_timeout_min_ms | -| ----------- | ------------------------------------------------------ | -| Description | Schema region leader min election timeout | -| Type | int32 | -| Default | 2000 (ms) | -| Effective | Restart required. | - -- data_region_ratis_rpc_leader_election_timeout_min_ms - -| Name | data_region_ratis_rpc_leader_election_timeout_min_ms | -| ----------- | ---------------------------------------------------- | -| Description | Data region leader min election timeout | -| Type | int32 | -| Default | 2000 (ms) | -| Effective | Restart required. | - -- config_node_ratis_rpc_leader_election_timeout_max_ms - -| Name | config_node_ratis_rpc_leader_election_timeout_max_ms | -| ----------- | ---------------------------------------------------- | -| Description | ConfigNode leader max election timeout | -| Type | int32 | -| Default | 4000 (ms) | -| Effective | Restart required. | - -- schema_region_ratis_rpc_leader_election_timeout_max_ms - -| Name | schema_region_ratis_rpc_leader_election_timeout_max_ms | -| ----------- | ------------------------------------------------------ | -| Description | Schema region leader max election timeout | -| Type | int32 | -| Default | 4000 (ms) | -| Effective | Restart required. | - -- data_region_ratis_rpc_leader_election_timeout_max_ms - -| Name | data_region_ratis_rpc_leader_election_timeout_max_ms | -| ----------- | ---------------------------------------------------- | -| Description | Data region leader max election timeout | -| Type | int32 | -| Default | 4000 (ms) | -| Effective | Restart required. | - -- config_node_ratis_request_timeout_ms - -| Name | config_node_ratis_request_timeout_ms | -| ----------- | --------------------------------------- | -| Description | ConfigNode Ratis client request timeout | -| Type | int32 | -| Default | 10000 | -| Effective | Restart required. | - -- schema_region_ratis_request_timeout_ms - -| Name | schema_region_ratis_request_timeout_ms | -| ----------- | ------------------------------------------ | -| Description | Schema region Ratis client request timeout | -| Type | int32 | -| Default | 10000 | -| Effective | Restart required.
| - -- data_region_ratis_request_timeout_ms - -| Name | data_region_ratis_request_timeout_ms | -| ----------- | ---------------------------------------- | -| Description | Data region Ratis client request timeout | -| Type | int32 | -| Default | 10000 | -| Effective | Restart required. | - -- config_node_ratis_max_retry_attempts - -| Name | config_node_ratis_max_retry_attempts | -| ----------- | ------------------------------------ | -| Description | ConfigNode Ratis client max retry attempts | -| Type | int32 | -| Default | 10 | -| Effective | Restart required. | - -- config_node_ratis_initial_sleep_time_ms - -| Name | config_node_ratis_initial_sleep_time_ms | -| ----------- | ------------------------------------------ | -| Description | ConfigNode Ratis client initial sleep time | -| Type | int32 | -| Default | 100 (ms) | -| Effective | Restart required. | - -- config_node_ratis_max_sleep_time_ms - -| Name | config_node_ratis_max_sleep_time_ms | -| ----------- | -------------------------------------------- | -| Description | ConfigNode Ratis client max retry sleep time | -| Type | int32 | -| Default | 10000 | -| Effective | Restart required. | - -- schema_region_ratis_max_retry_attempts - -| Name | schema_region_ratis_max_retry_attempts | -| ----------- | ------------------------------------------ | -| Description | Schema region Ratis client max retry attempts | -| Type | int32 | -| Default | 10 | -| Effective | Restart required. | - -- schema_region_ratis_initial_sleep_time_ms - -| Name | schema_region_ratis_initial_sleep_time_ms | -| ----------- | ------------------------------------------ | -| Description | Schema region Ratis client initial sleep time | -| Type | int32 | -| Default | 100 (ms) | -| Effective | Restart required. | - -- schema_region_ratis_max_sleep_time_ms - -| Name | schema_region_ratis_max_sleep_time_ms | -| ----------- | ----------------------------------------- | -| Description | Schema region Ratis client max sleep time | -| Type | int32 | -| Default | 1000 | -| Effective | Restart required. | - -- data_region_ratis_max_retry_attempts - -| Name | data_region_ratis_max_retry_attempts | -| ----------- | --------------------------------------------- | -| Description | Data region Ratis client max retry attempts | -| Type | int32 | -| Default | 10 | -| Effective | Restart required. | - -- data_region_ratis_initial_sleep_time_ms - -| Name | data_region_ratis_initial_sleep_time_ms | -| ----------- | ---------------------------------------- | -| Description | Data region Ratis client initial sleep time | -| Type | int32 | -| Default | 100 (ms) | -| Effective | Restart required. | - -- data_region_ratis_max_sleep_time_ms - -| Name | data_region_ratis_max_sleep_time_ms | -| ----------- | --------------------------------------------- | -| Description | Data region Ratis client max retry sleep time | -| Type | int32 | -| Default | 1000 | -| Effective | Restart required. | - -- ratis_first_election_timeout_min_ms - -| Name | ratis_first_election_timeout_min_ms | -| ----------- | ----------------------------------- | -| Description | Ratis first election min timeout | -| Type | int64 | -| Default | 50 (ms) | -| Effective | Restart required. | - -- ratis_first_election_timeout_max_ms - -| Name | ratis_first_election_timeout_max_ms | -| ----------- | ----------------------------------- | -| Description | Ratis first election max timeout | -| Type | int64 | -| Default | 150 (ms) | -| Effective | Restart required. |
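To make the interplay of these Ratis timing parameters concrete, here is a hedged sketch that widens the data-region election window for a slow network; the values are illustrative, and the only firm constraint taken from this section is that the min timeout should stay below the max.

```properties
# Widen the leader-election window for data regions (restart required).
data_region_ratis_rpc_leader_election_timeout_min_ms=4000
data_region_ratis_rpc_leader_election_timeout_max_ms=8000
# Give followers more time before a client retry, then back off up to 1 s.
data_region_ratis_request_timeout_ms=20000
data_region_ratis_initial_sleep_time_ms=100
data_region_ratis_max_sleep_time_ms=1000
```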
- -- config_node_ratis_preserve_logs_num_when_purge - -| Name | config_node_ratis_preserve_logs_num_when_purge | -| ----------- | ------------------------------------------------------------ | -| Description | Number of logs ConfigNode preserves when taking a snapshot and purging | -| Type | int32 | -| Default | 1000 | -| Effective | Restart required. | - -- schema_region_ratis_preserve_logs_num_when_purge - -| Name | schema_region_ratis_preserve_logs_num_when_purge | -| ----------- | ------------------------------------------------------------ | -| Description | Number of logs the schema region preserves when taking a snapshot and purging | -| Type | int32 | -| Default | 1000 | -| Effective | Restart required. | - -- data_region_ratis_preserve_logs_num_when_purge - -| Name | data_region_ratis_preserve_logs_num_when_purge | -| ----------- | ------------------------------------------------------------ | -| Description | Number of logs the data region preserves when taking a snapshot and purging | -| Type | int32 | -| Default | 1000 | -| Effective | Restart required. | - -- config_node_ratis_log_max_size - -| Name | config_node_ratis_log_max_size | -| ----------- | -------------------------------------- | -| Description | ConfigNode Raft log disk size limit | -| Type | int64 | -| Default | 2147483648 (2GB) | -| Effective | Restart required. | - -- schema_region_ratis_log_max_size - -| Name | schema_region_ratis_log_max_size | -| ----------- | ---------------------------------------- | -| Description | Schema region Raft log disk size limit | -| Type | int64 | -| Default | 2147483648 (2GB) | -| Effective | Restart required. | - -- data_region_ratis_log_max_size - -| Name | data_region_ratis_log_max_size | -| ----------- | -------------------------------------- | -| Description | Data region Raft log disk size limit | -| Type | int64 | -| Default | 21474836480 (20GB) | -| Effective | Restart required. | - -- config_node_ratis_periodic_snapshot_interval - -| Name | config_node_ratis_periodic_snapshot_interval | -| ----------- | -------------------------------------------- | -| Description | ConfigNode Raft periodic snapshot interval | -| Type | int64 | -| Default | 86400 (s) | -| Effective | Restart required. | - -- schema_region_ratis_periodic_snapshot_interval - -| Name | schema_region_ratis_periodic_snapshot_interval | -| ----------- | ------------------------------------------------ | -| Description | Schema region Raft periodic snapshot interval | -| Type | int64 | -| Default | 86400 (s) | -| Effective | Restart required. | - -- data_region_ratis_periodic_snapshot_interval - -| Name | data_region_ratis_periodic_snapshot_interval | -| ----------- | ---------------------------------------------- | -| Description | Data region Raft periodic snapshot interval | -| Type | int64 | -| Default | 86400 (s) | -| Effective | Restart required. | - -### 4.31 IoTConsensusV2 Configuration - -- iot_consensus_v2_pipeline_size - -| Name | iot_consensus_v2_pipeline_size | -| ----------- | ------------------------------------------------------------ | -| Description | Default event buffer size for the connector and receiver in IoTConsensus V2 | -| Type | int | -| Default | 5 | -| Effective | Restart required. | - -- iot_consensus_v2_mode - -| Name | iot_consensus_v2_mode | -| ----------- | ------------------------------ | -| Description | IoTConsensusV2 mode. | -| Type | String | -| Default | batch | -| Effective | Restart required. |
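A minimal sketch of the two IoTConsensus V2 parameters above; `batch` is the documented default mode, and the buffer value is illustrative.

```properties
# Buffer up to 10 in-flight events between connector and receiver (restart required).
iot_consensus_v2_pipeline_size=10
# Keep the default batch replication mode.
iot_consensus_v2_mode=batch
```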
- -### 4.32 Procedure Configuration - -- procedure_core_worker_thread_count - -| Name | procedure_core_worker_thread_count | -| ----------- | ------------------------------------- | -| Description | Default number of worker threads | -| Type | int32 | -| Default | 4 | -| Effective | Restart required. | - -- procedure_completed_clean_interval - -| Name | procedure_completed_clean_interval | -| ----------- | ------------------------------------------------------------ | -| Description | Time interval at which the completed-procedure cleaner runs, in seconds | -| Type | int32 | -| Default | 30(s) | -| Effective | Restart required. | - -- procedure_completed_evict_ttl - -| Name | procedure_completed_evict_ttl | -| ----------- | ------------------------------------------------------- | -| Description | Default TTL of completed procedures, in seconds | -| Type | int32 | -| Default | 60(s) | -| Effective | Restart required. | - -### 4.33 MQTT Broker Configuration - -- enable_mqtt_service - -| Name | enable_mqtt_service | -| ----------- | ----------------------------------- | -| Description | Whether to enable the MQTT service. | -| Type | Boolean | -| Default | false | -| Effective | Hot reload | - -- mqtt_host - -| Name | mqtt_host | -| ----------- | ------------------------------ | -| Description | The MQTT service binding host. | -| Type | String | -| Default | 127.0.0.1 | -| Effective | Hot reload | - -- mqtt_port - -| Name | mqtt_port | -| ----------- | ------------------------------ | -| Description | The MQTT service binding port. | -| Type | int32 | -| Default | 1883 | -| Effective | Hot reload | - -- mqtt_handler_pool_size - -| Name | mqtt_handler_pool_size | -| ----------- | ---------------------------------------------------- | -| Description | The handler pool size for handling MQTT messages. | -| Type | int32 | -| Default | 1 | -| Effective | Hot reload | - -- mqtt_payload_formatter - -| Name | mqtt_payload_formatter | -| ----------- | ----------------------------------- | -| Description | The MQTT message payload formatter. | -| Type | String | -| Default | json | -| Effective | Hot reload | - -- mqtt_max_message_size - -| Name | mqtt_max_message_size | -| ----------- | ---------------------------------- | -| Description | Max length of an MQTT message, in bytes | -| Type | int32 | -| Default | 1048576 | -| Effective | Hot reload | - -### 4.34 Audit Log Configuration - -- enable_audit_log - -| Name | enable_audit_log | -| ----------- | -------------------------------- | -| Description | Whether to enable the audit log. | -| Type | Boolean | -| Default | false | -| Effective | Restart required. | - -- audit_log_storage - -| Name | audit_log_storage | -| ----------- | ----------------------------- | -| Description | Output location of audit logs | -| Type | String | -| Default | IOTDB,LOGGER | -| Effective | Restart required. | - -- audit_log_operation - -| Name | audit_log_operation | -| ----------- | ------------------------------------------------------------ | -| Description | Which operation categories are recorded in the audit log: DML operations on data, DDL operations on schema, and QUERY operations on data and schema | -| Type | String | -| Default | DML,DDL,QUERY | -| Effective | Restart required. |
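The audit-log switches above combine as in the following hedged sketch; all three require a restart.

```properties
# Turn on auditing and write entries both to IoTDB and to the log files.
enable_audit_log=true
audit_log_storage=IOTDB,LOGGER
# Record data writes (DML), schema changes (DDL), and queries (QUERY).
audit_log_operation=DML,DDL,QUERY
```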
- -- enable_audit_log_for_native_insert_api - -| Name | enable_audit_log_for_native_insert_api | -| ----------- | ---------------------------------------------- | -| Description | Whether the native write API records audit logs | -| Type | Boolean | -| Default | true | -| Effective | Restart required. | - -### 4.35 White List Configuration - -- enable_white_list - -| Name | enable_white_list | -| ----------- | ------------------------- | -| Description | Whether to enable the white list | -| Type | Boolean | -| Default | false | -| Effective | Hot reload | - -### 4.36 IoTDB-AI Configuration - -- model_inference_execution_thread_count - -| Name | model_inference_execution_thread_count | -| ----------- | ------------------------------------------------------------ | -| Description | The number of threads available for model inference operations. | -| Type | int | -| Default | 5 | -| Effective | Restart required. | - -### 4.37 Load TsFile Configuration - -- load_clean_up_task_execution_delay_time_seconds - -| Name | load_clean_up_task_execution_delay_time_seconds | -| ----------- | ------------------------------------------------------------ | -| Description | Delay after which the clean-up task removes unsuccessfully loaded TsFiles, in seconds. | -| Type | int | -| Default | 1800 | -| Effective | Hot reload | - -- load_write_throughput_bytes_per_second - -| Name | load_write_throughput_bytes_per_second | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum bytes per second of disk write throughput when loading TsFiles. | -| Type | int | -| Default | -1 | -| Effective | Hot reload | - -- load_active_listening_enable - -| Name | load_active_listening_enable | -| ----------- | ------------------------------------------------------------ | -| Description | Whether to enable the active listening mode for TsFile loading. | -| Type | Boolean | -| Default | true | -| Effective | Hot reload | - -- load_active_listening_dirs - -| Name | load_active_listening_dirs | -| ----------- | ------------------------------------------------------------ | -| Description | The directories watched in active listening mode for TsFile loading. Multiple directories should be separated by ','. | -| Type | String | -| Default | ext/load/pending | -| Effective | Hot reload | - -- load_active_listening_fail_dir - -| Name | load_active_listening_fail_dir | -| ----------- | ------------------------------------------------------------ | -| Description | The directory where TsFiles are moved if the active listening mode fails to load them. | -| Type | String | -| Default | ext/load/failed | -| Effective | Hot reload | - -- load_active_listening_max_thread_num - -| Name | load_active_listening_max_thread_num | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum number of threads for actively loading TsFiles. When this parameter is commented out or set to a value <= 0, the number of CPU cores is used. | -| Type | Long | -| Default | 0 | -| Effective | Restart required. | - -- load_active_listening_check_interval_seconds - -| Name | load_active_listening_check_interval_seconds | -| ----------- | ------------------------------------------------------------ | -| Description | The interval, in seconds, at which the active listening mode checks the directories specified in `load_active_listening_dirs`, i.e., the directories are checked every `load_active_listening_check_interval_seconds` seconds. | -| Type | Long | -| Default | 5 | -| Effective | Restart required. |
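Putting the active-listening parameters together, a hedged sketch of a drop-in loading setup; the second pending directory and the thread count are illustrative.

```properties
# Watch two drop-in directories for TsFiles to load automatically (hot reload).
load_active_listening_enable=true
load_active_listening_dirs=ext/load/pending,ext/load/pending2
load_active_listening_fail_dir=ext/load/failed
# Poll every 5 seconds with up to 4 loader threads (thread count needs a restart).
load_active_listening_check_interval_seconds=5
load_active_listening_max_thread_num=4
```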
- -- last_cache_operation_on_load - -| Name | last_cache_operation_on_load | -| ----------- | ------------------------------------------------------------ | -| Description | The operation performed on the LastCache when a TsFile is successfully loaded. `UPDATE`: use the data in the TsFile to update the LastCache; `UPDATE_NO_BLOB`: similar to UPDATE, but invalidates the LastCache for blob series; `CLEAN_DEVICE`: invalidate the LastCache of devices contained in the TsFile; `CLEAN_ALL`: clean the whole LastCache. | -| Type | String | -| Default | UPDATE_NO_BLOB | -| Effective | Restart required. | - -- cache_last_values_for_load - -| Name | cache_last_values_for_load | -| ----------- | ------------------------------------------------------------ | -| Description | Whether to cache last values before loading a TsFile. Only effective when `last_cache_operation_on_load=UPDATE_NO_BLOB` or `last_cache_operation_on_load=UPDATE`. When set to true, blob series will be ignored even with `last_cache_operation_on_load=UPDATE`. Enabling this will increase the memory footprint while loading TsFiles. | -| Type | Boolean | -| Default | true | -| Effective | Restart required. | - -- cache_last_values_memory_budget_in_byte - -| Name | cache_last_values_memory_budget_in_byte | -| ----------- | ------------------------------------------------------------ | -| Description | When `cache_last_values_for_load=true`, the maximum memory that can be used to cache last values. If this value is exceeded, the cached values will be abandoned and last values will be read from the TsFile in a streaming manner. | -| Type | int32 | -| Default | 4194304 | -| Effective | Restart required. | - -### 4.38 Dispatch Retry Configuration - -- write_request_remote_dispatch_max_retry_duration_in_ms - -| Name | write_request_remote_dispatch_max_retry_duration_in_ms | -| ----------- | ------------------------------------------------------------ | -| Description | The maximum retry duration for remotely dispatching write requests, in milliseconds. | -| Type | Long | -| Default | 60000 | -| Effective | Hot reload | - -- enable_retry_for_unknown_error - -| Name | enable_retry_for_unknown_error | -| ----------- | ------------------------------------ | -| Description | Whether to retry on unknown errors. | -| Type | boolean | -| Default | false | -| Effective | Hot reload | \ No newline at end of file diff --git a/src/UserGuide/latest-Table/Reference/System-Config-Manual_apache.md b/src/UserGuide/latest-Table/Reference/System-Config-Manual_apache.md new file mode 100644 index 000000000..c7006be42 --- /dev/null +++ b/src/UserGuide/latest-Table/Reference/System-Config-Manual_apache.md @@ -0,0 +1,3383 @@ + +# Config Manual + +## 1. IoTDB Configuration Files + +The configuration files for IoTDB are located in the `conf` folder under the IoTDB installation directory.
Key configuration files include: + +1. `confignode-env.sh` **/** `confignode-env.bat`: + 1. Environment configuration file for ConfigNode. + 2. Used to configure memory size and other environment settings for ConfigNode. +2. `datanode-env.sh` **/** `datanode-env.bat`: + 1. Environment configuration file for DataNode. + 2. Used to configure memory size and other environment settings for DataNode. +3. `iotdb-system.properties`: + 1. Main configuration file for IoTDB. + 2. Contains configurable parameters for IoTDB. +4. `iotdb-system.properties.template`: + 1. Template for the `iotdb-system.properties` file. + 2. Provides a reference for all available configuration parameters. + +## 2. Modify Configurations + +### 2.1 **Modify Existing Parameters** + +- Parameters already present in the `iotdb-system.properties` file can be directly modified. + +### 2.2 **Adding New Parameters** + +- For parameters not listed in `iotdb-system.properties`, you can find them in the `iotdb-system.properties.template` file. +- Copy the desired parameter from the template file to `iotdb-system.properties` and modify its value. + +### 2.3 Configuration Update Methods + +Different configuration parameters have different update methods, categorized as follows: + +1. **Modify Before the First Startup**: + 1. These parameters can only be modified before the first startup of ConfigNode/DataNode. + 2. Modifying them after the first startup will prevent ConfigNode/DataNode from starting. +2. **Restart Required for Changes to Take Effect**: + 1. These parameters can be modified after ConfigNode/DataNode has started. + 2. However, a restart of ConfigNode/DataNode is required for the changes to take effect. +3. **Hot Reload**: + 1. These parameters can be modified while ConfigNode/DataNode is running. + 2. After modification, use the following SQL commands to apply the changes: + - `load configuration`: Reloads the configuration. + - `set configuration key1 = 'value1'`: Updates specific configuration parameters. + +## 3. Environment Parameters + +The environment configuration files (`confignode-env.sh/bat` and `datanode-env.sh/bat`) are used to configure Java environment parameters for ConfigNode and DataNode, such as JVM settings. These configurations are passed to the JVM when ConfigNode or DataNode starts. + +### 3.1 **confignode-env.sh/bat** + +- MEMORY_SIZE + +| Name | MEMORY_SIZE | +| ----------- | ------------------------------------------------------------ | +| Description | Memory size allocated when IoTDB ConfigNode starts. | +| Type | String | +| Default | Depends on the operating system and machine configuration. Defaults to 3/10 of the machine's memory, capped at 16G. | +| Effective | Restart required | + +- ON_HEAP_MEMORY + +| Name | ON_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | On-heap memory size available for IoTDB ConfigNode. Previously named `MAX_HEAP_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. | +| Effective | Restart required | + +- OFF_HEAP_MEMORY + +| Name | OFF_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | Off-heap memory size available for IoTDB ConfigNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. | +| Effective | Restart required |
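For example, the memory settings in `confignode-env.sh` are plain KEY=VALUE assignments; the following is a hedged sketch with illustrative sizes, assuming that setting `MEMORY_SIZE` alone lets the on-heap/off-heap split be derived automatically, as the defaults above suggest.

```properties
# confignode-env.sh: give the ConfigNode 4 GB in total and derive the split...
MEMORY_SIZE=4G
# ...or pin the split explicitly instead (uncomment to override).
# ON_HEAP_MEMORY=3G
# OFF_HEAP_MEMORY=1G
```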
+ +### 3.2 **datanode-env.sh/bat** + +- MEMORY_SIZE + +| Name | MEMORY_SIZE | +| ----------- | ------------------------------------------------------------ | +| Description | Memory size allocated when IoTDB DataNode starts. | +| Type | String | +| Default | Depends on the operating system and machine configuration. Defaults to 1/2 of the machine's memory. | +| Effective | Restart required | + +- ON_HEAP_MEMORY + +| Name | ON_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | On-heap memory size available for IoTDB DataNode. Previously named `MAX_HEAP_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. | +| Effective | Restart required | + +- OFF_HEAP_MEMORY + +| Name | OFF_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | Off-heap memory size available for IoTDB DataNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. | +| Effective | Restart required | + +## 4. System Parameters (`iotdb-system.properties.template`) + +The `iotdb-system.properties` file contains various configurations for managing IoTDB clusters, nodes, replication, directories, monitoring, SSL, connections, object storage, tier management, and REST services. Below is a detailed breakdown of the parameters: + +### 4.1 Cluster Configuration + +- cluster_name + +| Name | cluster_name | +| ----------- | --------------------------------------------------------- | +| Description | Name of the cluster. | +| Type | String | +| Default | default_cluster | +| Effective | Use CLI: `set configuration cluster_name='xxx'`. | +| Note | Changes are distributed across nodes, but may not propagate to every node in case of network issues or node failures. Nodes that fail to update must manually modify `cluster_name` in their configuration files and restart. Under normal circumstances, it is not recommended to modify `cluster_name` by editing the configuration files manually or to hot-load it via `load configuration`. | + +### 4.2 Seed ConfigNode + +- cn_seed_config_node + +| Name | cn_seed_config_node | +| ----------- | ------------------------------------------------------------ | +| Description | Address of the seed ConfigNode for a ConfigNode to join the cluster. | +| Type | String | +| Default | 127.0.0.1:10710 | +| Effective | Modify before the first startup. | + +- dn_seed_config_node + +| Name | dn_seed_config_node | +| ----------- | ------------------------------------------------------------ | +| Description | Address of the seed ConfigNode for a DataNode to join the cluster. | +| Type | String | +| Default | 127.0.0.1:10710 | +| Effective | Modify before the first startup. | + +### 4.3 Node RPC Configuration + +- cn_internal_address + +| Name | cn_internal_address | +| ----------- | ---------------------------------------------- | +| Description | Internal address for ConfigNode communication. | +| Type | String | +| Default | 127.0.0.1 | +| Effective | Modify before the first startup. | + +- cn_internal_port + +| Name | cn_internal_port | +| ----------- | ------------------------------------------- | +| Description | Port for ConfigNode internal communication. | +| Type | Short Int : [0,65535] | +| Default | 10710 | +| Effective | Modify before the first startup.
| + +- cn_consensus_port + +| Name | cn_consensus_port | +| ----------- | ----------------------------------------------------- | +| Description | Port for ConfigNode consensus protocol communication. | +| Type | Short Int : [0,65535] | +| Default | 10720 | +| Effective | Modify before the first startup. | + +- dn_rpc_address + +| Name | dn_rpc_address | +| ----------- |---------------------------------| +| Description | Address for client RPC service. | +| Type | String | +| Default | 127.0.0.1 | +| Effective | Restart required. | + +- dn_rpc_port + +| Name | dn_rpc_port | +| ----------- | ---------------------------- | +| Description | Port for client RPC service. | +| Type | Short Int : [0,65535] | +| Default | 6667 | +| Effective | Restart required. | + +- dn_internal_address + +| Name | dn_internal_address | +| ----------- | -------------------------------------------- | +| Description | Internal address for DataNode communication. | +| Type | String | +| Default | 127.0.0.1 | +| Effective | Modify before the first startup. | + +- dn_internal_port + +| Name | dn_internal_port | +| ----------- | ----------------------------------------- | +| Description | Port for DataNode internal communication. | +| Type | int | +| Default | 10730 | +| Effective | Modify before the first startup. | + +- dn_mpp_data_exchange_port + +| Name | dn_mpp_data_exchange_port | +| ----------- | -------------------------------- | +| Description | Port for MPP data exchange. | +| Type | int | +| Default | 10740 | +| Effective | Modify before the first startup. | + +- dn_schema_region_consensus_port + +| Name | dn_schema_region_consensus_port | +| ----------- | ------------------------------------------------------------ | +| Description | Port for DataNode SchemaRegion consensus protocol communication. | +| Type | int | +| Default | 10750 | +| Effective | Modify before the first startup. | + +- dn_data_region_consensus_port + +| Name | dn_data_region_consensus_port | +| ----------- | ------------------------------------------------------------ | +| Description | Port for DataNode DataRegion consensus protocol communication. | +| Type | int | +| Default | 10760 | +| Effective | Modify before the first startup. | + +- dn_join_cluster_retry_interval_ms + +| Name | dn_join_cluster_retry_interval_ms | +| ----------- | --------------------------------------------------- | +| Description | Interval for DataNode to retry joining the cluster. | +| Type | long | +| Default | 5000 | +| Effective | Restart required. | + +### 4.4 Replication Configuration + +- config_node_consensus_protocol_class + +| Name | config_node_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for ConfigNode replication; only RatisConsensus is supported | +| Type | String | +| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | +| Effective | Modify before the first startup. | + +- schema_replication_factor + +| Name | schema_replication_factor | +| ----------- | ------------------------------------------------------------ | +| Description | Default schema replication factor for databases. | +| Type | int32 | +| Default | 1 | +| Effective | Restart required. Takes effect for new databases after restart. | + +- schema_region_consensus_protocol_class + +| Name | schema_region_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for schema region replication.
Only RatisConsensus is supported when using multiple replicas. | +| Type | String | +| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | +| Effective | Modify before the first startup. | + +- data_replication_factor + +| Name | data_replication_factor | +| ----------- | ------------------------------------------------------------ | +| Description | Default data replication factor for databases. | +| Type | int32 | +| Default | 1 | +| Effective | Restart required. Takes effect for new databases after restart. | + +- data_region_consensus_protocol_class + +| Name | data_region_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for data region replication. IoTConsensus or RatisConsensus is supported when using multiple replicas. | +| Type | String | +| Default | org.apache.iotdb.consensus.iot.IoTConsensus | +| Effective | Modify before the first startup. | + +### 4.5 Directory Configuration + +- cn_system_dir + +| Name | cn_system_dir | +| ----------- | ----------------------------------------------------------- | +| Description | System data storage path for ConfigNode. | +| Type | String | +| Default | data/confignode/system(Windows:data\\confignode\\system) | +| Effective | Restart required | + +- cn_consensus_dir + +| Name | cn_consensus_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol data storage path for ConfigNode. | +| Type | String | +| Default | data/confignode/consensus(Windows:data\\confignode\\consensus) | +| Effective | Restart required | + +- cn_pipe_receiver_file_dir + +| Name | cn_pipe_receiver_file_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for pipe receiver files in ConfigNode. | +| Type | String | +| Default | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) | +| Effective | Restart required | + +- dn_system_dir + +| Name | dn_system_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Schema storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/system(Windows:data\\datanode\\system) | +| Effective | Restart required | + +- dn_data_dirs + +| Name | dn_data_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Data storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/data(Windows:data\\datanode\\data) | +| Effective | Restart required | + +- dn_multi_dir_strategy + +| Name | dn_multi_dir_strategy | +| ----------- | ------------------------------------------------------------ | +| Description | The strategy used by IoTDB to select directories in `data_dirs` for TsFiles. You can use either the simple class name or the fully qualified class name. The system provides the following two strategies: 1. SequenceStrategy: IoTDB selects directories sequentially, iterating through all directories in `data_dirs` in a round-robin manner. 2.
MaxDiskUsableSpaceFirstStrategy: IoTDB prioritizes the directory in `data_dirs` with the largest disk free space. To implement a custom strategy: 1. Inherit the `org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy` class and implement your own strategy method. 2. Fill in the configuration item with the fully qualified class name of your implementation (package name + class name, e.g., `UserDefineStrategyPackage`). 3. Add the JAR file containing your custom class to the project. | +| Type | String | +| Default | SequenceStrategy | +| Effective | Hot reload. | + +- dn_consensus_dir + +| Name | dn_consensus_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus log storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/consensus(Windows:data\\datanode\\consensus) | +| Effective | Restart required | + +- dn_wal_dirs + +| Name | dn_wal_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Write-ahead log (WAL) storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/wal(Windows:data\\datanode\\wal) | +| Effective | Restart required | + +- dn_tracing_dir + +| Name | dn_tracing_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Tracing root directory for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | datanode/tracing(Windows:datanode\\tracing) | +| Effective | Restart required | + +- dn_sync_dir + +| Name | dn_sync_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Sync storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of the relative path is related to the operating system. It is recommended to use an absolute path. | +| Type | String | +| Default | data/datanode/sync(Windows:data\\datanode\\sync) | +| Effective | Restart required | + +- sort_tmp_dir + +| Name | sort_tmp_dir | +| ----------- | ------------------------------------------------- | +| Description | Temporary directory for sorting operations. | +| Type | String | +| Default | data/datanode/tmp(Windows:data\\datanode\\tmp) | +| Effective | Restart required | + +- dn_pipe_receiver_file_dirs + +| Name | dn_pipe_receiver_file_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for pipe receiver files in DataNode. | +| Type | String | +| Default | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | +| Effective | Restart required | + +- iot_consensus_v2_receiver_file_dirs + +| Name | iot_consensus_v2_receiver_file_dirs | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for IoTConsensus V2 receiver files. | +| Type | String | +| Default | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | +| Effective | Restart required |
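To illustrate the directory parameters in this section, here is a hedged sketch that spreads TsFiles across two disks and isolates the WAL; the paths are placeholders, and comma-separated multi-directory values are assumed to follow the default template's convention.

```properties
# Spread TsFiles across two disks, preferring the one with the most free space.
dn_data_dirs=/data1/iotdb/data,/data2/iotdb/data
dn_multi_dir_strategy=MaxDiskUsableSpaceFirstStrategy
# Keep the write-ahead log on a separate, fast device.
dn_wal_dirs=/wal/iotdb
```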
+ +- iot_consensus_v2_deletion_file_dir + +| Name | iot_consensus_v2_deletion_file_dir | +| ----------- | ------------------------------------------------------------ | +| Description | Directory for IoTConsensus V2 deletion files. | +| Type | String | +| Default | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | +| Effective | Restart required | + +### 4.6 Metric Configuration + +- cn_metric_reporter_list + +| Name | cn_metric_reporter_list | +| ----------- | ----------------------------------------- | +| Description | Systems for reporting ConfigNode metrics. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- cn_metric_level + +| Name | cn_metric_level | +| ----------- | --------------------------------------- | +| Description | Level of detail for ConfigNode metrics. | +| Type | String | +| Default | IMPORTANT | +| Effective | Restart required. | + +- cn_metric_async_collect_period + +| Name | cn_metric_async_collect_period | +| ----------- | ------------------------------------------------------------ | +| Description | Period for asynchronous metric collection in ConfigNode (in seconds). | +| Type | int | +| Default | 5 | +| Effective | Restart required. | + +- cn_metric_prometheus_reporter_port + +| Name | cn_metric_prometheus_reporter_port | +| ----------- | --------------------------------------------------- | +| Description | Port for Prometheus metric reporting in ConfigNode. | +| Type | int | +| Default | 9091 | +| Effective | Restart required. | + +- dn_metric_reporter_list + +| Name | dn_metric_reporter_list | +| ----------- | --------------------------------------- | +| Description | Systems for reporting DataNode metrics. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- dn_metric_level + +| Name | dn_metric_level | +| ----------- | ------------------------------------- | +| Description | Level of detail for DataNode metrics. | +| Type | String | +| Default | IMPORTANT | +| Effective | Restart required. | + +- dn_metric_async_collect_period + +| Name | dn_metric_async_collect_period | +| ----------- | ------------------------------------------------------------ | +| Description | Period for asynchronous metric collection in DataNode (in seconds). | +| Type | int | +| Default | 5 | +| Effective | Restart required. | + +- dn_metric_prometheus_reporter_port + +| Name | dn_metric_prometheus_reporter_port | +| ----------- | ------------------------------------------------- | +| Description | Port for Prometheus metric reporting in DataNode. | +| Type | int | +| Default | 9092 | +| Effective | Restart required. | + +- dn_metric_internal_reporter_type + +| Name | dn_metric_internal_reporter_type | +| ----------- | ------------------------------------------------------------ | +| Description | Internal reporter type for DataNode metrics, used for internal monitoring and for checking that data has been successfully written and flushed. | +| Type | String | +| Default | IOTDB | +| Effective | Restart required. | + +### 4.7 SSL Configuration + +- enable_thrift_ssl + +| Name | enable_thrift_ssl | +| ----------- | --------------------------------------------- | +| Description | Enables SSL encryption for RPC communication. | +| Type | Boolean | +| Default | false | +| Effective | Restart required.
| + +- enable_https + +| Name | enable_https | +| ----------- | ------------------------------ | +| Description | Enables SSL for REST services. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- key_store_path + +| Name | key_store_path | +| ----------- | ---------------------------- | +| Description | Path to the SSL certificate. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +- key_store_pwd + +| Name | key_store_pwd | +| ----------- | --------------------------------- | +| Description | Password for the SSL certificate. | +| Type | String | +| Default | None | +| Effective | Restart required. | + +### 4.8 Connection Configuration + +- cn_rpc_thrift_compression_enable + +| Name | cn_rpc_thrift_compression_enable | +| ----------- | ----------------------------------- | +| Description | Enables Thrift compression for RPC. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- cn_rpc_max_concurrent_client_num + +| Name | cn_rpc_max_concurrent_client_num | +| ----------- |-------------------------------------------| +| Description | Maximum number of concurrent RPC clients. | +| Type | int | +| Default | 3000 | +| Effective | Restart required. | + +- cn_connection_timeout_ms + +| Name | cn_connection_timeout_ms | +| ----------- | ---------------------------------------------------- | +| Description | Connection timeout for ConfigNode (in milliseconds). | +| Type | int | +| Default | 60000 | +| Effective | Restart required. | + +- cn_selector_thread_nums_of_client_manager + +| Name | cn_selector_thread_nums_of_client_manager | +| ----------- | ------------------------------------------------------------ | +| Description | Number of selector threads for client management in ConfigNode. | +| Type | int | +| Default | 1 | +| Effective | Restart required. | + +- cn_max_client_count_for_each_node_in_client_manager + +| Name | cn_max_client_count_for_each_node_in_client_manager | +| ----------- | ------------------------------------------------------ | +| Description | Maximum clients per node in the ConfigNode client manager. | +| Type | int | +| Default | 300 | +| Effective | Restart required. | + +- dn_session_timeout_threshold + +| Name | dn_session_timeout_threshold | +| ----------- | ---------------------------------------- | +| Description | Maximum idle time for DataNode sessions. | +| Type | int | +| Default | 0 | +| Effective | Restart required. | + +- dn_rpc_thrift_compression_enable + +| Name | dn_rpc_thrift_compression_enable | +| ----------- | -------------------------------------------- | +| Description | Enables Thrift compression for DataNode RPC. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- dn_rpc_advanced_compression_enable + +| Name | dn_rpc_advanced_compression_enable | +| ----------- | ----------------------------------------------------- | +| Description | Enables advanced Thrift compression for DataNode RPC. | +| Type | Boolean | +| Default | false | +| Effective | Restart required. | + +- dn_rpc_selector_thread_count + +| Name | dn_rpc_selector_thread_count | +| ----------- | -------------------------------------------- | +| Description | Number of selector threads for DataNode RPC. | +| Type | int | +| Default | 1 | +| Effective | Restart required.
| + +- dn_rpc_min_concurrent_client_num + +| Name | dn_rpc_min_concurrent_client_num | +| ----------- | ------------------------------------------------------ | +| Description | Minimum number of concurrent RPC clients for DataNode. | +| Type | Short Int : [0,65535] | +| Default | 1 | +| Effective | Restart required. | + +- dn_rpc_max_concurrent_client_num + +| Name | dn_rpc_max_concurrent_client_num | +| ----------- |--------------------------------------------------------| +| Description | Maximum number of concurrent RPC clients for DataNode. | +| Type | Short Int : [0,65535] | +| Default | 1000 | +| Effective | Restart required. | + +- dn_thrift_max_frame_size + +| Name | dn_thrift_max_frame_size | +| ----------- |------------------------------------------------| +| Description | Maximum frame size for RPC requests/responses. | +| Type | long | +| Default | 536870912 (Default 512MB) | +| Effective | Restart required. | + +- dn_thrift_init_buffer_size + +| Name | dn_thrift_init_buffer_size | +| ----------- | ----------------------------------- | +| Description | Initial buffer size for Thrift RPC. | +| Type | long | +| Default | 1024 | +| Effective | Restart required. | + +- dn_connection_timeout_ms + +| Name | dn_connection_timeout_ms | +| ----------- | -------------------------------------------------- | +| Description | Connection timeout for DataNode (in milliseconds). | +| Type | int | +| Default | 60000 | +| Effective | Restart required. | + +- dn_selector_thread_count_of_client_manager + +| Name | dn_selector_thread_count_of_client_manager | +| ----------- | ------------------------------------------------------------ | +| Description | Number of selector threads (TAsyncClientManager) for async clients in a client manager. | +| Type | int | +| Default | 1 | +| Effective | Restart required. | + +- dn_max_client_count_for_each_node_in_client_manager + +| Name | dn_max_client_count_for_each_node_in_client_manager | +| ----------- | --------------------------------------------------- | +| Description | Maximum clients per node in the DataNode client manager. | +| Type | int | +| Default | 300 | +| Effective | Restart required. | + +### 4.9 Object Storage Management + +- remote_tsfile_cache_dirs + +| Name | remote_tsfile_cache_dirs | +| ----------- | ---------------------------------------- | +| Description | Local cache directory for cloud storage. | +| Type | String | +| Default | data/datanode/data/cache | +| Effective | Restart required. | + +- remote_tsfile_cache_page_size_in_kb + +| Name | remote_tsfile_cache_page_size_in_kb | +| ----------- | --------------------------------------------- | +| Description | Block size for cached files in cloud storage. | +| Type | int | +| Default | 20480 | +| Effective | Restart required. | + +- remote_tsfile_cache_max_disk_usage_in_mb + +| Name | remote_tsfile_cache_max_disk_usage_in_mb | +| ----------- | ------------------------------------------- | +| Description | Maximum disk usage for the cloud storage cache. | +| Type | long | +| Default | 51200 | +| Effective | Restart required. | + +- object_storage_type + +| Name | object_storage_type | +| ----------- | ---------------------- | +| Description | Type of cloud storage. | +| Type | String | +| Default | AWS_S3 | +| Effective | Restart required. | + +- object_storage_endpoint + +| Name | object_storage_endpoint | +| ----------- | --------------------------- | +| Description | Endpoint for cloud storage. | +| Type | String | +| Default | None | +| Effective | Restart required. |
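Read together with the bucket and credential parameters documented immediately below, the object-storage block combines as in this hedged sketch; the endpoint and credentials are placeholders.

```properties
# Cache remote TsFiles locally: up to 50 GB, in 20 MB pages.
remote_tsfile_cache_dirs=data/datanode/data/cache
remote_tsfile_cache_page_size_in_kb=20480
remote_tsfile_cache_max_disk_usage_in_mb=51200
# Point the object store at S3 (placeholder endpoint and credentials).
object_storage_type=AWS_S3
object_storage_endpoint=s3.us-east-1.amazonaws.com
object_storage_bucket=iotdb_data
object_storage_access_key=AKIAXXXXXXXXXXXX
object_storage_access_secret=********
```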
+
+- object_storage_bucket
+
+| Name | object_storage_bucket |
+| ----------- | ----------- |
+| Description | Bucket name for cloud storage. |
+| Type | String |
+| Default | iotdb_data |
+| Effective | Restart required. |
+
+- object_storage_access_key
+
+| Name | object_storage_access_key |
+| ----------- | ----------- |
+| Description | Access key for cloud storage. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+- object_storage_access_secret
+
+| Name | object_storage_access_secret |
+| ----------- | ----------- |
+| Description | Access secret for cloud storage. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+### 4.10 Tier management
+
+- dn_default_space_usage_thresholds
+
+| Name | dn_default_space_usage_thresholds |
+| ----------- | ----------- |
+| Description | Disk usage threshold; data will be moved to the next tier when the usage of a tier is higher than this threshold. If tiered storage is enabled, separate the thresholds of different tiers with semicolons (";"). |
+| Type | double |
+| Default | 0.85 |
+| Effective | Hot reload. |
+
+- dn_tier_full_policy
+
+| Name | dn_tier_full_policy |
+| ----------- | ----------- |
+| Description | How to handle the last tier's data once its used space exceeds its dn_default_space_usage_thresholds. |
+| Type | String |
+| Default | NULL |
+| Effective | Hot reload. |
+
+- migrate_thread_count
+
+| Name | migrate_thread_count |
+| ----------- | ----------- |
+| Description | Thread pool size for migration operations in the DataNode's data directories. |
+| Type | int |
+| Default | 1 |
+| Effective | Hot reload. |
+
+- tiered_storage_migrate_speed_limit_bytes_per_sec
+
+| Name | tiered_storage_migrate_speed_limit_bytes_per_sec |
+| ----------- | ----------- |
+| Description | Maximum migration speed between tiers, in bytes per second. |
+| Type | int |
+| Default | 10485760 |
+| Effective | Hot reload. |
+
+### 4.11 REST Service Configuration
+
+- enable_rest_service
+
+| Name | enable_rest_service |
+| ----------- | ----------- |
+| Description | Whether the REST service is enabled. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- rest_service_port
+
+| Name | rest_service_port |
+| ----------- | ----------- |
+| Description | The binding port of the REST service. |
+| Type | int32 |
+| Default | 18080 |
+| Effective | Restart required. |
+
+- enable_swagger
+
+| Name | enable_swagger |
+| ----------- | ----------- |
+| Description | Whether to expose REST service interface information through Swagger, e.g. http://ip:port/swagger.json. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- rest_query_default_row_size_limit
+
+| Name | rest_query_default_row_size_limit |
+| ----------- | ----------- |
+| Description | The default row limit of a REST query response when the rowSize parameter is not given in the request. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- cache_expire_in_seconds
+
+| Name | cache_expire_in_seconds |
+| ----------- | ----------- |
+| Description | The expiration time of the user login information cache (in seconds). |
+| Type | int32 |
+| Default | 28800 |
+| Effective | Restart required. |
+
+- cache_max_num
+
+| Name | cache_max_num |
+| ----------- | ----------- |
+| Description | The maximum number of users that can be stored in the user login cache. |
+| Type | int32 |
+| Default | 100 |
+| Effective | Restart required. |
+
+- cache_init_num
+
+| Name | cache_init_num |
+| ----------- | ----------- |
+| Description | The initial capacity of the user login cache. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- client_auth
+
+| Name | client_auth |
+| ----------- | ----------- |
+| Description | Whether client authentication is required. |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- trust_store_path
+
+| Name | trust_store_path |
+| ----------- | ----------- |
+| Description | SSL trust store path. |
+| Type | String |
+| Default | "" |
+| Effective | Restart required. |
+
+- trust_store_pwd
+
+| Name | trust_store_pwd |
+| ----------- | ----------- |
+| Description | SSL trust store password. |
+| Type | String |
+| Default | "" |
+| Effective | Restart required. |
+
+- idle_timeout_in_seconds
+
+| Name | idle_timeout_in_seconds |
+| ----------- | ----------- |
+| Description | SSL idle timeout (in seconds). |
+| Type | int32 |
+| Default | 5000 |
+| Effective | Restart required. |
+
+### 4.12 Load balancing configuration
+
+- series_slot_num
+
+| Name | series_slot_num |
+| ----------- | ----------- |
+| Description | Number of SeriesPartitionSlots per Database. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Modify before the first startup. |
+
+- series_partition_executor_class
+
+| Name | series_partition_executor_class |
+| ----------- | ----------- |
+| Description | SeriesPartitionSlot executor class. |
+| Type | String |
+| Default | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor |
+| Effective | Modify before the first startup. |
+
+- schema_region_group_extension_policy
+
+| Name | schema_region_group_extension_policy |
+| ----------- | ----------- |
+| Description | The extension policy of SchemaRegionGroups for each Database. |
+| Type | string |
+| Default | AUTO |
+| Effective | Restart required. |
+
+- default_schema_region_group_num_per_database
+
+| Name | default_schema_region_group_num_per_database |
+| ----------- | ----------- |
+| Description | When schema_region_group_extension_policy=CUSTOM, this parameter is the default number of SchemaRegionGroups for each Database. When schema_region_group_extension_policy=AUTO, it is the default minimal number of SchemaRegionGroups for each Database. |
+| Type | int |
+| Default | 1 |
+| Effective | Restart required. |
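+
+A hedged sketch of the CUSTOM policy described above (the values are illustrative, not recommendations); as before, the keys are assumed to live in `conf/iotdb-system.properties`:
+
+```properties
+# With CUSTOM, every Database gets a fixed number of SchemaRegionGroups.
+schema_region_group_extension_policy=CUSTOM
+default_schema_region_group_num_per_database=2
+```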
+
+- schema_region_per_data_node
+
+| Name | schema_region_per_data_node |
+| ----------- | ----------- |
+| Description | Only takes effect when schema_region_group_extension_policy=AUTO. This parameter is the maximum number of SchemaRegions expected to be managed by each DataNode. |
+| Type | double |
+| Default | 1.0 |
+| Effective | Restart required. |
+
+- data_region_group_extension_policy
+
+| Name | data_region_group_extension_policy |
+| ----------- | ----------- |
+| Description | The extension policy of DataRegionGroups for each Database. |
+| Type | string |
+| Default | AUTO |
+| Effective | Restart required. |
+
+- default_data_region_group_num_per_database
+
+| Name | default_data_region_group_num_per_database |
+| ----------- | ----------- |
+| Description | When data_region_group_extension_policy=CUSTOM, this parameter is the default number of DataRegionGroups for each Database. When data_region_group_extension_policy=AUTO, it is the default minimal number of DataRegionGroups for each Database. |
+| Type | int |
+| Default | 2 |
+| Effective | Restart required. |
+
+- data_region_per_data_node
+
+| Name | data_region_per_data_node |
+| ----------- | ----------- |
+| Description | Only takes effect when data_region_group_extension_policy=AUTO. This parameter is the maximum number of DataRegions expected to be managed by each DataNode. |
+| Type | double |
+| Default | 5.0 |
+| Effective | Restart required. |
+
+- enable_auto_leader_balance_for_ratis_consensus
+
+| Name | enable_auto_leader_balance_for_ratis_consensus |
+| ----------- | ----------- |
+| Description | Whether to enable automatic leader balancing for the Ratis consensus protocol. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+- enable_auto_leader_balance_for_iot_consensus
+
+| Name | enable_auto_leader_balance_for_iot_consensus |
+| ----------- | ----------- |
+| Description | Whether to enable automatic leader balancing for the IoTConsensus protocol. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+### 4.13 Cluster management
+
+- time_partition_origin
+
+| Name | time_partition_origin |
+| ----------- | ----------- |
+| Description | Time partition origin in milliseconds; defaults to zero. |
+| Type | Long |
+| Unit | ms |
+| Default | 0 |
+| Effective | Modify before the first startup. |
+
+- time_partition_interval
+
+| Name | time_partition_interval |
+| ----------- | ----------- |
+| Description | Time partition interval in milliseconds, used to partition data inside each data region; defaults to one week. |
+| Type | Long |
+| Unit | ms |
+| Default | 604800000 |
+| Effective | Modify before the first startup. |
+
+- heartbeat_interval_in_ms
+
+| Name | heartbeat_interval_in_ms |
+| ----------- | ----------- |
+| Description | The heartbeat interval in milliseconds. |
+| Type | Long |
+| Unit | ms |
+| Default | 1000 |
+| Effective | Restart required. |
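+
+As a quick sanity check on the default interval above (a hedged sketch; the values restate the documented defaults):
+
+```properties
+# One week: 7 days * 24 h * 3600 s * 1000 ms = 604800000 ms
+time_partition_origin=0
+time_partition_interval=604800000
+```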
+
+- disk_space_warning_threshold
+
+| Name | disk_space_warning_threshold |
+| ----------- | ----------- |
+| Description | Remaining disk space threshold at which a DataNode is set to ReadOnly status. |
+| Type | double (percentage) |
+| Default | 0.05 |
+| Effective | Restart required. |
+
+### 4.14 Memory Control Configuration
+
+- datanode_memory_proportion
+
+| Name | datanode_memory_proportion |
+| ----------- | ----------- |
+| Description | Memory allocation ratio: StorageEngine, QueryEngine, SchemaEngine, Consensus, StreamingEngine and free memory. |
+| Type | Ratio |
+| Default | 3:3:1:1:1:1 |
+| Effective | Restart required. |
+
+- schema_memory_proportion
+
+| Name | schema_memory_proportion |
+| ----------- | ----------- |
+| Description | Schema memory allocation ratio: SchemaRegion, SchemaCache, and PartitionCache. |
+| Type | Ratio |
+| Default | 5:4:1 |
+| Effective | Restart required. |
+
+- storage_engine_memory_proportion
+
+| Name | storage_engine_memory_proportion |
+| ----------- | ----------- |
+| Description | Memory allocation ratio in StorageEngine: Write, Compaction. |
+| Type | Ratio |
+| Default | 8:2 |
+| Effective | Restart required. |
+
+- write_memory_proportion
+
+| Name | write_memory_proportion |
+| ----------- | ----------- |
+| Description | Memory allocation ratio in writing: Memtable, TimePartitionInfo. |
+| Type | Ratio |
+| Default | 19:1 |
+| Effective | Restart required. |
+
+- primitive_array_size
+
+| Name | primitive_array_size |
+| ----------- | ----------- |
+| Description | Primitive array size (length of each array) in the array pool. |
+| Type | int32 |
+| Default | 64 |
+| Effective | Restart required. |
+
+- chunk_metadata_size_proportion
+
+| Name | chunk_metadata_size_proportion |
+| ----------- | ----------- |
+| Description | Ratio of compaction memory used for chunk metadata kept in memory during compaction. |
+| Type | Double |
+| Default | 0.1 |
+| Effective | Restart required. |
+
+- flush_proportion
+
+| Name | flush_proportion |
+| ----------- | ----------- |
+| Description | Ratio of memtable memory at which a flush to disk is invoked, 0.4 by default. If you have an extremely high write load (like batch=1000), it can be set lower than the default, e.g. 0.2. |
+| Type | Double |
+| Default | 0.4 |
+| Effective | Restart required. |
+
+- buffered_arrays_memory_proportion
+
+| Name | buffered_arrays_memory_proportion |
+| ----------- | ----------- |
+| Description | Ratio of memtable memory allocated for buffered arrays, 0.6 by default. |
+| Type | Double |
+| Default | 0.6 |
+| Effective | Restart required. |
+
+- reject_proportion
+
+| Name | reject_proportion |
+| ----------- | ----------- |
+| Description | Ratio of memtable memory at which insertions are rejected, 0.8 by default. If you have an extremely high write load (like batch=1000) and the physical memory is large enough, it can be set higher than the default, e.g. 0.9. |
+| Type | Double |
+| Default | 0.8 |
+| Effective | Restart required. |
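+
+To illustrate how these ratios compose (a hedged sketch, not a tuning recommendation): with the defaults, the StorageEngine receives 3/10 of DataNode memory, and flush_proportion/reject_proportion then apply within the memtable share:
+
+```properties
+# 3:3:1:1:1:1 sums to 10, so StorageEngine gets 3/10 of the memory;
+# flushing starts at 40% of memtable memory, rejection at 80%.
+datanode_memory_proportion=3:3:1:1:1:1
+flush_proportion=0.4
+reject_proportion=0.8
+```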
+
+- device_path_cache_proportion
+
+| Name | device_path_cache_proportion |
+| ----------- | ----------- |
+| Description | Ratio of memtable memory for the DevicePathCache. The DevicePathCache is the deviceId cache, which keeps only one copy of each deviceId in memory. |
+| Type | Double |
+| Default | 0.05 |
+| Effective | Restart required. |
+
+- write_memory_variation_report_proportion
+
+| Name | write_memory_variation_report_proportion |
+| ----------- | ----------- |
+| Description | If the memory cost of a data region grows by more than this proportion of the memory allocated for writing, report it to the system. The default value is 0.001. |
+| Type | Double |
+| Default | 0.001 |
+| Effective | Restart required. |
+
+- check_period_when_insert_blocked
+
+| Name | check_period_when_insert_blocked |
+| ----------- | ----------- |
+| Description | When an insertion is rejected, the waiting period (in ms) before checking the system again, 50 by default. If insertions keep being rejected and the read load is low, it can be set larger. |
+| Type | int32 |
+| Default | 50 |
+| Effective | Restart required. |
+
+- io_task_queue_size_for_flushing
+
+| Name | io_task_queue_size_for_flushing |
+| ----------- | ----------- |
+| Description | Size of the ioTaskQueue. The default value is 10. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- enable_query_memory_estimation
+
+| Name | enable_query_memory_estimation |
+| ----------- | ----------- |
+| Description | If true, the possible memory footprint of each query is estimated before execution, and the query is denied if its estimate exceeds the current free memory. |
+| Type | bool |
+| Default | true |
+| Effective | Hot reload. |
+
+### 4.15 Schema Engine Configuration
+
+- schema_engine_mode
+
+| Name | schema_engine_mode |
+| ----------- | ----------- |
+| Description | The schema management mode of the schema engine. Currently, Memory and PBTree are supported. This configuration must be the same on all DataNodes in a cluster. |
+| Type | string |
+| Default | Memory |
+| Effective | Modify before the first startup. |
+
+- partition_cache_size
+
+| Name | partition_cache_size |
+| ----------- | ----------- |
+| Description | Cache size for partitions. |
+| Type | Int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- sync_mlog_period_in_ms
+
+| Name | sync_mlog_period_in_ms |
+| ----------- | ----------- |
+| Description | The period (in milliseconds) at which the metadata log is forced to disk. If sync_mlog_period_in_ms=0, the metadata log is forced to disk after every update. Setting this parameter to 0 may slow down operations on slow disks. |
+| Type | Int64 |
+| Default | 100 |
+| Effective | Restart required. |
+
+- tag_attribute_flush_interval
+
+| Name | tag_attribute_flush_interval |
+| ----------- | ----------- |
+| Description | The number of tag and attribute records between forced flushes to disk. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Modify before the first startup. |
+
+- tag_attribute_total_size
+
+| Name | tag_attribute_total_size |
+| ----------- | ----------- |
+| Description | The maximum size of a storage block for the tags and attributes of one time series. |
+| Type | int32 |
+| Default | 700 |
+| Effective | Modify before the first startup. |
+
+- max_measurement_num_of_internal_request
+
+| Name | max_measurement_num_of_internal_request |
+| ----------- | ----------- |
+| Description | The maximum measurement number of an internal request. When creating timeseries with Session.createMultiTimeseries, a user plan whose timeseries number exceeds this value will be split into several plans, each with no more timeseries than this value. |
+| Type | Int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- datanode_schema_cache_eviction_policy
+
+| Name | datanode_schema_cache_eviction_policy |
+| ----------- | ----------- |
+| Description | Eviction policy of the DataNodeSchemaCache. |
+| Type | String |
+| Default | FIFO |
+| Effective | Restart required. |
+
+- cluster_timeseries_limit_threshold
+
+| Name | cluster_timeseries_limit_threshold |
+| ----------- | ----------- |
+| Description | The maximum number of time series allowed in the cluster. |
+| Type | Int32 |
+| Default | -1 |
+| Effective | Restart required. |
+
+- cluster_device_limit_threshold
+
+| Name | cluster_device_limit_threshold |
+| ----------- | ----------- |
+| Description | The maximum number of devices allowed in the cluster. |
+| Type | Int32 |
+| Default | -1 |
+| Effective | Restart required. |
+
+- database_limit_threshold
+
+| Name | database_limit_threshold |
+| ----------- | ----------- |
+| Description | The maximum number of Databases allowed in the cluster. |
+| Type | Int32 |
+| Default | -1 |
+| Effective | Restart required. |
+
+### 4.16 Configurations for creating schema automatically
+
+- enable_auto_create_schema
+
+| Name | enable_auto_create_schema |
+| ----------- | ----------- |
+| Description | Whether automatic schema creation is enabled. |
+| Value | true or false |
+| Default | true |
+| Effective | Restart required. |
+
+- default_storage_group_level
+
+| Name | default_storage_group_level |
+| ----------- | ----------- |
+| Description | Database level when automatic schema creation is enabled. E.g., for root.sg0.d1.s2, root.sg0 is set as the database if the database level is 1. If the incoming path is shorter than this level, the creation/insertion will fail. |
+| Value | int32 |
+| Default | 1 |
+| Effective | Restart required. |
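+
+A hedged sketch of the behavior just described (defaults restated; the example path comes from the table above):
+
+```properties
+# With level 1, inserting into root.sg0.d1.s2 auto-creates database root.sg0;
+# a path shorter than this level fails to create/insert.
+enable_auto_create_schema=true
+default_storage_group_level=1
+```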
+
+- boolean_string_infer_type
+
+| Name | boolean_string_infer_type |
+| ----------- | ----------- |
+| Description | Which type to register a time series as when receiving the boolean string "true" or "false". |
+| Value | BOOLEAN or TEXT |
+| Default | BOOLEAN |
+| Effective | Hot reload |
+
+- integer_string_infer_type
+
+| Name | integer_string_infer_type |
+| ----------- | ----------- |
+| Description | Which type to register a time series as when receiving an integer string, where using float or double may lose precision. |
+| Value | INT32, INT64, FLOAT, DOUBLE, TEXT |
+| Default | DOUBLE |
+| Effective | Hot reload |
+
+- floating_string_infer_type
+
+| Name | floating_string_infer_type |
+| ----------- | ----------- |
+| Description | Which type to register a time series as when receiving a floating-point number string such as "6.7". |
+| Value | DOUBLE, FLOAT or TEXT |
+| Default | DOUBLE |
+| Effective | Hot reload |
+
+- nan_string_infer_type
+
+| Name | nan_string_infer_type |
+| ----------- | ----------- |
+| Description | Which type to register a time series as when receiving the literal NaN. |
+| Value | DOUBLE, FLOAT or TEXT |
+| Default | DOUBLE |
+| Effective | Hot reload |
+
+- default_boolean_encoding
+
+| Name | default_boolean_encoding |
+| ----------- | ----------- |
+| Description | BOOLEAN encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE |
+| Default | RLE |
+| Effective | Hot reload |
+
+- default_int32_encoding
+
+| Name | default_int32_encoding |
+| ----------- | ----------- |
+| Description | INT32 encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA |
+| Default | TS_2DIFF |
+| Effective | Hot reload |
+
+- default_int64_encoding
+
+| Name | default_int64_encoding |
+| ----------- | ----------- |
+| Description | INT64 encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA |
+| Default | TS_2DIFF |
+| Effective | Hot reload |
+
+- default_float_encoding
+
+| Name | default_float_encoding |
+| ----------- | ----------- |
+| Description | FLOAT encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE, TS_2DIFF, GORILLA |
+| Default | GORILLA |
+| Effective | Hot reload |
+
+- default_double_encoding
+
+| Name | default_double_encoding |
+| ----------- | ----------- |
+| Description | DOUBLE encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE, TS_2DIFF, GORILLA |
+| Default | GORILLA |
+| Effective | Hot reload |
+
+- default_text_encoding
+
+| Name | default_text_encoding |
+| ----------- | ----------- |
+| Description | TEXT encoding when automatic schema creation is enabled. |
+| Value | PLAIN |
+| Default | PLAIN |
+| Effective | Hot reload |
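+
+Taken together, the inference and encoding defaults above mean an incoming string value determines both the registered type and its encoding; a hedged sketch (defaults restated):
+
+```properties
+# A value like "6.7" is registered as DOUBLE and encoded with GORILLA.
+floating_string_infer_type=DOUBLE
+default_double_encoding=GORILLA
+```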
+
+- boolean_compressor
+
+| Name | boolean_compressor |
+| ----------- | ----------- |
+| Description | BOOLEAN compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- int32_compressor
+
+| Name | int32_compressor |
+| ----------- | ----------- |
+| Description | INT32/DATE compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- int64_compressor
+
+| Name | int64_compressor |
+| ----------- | ----------- |
+| Description | INT64/TIMESTAMP compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- float_compressor
+
+| Name | float_compressor |
+| ----------- | ----------- |
+| Description | FLOAT compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- double_compressor
+
+| Name | double_compressor |
+| ----------- | ----------- |
+| Description | DOUBLE compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- text_compressor
+
+| Name | text_compressor |
+| ----------- | ----------- |
+| Description | TEXT/BINARY/BLOB compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+### 4.17 Query Configurations
+
+- read_consistency_level
+
+| Name | read_consistency_level |
+| ----------- | ----------- |
+| Description | The read consistency level. Currently supported levels: strong (default, read from the leader replica) and weak (read from a random replica). |
+| Type | String |
+| Default | strong |
+| Effective | Restart required. |
+
+- meta_data_cache_enable
+
+| Name | meta_data_cache_enable |
+| ----------- | ----------- |
+| Description | Whether to cache metadata (BloomFilter, ChunkMetadata and TimeSeriesMetadata). |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+- chunk_timeseriesmeta_free_memory_proportion
+
+| Name | chunk_timeseriesmeta_free_memory_proportion |
+| ----------- | ----------- |
+| Description | Read memory allocation ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others. The parameter takes the form a:b:c:d:e:f:g:h, where each component is an integer, for example 1:1:1:1:1:1:1:1 or 1:100:200:50:200:200:200:50. |
+| Type | String |
+| Default | 1 : 100 : 200 : 300 : 400 |
+| Effective | Restart required. |
+
+- enable_last_cache
+
+| Name | enable_last_cache |
+| ----------- | ----------- |
+| Description | Whether to enable the LAST cache. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
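+
+A hedged sketch contrasting the two consistency levels above; since weak reads go to a random replica, they may observe slightly stale data in exchange for spreading query load:
+
+```properties
+# strong (default): read from the leader replica; weak: read from any replica.
+read_consistency_level=weak
+enable_last_cache=true
+```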
+
+- mpp_data_exchange_core_pool_size
+
+| Name | mpp_data_exchange_core_pool_size |
+| ----------- | ----------- |
+| Description | Core size of the MPP data exchange thread pool. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- mpp_data_exchange_max_pool_size
+
+| Name | mpp_data_exchange_max_pool_size |
+| ----------- | ----------- |
+| Description | Maximum size of the MPP data exchange thread pool. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- mpp_data_exchange_keep_alive_time_in_ms
+
+| Name | mpp_data_exchange_keep_alive_time_in_ms |
+| ----------- | ----------- |
+| Description | Maximum waiting time for MPP data exchange. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- driver_task_execution_time_slice_in_ms
+
+| Name | driver_task_execution_time_slice_in_ms |
+| ----------- | ----------- |
+| Description | The maximum execution time of a DriverTask. |
+| Type | int32 |
+| Default | 200 |
+| Effective | Restart required. |
+
+- max_tsblock_size_in_bytes
+
+| Name | max_tsblock_size_in_bytes |
+| ----------- | ----------- |
+| Description | The maximum capacity of a TsBlock. |
+| Type | int32 |
+| Default | 131072 |
+| Effective | Restart required. |
+
+- max_tsblock_line_numbers
+
+| Name | max_tsblock_line_numbers |
+| ----------- | ----------- |
+| Description | The maximum number of lines in a single TsBlock. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- slow_query_threshold
+
+| Name | slow_query_threshold |
+| ----------- | ----------- |
+| Description | Time cost (ms) threshold for slow queries. |
+| Type | long |
+| Default | 10000 |
+| Effective | Hot reload |
+
+- query_cost_stat_window
+
+| Name | query_cost_stat_window |
+| ----------- | ----------- |
+| Description | Time window (min) for recording the cost of historical queries. |
+| Type | Int32 |
+| Default | 0 |
+| Effective | Hot reload |
+
+- query_timeout_threshold
+
+| Name | query_timeout_threshold |
+| ----------- | ----------- |
+| Description | The maximum execution time of a query, in ms. |
+| Type | Int32 |
+| Default | 60000 |
+| Effective | Restart required. |
+
+- max_allowed_concurrent_queries
+
+| Name | max_allowed_concurrent_queries |
+| ----------- | ----------- |
+| Description | The maximum number of concurrently executing queries. |
+| Type | Int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- query_thread_count
+
+| Name | query_thread_count |
+| ----------- | ----------- |
+| Description | How many threads can concurrently execute query statements. When <= 0, the CPU core count is used. |
+| Type | Int32 |
+| Default | 0 |
+| Effective | Restart required. |
+
+- degree_of_query_parallelism
+
+| Name | degree_of_query_parallelism |
+| ----------- | ----------- |
+| Description | How many pipeline drivers will be created for one fragment instance. When <= 0, CPU core count / 2 is used. |
+| Type | Int32 |
+| Default | 0 |
+| Effective | Restart required. |
+
+- mode_map_size_threshold
+
+| Name | mode_map_size_threshold |
+| ----------- | ----------- |
+| Description | The threshold of the count-map size when calculating the MODE aggregation function. |
+| Type | Int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- batch_size
+
+| Name | batch_size |
+| ----------- | ----------- |
+| Description | The amount of data iterated over at a time on the server (the number of rows, i.e., the number of distinct timestamps). |
+| Type | Int32 |
+| Default | 100000 |
+| Effective | Restart required. |
+
+- sort_buffer_size_in_bytes
+
+| Name | sort_buffer_size_in_bytes |
+| ----------- | ----------- |
+| Description | The memory for external sort in the sort operator; when the data size is smaller than sort_buffer_size_in_bytes, the sort operator uses in-memory sort. |
+| Type | long |
+| Default | 1048576 (before V2.0.6)<br>0 (since V2.0.6): if `sort_buffer_size_in_bytes <= 0`, the default value `min(32MB, memory for query operators / query_thread_count / 2)` is used; if `sort_buffer_size_in_bytes > 0`, the specified value is used. |
+| Effective | Hot reload |
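+
+A hedged worked example of the post-V2.0.6 default formula above (numbers are illustrative): with 6.4 GB of query-operator memory and query_thread_count=100, 6.4 GB / 100 / 2 = 32 MB, so the buffer caps at min(32 MB, 32 MB) = 32 MB.
+
+```properties
+# 0 delegates sizing to min(32MB, operator memory / query_thread_count / 2).
+sort_buffer_size_in_bytes=0
+```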
+
+- merge_threshold_of_explain_analyze
+
+| Name | merge_threshold_of_explain_analyze |
+| ----------- | ----------- |
+| Description | The threshold of operator count in the result set of EXPLAIN ANALYZE; if the number of operators in the result set exceeds this threshold, operators will be merged. |
+| Type | int |
+| Default | 10 |
+| Effective | Hot reload |
+
+### 4.18 TTL Configuration
+
+- ttl_check_interval
+
+| Name | ttl_check_interval |
+| ----------- | ----------- |
+| Description | The interval of the TTL check task in each database. The TTL check task inspects and selects files with a higher volume of expired data for compaction. Default is 2 hours. |
+| Type | int |
+| Default | 7200000 |
+| Effective | Restart required. |
+
+- max_expired_time
+
+| Name | max_expired_time |
+| ----------- | ----------- |
+| Description | The maximum expiring time of a device that has a TTL. Default is 1 month. If the data elapsed time (current timestamp minus the maximum data timestamp of the device in the file) of such a device exceeds this value, the file will be cleaned by compaction. |
+| Type | int |
+| Default | 2592000000 |
+| Effective | Restart required. |
+
+- expired_data_ratio
+
+| Name | expired_data_ratio |
+| ----------- | ----------- |
+| Description | The expired-device ratio. If the ratio of expired devices in one file exceeds this value, the expired data of that file will be cleaned by compaction. |
+| Type | float |
+| Default | 0.3 |
+| Effective | Restart required. |
+
+### 4.19 Storage Engine Configuration
+
+- timestamp_precision
+
+| Name | timestamp_precision |
+| ----------- | ----------- |
+| Description | Use this value to set the timestamp precision to "ms", "us" or "ns". |
+| Type | String |
+| Default | ms |
+| Effective | Modify before the first startup. |
+
+- timestamp_precision_check_enabled
+
+| Name | timestamp_precision_check_enabled |
+| ----------- | ----------- |
+| Description | When the timestamp precision check is enabled, timestamps with more than 13 digits for ms precision, or more than 16 digits for us precision, are not allowed to be inserted. |
+| Type | Boolean |
+| Default | true |
+| Effective | Modify before the first startup. |
+
+- max_waiting_time_when_insert_blocked
+
+| Name | max_waiting_time_when_insert_blocked |
+| ----------- | ----------- |
+| Description | When the waiting time (in ms) of an insertion exceeds this value, an exception is thrown. 10000 by default. |
+| Type | Int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- handle_system_error
+
+| Name | handle_system_error |
+| ----------- | ----------- |
+| Description | What the system will do when an unrecoverable error occurs. |
+| Type | String |
+| Default | CHANGE_TO_READ_ONLY |
+| Effective | Restart required. |
+
+- enable_timed_flush_seq_memtable
+
+| Name | enable_timed_flush_seq_memtable |
+| ----------- | ----------- |
+| Description | Whether to enable timed flushing of sequence TsFiles' memtables. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- seq_memtable_flush_interval_in_ms
+
+| Name | seq_memtable_flush_interval_in_ms |
+| ----------- | ----------- |
+| Description | If a memtable's last update time is older than the current time minus this value, the memtable will be flushed to disk. |
+| Type | long |
+| Default | 600000 |
+| Effective | Hot reload |
+
+- seq_memtable_flush_check_interval_in_ms
+
+| Name | seq_memtable_flush_check_interval_in_ms |
+| ----------- | ----------- |
+| Description | The interval at which to check whether sequence memtables need flushing. |
+| Type | long |
+| Default | 30000 |
+| Effective | Hot reload |
+
+- enable_timed_flush_unseq_memtable
+
+| Name | enable_timed_flush_unseq_memtable |
+| ----------- | ----------- |
+| Description | Whether to enable timed flushing of unsequence TsFiles' memtables. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- unseq_memtable_flush_interval_in_ms
+
+| Name | unseq_memtable_flush_interval_in_ms |
+| ----------- | ----------- |
+| Description | If a memtable's last update time is older than the current time minus this value, the memtable will be flushed to disk. |
+| Type | long |
+| Default | 600000 |
+| Effective | Hot reload |
+
+- unseq_memtable_flush_check_interval_in_ms
+
+| Name | unseq_memtable_flush_check_interval_in_ms |
+| ----------- | ----------- |
+| Description | The interval at which to check whether unsequence memtables need flushing. |
+| Type | long |
+| Default | 30000 |
+| Effective | Hot reload |
+
+- tvlist_sort_algorithm
+
+| Name | tvlist_sort_algorithm |
+| ----------- | ----------- |
+| Description | The sort algorithm used in the memtable's TVList. |
+| Type | String |
+| Default | TIM |
+| Effective | Restart required. |
+
+- avg_series_point_number_threshold
+
+| Name | avg_series_point_number_threshold |
+| ----------- | ----------- |
+| Description | When the average point number of timeseries in a memtable exceeds this value, the memtable is flushed to disk. |
+| Type | int32 |
+| Default | 100000 |
+| Effective | Restart required. |
+
+- flush_thread_count
+
+| Name | flush_thread_count |
+| ----------- | ----------- |
+| Description | How many threads can flush concurrently. When <= 0, the CPU core count is used. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Restart required. |
+
+- enable_partial_insert
+
+| Name | enable_partial_insert |
+| ----------- | ----------- |
+| Description | In one insert (one device, one timestamp, multiple measurements), if partial insert is enabled, the failure of one measurement will not impact the other measurements. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
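+
+A hedged sketch of the timed-flush interplay above (defaults restated): the checker wakes every 30 s and flushes any memtable idle for more than 10 min:
+
+```properties
+enable_timed_flush_seq_memtable=true
+seq_memtable_flush_interval_in_ms=600000
+seq_memtable_flush_check_interval_in_ms=30000
+```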
+
+- recovery_log_interval_in_ms
+
+| Name | recovery_log_interval_in_ms |
+| ----------- | ----------- |
+| Description | The interval at which to log the recovery progress of each virtual storage group (data region) when starting IoTDB. |
+| Type | Int32 |
+| Default | 5000 |
+| Effective | Restart required. |
+
+- 0.13_data_insert_adapt
+
+| Name | 0.13_data_insert_adapt |
+| ----------- | ----------- |
+| Description | If a v0.13 client is used to insert data, set this configuration to true. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- enable_tsfile_validation
+
+| Name | enable_tsfile_validation |
+| ----------- | ----------- |
+| Description | Verify that TsFiles generated by flush, load, and compaction are correct. |
+| Type | boolean |
+| Default | false |
+| Effective | Hot reload |
+
+- tier_ttl_in_ms
+
+| Name | tier_ttl_in_ms |
+| ----------- | ----------- |
+| Description | Default TTL of a tier. When the retention time of data exceeds this threshold, the data is migrated to the next tier. |
+| Type | long |
+| Default | -1 |
+| Effective | Restart required. |
+
+- max_object_file_size_in_byte
+
+| Name | max_object_file_size_in_byte |
+| ----------- | ----------- |
+| Description | Maximum size limit for a single object file (supported since V2.0.8-beta). |
+| Type | long |
+| Default | 4294967296 (4 GB) |
+| Effective | Hot reload |
+
+- restrict_object_limit
+
+| Name | restrict_object_limit |
+| ----------- | ----------- |
+| Description | Whether to restrict naming for the `OBJECT` type (supported since V2.0.8-beta). When set to `false`, there are no special restrictions on table names, column names, or device identifiers for `OBJECT` columns. When set to `true` and the table contains `OBJECT` columns, the following restrictions apply:<br>1. Naming rules: values in TAG columns, table names, and field names must not use `.` or `..`; prohibited character sequences include `./` and `.\`, otherwise metadata creation will fail; names containing filesystem-unsupported characters will cause write errors.<br>2. Case sensitivity: if the underlying filesystem is case-insensitive, device identifiers like `'d1'` and `'D1'` are treated as identical; creating similar identifiers may overwrite `OBJECT` data files, leading to data corruption.<br>3. Storage path: the actual storage path format is `${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin`. |
+| Type | boolean |
+| Default | false |
+| Effective | Modify before the first startup. |
+
+### 4.20 Compaction Configurations
+
+- enable_seq_space_compaction
+
+| Name | enable_seq_space_compaction |
+| ----------- | ----------- |
+| Description | Sequence space compaction: only compacts sequence files. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- enable_unseq_space_compaction
+
+| Name | enable_unseq_space_compaction |
+| ----------- | ----------- |
+| Description | Unsequence space compaction: only compacts unsequence files. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- enable_cross_space_compaction
+
+| Name | enable_cross_space_compaction |
+| ----------- | ----------- |
+| Description | Cross space compaction: compacts unsequence files into the overlapped sequence files. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- enable_auto_repair_compaction
+
+| Name | enable_auto_repair_compaction |
+| ----------- | ----------- |
+| Description | Whether to automatically repair unsorted files by compaction. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- cross_selector
+
+| Name | cross_selector |
+| ----------- | ----------- |
+| Description | The selector of cross space compaction tasks. |
+| Type | String |
+| Default | rewrite |
+| Effective | Restart required. |
+
+- cross_performer
+
+| Name | cross_performer |
+| ----------- | ----------- |
+| Description | The compaction performer of cross space compaction tasks. Options: read_point, fast. |
+| Type | String |
+| Default | fast |
+| Effective | Hot reload |
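+
+A hedged sketch of the three compaction spaces above with their defaults (restated, not recommendations):
+
+```properties
+# Inner-seq, inner-unseq and cross compaction are all on by default;
+# cross compaction rewrites unsequence files into overlapped sequence files.
+enable_seq_space_compaction=true
+enable_unseq_space_compaction=true
+enable_cross_space_compaction=true
+cross_performer=fast
+```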
+
+- inner_seq_selector
+
+| Name | inner_seq_selector |
+| ----------- | ----------- |
+| Description | The selector of inner sequence space compaction tasks. Options: size_tiered_single_target, size_tiered_multi_target. |
+| Type | String |
+| Default | size_tiered_multi_target |
+| Effective | Hot reload |
+
+- inner_seq_performer
+
+| Name | inner_seq_performer |
+| ----------- | ----------- |
+| Description | The performer of inner sequence space compaction tasks. Options: read_chunk, fast. |
+| Type | String |
+| Default | read_chunk |
+| Effective | Hot reload |
+
+- inner_unseq_selector
+
+| Name | inner_unseq_selector |
+| ----------- | ----------- |
+| Description | The selector of inner unsequence space compaction tasks. Options: size_tiered_single_target, size_tiered_multi_target. |
+| Type | String |
+| Default | size_tiered_multi_target |
+| Effective | Hot reload |
+
+- inner_unseq_performer
+
+| Name | inner_unseq_performer |
+| ----------- | ----------- |
+| Description | The performer of inner unsequence space compaction tasks. Options: read_point, fast. |
+| Type | String |
+| Default | fast |
+| Effective | Hot reload |
+
+- compaction_priority
+
+| Name | compaction_priority |
+| ----------- | ----------- |
+| Description | The priority of compaction execution. INNER_CROSS: prioritize inner space compaction, reducing the number of files first. CROSS_INNER: prioritize cross space compaction, eliminating unsequence files first. BALANCE: alternate between the two compaction types. |
+| Type | String |
+| Default | INNER_CROSS |
+| Effective | Restart required. |
+
+- candidate_compaction_task_queue_size
+
+| Name | candidate_compaction_task_queue_size |
+| ----------- | ----------- |
+| Description | The size of the candidate compaction task queue. |
+| Type | int32 |
+| Default | 50 |
+| Effective | Restart required. |
+
+- target_compaction_file_size
+
+| Name | target_compaction_file_size |
+| ----------- | ----------- |
+| Description | This parameter is used in two places: the target TsFile size of inner space compaction, and the candidate size of sequence TsFiles in cross space compaction, which will be smaller than target_compaction_file_size * 1.5. In most cases the target file size of cross compaction won't exceed this threshold, and if it does, it will not be much larger. |
+| Type | Int64 |
+| Default | 2147483648 |
+| Effective | Hot reload |
+
+- inner_compaction_total_file_size_threshold
+
+| Name | inner_compaction_total_file_size_threshold |
+| ----------- | ----------- |
+| Description | The total file size limit in inner space compaction. |
+| Type | int64 |
+| Default | 10737418240 |
+| Effective | Hot reload |
+
+- inner_compaction_total_file_num_threshold
+
+| Name | inner_compaction_total_file_num_threshold |
+| ----------- | ----------- |
+| Description | The total file number limit in inner space compaction. |
+| Type | int32 |
+| Default | 100 |
+| Effective | Hot reload |
+
+- max_level_gap_in_inner_compaction
+
+| Name | max_level_gap_in_inner_compaction |
+| ----------- | ----------- |
+| Description | The maximum level gap in inner compaction selection. |
+| Type | int32 |
+| Default | 2 |
+| Effective | Hot reload |
+
+- target_chunk_size
+
+| Name | target_chunk_size |
+| ----------- | ----------- |
+| Description | The target chunk size in flushing and compaction. If the size of a timeseries in a memtable exceeds this value, the data will be flushed to multiple chunks. |
+| Type | Int64 |
+| Default | 1600000 |
+| Effective | Restart required. |
+
+- target_chunk_point_num
+
+| Name | target_chunk_point_num |
+| ----------- | ----------- |
+| Description | The target number of points in one chunk in flushing and compaction. If the point number of a timeseries in a memtable exceeds this value, the data will be flushed to multiple chunks. |
+| Type | Int64 |
+| Default | 100000 |
+| Effective | Restart required. |
+
+- chunk_size_lower_bound_in_compaction
+
+| Name | chunk_size_lower_bound_in_compaction |
+| ----------- | ----------- |
+| Description | If the chunk size is lower than this threshold, the chunk will be deserialized into points. |
+| Type | Int64 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- chunk_point_num_lower_bound_in_compaction
+
+| Name | chunk_point_num_lower_bound_in_compaction |
+| ----------- | ----------- |
+| Description | If the chunk point number is lower than this threshold, the chunk will be deserialized into points. |
+| Type | Int64 |
+| Default | 100 |
+| Effective | Restart required. |
+
+- inner_compaction_candidate_file_num
+
+| Name | inner_compaction_candidate_file_num |
+| ----------- | ----------- |
+| Description | The file number requirement when selecting inner space compaction candidate files. |
+| Type | int32 |
+| Default | 30 |
+| Effective | Hot reload |
+
+- max_cross_compaction_candidate_file_num
+
+| Name | max_cross_compaction_candidate_file_num |
+| ----------- | ----------- |
+| Description | The maximum file number when selecting cross space compaction candidate files. |
+| Type | int32 |
+| Default | 500 |
+| Effective | Hot reload |
+
+- max_cross_compaction_candidate_file_size
+
+| Name | max_cross_compaction_candidate_file_size |
+| ----------- | ----------- |
+| Description | The maximum total size when selecting cross space compaction candidate files. |
+| Type | Int64 |
+| Default | 5368709120 |
+| Effective | Hot reload |
+
+- min_cross_compaction_unseq_file_level
+
+| Name | min_cross_compaction_unseq_file_level |
+| ----------- | ----------- |
+| Description | The minimum inner compaction level of an unsequence file that can be selected as a candidate. |
+| Type | int32 |
+| Default | 1 |
+| Effective | Hot reload |
+
+- compaction_thread_count
+
+| Name | compaction_thread_count |
+| ----------- | ----------- |
+| Description | How many threads will be set up to perform compaction, 10 by default. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Hot reload |
+
+- compaction_max_aligned_series_num_in_one_batch
+
+| Name | compaction_max_aligned_series_num_in_one_batch |
+| ----------- | ----------- |
+| Description | How many aligned series will be compacted in one batch of aligned series compaction, 10 by default. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Hot reload |
+
+- compaction_schedule_interval_in_ms
+
+| Name | compaction_schedule_interval_in_ms |
+| ----------- | ----------- |
+| Description | The interval of compaction task scheduling. |
+| Type | Int64 |
+| Default | 60000 |
+| Effective | Restart required. |
+
+- compaction_write_throughput_mb_per_sec
+
+| Name | compaction_write_throughput_mb_per_sec |
+| ----------- | ----------- |
+| Description | The write throughput limit (MB) that compaction can reach per second. |
+| Type | int32 |
+| Default | 16 |
+| Effective | Restart required. |
+
+- compaction_read_throughput_mb_per_sec
+
+| Name | compaction_read_throughput_mb_per_sec |
+| ----------- | ----------- |
+| Description | The read throughput limit (MB) that compaction can reach per second. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Hot reload |
+
+- compaction_read_operation_per_sec
+
+| Name | compaction_read_operation_per_sec |
+| ----------- | ----------- |
+| Description | The read operation limit that compaction can reach per second. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Hot reload |
+
+- sub_compaction_thread_count
+
+| Name | sub_compaction_thread_count |
+| ----------- | ----------- |
+| Description | The number of sub-compaction threads set up to perform compaction. |
+| Type | int32 |
+| Default | 4 |
+| Effective | Hot reload |
+
+- inner_compaction_task_selection_disk_redundancy
+
+| Name | inner_compaction_task_selection_disk_redundancy |
+| ----------- | ----------- |
+| Description | Redundancy value of disk availability, only used for inner compaction. |
+| Type | double |
+| Default | 0.05 |
+| Effective | Hot reload |
+
+- inner_compaction_task_selection_mods_file_threshold
+
+| Name | inner_compaction_task_selection_mods_file_threshold |
+| ----------- | ----------- |
+| Description | Mods file size threshold, only used for inner compaction. |
+| Type | long |
+| Default | 131072 |
+| Effective | Hot reload |
+
+- compaction_schedule_thread_num
+
+| Name | compaction_schedule_thread_num |
+| ----------- | ----------- |
+| Description | The number of threads set up to select compaction tasks. |
+| Type | int32 |
+| Default | 4 |
+| Effective | Hot reload |
+
+### 4.21 Write Ahead Log Configuration
+
+- wal_mode
+
+| Name | wal_mode |
+| ----------- | ----------- |
+| Description | The write-ahead log mode. The three modes behave as follows: DISABLE: the system disables the WAL. SYNC: the system submits the WAL synchronously, and a write request does not return until its WAL is fsynced to disk successfully. ASYNC: the system submits the WAL asynchronously, and a write request returns immediately regardless of whether its WAL has been fsynced to disk successfully. |
+| Type | String |
+| Default | ASYNC |
+| Effective | Restart required. |
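+
+A hedged sketch of the trade-off above: SYNC buys durability at the cost of write latency, while ASYNC (the default) returns immediately and fsyncs in the background, governed by the async delay documented below:
+
+```properties
+# ASYNC returns before fsync; the background fsync delay is 1000 ms by default.
+wal_mode=ASYNC
+wal_async_mode_fsync_delay_in_ms=1000
+```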
+
+- max_wal_nodes_num
+
+| Name | max_wal_nodes_num |
+| ----------- | ----------- |
+| Description | The maximum number of WAL nodes; each node corresponds to one WAL directory. The default value 0 means the number is determined by the system and falls in the range [data region num / 2, data region num]. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Restart required. |
+
+- wal_async_mode_fsync_delay_in_ms
+
+| Name | wal_async_mode_fsync_delay_in_ms |
+| ----------- | ----------- |
+| Description | Duration a WAL flush operation will wait before calling fsync in async mode. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Hot reload |
+
+- wal_sync_mode_fsync_delay_in_ms
+
+| Name | wal_sync_mode_fsync_delay_in_ms |
+| ----------- | ----------- |
+| Description | Duration a WAL flush operation will wait before calling fsync in sync mode. |
+| Type | int32 |
+| Default | 3 |
+| Effective | Hot reload |
+
+- wal_buffer_size_in_byte
+
+| Name | wal_buffer_size_in_byte |
+| ----------- | ----------- |
+| Description | Buffer size of each WAL node. |
+| Type | int32 |
+| Default | 33554432 |
+| Effective | Restart required. |
+
+- wal_buffer_queue_capacity
+
+| Name | wal_buffer_queue_capacity |
+| ----------- | ----------- |
+| Description | Buffer capacity of each WAL queue. |
+| Type | int32 |
+| Default | 500 |
+| Effective | Restart required. |
+
+- wal_file_size_threshold_in_byte
+
+| Name | wal_file_size_threshold_in_byte |
+| ----------- | ----------- |
+| Description | Size threshold of each WAL file. |
+| Type | int32 |
+| Default | 31457280 |
+| Effective | Hot reload |
+
+- wal_min_effective_info_ratio
+
+| Name | wal_min_effective_info_ratio |
+| ----------- | ----------- |
+| Description | Minimum ratio of effective information in WAL files. |
+| Type | double |
+| Default | 0.1 |
+| Effective | Hot reload |
+
+- wal_memtable_snapshot_threshold_in_byte
+
+| Name | wal_memtable_snapshot_threshold_in_byte |
+| ----------- | ----------- |
+| Description | Memtable size threshold for triggering a memtable snapshot in the WAL. |
+| Type | int64 |
+| Default | 8388608 |
+| Effective | Hot reload |
+
+- max_wal_memtable_snapshot_num
+
+| Name | max_wal_memtable_snapshot_num |
+| ----------- | ----------- |
+| Description | A memtable's maximum snapshot number in the WAL. |
+| Type | int32 |
+| Default | 1 |
+| Effective | Hot reload |
+
+- delete_wal_files_period_in_ms
+
+| Name | delete_wal_files_period_in_ms |
+| ----------- | ----------- |
+| Description | The period at which outdated WAL files are deleted. |
+| Type | int64 |
+| Default | 20000 |
+| Effective | Hot reload |
+
+- wal_throttle_threshold_in_byte
+
+| Name | wal_throttle_threshold_in_byte |
+| ----------- | ----------- |
+| Description | The minimum size of WAL files when throttling down in IoTConsensus. |
+| Type | long |
+| Default | 53687091200 |
+| Effective | Hot reload |
+
+- iot_consensus_cache_window_time_in_ms
+
+| Name | iot_consensus_cache_window_time_in_ms |
+| ----------- | ----------- |
+| Description | Maximum wait time of the write cache in IoTConsensus. |
+| Type | long |
+| Default | -1 |
+| Effective | Hot reload |
+
+### 4.22 IoTConsensus Configuration
+
+- data_region_iot_max_log_entries_num_per_batch
+
+| Name | data_region_iot_max_log_entries_num_per_batch |
+| ----------- | ------------- |
+| Description | The maximum number of log entries in an IoTConsensus batch |
+| Type | int32 |
+| Default | 1024 |
+| Effective | Restart required. |
+
+- data_region_iot_max_size_per_batch
+
+| Name | data_region_iot_max_size_per_batch |
+| ----------- | ------------- |
+| Description | The maximum size of an IoTConsensus batch |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- data_region_iot_max_pending_batches_num
+
+| Name | data_region_iot_max_pending_batches_num |
+| ----------- | ------------- |
+| Description | The maximum number of pending batches in IoTConsensus |
+| Type | int32 |
+| Default | 5 |
+| Effective | Restart required. |
+
+- data_region_iot_max_memory_ratio_for_queue
+
+| Name | data_region_iot_max_memory_ratio_for_queue |
+| ----------- | ------------- |
+| Description | The maximum memory ratio for the queue in IoTConsensus |
+| Type | double |
+| Default | 0.6 |
+| Effective | Restart required. |
+
+- region_migration_speed_limit_bytes_per_second
+
+| Name | region_migration_speed_limit_bytes_per_second |
+| ----------- | ------------- |
+| Description | The maximum transfer size in bytes per second for region migration |
+| Type | long |
+| Default | 33554432 |
+| Effective | Restart required. |
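+
+For example, a sketch of IoTConsensus batching using the defaults above (illustrative, not a recommendation; all of these parameters require a restart):
+
+```properties
+# At most 1024 log entries per replication batch
+data_region_iot_max_log_entries_num_per_batch=1024
+# Cap each batch at 16 MB
+data_region_iot_max_size_per_batch=16777216
+# Allow up to 5 batches in flight
+data_region_iot_max_pending_batches_num=5
+```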
+
+### 4.23 TsFile Configurations
+
+- group_size_in_byte
+
+| Name | group_size_in_byte |
+| ----------- | ------------- |
+| Description | The maximum number of bytes written to disk each time in-memory data is flushed to disk |
+| Type | int32 |
+| Default | 134217728 |
+| Effective | Hot reload |
+
+- page_size_in_byte
+
+| Name | page_size_in_byte |
+| ----------- | ------------- |
+| Description | The memory size for each series writer to pack a page; the default value is 64KB |
+| Type | int32 |
+| Default | 65536 |
+| Effective | Hot reload |
+
+- max_number_of_points_in_page
+
+| Name | max_number_of_points_in_page |
+| ----------- | ------------- |
+| Description | The maximum number of data points in a page |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Hot reload |
+
+- pattern_matching_threshold
+
+| Name | pattern_matching_threshold |
+| ----------- | ------------- |
+| Description | The threshold for pattern matching in regex |
+| Type | int32 |
+| Default | 1000000 |
+| Effective | Hot reload |
+
+- float_precision
+
+| Name | float_precision |
+| ----------- | ------------- |
+| Description | Floating-point precision of query results. Only effective for RLE and TS_2DIFF encodings. Due to the limitation of machine precision, some values may not be interpreted strictly. |
+| Type | int32 |
+| Default | 2 |
+| Effective | Hot reload |
+
+- value_encoder
+
+| Name | value_encoder |
+| ----------- | ------------- |
+| Description | Encoder of value series. The default is PLAIN; for int and long data types, TS_2DIFF, RLE (run-length encoding), GORILLA and ZIGZAG are also supported. |
+| Type | String |
+| Default | PLAIN |
+| Effective | Hot reload |
+
+- compressor
+
+| Name | compressor |
+| ----------- | ------------- |
+| Description | Data compression method; also used as the default compressor of the time column in aligned timeseries. Supports UNCOMPRESSED, SNAPPY, ZSTD, LZMA2 or LZ4. |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- encrypt_flag
+
+| Name | encrypt_flag |
+| ----------- | ------------- |
+| Description | Enable data encryption |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- encrypt_type
+
+| Name | encrypt_type |
+| ----------- | ------------- |
+| Description | The method of data encryption |
+| Type | String |
+| Default | org.apache.tsfile.encrypt.UNENCRYPTED |
+| Effective | Restart required. |
+
+- encrypt_key_path
+
+| Name | encrypt_key_path |
+| ----------- | ------------- |
+| Description | The path of the key for data encryption |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
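+
+As a minimal sketch (values drawn from the options above, for illustration only), the default encoding and compression can be set together:
+
+```properties
+# Default encoder for value series; TS_2DIFF/RLE/GORILLA/ZIGZAG are options for int and long
+value_encoder=PLAIN
+# Default compressor, also used for the time column of aligned timeseries
+compressor=LZ4
+```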
+
+### 4.24 Authorization Configuration
+
+- authorizer_provider_class
+
+| Name | authorizer_provider_class |
+| ----------- | ------------- |
+| Description | The class used for authorization. |
+| Type | String |
+| Default | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer |
+| Effective | Restart required. |
+| Other options | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer |
+
+- openID_url
+
+| Name | openID_url |
+| ----------- | ------------- |
+| Description | The URL of the OpenID server. If OpenIdAuthorizer is enabled, openID_url must be set. |
+| Type | String (an HTTP link) |
+| Default | None |
+| Effective | Restart required. |
+
+- iotdb_server_encrypt_decrypt_provider
+
+| Name | iotdb_server_encrypt_decrypt_provider |
+| ----------- | ------------- |
+| Description | Encryption provider class |
+| Type | String |
+| Default | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt |
+| Effective | Modify before the first startup. |
+
+- iotdb_server_encrypt_decrypt_provider_parameter
+
+| Name | iotdb_server_encrypt_decrypt_provider_parameter |
+| ----------- | ------------- |
+| Description | Parameters for the encryption provider class |
+| Type | String |
+| Default | None |
+| Effective | Modify before the first startup. |
+
+- author_cache_size
+
+| Name | author_cache_size |
+| ----------- | ------------- |
+| Description | Cache size for users and roles |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- author_cache_expire_time
+
+| Name | author_cache_expire_time |
+| ----------- | ------------- |
+| Description | Cache expiration time for users and roles |
+| Type | int32 |
+| Default | 30 |
+| Effective | Restart required. |
+
+### 4.25 UDF Configuration
+
+- udf_initial_byte_array_length_for_memory_control
+
+| Name | udf_initial_byte_array_length_for_memory_control |
+| ----------- | ------------- |
+| Description | Used to estimate the memory usage of text fields in a UDF query. It is recommended to set this value to be slightly larger than the average length of all text records. |
+| Type | int32 |
+| Default | 48 |
+| Effective | Restart required. |
+
+- udf_memory_budget_in_mb
+
+| Name | udf_memory_budget_in_mb |
+| ----------- | ------------- |
+| Description | How much memory may be used in ONE UDF query (in MB). The upper limit is 20% of the memory allocated for reads. |
+| Type | Float |
+| Default | 30.0 |
+| Effective | Restart required. |
+
+- udf_reader_transformer_collector_memory_proportion
+
+| Name | udf_reader_transformer_collector_memory_proportion |
+| ----------- | ------------- |
+| Description | UDF memory allocation ratio. The parameter form is a:b:c, where a, b, and c are integers. |
+| Type | String |
+| Default | 1:1:1 |
+| Effective | Restart required. |
+
+- udf_lib_dir
+
+| Name | udf_lib_dir |
+| ----------- | ------------- |
+| Description | The UDF lib directory |
+| Type | String |
+| Default | ext/udf(Windows:ext\\udf) |
+| Effective | Restart required. |
+
+### 4.26 Trigger Configuration
+
+- trigger_lib_dir
+
+| Name | trigger_lib_dir |
+| ----------- | ------------- |
+| Description | The trigger lib directory |
+| Type | String |
+| Default | ext/trigger |
+| Effective | Restart required. |
+
+- stateful_trigger_retry_num_when_not_found
+
+| Name | stateful_trigger_retry_num_when_not_found |
+| ----------- | ------------- |
+| Description | How many times to retry finding an instance of a stateful trigger on DataNodes |
+| Type | int32 |
+| Default | 3 |
+| Effective | Restart required. |
+
+### 4.27 Select-Into Configuration
+
+- into_operation_buffer_size_in_byte
+
+| Name | into_operation_buffer_size_in_byte |
+| ----------- | ------------- |
+| Description | The maximum memory occupied by the data to be written when executing select-into statements. |
+| Type | long |
+| Default | 104857600 |
+| Effective | Hot reload |
+
+- select_into_insert_tablet_plan_row_limit
+
+| Name | select_into_insert_tablet_plan_row_limit |
+| ----------- | ------------- |
+| Description | The maximum number of rows that can be processed in an insert-tablet-plan when executing select-into statements. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Hot reload |
+
+- into_operation_execution_thread_count
+
+| Name | into_operation_execution_thread_count |
+| ----------- | ------------- |
+| Description | The number of threads in the thread pool that executes insert-tablet tasks |
+| Type | int32 |
+| Default | 2 |
+| Effective | Restart required. |
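+
+For instance, a sketch of select-into sizing using the default values above (illustrative, not a sizing recommendation):
+
+```properties
+# Cap the memory held by pending select-into writes at 100 MB
+into_operation_buffer_size_in_byte=104857600
+# Process at most 10000 rows per insert-tablet-plan
+select_into_insert_tablet_plan_row_limit=10000
+```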
+
+### 4.28 Continuous Query Configuration
+
+- continuous_query_submit_thread_count
+
+| Name | continuous_query_submit_thread_count |
+| ----------- | ------------- |
+| Description | The number of threads in the scheduled thread pool that submits continuous query tasks periodically |
+| Type | int32 |
+| Default | 2 |
+| Effective | Restart required. |
+
+- continuous_query_min_every_interval_in_ms
+
+| Name | continuous_query_min_every_interval_in_ms |
+| ----------- | ------------- |
+| Description | The minimum value of the continuous query execution time interval |
+| Type | long (duration) |
+| Default | 1000 |
+| Effective | Restart required. |
+
+### 4.29 Pipe Configuration
+
+- pipe_lib_dir
+
+| Name | pipe_lib_dir |
+| ----------- | ------------- |
+| Description | The pipe lib directory. |
+| Type | String |
+| Default | ext/pipe |
+| Effective | Modification not supported |
+
+- pipe_subtask_executor_max_thread_num
+
+| Name | pipe_subtask_executor_max_thread_num |
+| ----------- | ------------- |
+| Description | The maximum number of threads that can be used to execute pipe subtasks in PipeSubtaskExecutor. The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)). |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+- pipe_sink_timeout_ms
+
+| Name | pipe_sink_timeout_ms |
+| ----------- | ------------- |
+| Description | The connection timeout (in milliseconds) for the thrift client. |
+| Type | int |
+| Default | 900000 |
+| Effective | Restart required. |
+
+- pipe_sink_selector_number
+
+| Name | pipe_sink_selector_number |
+| ----------- | ------------- |
+| Description | The maximum number of selectors that can be used in the sink. It is recommended to set this value less than or equal to pipe_sink_max_client_number. |
+| Type | int |
+| Default | 4 |
+| Effective | Restart required. |
+
+- pipe_sink_max_client_number
+
+| Name | pipe_sink_max_client_number |
+| ----------- | ------------- |
+| Description | The maximum number of clients that can be used in the sink. |
+| Type | int |
+| Default | 16 |
+| Effective | Restart required. |
+
+- pipe_air_gap_receiver_enabled
+
+| Name | pipe_air_gap_receiver_enabled |
+| ----------- | ------------- |
+| Description | Whether to enable receiving pipe data through an air gap. The receiver can only return 0 or 1 in TCP mode to indicate whether the data was received successfully. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- pipe_air_gap_receiver_port
+
+| Name | pipe_air_gap_receiver_port |
+| ----------- | ------------- |
+| Description | The port for the server to receive pipe data through an air gap. |
+| Type | int |
+| Default | 9780 |
+| Effective | Restart required. |
+
+- pipe_all_sinks_rate_limit_bytes_per_second
+
+| Name | pipe_all_sinks_rate_limit_bytes_per_second |
+| ----------- | ------------- |
+| Description | The total number of bytes that all pipe sinks can transfer per second. A value less than or equal to 0 means no limit; the default is -1 (no limit). |
+| Type | double |
+| Default | -1 |
+| Effective | Hot reload |
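+
+As an illustration (values are examples only), the sink connection pool and the global rate limit can be configured together:
+
+```properties
+# Up to 16 clients per sink, driven by 4 selector threads
+pipe_sink_selector_number=4
+pipe_sink_max_client_number=16
+# No global rate limit across all pipe sinks (-1 = unlimited)
+pipe_all_sinks_rate_limit_bytes_per_second=-1
+```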
+
+### 4.30 RatisConsensus Configuration
+
+- config_node_ratis_log_appender_buffer_size_max
+
+| Name | config_node_ratis_log_appender_buffer_size_max |
+| ----------- | ------------- |
+| Description | Max payload size for a single log-sync RPC from leader to follower of ConfigNode (in bytes, 16MB by default) |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_appender_buffer_size_max
+
+| Name | schema_region_ratis_log_appender_buffer_size_max |
+| ----------- | ------------- |
+| Description | Max payload size for a single log-sync RPC from leader to follower of SchemaRegion (in bytes, 16MB by default) |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- data_region_ratis_log_appender_buffer_size_max
+
+| Name | data_region_ratis_log_appender_buffer_size_max |
+| ----------- | ------------- |
+| Description | Max payload size for a single log-sync RPC from leader to follower of DataRegion (in bytes, 16MB by default) |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- config_node_ratis_snapshot_trigger_threshold
+
+| Name | config_node_ratis_snapshot_trigger_threshold |
+| ----------- | ------------- |
+| Description | The maximum number of logs before a snapshot of ConfigNode is triggered |
+| Type | int32 |
+| Default | 400,000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_snapshot_trigger_threshold
+
+| Name | schema_region_ratis_snapshot_trigger_threshold |
+| ----------- | ------------- |
+| Description | The maximum number of logs before a snapshot of SchemaRegion is triggered |
+| Type | int32 |
+| Default | 400,000 |
+| Effective | Restart required. |
+
+- data_region_ratis_snapshot_trigger_threshold
+
+| Name | data_region_ratis_snapshot_trigger_threshold |
+| ----------- | ------------- |
+| Description | The maximum number of logs before a snapshot of DataRegion is triggered |
+| Type | int32 |
+| Default | 400,000 |
+| Effective | Restart required. |
+
+- config_node_ratis_log_unsafe_flush_enable
+
+| Name | config_node_ratis_log_unsafe_flush_enable |
+| ----------- | ------------- |
+| Description | Whether ConfigNode is allowed to flush Raft logs asynchronously |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_unsafe_flush_enable
+
+| Name | schema_region_ratis_log_unsafe_flush_enable |
+| ----------- | ------------- |
+| Description | Whether SchemaRegion is allowed to flush Raft logs asynchronously |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- data_region_ratis_log_unsafe_flush_enable
+
+| Name | data_region_ratis_log_unsafe_flush_enable |
+| ----------- | ------------- |
+| Description | Whether DataRegion is allowed to flush Raft logs asynchronously |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
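+
+For example, asynchronous Raft log flushing can be enabled per region type (a sketch; enabling it trades durability on sudden power loss for write throughput):
+
+```properties
+# Allow DataRegions to flush Raft logs asynchronously
+data_region_ratis_log_unsafe_flush_enable=true
+```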
+
+- config_node_ratis_log_segment_size_max_in_byte
+
+| Name | config_node_ratis_log_segment_size_max_in_byte |
+| ----------- | ------------- |
+| Description | Max capacity of a RaftLog segment file of ConfigNode (in bytes, 24MB by default) |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_segment_size_max_in_byte
+
+| Name | schema_region_ratis_log_segment_size_max_in_byte |
+| ----------- | ------------- |
+| Description | Max capacity of a RaftLog segment file of SchemaRegion (in bytes, 24MB by default) |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- data_region_ratis_log_segment_size_max_in_byte
+
+| Name | data_region_ratis_log_segment_size_max_in_byte |
+| ----------- | ------------- |
+| Description | Max capacity of a RaftLog segment file of DataRegion (in bytes, 24MB by default) |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- config_node_simple_consensus_log_segment_size_max_in_byte
+
+| Name | config_node_simple_consensus_log_segment_size_max_in_byte |
+| ----------- | ------------- |
+| Description | Max capacity of a simple log segment file of ConfigNode (in bytes, 24MB by default) |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- config_node_ratis_grpc_flow_control_window
+
+| Name | config_node_ratis_grpc_flow_control_window |
+| ----------- | ------------- |
+| Description | ConfigNode flow control window for the Ratis gRPC log appender |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+- schema_region_ratis_grpc_flow_control_window
+
+| Name | schema_region_ratis_grpc_flow_control_window |
+| ----------- | ------------- |
+| Description | Schema region flow control window for the Ratis gRPC log appender |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+- data_region_ratis_grpc_flow_control_window
+
+| Name | data_region_ratis_grpc_flow_control_window |
+| ----------- | ------------- |
+| Description | Data region flow control window for the Ratis gRPC log appender |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+- config_node_ratis_grpc_leader_outstanding_appends_max
+
+| Name | config_node_ratis_grpc_leader_outstanding_appends_max |
+| ----------- | ------------- |
+| Description | ConfigNode gRPC pipeline concurrency threshold |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- schema_region_ratis_grpc_leader_outstanding_appends_max
+
+| Name | schema_region_ratis_grpc_leader_outstanding_appends_max |
+| ----------- | ------------- |
+| Description | Schema region gRPC pipeline concurrency threshold |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- data_region_ratis_grpc_leader_outstanding_appends_max
+
+| Name | data_region_ratis_grpc_leader_outstanding_appends_max |
+| ----------- | ------------- |
+| Description | Data region gRPC pipeline concurrency threshold |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. 
| + +- config_node_ratis_log_force_sync_num + +| Name | config_node_ratis_log_force_sync_num | +| ----------- | ------------------------------------ | +| Description | config node fsync threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- schema_region_ratis_log_force_sync_num + +| Name | schema_region_ratis_log_force_sync_num | +| ----------- | -------------------------------------- | +| Description | schema region fsync threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- data_region_ratis_log_force_sync_num + +| Name | data_region_ratis_log_force_sync_num | +| ----------- | ------------------------------------ | +| Description | data region fsync threshold | +| Type | int32 | +| Default | 128 | +| Effective | Restart required. | + +- config_node_ratis_rpc_leader_election_timeout_min_ms + +| Name | config_node_ratis_rpc_leader_election_timeout_min_ms | +| ----------- | ---------------------------------------------------- | +| Description | confignode leader min election timeout | +| Type | int32 | +| Default | 2000ms | +| Effective | Restart required. | + +- schema_region_ratis_rpc_leader_election_timeout_min_ms + +| Name | schema_region_ratis_rpc_leader_election_timeout_min_ms | +| ----------- | ------------------------------------------------------ | +| Description | schema region leader min election timeout | +| Type | int32 | +| Default | 2000ms | +| Effective | Restart required. | + +- data_region_ratis_rpc_leader_election_timeout_min_ms + +| Name | data_region_ratis_rpc_leader_election_timeout_min_ms | +| ----------- | ---------------------------------------------------- | +| Description | data region leader min election timeout | +| Type | int32 | +| Default | 2000ms | +| Effective | Restart required. | + +- config_node_ratis_rpc_leader_election_timeout_max_ms + +| Name | config_node_ratis_rpc_leader_election_timeout_max_ms | +| ----------- | ---------------------------------------------------- | +| Description | confignode leader max election timeout | +| Type | int32 | +| Default | 4000ms | +| Effective | Restart required. | + +- schema_region_ratis_rpc_leader_election_timeout_max_ms + +| Name | schema_region_ratis_rpc_leader_election_timeout_max_ms | +| ----------- | ------------------------------------------------------ | +| Description | schema region leader max election timeout | +| Type | int32 | +| Default | 4000ms | +| Effective | Restart required. | + +- data_region_ratis_rpc_leader_election_timeout_max_ms + +| Name | data_region_ratis_rpc_leader_election_timeout_max_ms | +| ----------- | ---------------------------------------------------- | +| Description | data region leader max election timeout | +| Type | int32 | +| Default | 4000ms | +| Effective | Restart required. | + +- config_node_ratis_request_timeout_ms + +| Name | config_node_ratis_request_timeout_ms | +| ----------- | --------------------------------------- | +| Description | confignode ratis client retry threshold | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. | + +- schema_region_ratis_request_timeout_ms + +| Name | schema_region_ratis_request_timeout_ms | +| ----------- | ------------------------------------------ | +| Description | schema region ratis client retry threshold | +| Type | int32 | +| Default | 10000 | +| Effective | Restart required. 
|
+
+- data_region_ratis_request_timeout_ms
+
+| Name | data_region_ratis_request_timeout_ms |
+| ----------- | ------------- |
+| Description | data region ratis client retry threshold |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- config_node_ratis_max_retry_attempts
+
+| Name | config_node_ratis_max_retry_attempts |
+| ----------- | ------------- |
+| Description | confignode ratis client max retry times |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- config_node_ratis_initial_sleep_time_ms
+
+| Name | config_node_ratis_initial_sleep_time_ms |
+| ----------- | ------------- |
+| Description | confignode ratis client initial sleep time |
+| Type | int32 |
+| Default | 100ms |
+| Effective | Restart required. |
+
+- config_node_ratis_max_sleep_time_ms
+
+| Name | config_node_ratis_max_sleep_time_ms |
+| ----------- | ------------- |
+| Description | confignode ratis client max retry sleep time |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_max_retry_attempts
+
+| Name | schema_region_ratis_max_retry_attempts |
+| ----------- | ------------- |
+| Description | schema region ratis client max retry times |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- schema_region_ratis_initial_sleep_time_ms
+
+| Name | schema_region_ratis_initial_sleep_time_ms |
+| ----------- | ------------- |
+| Description | schema region ratis client initial sleep time |
+| Type | int32 |
+| Default | 100ms |
+| Effective | Restart required. |
+
+- schema_region_ratis_max_sleep_time_ms
+
+| Name | schema_region_ratis_max_sleep_time_ms |
+| ----------- | ------------- |
+| Description | schema region ratis client max retry sleep time |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- data_region_ratis_max_retry_attempts
+
+| Name | data_region_ratis_max_retry_attempts |
+| ----------- | ------------- |
+| Description | data region ratis client max retry times |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- data_region_ratis_initial_sleep_time_ms
+
+| Name | data_region_ratis_initial_sleep_time_ms |
+| ----------- | ------------- |
+| Description | data region ratis client initial sleep time |
+| Type | int32 |
+| Default | 100ms |
+| Effective | Restart required. |
+
+- data_region_ratis_max_sleep_time_ms
+
+| Name | data_region_ratis_max_sleep_time_ms |
+| ----------- | ------------- |
+| Description | data region ratis client max retry sleep time |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- ratis_first_election_timeout_min_ms
+
+| Name | ratis_first_election_timeout_min_ms |
+| ----------- | ------------- |
+| Description | Ratis first election min timeout |
+| Type | int64 |
+| Default | 50 (ms) |
+| Effective | Restart required. |
+
+- ratis_first_election_timeout_max_ms
+
+| Name | ratis_first_election_timeout_max_ms |
+| ----------- | ------------- |
+| Description | Ratis first election max timeout |
+| Type | int64 |
+| Default | 150 (ms) |
+| Effective | Restart required. |
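+
+As a sketch (the default values from the tables above; not tuning advice), client retry backoff and first-election timing are related as follows:
+
+```properties
+# Retry up to 10 times, sleeping 100 ms initially and at most 1000 ms between attempts
+data_region_ratis_max_retry_attempts=10
+data_region_ratis_initial_sleep_time_ms=100
+data_region_ratis_max_sleep_time_ms=1000
+# The first leader election fires within 50-150 ms
+ratis_first_election_timeout_min_ms=50
+ratis_first_election_timeout_max_ms=150
+```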
+
+- config_node_ratis_preserve_logs_num_when_purge
+
+| Name | config_node_ratis_preserve_logs_num_when_purge |
+| ----------- | ------------- |
+| Description | Number of logs preserved when ConfigNode takes a snapshot and purges old logs |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_preserve_logs_num_when_purge
+
+| Name | schema_region_ratis_preserve_logs_num_when_purge |
+| ----------- | ------------- |
+| Description | Number of logs preserved when SchemaRegion takes a snapshot and purges old logs |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- data_region_ratis_preserve_logs_num_when_purge
+
+| Name | data_region_ratis_preserve_logs_num_when_purge |
+| ----------- | ------------- |
+| Description | Number of logs preserved when DataRegion takes a snapshot and purges old logs |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- config_node_ratis_log_max_size
+
+| Name | config_node_ratis_log_max_size |
+| ----------- | ------------- |
+| Description | config node Raft Log disk size control |
+| Type | int64 |
+| Default | 2147483648 (2GB) |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_max_size
+
+| Name | schema_region_ratis_log_max_size |
+| ----------- | ------------- |
+| Description | schema region Raft Log disk size control |
+| Type | int64 |
+| Default | 2147483648 (2GB) |
+| Effective | Restart required. |
+
+- data_region_ratis_log_max_size
+
+| Name | data_region_ratis_log_max_size |
+| ----------- | ------------- |
+| Description | data region Raft Log disk size control |
+| Type | int64 |
+| Default | 21474836480 (20GB) |
+| Effective | Restart required. |
+
+- config_node_ratis_periodic_snapshot_interval
+
+| Name | config_node_ratis_periodic_snapshot_interval |
+| ----------- | ------------- |
+| Description | config node Raft periodic snapshot interval |
+| Type | int64 |
+| Default | 86400 (s) |
+| Effective | Restart required. |
+
+- schema_region_ratis_periodic_snapshot_interval
+
+| Name | schema_region_ratis_periodic_snapshot_interval |
+| ----------- | ------------- |
+| Description | schema region Raft periodic snapshot interval |
+| Type | int64 |
+| Default | 86400 (s) |
+| Effective | Restart required. |
+
+- data_region_ratis_periodic_snapshot_interval
+
+| Name | data_region_ratis_periodic_snapshot_interval |
+| ----------- | ------------- |
+| Description | data region Raft periodic snapshot interval |
+| Type | int64 |
+| Default | 86400 (s) |
+| Effective | Restart required. |
+
+### 4.31 IoTConsensusV2 Configuration
+
+- iot_consensus_v2_pipeline_size
+
+| Name | iot_consensus_v2_pipeline_size |
+| ----------- | ------------- |
+| Description | Default event buffer size for the connector and receiver in IoTConsensusV2 |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+- iot_consensus_v2_mode
+
+| Name | iot_consensus_v2_mode |
+| ----------- | ------------- |
+| Description | IoTConsensusV2 mode. |
+| Type | String |
+| Default | batch |
+| Effective | Restart required. |
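+
+For instance (illustrative only, using the defaults above), IoTConsensusV2 is tuned with the two parameters of this section:
+
+```properties
+# Buffer up to 5 events between connector and receiver
+iot_consensus_v2_pipeline_size=5
+# Replication mode; the default is batch
+iot_consensus_v2_mode=batch
+```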
+
+### 4.32 Procedure Configuration
+
+- procedure_core_worker_thread_count
+
+| Name | procedure_core_worker_thread_count |
+| ----------- | ------------- |
+| Description | Default number of worker threads |
+| Type | int32 |
+| Default | 4 |
+| Effective | Restart required. |
+
+- procedure_completed_clean_interval
+
+| Name | procedure_completed_clean_interval |
+| ----------- | ------------- |
+| Description | Time interval at which the completed-procedure cleaner runs, in seconds |
+| Type | int32 |
+| Default | 30(s) |
+| Effective | Restart required. |
+
+- procedure_completed_evict_ttl
+
+| Name | procedure_completed_evict_ttl |
+| ----------- | ------------- |
+| Description | TTL of completed procedures, in seconds |
+| Type | int32 |
+| Default | 60(s) |
+| Effective | Restart required. |
+
+### 4.33 MQTT Broker Configuration
+
+- enable_mqtt_service
+
+| Name | enable_mqtt_service |
+| ----------- | ------------- |
+| Description | Whether to enable the MQTT service. |
+| Type | Boolean |
+| Default | false |
+| Effective | Hot reload |
+
+- mqtt_host
+
+| Name | mqtt_host |
+| ----------- | ------------- |
+| Description | The MQTT service binding host. |
+| Type | String |
+| Default | 127.0.0.1 |
+| Effective | Hot reload |
+
+- mqtt_port
+
+| Name | mqtt_port |
+| ----------- | ------------- |
+| Description | The MQTT service binding port. |
+| Type | int32 |
+| Default | 1883 |
+| Effective | Hot reload |
+
+- mqtt_handler_pool_size
+
+| Name | mqtt_handler_pool_size |
+| ----------- | ------------- |
+| Description | The handler pool size for handling MQTT messages. |
+| Type | int32 |
+| Default | 1 |
+| Effective | Hot reload |
+
+- mqtt_payload_formatter
+
+| Name | mqtt_payload_formatter |
+| ----------- | ------------- |
+| Description | The MQTT message payload formatter. |
+| Type | String |
+| Default | json |
+| Effective | Hot reload |
+
+- mqtt_max_message_size
+
+| Name | mqtt_max_message_size |
+| ----------- | ------------- |
+| Description | Max length of an MQTT message in bytes |
+| Type | int32 |
+| Default | 1048576 |
+| Effective | Hot reload |
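+
+As an example (values are the defaults shown above), enabling the built-in MQTT broker only requires the service switch, with the host and port optionally overridden; all of these parameters are hot-reloadable:
+
+```properties
+enable_mqtt_service=true
+mqtt_host=127.0.0.1
+mqtt_port=1883
+```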
+
+### 4.34 Audit Log Configuration
+
+- enable_audit_log
+
+| Name | enable_audit_log |
+| ----------- | ------------- |
+| Description | Whether to enable the audit log. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- audit_log_storage
+
+| Name | audit_log_storage |
+| ----------- | ------------- |
+| Description | Output location of audit logs |
+| Type | String |
+| Default | IOTDB,LOGGER |
+| Effective | Restart required. |
+
+- audit_log_operation
+
+| Name | audit_log_operation |
+| ----------- | ------------- |
+| Description | Which operation types are recorded in the audit log: DML operations on data, DDL operations on schema, and QUERY operations on data and schema |
+| Type | String |
+| Default | DML,DDL,QUERY |
+| Effective | Restart required. |
+
+- enable_audit_log_for_native_insert_api
+
+| Name | enable_audit_log_for_native_insert_api |
+| ----------- | ------------- |
+| Description | Whether the native write API records audit logs |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+### 4.35 White List Configuration
+
+- enable_white_list
+
+| Name | enable_white_list |
+| ----------- | ------------- |
+| Description | Whether to enable the white list |
+| Type | Boolean |
+| Default | false |
+| Effective | Hot reload |
+
+### 4.36 IoTDB-AI Configuration
+
+- model_inference_execution_thread_count
+
+| Name | model_inference_execution_thread_count |
+| ----------- | ------------- |
+| Description | The number of threads available for model inference operations. |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+### 4.37 Load TsFile Configuration
+
+- load_clean_up_task_execution_delay_time_seconds
+
+| Name | load_clean_up_task_execution_delay_time_seconds |
+| ----------- | ------------- |
+| Description | The load clean-up task removes unsuccessfully loaded TsFiles after this period of time. |
+| Type | int |
+| Default | 1800 |
+| Effective | Hot reload |
+
+- load_write_throughput_bytes_per_second
+
+| Name | load_write_throughput_bytes_per_second |
+| ----------- | ------------- |
+| Description | The maximum disk write throughput in bytes per second when loading TsFiles. |
+| Type | int |
+| Default | -1 |
+| Effective | Hot reload |
+
+- load_active_listening_enable
+
+| Name | load_active_listening_enable |
+| ----------- | ------------- |
+| Description | Whether to enable the active listening mode for TsFile loading. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- load_active_listening_dirs
+
+| Name | load_active_listening_dirs |
+| ----------- | ------------- |
+| Description | The directories actively listened to for TsFile loading. Multiple directories should be separated by ','. |
+| Type | String |
+| Default | ext/load/pending |
+| Effective | Hot reload |
+
+- load_active_listening_fail_dir
+
+| Name | load_active_listening_fail_dir |
+| ----------- | ------------- |
+| Description | The directory where TsFiles are moved if the active listening mode fails to load them. |
+| Type | String |
+| Default | ext/load/failed |
+| Effective | Hot reload |
+
+- load_active_listening_max_thread_num
+
+| Name | load_active_listening_max_thread_num |
+| ----------- | ------------- |
+| Description | The maximum number of threads that can be used to load TsFiles actively. When this parameter is commented out or set to a value <= 0, the number of CPU cores is used. |
+| Type | Long |
+| Default | 0 |
+| Effective | Restart required. |
+
+- load_active_listening_check_interval_seconds
+
+| Name | load_active_listening_check_interval_seconds |
+| ----------- | ------------- |
+| Description | The interval, in seconds, at which the active listening mode checks the directories specified in `load_active_listening_dirs`. The active listening mode will check the directories every `load_active_listening_check_interval_seconds` seconds. |
+| Type | Long |
+| Default | 5 |
+| Effective | Restart required. |
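+
+For example (a sketch using the defaults above), active listening watches one or more directories and moves files that fail to load aside:
+
+```properties
+load_active_listening_enable=true
+# Comma-separated list of watched directories
+load_active_listening_dirs=ext/load/pending
+load_active_listening_fail_dir=ext/load/failed
+# Poll the watched directories every 5 seconds
+load_active_listening_check_interval_seconds=5
+```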
+
+- last_cache_operation_on_load
+
+| Name | last_cache_operation_on_load |
+| ----------- | ------------- |
+| Description | The operation performed on LastCache when a TsFile is successfully loaded. `UPDATE`: use the data in the TsFile to update LastCache; `UPDATE_NO_BLOB`: similar to UPDATE, but invalidates LastCache for blob series; `CLEAN_DEVICE`: invalidate LastCache of devices contained in the TsFile; `CLEAN_ALL`: clean the whole LastCache. |
+| Type | String |
+| Default | UPDATE_NO_BLOB |
+| Effective | Restart required. |
+
+- cache_last_values_for_load
+
+| Name | cache_last_values_for_load |
+| ----------- | ------------- |
+| Description | Whether to cache last values before loading a TsFile. Only effective when `last_cache_operation_on_load=UPDATE_NO_BLOB` or `last_cache_operation_on_load=UPDATE`. When set to true, blob series will be ignored even with `last_cache_operation_on_load=UPDATE`. Enabling this will increase the memory footprint while loading TsFiles. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+- cache_last_values_memory_budget_in_byte
+
+| Name | cache_last_values_memory_budget_in_byte |
+| ----------- | ------------- |
+| Description | When `cache_last_values_for_load=true`, the maximum memory that can be used to cache last values. If this value is exceeded, the cached values will be abandoned and last values will be read from the TsFile in a streaming manner. |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+### 4.38 Dispatch Retry Configuration
+
+- write_request_remote_dispatch_max_retry_duration_in_ms
+
+| Name | write_request_remote_dispatch_max_retry_duration_in_ms |
+| ----------- | ------------- |
+| Description | The maximum retry duration for remote dispatching of write requests, in milliseconds. |
+| Type | Long |
+| Default | 60000 |
+| Effective | Hot reload |
+
+- enable_retry_for_unknown_error
+
+| Name | enable_retry_for_unknown_error |
+| ----------- | ------------- |
+| Description | Whether to retry on unknown errors. |
+| Type | boolean |
+| Default | false |
+| Effective | Hot reload |
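+
+As a sketch (assuming the parameter names shown above; values are the defaults):
+
+```properties
+# Retry remote dispatch of write requests for at most 60 seconds
+write_request_remote_dispatch_max_retry_duration_in_ms=60000
+# Do not retry on unknown errors
+enable_retry_for_unknown_error=false
+```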
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Reference/System-Config-Manual_timecho.md b/src/UserGuide/latest-Table/Reference/System-Config-Manual_timecho.md
new file mode 100644
index 000000000..d32adb558
--- /dev/null
+++ b/src/UserGuide/latest-Table/Reference/System-Config-Manual_timecho.md
@@ -0,0 +1,3384 @@
+
+
+# Config Manual
+
+## 1. IoTDB Configuration Files
+
+The configuration files for IoTDB are located in the `conf` folder under the IoTDB installation directory. Key configuration files include:
+
+1. `confignode-env.sh` **/** `confignode-env.bat`:
+   1. Environment configuration file for ConfigNode.
+   2. Used to configure memory size and other environment settings for ConfigNode.
+2. `datanode-env.sh` **/** `datanode-env.bat`:
+   1. Environment configuration file for DataNode.
+   2. Used to configure memory size and other environment settings for DataNode.
+3. `iotdb-system.properties`:
+   1. Main configuration file for IoTDB.
+   2. Contains configurable parameters for IoTDB.
+4. `iotdb-system.properties.template`:
+   1. Template for the `iotdb-system.properties` file.
+   2. Provides a reference for all available configuration parameters.
+
+## 2. Modify Configurations
+
+### 2.1 **Modify Existing Parameters**
+
+- Parameters already present in the `iotdb-system.properties` file can be directly modified.
+
+### 2.2 **Adding New Parameters**
+
+- For parameters not listed in `iotdb-system.properties`, you can find them in the `iotdb-system.properties.template` file.
+- Copy the desired parameter from the template file to `iotdb-system.properties` and modify its value.
+
+### 2.3 Configuration Update Methods
+
+Different configuration parameters have different update methods, categorized as follows:
+
+1. **Modify before the first startup**:
+   1. These parameters can only be modified before the first startup of ConfigNode/DataNode.
+   2. Modifying them after the first startup will prevent ConfigNode/DataNode from starting.
+2. **Restart Required for Changes to Take Effect**:
+   1. These parameters can be modified after ConfigNode/DataNode has started.
+   2. However, a restart of ConfigNode/DataNode is required for the changes to take effect.
+3. **Hot Reload**:
+   1. These parameters can be modified while ConfigNode/DataNode is running.
+   2. After modification, use the following SQL commands to apply the changes:
+      - `load configuration`: Reloads the configuration.
+      - `set configuration key1 = 'value1'`: Updates specific configuration parameters.
+
+## 3. Environment Parameters
+
+The environment configuration files (`confignode-env.sh/bat` and `datanode-env.sh/bat`) are used to configure Java environment parameters for ConfigNode and DataNode, such as JVM settings. These configurations are passed to the JVM when ConfigNode or DataNode starts.
+
+### 3.1 **confignode-env.sh/bat**
+
+- MEMORY_SIZE
+
+| Name | MEMORY_SIZE |
+| ----------- | ------------- |
+| Description | Memory size allocated when IoTDB ConfigNode starts. |
+| Type | String |
+| Default | Depends on the operating system and machine configuration. Defaults to 3/10 of the machine's memory, capped at 16G. |
+| Effective | Restart required |
+
+- ON_HEAP_MEMORY
+
+| Name | ON_HEAP_MEMORY |
+| ----------- | ------------- |
+| Description | On-heap memory size available for IoTDB ConfigNode. Previously named `MAX_HEAP_SIZE`. |
+| Type | String |
+| Default | Depends on the `MEMORY_SIZE` configuration. |
+| Effective | Restart required |
+
+- OFF_HEAP_MEMORY
+
+| Name | OFF_HEAP_MEMORY |
+| ----------- | ------------- |
+| Description | Off-heap memory size available for IoTDB ConfigNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. |
+| Type | String |
+| Default | Depends on the `MEMORY_SIZE` configuration. 
| +| Effective | Restart required | + +### 3.2 **datanode-env.sh/bat** + +- MEMORY_SIZE + +| Name | MEMORY_SIZE | +| ----------- | ------------------------------------------------------------ | +| Description | Memory size allocated when IoTDB DataNode starts. | +| Type | String | +| Default | Depends on the operating system and machine configuration. Defaults to 1/2 of the machine's memory. | +| Effective | Restart required | + +- ON_HEAP_MEMORY + +| Name | ON_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | On-heap memory size available for IoTDB DataNode. Previously named `MAX_HEAP_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. | +| Effective | Restart required | + +- OFF_HEAP_MEMORY + +| Name | OFF_HEAP_MEMORY | +| ----------- | ------------------------------------------------------------ | +| Description | Off-heap memory size available for IoTDB DataNode. Previously named `MAX_DIRECT_MEMORY_SIZE`. | +| Type | String | +| Default | Depends on the `MEMORY_SIZE` configuration. | +| Effective | Restart required | + +## 4. System Parameters (`iotdb-system.properties.template`) + +The `iotdb-system.properties` file contains various configurations for managing IoTDB clusters, nodes, replication, directories, monitoring, SSL, connections, object storage, tier management, and REST services. Below is a detailed breakdown of the parameters: + +### 4.1 Cluster Configuration + +- cluster_name + +| Name | cluster_name | +| ----------- | --------------------------------------------------------- | +| Description | Name of the cluster. | +| Type | String | +| Default | default_cluster | +| Effective | Use CLI: `set configuration cluster_name='xxx'`. | +| Note | Changes are distributed across nodes. Changes may not propagate to all nodes in case of network issues or node failures. Nodes that fail to update must manually modify `cluster_name` in their configuration files and restart. Under normal circumstances, it is not recommended to modify `cluster_name` by manually modifying configuration files or to perform hot-loading via `load configuration` method. | + +### 4.2 Seed ConfigNode + +- cn_seed_config_node + +| Name | cn_seed_config_node | +| ----------- | ------------------------------------------------------------ | +| Description | Address of the seed ConfigNode for Confignode to join the cluster. | +| Type | String | +| Default | 127.0.0.1:10710 | +| Effective | Modify before the first startup. | + +- dn_seed_config_node + +| Name | dn_seed_config_node | +| ----------- | ------------------------------------------------------------ | +| Description | Address of the seed ConfigNode for Datanode to join the cluster. | +| Type | String | +| Default | 127.0.0.1:10710 | +| Effective | Modify before the first startup. | + +### 4.3 Node RPC Configuration + +- cn_internal_address + +| Name | cn_internal_address | +| ----------- | ---------------------------------------------- | +| Description | Internal address for ConfigNode communication. | +| Type | String | +| Default | 127.0.0.1 | +| Effective | Modify before the first startup. | + +- cn_internal_port + +| Name | cn_internal_port | +| ----------- | ------------------------------------------- | +| Description | Port for ConfigNode internal communication. | +| Type | Short Int : [0,65535] | +| Default | 10710 | +| Effective | Modify before the first startup. 
| + +- cn_consensus_port + +| Name | cn_consensus_port | +| ----------- | ----------------------------------------------------- | +| Description | Port for ConfigNode consensus protocol communication. | +| Type | Short Int : [0,65535] | +| Default | 10720 | +| Effective | Modify before the first startup. | + +- dn_rpc_address + +| Name | dn_rpc_address | +| ----------- |---------------------------------| +| Description | Address for client RPC service. | +| Type | String | +| Default | 127.0.0.1 | +| Effective | Restart required. | + +- dn_rpc_port + +| Name | dn_rpc_port | +| ----------- | ---------------------------- | +| Description | Port for client RPC service. | +| Type | Short Int : [0,65535] | +| Default | 6667 | +| Effective | Restart required. | + +- dn_internal_address + +| Name | dn_internal_address | +| ----------- | -------------------------------------------- | +| Description | Internal address for DataNode communication. | +| Type | string | +| Default | 127.0.0.1 | +| Effective | Modify before the first startup. | + +- dn_internal_port + +| Name | dn_internal_port | +| ----------- | ----------------------------------------- | +| Description | Port for DataNode internal communication. | +| Type | int | +| Default | 10730 | +| Effective | Modify before the first startup. | + +- dn_mpp_data_exchange_port + +| Name | dn_mpp_data_exchange_port | +| ----------- | -------------------------------- | +| Description | Port for MPP data exchange. | +| Type | int | +| Default | 10740 | +| Effective | Modify before the first startup. | + +- dn_schema_region_consensus_port + +| Name | dn_schema_region_consensus_port | +| ----------- | ------------------------------------------------------------ | +| Description | Port for Datanode SchemaRegion consensus protocol communication. | +| Type | int | +| Default | 10750 | +| Effective | Modify before the first startup. | + +- dn_data_region_consensus_port + +| Name | dn_data_region_consensus_port | +| ----------- | ------------------------------------------------------------ | +| Description | Port for Datanode DataRegion consensus protocol communication. | +| Type | int | +| Default | 10760 | +| Effective | Modify before the first startup. | + +- dn_join_cluster_retry_interval_ms + +| Name | dn_join_cluster_retry_interval_ms | +| ----------- | --------------------------------------------------- | +| Description | Interval for DataNode to retry joining the cluster. | +| Type | long | +| Default | 5000 | +| Effective | Restart required. | + +### 4.4 Replication configuration + +- config_node_consensus_protocol_class + +| Name | config_node_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for ConfigNode replication, only supports RatisConsensus | +| Type | String | +| Default | org.apache.iotdb.consensus.ratis.RatisConsensus | +| Effective | Modify before the first startup. | + +- schema_replication_factor + +| Name | schema_replication_factor | +| ----------- | ------------------------------------------------------------ | +| Description | Default schema replication factor for databases. | +| Type | int32 | +| Default | 1 | +| Effective | Restart required. Takes effect on the new database after restarting. | + +- schema_region_consensus_protocol_class + +| Name | schema_region_consensus_protocol_class | +| ----------- | ------------------------------------------------------------ | +| Description | Consensus protocol for schema region replication. 
Only RatisConsensus is supported when there are multiple replicas. |
+| Type | String |
+| Default | org.apache.iotdb.consensus.ratis.RatisConsensus |
+| Effective | Modify before the first startup. |
+
+- data_replication_factor
+
+| Name | data_replication_factor |
+| ----------- | ------------- |
+| Description | Default data replication factor for databases. |
+| Type | int32 |
+| Default | 1 |
+| Effective | Restart required. Takes effect on new databases after restarting. |
+
+- data_region_consensus_protocol_class
+
+| Name | data_region_consensus_protocol_class |
+| ----------- | ------------- |
+| Description | Consensus protocol for data region replication. Supports IoTConsensus or RatisConsensus when there are multiple replicas. |
+| Type | String |
+| Default | org.apache.iotdb.consensus.iot.IoTConsensus |
+| Effective | Modify before the first startup. |
+
+### 4.5 Directory configuration
+
+- cn_system_dir
+
+| Name | cn_system_dir |
+| ----------- | ------------- |
+| Description | System data storage path for ConfigNode. |
+| Type | String |
+| Default | data/confignode/system(Windows:data\\confignode\\system) |
+| Effective | Restart required |
+
+- cn_consensus_dir
+
+| Name | cn_consensus_dir |
+| ----------- | ------------- |
+| Description | Consensus protocol data storage path for ConfigNode. |
+| Type | String |
+| Default | data/confignode/consensus(Windows:data\\confignode\\consensus) |
+| Effective | Restart required |
+
+- cn_pipe_receiver_file_dir
+
+| Name | cn_pipe_receiver_file_dir |
+| ----------- | ------------- |
+| Description | Directory for pipe receiver files in ConfigNode. |
+| Type | String |
+| Default | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) |
+| Effective | Restart required |
+
+- dn_system_dir
+
+| Name | dn_system_dir |
+| ----------- | ------------- |
+| Description | Schema storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of a relative path depends on the operating system. It is recommended to use an absolute path. |
+| Type | String |
+| Default | data/datanode/system(Windows:data\\datanode\\system) |
+| Effective | Restart required |
+
+- dn_data_dirs
+
+| Name | dn_data_dirs |
+| ----------- | ------------- |
+| Description | Data storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of a relative path depends on the operating system. It is recommended to use an absolute path. |
+| Type | String |
+| Default | data/datanode/data(Windows:data\\datanode\\data) |
+| Effective | Restart required |
+
+- dn_multi_dir_strategy
+
+| Name | dn_multi_dir_strategy |
+| ----------- | ------------- |
+| Description | The strategy used by IoTDB to select directories in `data_dirs` for TsFiles. You can use either the simple class name or the fully qualified class name. The system provides the following two strategies: 1. SequenceStrategy: IoTDB selects directories sequentially, iterating through all directories in `data_dirs` in a round-robin manner. 2. MaxDiskUsableSpaceFirstStrategy: IoTDB prioritizes the directory in `data_dirs` with the largest disk free space. To implement a custom strategy: 1. Inherit the `org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy` class and implement your own strategy method. 2. Fill in the configuration item with the fully qualified class name of your implementation (package name + class name, e.g., `UserDefineStrategyPackage`). 3. Add the JAR file containing your custom class to the project. |
+| Type | String |
+| Default | SequenceStrategy |
+| Effective | Hot reload |
+
+- dn_consensus_dir
+
+| Name | dn_consensus_dir |
+| ----------- | ------------- |
+| Description | Consensus log storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of a relative path depends on the operating system. It is recommended to use an absolute path. |
+| Type | String |
+| Default | data/datanode/consensus(Windows:data\\datanode\\consensus) |
+| Effective | Restart required |
+
+- dn_wal_dirs
+
+| Name | dn_wal_dirs |
+| ----------- | ------------- |
+| Description | Write-ahead log (WAL) storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of a relative path depends on the operating system. It is recommended to use an absolute path. |
+| Type | String |
+| Default | data/datanode/wal(Windows:data\\datanode\\wal) |
+| Effective | Restart required |
+
+- dn_tracing_dir
+
+| Name | dn_tracing_dir |
+| ----------- | ------------- |
+| Description | Tracing root directory for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of a relative path depends on the operating system. It is recommended to use an absolute path. |
+| Type | String |
+| Default | datanode/tracing(Windows:datanode\\tracing) |
+| Effective | Restart required |
+
+- dn_sync_dir
+
+| Name | dn_sync_dir |
+| ----------- | ------------- |
+| Description | Sync storage path for DataNode. By default, it is stored in the data directory at the same level as the sbin directory. The starting directory of a relative path depends on the operating system. It is recommended to use an absolute path. |
+| Type | String |
+| Default | data/datanode/sync(Windows:data\\datanode\\sync) |
+| Effective | Restart required |
+
+- sort_tmp_dir
+
+| Name | sort_tmp_dir |
+| ----------- | ------------- |
+| Description | Temporary directory for sorting operations. |
+| Type | String |
+| Default | data/datanode/tmp(Windows:data\\datanode\\tmp) |
+| Effective | Restart required |
+
+- dn_pipe_receiver_file_dirs
+
+| Name | dn_pipe_receiver_file_dirs |
+| ----------- | ------------- |
+| Description | Directory for pipe receiver files in DataNode. |
+| Type | String |
+| Default | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) |
+| Effective | Restart required |
+
+- iot_consensus_v2_receiver_file_dirs
+
+| Name | iot_consensus_v2_receiver_file_dirs |
+| ----------- | ------------- |
+| Description | Directory for IoTConsensus V2 receiver files. 
+
+### 4.6 Metric Configuration
+
+- cn_metric_reporter_list
+
+| Name | cn_metric_reporter_list |
+| ----------- | ------------------------------------------------------------ |
+| Description | Systems to which ConfigNode metrics are reported. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+- cn_metric_level
+
+| Name | cn_metric_level |
+| ----------- | ------------------------------------------------------------ |
+| Description | Level of detail for ConfigNode metrics. |
+| Type | String |
+| Default | IMPORTANT |
+| Effective | Restart required. |
+
+- cn_metric_async_collect_period
+
+| Name | cn_metric_async_collect_period |
+| ----------- | ------------------------------------------------------------ |
+| Description | Period for asynchronous metric collection in ConfigNode (in seconds). |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+- cn_metric_prometheus_reporter_port
+
+| Name | cn_metric_prometheus_reporter_port |
+| ----------- | ------------------------------------------------------------ |
+| Description | Port for Prometheus metric reporting in ConfigNode. |
+| Type | int |
+| Default | 9091 |
+| Effective | Restart required. |
+
+- dn_metric_reporter_list
+
+| Name | dn_metric_reporter_list |
+| ----------- | ------------------------------------------------------------ |
+| Description | Systems to which DataNode metrics are reported. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+- dn_metric_level
+
+| Name | dn_metric_level |
+| ----------- | ------------------------------------------------------------ |
+| Description | Level of detail for DataNode metrics. |
+| Type | String |
+| Default | IMPORTANT |
+| Effective | Restart required. |
+
+- dn_metric_async_collect_period
+
+| Name | dn_metric_async_collect_period |
+| ----------- | ------------------------------------------------------------ |
+| Description | Period for asynchronous metric collection in DataNode (in seconds). |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+- dn_metric_prometheus_reporter_port
+
+| Name | dn_metric_prometheus_reporter_port |
+| ----------- | ------------------------------------------------------------ |
+| Description | Port for Prometheus metric reporting in DataNode. |
+| Type | int |
+| Default | 9092 |
+| Effective | Restart required. |
+
+- dn_metric_internal_reporter_type
+
+| Name | dn_metric_internal_reporter_type |
+| ----------- | ------------------------------------------------------------ |
+| Description | Internal reporter type for DataNode metrics, used for internal monitoring to check that data has been written and flushed successfully. |
+| Type | String |
+| Default | IOTDB |
+| Effective | Restart required. |
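+
+A rough sketch of exposing both node types to Prometheus on the default ports; `PROMETHEUS` as the reporter-list value is an assumption for illustration:
+
+```properties
+# Report ConfigNode and DataNode metrics to Prometheus (restart required).
+cn_metric_reporter_list=PROMETHEUS
+cn_metric_prometheus_reporter_port=9091
+dn_metric_reporter_list=PROMETHEUS
+dn_metric_prometheus_reporter_port=9092
+```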
+
+### 4.7 SSL Configuration
+
+- enable_thrift_ssl
+
+| Name | enable_thrift_ssl |
+| ----------- | ------------------------------------------------------------ |
+| Description | Enables SSL encryption for RPC communication. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- enable_https
+
+| Name | enable_https |
+| ----------- | ------------------------------------------------------------ |
+| Description | Enables SSL for REST services. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- key_store_path
+
+| Name | key_store_path |
+| ----------- | ------------------------------------------------------------ |
+| Description | Path to the SSL certificate. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+- key_store_pwd
+
+| Name | key_store_pwd |
+| ----------- | ------------------------------------------------------------ |
+| Description | Password for the SSL certificate. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
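+
+A sketch of enabling SSL for both RPC and REST, assuming a keystore already exists at the placeholder path (the keystore format and password are invented for illustration):
+
+```properties
+# Encrypt Thrift RPC and the REST endpoint with the same certificate.
+enable_thrift_ssl=true
+enable_https=true
+key_store_path=/etc/iotdb/ssl/keystore.jks
+key_store_pwd=changeit
+```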
+
+### 4.8 Connection Configuration
+
+- cn_rpc_thrift_compression_enable
+
+| Name | cn_rpc_thrift_compression_enable |
+| ----------- | ------------------------------------------------------------ |
+| Description | Enables Thrift compression for RPC. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- cn_rpc_max_concurrent_client_num
+
+| Name | cn_rpc_max_concurrent_client_num |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum number of concurrent RPC clients. |
+| Type | int |
+| Default | 3000 |
+| Effective | Restart required. |
+
+- cn_connection_timeout_ms
+
+| Name | cn_connection_timeout_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | Connection timeout for ConfigNode (in milliseconds). |
+| Type | int |
+| Default | 60000 |
+| Effective | Restart required. |
+
+- cn_selector_thread_nums_of_client_manager
+
+| Name | cn_selector_thread_nums_of_client_manager |
+| ----------- | ------------------------------------------------------------ |
+| Description | Number of selector threads for client management in ConfigNode. |
+| Type | int |
+| Default | 1 |
+| Effective | Restart required. |
+
+- cn_max_client_count_for_each_node_in_client_manager
+
+| Name | cn_max_client_count_for_each_node_in_client_manager |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum clients per node in the ConfigNode client manager. |
+| Type | int |
+| Default | 300 |
+| Effective | Restart required. |
+
+- dn_session_timeout_threshold
+
+| Name | dn_session_timeout_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum idle time for DataNode sessions. |
+| Type | int |
+| Default | 0 |
+| Effective | Restart required. |
+
+- dn_rpc_thrift_compression_enable
+
+| Name | dn_rpc_thrift_compression_enable |
+| ----------- | ------------------------------------------------------------ |
+| Description | Enables Thrift compression for DataNode RPC. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- dn_rpc_advanced_compression_enable
+
+| Name | dn_rpc_advanced_compression_enable |
+| ----------- | ------------------------------------------------------------ |
+| Description | Enables advanced Thrift compression for DataNode RPC. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- dn_rpc_selector_thread_count
+
+| Name | dn_rpc_selector_thread_count |
+| ----------- | ------------------------------------------------------------ |
+| Description | Number of selector threads for DataNode RPC. |
+| Type | int |
+| Default | 1 |
+| Effective | Restart required. |
+
+- dn_rpc_min_concurrent_client_num
+
+| Name | dn_rpc_min_concurrent_client_num |
+| ----------- | ------------------------------------------------------------ |
+| Description | Minimum number of concurrent RPC clients for DataNode. |
+| Type | Short Int: [0,65535] |
+| Default | 1 |
+| Effective | Restart required. |
+
+- dn_rpc_max_concurrent_client_num
+
+| Name | dn_rpc_max_concurrent_client_num |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum number of concurrent RPC clients for DataNode. |
+| Type | Short Int: [0,65535] |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- dn_thrift_max_frame_size
+
+| Name | dn_thrift_max_frame_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum frame size for RPC requests/responses. |
+| Type | long |
+| Default | 536870912 (512 MB) |
+| Effective | Restart required. |
+
+- dn_thrift_init_buffer_size
+
+| Name | dn_thrift_init_buffer_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | Initial buffer size for Thrift RPC. |
+| Type | long |
+| Default | 1024 |
+| Effective | Restart required. |
+
+- dn_connection_timeout_ms
+
+| Name | dn_connection_timeout_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | Connection timeout for DataNode (in milliseconds). |
+| Type | int |
+| Default | 60000 |
+| Effective | Restart required. |
+
+- dn_selector_thread_count_of_client_manager
+
+| Name | dn_selector_thread_count_of_client_manager |
+| ----------- | ------------------------------------------------------------ |
+| Description | Number of selector threads (TAsyncClientManager) for asynchronous clients in a ClientManager. |
+| Type | int |
+| Default | 1 |
+| Effective | Restart required. |
+
+- dn_max_client_count_for_each_node_in_client_manager
+
+| Name | dn_max_client_count_for_each_node_in_client_manager |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum clients per node in the DataNode client manager. |
+| Type | int |
+| Default | 300 |
+| Effective | Restart required. |
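+
+A sketch of DataNode RPC tuning for a deployment with many concurrent clients; the doubled values are illustrative, not recommendations:
+
+```properties
+# Allow more simultaneous RPC clients and a second selector thread.
+dn_rpc_max_concurrent_client_num=2000
+dn_rpc_selector_thread_count=2
+# Keep the 512 MB frame limit; very large requests may need more.
+dn_thrift_max_frame_size=536870912
+dn_connection_timeout_ms=60000
+```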
+
+### 4.9 Object storage management
+
+- remote_tsfile_cache_dirs
+
+| Name | remote_tsfile_cache_dirs |
+| ----------- | ------------------------------------------------------------ |
+| Description | Local cache directory for cloud storage. |
+| Type | String |
+| Default | data/datanode/data/cache |
+| Effective | Restart required. |
+
+- remote_tsfile_cache_page_size_in_kb
+
+| Name | remote_tsfile_cache_page_size_in_kb |
+| ----------- | ------------------------------------------------------------ |
+| Description | Block size (in KB) of cached files for cloud storage. |
+| Type | int |
+| Default | 20480 |
+| Effective | Restart required. |
+
+- remote_tsfile_cache_max_disk_usage_in_mb
+
+| Name | remote_tsfile_cache_max_disk_usage_in_mb |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum disk usage (in MB) of the cloud storage cache. |
+| Type | long |
+| Default | 51200 |
+| Effective | Restart required. |
+
+- object_storage_type
+
+| Name | object_storage_type |
+| ----------- | ------------------------------------------------------------ |
+| Description | Type of cloud storage. |
+| Type | String |
+| Default | AWS_S3 |
+| Effective | Restart required. |
+
+- object_storage_endpoint
+
+| Name | object_storage_endpoint |
+| ----------- | ------------------------------------------------------------ |
+| Description | Endpoint of the cloud storage. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+- object_storage_bucket
+
+| Name | object_storage_bucket |
+| ----------- | ------------------------------------------------------------ |
+| Description | Bucket name for cloud storage. |
+| Type | String |
+| Default | iotdb_data |
+| Effective | Restart required. |
+
+- object_storage_access_key
+
+| Name | object_storage_access_key |
+| ----------- | ------------------------------------------------------------ |
+| Description | Access key for cloud storage. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+- object_storage_access_secret
+
+| Name | object_storage_access_secret |
+| ----------- | ------------------------------------------------------------ |
+| Description | Access secret for cloud storage. |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+### 4.10 Tier management
+
+- dn_default_space_usage_thresholds
+
+| Name | dn_default_space_usage_thresholds |
+| ----------- | ------------------------------------------------------------ |
+| Description | Disk usage threshold; data is moved to the next tier when the usage of a tier is higher than this threshold. If tiered storage is enabled, separate the thresholds of different tiers with semicolons (";"). |
+| Type | double |
+| Default | 0.85 |
+| Effective | Hot reload |
+
+- dn_tier_full_policy
+
+| Name | dn_tier_full_policy |
+| ----------- | ------------------------------------------------------------ |
+| Description | How to handle data in the last tier when its used space exceeds its dn_default_space_usage_thresholds. |
+| Type | String |
+| Default | NULL |
+| Effective | Hot reload |
+
+- migrate_thread_count
+
+| Name | migrate_thread_count |
+| ----------- | ------------------------------------------------------------ |
+| Description | Thread pool size for migration operations in the DataNode's data directories. |
+| Type | int |
+| Default | 1 |
+| Effective | Hot reload |
+
+- tiered_storage_migrate_speed_limit_bytes_per_sec
+
+| Name | tiered_storage_migrate_speed_limit_bytes_per_sec |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum migration speed between tiers, in bytes per second. |
+| Type | int |
+| Default | 10485760 |
+| Effective | Hot reload |
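+
+A rough sketch of a two-tier layout backed by S3-style cold storage. The semicolon-separated tier syntax for `dn_data_dirs` is an assumption modeled on the threshold syntax above, and the endpoint, bucket, and credentials are placeholders:
+
+```properties
+# Hot and warm local tiers; move data off a tier at 85% / 70% usage respectively.
+dn_data_dirs=/hot/iotdb/data;/warm/iotdb/data
+dn_default_space_usage_thresholds=0.85;0.70
+# Cold data lives in object storage, cached locally.
+object_storage_type=AWS_S3
+object_storage_endpoint=s3.us-east-1.amazonaws.com
+object_storage_bucket=iotdb_data
+object_storage_access_key=EXAMPLE_KEY
+object_storage_access_secret=EXAMPLE_SECRET
+remote_tsfile_cache_dirs=data/datanode/data/cache
+```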
+
+### 4.11 REST Service Configuration
+
+- enable_rest_service
+
+| Name | enable_rest_service |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether the REST service is enabled. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- rest_service_port
+
+| Name | rest_service_port |
+| ----------- | ------------------------------------------------------------ |
+| Description | The binding port of the REST service. |
+| Type | int32 |
+| Default | 18080 |
+| Effective | Restart required. |
+
+- enable_swagger
+
+| Name | enable_swagger |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether to expose REST service interface information through Swagger, e.g., http://ip:port/swagger.json |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- rest_query_default_row_size_limit
+
+| Name | rest_query_default_row_size_limit |
+| ----------- | ------------------------------------------------------------ |
+| Description | The default row limit of a REST query response when the rowSize parameter is not given in the request. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- cache_expire_in_seconds
+
+| Name | cache_expire_in_seconds |
+| ----------- | ------------------------------------------------------------ |
+| Description | The expiration time of the user login information cache (in seconds). |
+| Type | int32 |
+| Default | 28800 |
+| Effective | Restart required. |
+
+- cache_max_num
+
+| Name | cache_max_num |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum number of users that can be stored in the user login cache. |
+| Type | int32 |
+| Default | 100 |
+| Effective | Restart required. |
+
+- cache_init_num
+
+| Name | cache_init_num |
+| ----------- | ------------------------------------------------------------ |
+| Description | The initial capacity of the user login cache. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- client_auth
+
+| Name | client_auth |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether client authentication is required. |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- trust_store_path
+
+| Name | trust_store_path |
+| ----------- | ------------------------------------------------------------ |
+| Description | SSL trust store path. |
+| Type | String |
+| Default | "" |
+| Effective | Restart required. |
+
+- trust_store_pwd
+
+| Name | trust_store_pwd |
+| ----------- | ------------------------------------------------------------ |
+| Description | SSL trust store password. |
+| Type | String |
+| Default | "" |
+| Effective | Restart required. |
+
+- idle_timeout_in_seconds
+
+| Name | idle_timeout_in_seconds |
+| ----------- | ------------------------------------------------------------ |
+| Description | SSL timeout (in seconds). |
+| Type | int32 |
+| Default | 5000 |
+| Effective | Restart required. |
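+
+A minimal sketch that turns the REST service on with its documented defaults and Swagger enabled for exploring the API:
+
+```properties
+# Expose the REST API on the default port with Swagger docs.
+enable_rest_service=true
+rest_service_port=18080
+enable_swagger=true
+```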
+
+### 4.12 Load balancing configuration
+
+- series_slot_num
+
+| Name | series_slot_num |
+| ----------- | ------------------------------------------------------------ |
+| Description | Number of SeriesPartitionSlots per Database. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Modify before the first startup. |
+
+- series_partition_executor_class
+
+| Name | series_partition_executor_class |
+| ----------- | ------------------------------------------------------------ |
+| Description | SeriesPartitionSlot executor class. |
+| Type | String |
+| Default | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor |
+| Effective | Modify before the first startup. |
+
+- schema_region_group_extension_policy
+
+| Name | schema_region_group_extension_policy |
+| ----------- | ------------------------------------------------------------ |
+| Description | The extension policy of SchemaRegionGroups for each Database. |
+| Type | string |
+| Default | AUTO |
+| Effective | Restart required. |
+
+- default_schema_region_group_num_per_database
+
+| Name | default_schema_region_group_num_per_database |
+| ----------- | ------------------------------------------------------------ |
+| Description | When schema_region_group_extension_policy=CUSTOM, this parameter is the default number of SchemaRegionGroups for each Database. When schema_region_group_extension_policy=AUTO, it is the default minimum number of SchemaRegionGroups for each Database. |
+| Type | int |
+| Default | 1 |
+| Effective | Restart required. |
+
+- schema_region_per_data_node
+
+| Name | schema_region_per_data_node |
+| ----------- | ------------------------------------------------------------ |
+| Description | Only takes effect when schema_region_group_extension_policy=AUTO. The maximum number of SchemaRegions expected to be managed by each DataNode. |
+| Type | double |
+| Default | 1.0 |
+| Effective | Restart required. |
+
+- data_region_group_extension_policy
+
+| Name | data_region_group_extension_policy |
+| ----------- | ------------------------------------------------------------ |
+| Description | The extension policy of DataRegionGroups for each Database. |
+| Type | string |
+| Default | AUTO |
+| Effective | Restart required. |
+
+- default_data_region_group_num_per_database
+
+| Name | default_data_region_group_num_per_database |
+| ----------- | ------------------------------------------------------------ |
+| Description | When data_region_group_extension_policy=CUSTOM, this parameter is the default number of DataRegionGroups for each Database. When data_region_group_extension_policy=AUTO, it is the default minimum number of DataRegionGroups for each Database. |
+| Type | int |
+| Default | 2 |
+| Effective | Restart required. |
+
+- data_region_per_data_node
+
+| Name | data_region_per_data_node |
+| ----------- | ------------------------------------------------------------ |
+| Description | Only takes effect when data_region_group_extension_policy=AUTO. The maximum number of DataRegions expected to be managed by each DataNode. |
+| Type | double |
+| Default | 5.0 |
+| Effective | Restart required. |
+
+- enable_auto_leader_balance_for_ratis_consensus
+
+| Name | enable_auto_leader_balance_for_ratis_consensus |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether to enable automatic leader balancing for the Ratis consensus protocol. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+- enable_auto_leader_balance_for_iot_consensus
+
+| Name | enable_auto_leader_balance_for_iot_consensus |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether to enable automatic leader balancing for the IoTConsensus protocol. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
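+
+As a sketch of the CUSTOM policy described above, pinning every database to exactly two DataRegionGroups instead of letting AUTO extension grow them:
+
+```properties
+# CUSTOM: each database gets exactly this many DataRegionGroups.
+data_region_group_extension_policy=CUSTOM
+default_data_region_group_num_per_database=2
+```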
+
+### 4.13 Cluster management
+
+- time_partition_origin
+
+| Name | time_partition_origin |
+| ----------- | ------------------------------------------------------------ |
+| Description | Time partition origin in milliseconds; the default is zero. |
+| Type | Long |
+| Unit | ms |
+| Default | 0 |
+| Effective | Modify before the first startup. |
+
+- time_partition_interval
+
+| Name | time_partition_interval |
+| ----------- | ------------------------------------------------------------ |
+| Description | Time partition interval in milliseconds, used to partition data inside each data region; the default equals one week. |
+| Type | Long |
+| Unit | ms |
+| Default | 604800000 |
+| Effective | Modify before the first startup. |
+
+- heartbeat_interval_in_ms
+
+| Name | heartbeat_interval_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | The heartbeat interval in milliseconds. |
+| Type | Long |
+| Unit | ms |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- disk_space_warning_threshold
+
+| Name | disk_space_warning_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | Remaining disk space ratio below which the DataNode is set to ReadOnly status. |
+| Type | double (percentage) |
+| Default | 0.05 |
+| Effective | Restart required. |
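+
+The default interval is one week: 7 * 24 * 60 * 60 * 1000 ms = 604800000. A sketch of switching to one-day partitions, which must happen before the first startup:
+
+```properties
+# One-day partitions: 24 * 60 * 60 * 1000 = 86400000 ms.
+time_partition_interval=86400000
+# Partition boundaries are counted from the epoch by default.
+time_partition_origin=0
+```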
+
+### 4.14 Memory Control Configuration
+
+- datanode_memory_proportion
+
+| Name | datanode_memory_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Memory allocation ratio: StorageEngine : QueryEngine : SchemaEngine : Consensus : StreamingEngine : Free Memory. |
+| Type | Ratio |
+| Default | 3:3:1:1:1:1 |
+| Effective | Restart required. |
+
+- schema_memory_proportion
+
+| Name | schema_memory_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Schema memory allocation ratio: SchemaRegion : SchemaCache : PartitionCache. |
+| Type | Ratio |
+| Default | 5:4:1 |
+| Effective | Restart required. |
+
+- storage_engine_memory_proportion
+
+| Name | storage_engine_memory_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Memory allocation ratio in StorageEngine: Write : Compaction. |
+| Type | Ratio |
+| Default | 8:2 |
+| Effective | Restart required. |
+
+- write_memory_proportion
+
+| Name | write_memory_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Memory allocation ratio for writing: Memtable : TimePartitionInfo. |
+| Type | Ratio |
+| Default | 19:1 |
+| Effective | Restart required. |
+
+- primitive_array_size
+
+| Name | primitive_array_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | Primitive array size (length of each array) in the array pool. |
+| Type | int32 |
+| Default | 64 |
+| Effective | Restart required. |
+
+- chunk_metadata_size_proportion
+
+| Name | chunk_metadata_size_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Ratio of compaction memory reserved for chunk metadata kept in memory during compaction. |
+| Type | Double |
+| Default | 0.1 |
+| Effective | Restart required. |
+
+- flush_proportion
+
+| Name | flush_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Ratio of memtable memory that triggers a flush to disk, 0.4 by default. If you have an extremely high write load (like batch=1000), it can be set lower than the default, e.g., 0.2. |
+| Type | Double |
+| Default | 0.4 |
+| Effective | Restart required. |
+
+- buffered_arrays_memory_proportion
+
+| Name | buffered_arrays_memory_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Ratio of memtable memory allocated for buffered arrays, 0.6 by default. |
+| Type | Double |
+| Default | 0.6 |
+| Effective | Restart required. |
+
+- reject_proportion
+
+| Name | reject_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Ratio of memtable memory at which insertions are rejected, 0.8 by default. If you have an extremely high write load (like batch=1000) and the physical memory is large enough, it can be set higher than the default, e.g., 0.9. |
+| Type | Double |
+| Default | 0.8 |
+| Effective | Restart required. |
+
+- device_path_cache_proportion
+
+| Name | device_path_cache_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Ratio of memtable memory for the DevicePathCache. DevicePathCache is the deviceId cache, keeping only one copy of the same deviceId in memory. |
+| Type | Double |
+| Default | 0.05 |
+| Effective | Restart required. |
+
+- write_memory_variation_report_proportion
+
+| Name | write_memory_variation_report_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | If the memory usage of a data region grows by more than this proportion of the memory allocated for writing, it is reported to the system. The default value is 0.001. |
+| Type | Double |
+| Default | 0.001 |
+| Effective | Restart required. |
+
+- check_period_when_insert_blocked
+
+| Name | check_period_when_insert_blocked |
+| ----------- | ------------------------------------------------------------ |
+| Description | When an insertion is rejected, the waiting period (in ms) before checking the system again, 50 by default. If insertions keep being rejected and the read load is low, this can be set larger. |
+| Type | int32 |
+| Default | 50 |
+| Effective | Restart required. |
+
+- io_task_queue_size_for_flushing
+
+| Name | io_task_queue_size_for_flushing |
+| ----------- | ------------------------------------------------------------ |
+| Description | Size of the ioTaskQueue. The default value is 10. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- enable_query_memory_estimation
+
+| Name | enable_query_memory_estimation |
+| ----------- | ------------------------------------------------------------ |
+| Description | If true, the system estimates each query's possible memory footprint before executing it and rejects the query if the estimate exceeds the current free memory. |
+| Type | bool |
+| Default | true |
+| Effective | Hot reload |
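+
+A sketch of shifting the top-level memory split toward the storage engine on a write-heavy node; the exact ratio is invented for illustration and keeps the documented six-way ordering:
+
+```properties
+# StorageEngine : QueryEngine : SchemaEngine : Consensus : StreamingEngine : Free.
+datanode_memory_proportion=4:2:1:1:1:1
+# Within the storage engine, keep the default write-heavy split.
+storage_engine_memory_proportion=8:2
+```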
+
+### 4.15 Schema Engine Configuration
+
+- schema_engine_mode
+
+| Name | schema_engine_mode |
+| ----------- | ------------------------------------------------------------ |
+| Description | The schema management mode of the schema engine. Currently supports Memory and PBTree. This configuration must be identical on all DataNodes in a cluster. |
+| Type | string |
+| Default | Memory |
+| Effective | Modify before the first startup. |
+
+- partition_cache_size
+
+| Name | partition_cache_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | Cache size for partitions. |
+| Type | Int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- sync_mlog_period_in_ms
+
+| Name | sync_mlog_period_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | The period (in milliseconds) at which the metadata log is forced to disk. If sync_mlog_period_in_ms=0, the metadata log is forced to disk after every update. Setting this parameter to 0 may slow down operations on slow disks. |
+| Type | Int64 |
+| Default | 100 |
+| Effective | Restart required. |
+
+- tag_attribute_flush_interval
+
+| Name | tag_attribute_flush_interval |
+| ----------- | ------------------------------------------------------------ |
+| Description | The number of tag and attribute records between forced flushes to disk. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Modify before the first startup. |
+
+- tag_attribute_total_size
+
+| Name | tag_attribute_total_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum size of a storage block for the tags and attributes of a single time series. |
+| Type | int32 |
+| Default | 700 |
+| Effective | Modify before the first startup. |
+
+- max_measurement_num_of_internal_request
+
+| Name | max_measurement_num_of_internal_request |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum number of measurements in an internal request. When creating time series with Session.createMultiTimeseries, a user plan whose time series count exceeds this number will be split into several plans, each with no more than this number of time series. |
+| Type | Int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- datanode_schema_cache_eviction_policy
+
+| Name | datanode_schema_cache_eviction_policy |
+| ----------- | ------------------------------------------------------------ |
+| Description | Eviction policy of the DataNodeSchemaCache. |
+| Type | String |
+| Default | FIFO |
+| Effective | Restart required. |
+
+- cluster_timeseries_limit_threshold
+
+| Name | cluster_timeseries_limit_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum number of time series allowed in the cluster. |
+| Type | Int32 |
+| Default | -1 |
+| Effective | Restart required. |
+
+- cluster_device_limit_threshold
+
+| Name | cluster_device_limit_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum number of devices allowed in the cluster. |
+| Type | Int32 |
+| Default | -1 |
+| Effective | Restart required. |
+
+- database_limit_threshold
+
+| Name | database_limit_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum number of databases allowed in the cluster. |
+| Type | Int32 |
+| Default | -1 |
+| Effective | Restart required. |
+
+### 4.16 Configurations for creating schema automatically
+
+- enable_auto_create_schema
+
+| Name | enable_auto_create_schema |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether automatic schema creation is enabled. |
+| Value | true or false |
+| Default | true |
+| Effective | Restart required. |
+
+- default_storage_group_level
+
+| Name | default_storage_group_level |
+| ----------- | ------------------------------------------------------------ |
+| Description | Database level when automatic schema creation is enabled. For example, for root.sg0.d1.s2, if the database level is 1, root.sg0 is set as the database. If the incoming path is shorter than this level, creation/insertion will fail. |
+| Value | int32 |
+| Default | 1 |
+| Effective | Restart required. |
+
+- boolean_string_infer_type
+
+| Name | boolean_string_infer_type |
+| ----------- | ------------------------------------------------------------ |
+| Description | The data type used to register a time series when a boolean string ("true" or "false") is received. |
+| Value | BOOLEAN or TEXT |
+| Default | BOOLEAN |
+| Effective | Hot reload |
+
+- integer_string_infer_type
+
+| Name | integer_string_infer_type |
+| ----------- | ------------------------------------------------------------ |
+| Description | The data type used to register a time series when an integer string is received; using FLOAT or DOUBLE may lose precision. |
+| Value | INT32, INT64, FLOAT, DOUBLE, TEXT |
+| Default | DOUBLE |
+| Effective | Hot reload |
+
+- floating_string_infer_type
+
+| Name | floating_string_infer_type |
+| ----------- | ------------------------------------------------------------ |
+| Description | The data type used to register a time series when a floating-point number string such as "6.7" is received. |
+| Value | DOUBLE, FLOAT or TEXT |
+| Default | DOUBLE |
+| Effective | Hot reload |
+
+- nan_string_infer_type
+
+| Name | nan_string_infer_type |
+| ----------- | ------------------------------------------------------------ |
+| Description | The data type used to register a time series when the literal NaN is received. |
+| Value | DOUBLE, FLOAT or TEXT |
+| Default | DOUBLE |
+| Effective | Hot reload |
+
+- default_boolean_encoding
+
+| Name | default_boolean_encoding |
+| ----------- | ------------------------------------------------------------ |
+| Description | BOOLEAN encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE |
+| Default | RLE |
+| Effective | Hot reload |
+
+- default_int32_encoding
+
+| Name | default_int32_encoding |
+| ----------- | ------------------------------------------------------------ |
+| Description | INT32 encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA |
+| Default | TS_2DIFF |
+| Effective | Hot reload |
+
+- default_int64_encoding
+
+| Name | default_int64_encoding |
+| ----------- | ------------------------------------------------------------ |
+| Description | INT64 encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA |
+| Default | TS_2DIFF |
+| Effective | Hot reload |
+
+- default_float_encoding
+
+| Name | default_float_encoding |
+| ----------- | ------------------------------------------------------------ |
+| Description | FLOAT encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE, TS_2DIFF, GORILLA |
+| Default | GORILLA |
+| Effective | Hot reload |
+
+- default_double_encoding
+
+| Name | default_double_encoding |
+| ----------- | ------------------------------------------------------------ |
+| Description | DOUBLE encoding when automatic schema creation is enabled. |
+| Value | PLAIN, RLE, TS_2DIFF, GORILLA |
+| Default | GORILLA |
+| Effective | Hot reload |
+
+- default_text_encoding
+
+| Name | default_text_encoding |
+| ----------- | ------------------------------------------------------------ |
+| Description | TEXT encoding when automatic schema creation is enabled. |
+| Value | PLAIN |
+| Default | PLAIN |
+| Effective | Hot reload |
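+
+A sketch tying the inference and encoding tables above together: inferring integer strings as INT64 rather than the default DOUBLE avoids precision loss for large integers, and TS_2DIFF (the documented default) then encodes them:
+
+```properties
+enable_auto_create_schema=true
+# Register "42"-style strings as INT64 instead of DOUBLE.
+integer_string_infer_type=INT64
+default_int64_encoding=TS_2DIFF
+```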
+
+- boolean_compressor
+
+| Name | boolean_compressor |
+| ----------- | ------------------------------------------------------------ |
+| Description | BOOLEAN compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- int32_compressor
+
+| Name | int32_compressor |
+| ----------- | ------------------------------------------------------------ |
+| Description | INT32/DATE compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- int64_compressor
+
+| Name | int64_compressor |
+| ----------- | ------------------------------------------------------------ |
+| Description | INT64/TIMESTAMP compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- float_compressor
+
+| Name | float_compressor |
+| ----------- | ------------------------------------------------------------ |
+| Description | FLOAT compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- double_compressor
+
+| Name | double_compressor |
+| ----------- | ------------------------------------------------------------ |
+| Description | DOUBLE compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- text_compressor
+
+| Name | text_compressor |
+| ----------- | ------------------------------------------------------------ |
+| Description | TEXT/BINARY/BLOB compression when automatic schema creation is enabled (supported since V2.0.6). |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+### 4.17 Query Configurations
+
+- read_consistency_level
+
+| Name | read_consistency_level |
+| ----------- | ------------------------------------------------------------ |
+| Description | The read consistency level. Currently supported levels: strong (default, read from the leader replica) and weak (read from a random replica). |
+| Type | String |
+| Default | strong |
+| Effective | Restart required. |
+
+- meta_data_cache_enable
+
+| Name | meta_data_cache_enable |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether to cache metadata (BloomFilter, ChunkMetadata and TimeSeriesMetadata). |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+- chunk_timeseriesmeta_free_memory_proportion
+
+| Name | chunk_timeseriesmeta_free_memory_proportion |
+| ----------- | ------------------------------------------------------------ |
+| Description | Read memory allocation ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others. The parameter takes the form a:b:c:d:e:f:g:h, where a through h are integers, for example 1:1:1:1:1:1:1:1 or 1:100:200:50:200:200:200:50. |
+| Type | String |
+| Default | 1 : 100 : 200 : 300 : 400 |
+| Effective | Restart required. |
+
+- enable_last_cache
+
+| Name | enable_last_cache |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether to enable the LAST cache. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+- mpp_data_exchange_core_pool_size
+
+| Name | mpp_data_exchange_core_pool_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | Core size of the MPP data exchange thread pool. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- mpp_data_exchange_max_pool_size
+
+| Name | mpp_data_exchange_max_pool_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum size of the MPP data exchange thread pool. |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- mpp_data_exchange_keep_alive_time_in_ms
+
+| Name | mpp_data_exchange_keep_alive_time_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum waiting time for MPP data exchange. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- driver_task_execution_time_slice_in_ms
+
+| Name | driver_task_execution_time_slice_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum execution time of a DriverTask. |
+| Type | int32 |
+| Default | 200 |
+| Effective | Restart required. |
+
+- max_tsblock_size_in_bytes
+
+| Name | max_tsblock_size_in_bytes |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum capacity of a TsBlock. |
+| Type | int32 |
+| Default | 131072 |
+| Effective | Restart required. |
+
+- max_tsblock_line_numbers
+
+| Name | max_tsblock_line_numbers |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum number of lines in a single TsBlock. |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- slow_query_threshold
+
+| Name | slow_query_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | Time cost threshold (in ms) for slow queries. |
+| Type | long |
+| Default | 10000 |
+| Effective | Hot reload |
+
+- query_cost_stat_window
+
+| Name | query_cost_stat_window |
+| ----------- | ------------------------------------------------------------ |
+| Description | Time window threshold (in minutes) for recording historical queries. |
+| Type | Int32 |
+| Default | 0 |
+| Effective | Hot reload |
+
+- query_timeout_threshold
+
+| Name | query_timeout_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum execution time of a query, in ms. |
+| Type | Int32 |
+| Default | 60000 |
+| Effective | Restart required. |
+
+- max_allowed_concurrent_queries
+
+| Name | max_allowed_concurrent_queries |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum number of concurrently executing queries. |
+| Type | Int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- query_thread_count
+
+| Name | query_thread_count |
+| ----------- | ------------------------------------------------------------ |
+| Description | How many threads can concurrently execute query statements. When <= 0, the CPU core number is used. |
+| Type | Int32 |
+| Default | 0 |
+| Effective | Restart required. |
+
+- degree_of_query_parallelism
+
+| Name | degree_of_query_parallelism |
+| ----------- | ------------------------------------------------------------ |
+| Description | How many pipeline drivers are created for one fragment instance. When <= 0, CPU core number / 2 is used. |
+| Type | Int32 |
+| Default | 0 |
+| Effective | Restart required. |
+
+- mode_map_size_threshold
+
+| Name | mode_map_size_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | The threshold of the count map size when calculating the MODE aggregation function. |
+| Type | Int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- batch_size
+
+| Name | batch_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | The amount of data iterated per batch on the server (the number of rows, i.e., the number of distinct timestamps). |
+| Type | Int32 |
+| Default | 100000 |
+| Effective | Restart required. |
+
+- sort_buffer_size_in_bytes
+
+| Name | sort_buffer_size_in_bytes |
+| ----------- | ------------------------------------------------------------ |
+| Description | The memory for external sort in the sort operator; when the data size is smaller than sort_buffer_size_in_bytes, the sort operator uses in-memory sort. |
+| Type | long |
+| Default | 1048576 (before V2.0.6); 0 (since V2.0.6). If `sort_buffer_size_in_bytes <= 0`, the default value is used, where `default value = min(32MB, memory for query operators / query_thread_count / 2)`; if `sort_buffer_size_in_bytes > 0`, the specified value is used. |
+| Effective | Hot reload |
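+
+To make the V2.0.6 default formula concrete: suppose the query-operator memory pool is 1024 MB and query_thread_count resolves to 16 (both numbers invented for the arithmetic); then min(32MB, 1024MB / 16 / 2) = min(32MB, 32MB) = 32MB per sort buffer. A sketch:
+
+```properties
+# <= 0 (the V2.0.6+ default): use min(32MB, operator memory / query_thread_count / 2).
+sort_buffer_size_in_bytes=0
+# Or pin an explicit 16 MB buffer: 16 * 1024 * 1024 = 16777216 bytes.
+# sort_buffer_size_in_bytes=16777216
+```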
+
+- merge_threshold_of_explain_analyze
+
+| Name | merge_threshold_of_explain_analyze |
+| ----------- | ------------------------------------------------------------ |
+| Description | The threshold of the operator count in the result set of EXPLAIN ANALYZE; if the number of operators in the result set is larger than this threshold, operators will be merged. |
+| Type | int |
+| Default | 10 |
+| Effective | Hot reload |
+
+### 4.18 TTL Configuration
+
+- ttl_check_interval
+
+| Name | ttl_check_interval |
+| ----------- | ------------------------------------------------------------ |
+| Description | The interval of the TTL check task in each database. The TTL check task inspects and selects files with a high volume of expired data for compaction. The default is 2 hours. |
+| Type | int |
+| Default | 7200000 |
+| Effective | Restart required. |
+
+- max_expired_time
+
+| Name | max_expired_time |
+| ----------- | ------------------------------------------------------------ |
+| Description | The maximum expiration time of a device that has a TTL; the default is 1 month. If the elapsed data time (current timestamp minus the maximum data timestamp of the device in the file) of such a device exceeds this value, the file will be cleaned by compaction. |
+| Type | int |
+| Default | 2592000000 |
+| Effective | Restart required. |
+
+- expired_data_ratio
+
+| Name | expired_data_ratio |
+| ----------- | ------------------------------------------------------------ |
+| Description | The expired device ratio. If the ratio of expired devices in a file exceeds this value, the expired data of the file will be cleaned by compaction. |
+| Type | float |
+| Default | 0.3 |
+| Effective | Restart required. |
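+
+A sketch of a more aggressive TTL cleanup schedule; the hourly interval (60 * 60 * 1000 = 3600000 ms) and the lowered ratio are illustrative values, not recommendations:
+
+```properties
+# Check each database for expired data every hour instead of every 2 hours.
+ttl_check_interval=3600000
+# Rewrite a file once 20% of its devices hold expired data.
+expired_data_ratio=0.2
+```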
+
+### 4.19 Storage Engine Configuration
+
+- timestamp_precision
+
+| Name | timestamp_precision |
+| ----------- | ------------------------------------------------------------ |
+| Description | Sets the timestamp precision to "ms", "us" or "ns". |
+| Type | String |
+| Default | ms |
+| Effective | Modify before the first startup. |
+
+- timestamp_precision_check_enabled
+
+| Name | timestamp_precision_check_enabled |
+| ----------- | ------------------------------------------------------------ |
+| Description | When the timestamp precision check is enabled, timestamps with more than 13 digits under ms precision, or more than 16 digits under us precision, are rejected on insertion. |
+| Type | Boolean |
+| Default | true |
+| Effective | Modify before the first startup. |
+
+- max_waiting_time_when_insert_blocked
+
+| Name | max_waiting_time_when_insert_blocked |
+| ----------- | ------------------------------------------------------------ |
+| Description | When the waiting time (in ms) of an insertion exceeds this value, an exception is thrown. 10000 by default. |
+| Type | Int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- handle_system_error
+
+| Name | handle_system_error |
+| ----------- | ------------------------------------------------------------ |
+| Description | What the system does when an unrecoverable error occurs. |
+| Type | String |
+| Default | CHANGE_TO_READ_ONLY |
+| Effective | Restart required. |
+
+- enable_timed_flush_seq_memtable
+
+| Name | enable_timed_flush_seq_memtable |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether to enable timed flushing of sequence TsFiles' memtables. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- seq_memtable_flush_interval_in_ms
+
+| Name | seq_memtable_flush_interval_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | If a memtable's last update time is older than the current time minus this value, the memtable is flushed to disk. |
+| Type | long |
+| Default | 600000 |
+| Effective | Hot reload |
+
+- seq_memtable_flush_check_interval_in_ms
+
+| Name | seq_memtable_flush_check_interval_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | The interval at which the system checks whether sequence memtables need flushing. |
+| Type | long |
+| Default | 30000 |
+| Effective | Hot reload |
+
+- enable_timed_flush_unseq_memtable
+
+| Name | enable_timed_flush_unseq_memtable |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether to enable timed flushing of unsequence TsFiles' memtables. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- unseq_memtable_flush_interval_in_ms
+
+| Name | unseq_memtable_flush_interval_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | If a memtable's last update time is older than the current time minus this value, the memtable is flushed to disk. |
+| Type | long |
+| Default | 600000 |
+| Effective | Hot reload |
+
+- unseq_memtable_flush_check_interval_in_ms
+
+| Name | unseq_memtable_flush_check_interval_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | The interval at which the system checks whether unsequence memtables need flushing. |
+| Type | long |
+| Default | 30000 |
+| Effective | Hot reload |
+
+- tvlist_sort_algorithm
+
+| Name | tvlist_sort_algorithm |
+| ----------- | ------------------------------------------------------------ |
+| Description | The sort algorithm used in the memtable's TVList. |
+| Type | String |
+| Default | TIM |
+| Effective | Restart required. |
+
+- avg_series_point_number_threshold
+
+| Name | avg_series_point_number_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | When the average point number of time series in a memtable exceeds this value, the memtable is flushed to disk. |
+| Type | int32 |
+| Default | 100000 |
+| Effective | Restart required. |
+
+- flush_thread_count
+
+| Name | flush_thread_count |
+| ----------- | ------------------------------------------------------------ |
+| Description | How many threads can flush concurrently. When <= 0, the CPU core number is used. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Restart required. |
+
+- enable_partial_insert
+
+| Name | enable_partial_insert |
+| ----------- | ------------------------------------------------------------ |
+| Description | In one insert (one device, one timestamp, multiple measurements), if partial insert is enabled, the failure of one measurement does not affect the other measurements. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+- recovery_log_interval_in_ms
+
+| Name | recovery_log_interval_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | The interval at which the recovery progress of each data region (virtual storage group) is logged when starting IoTDB. |
+| Type | Int32 |
+| Default | 5000 |
+| Effective | Restart required. |
+
+- 0.13_data_insert_adapt
+
+| Name | 0.13_data_insert_adapt |
+| ----------- | ------------------------------------------------------------ |
+| Description | If a v0.13 client is used to insert data, set this configuration to true. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- enable_tsfile_validation
+
+| Name | enable_tsfile_validation |
+| ----------- | ------------------------------------------------------------ |
+| Description | Verify that TsFiles generated by flush, load, and compaction are correct. |
+| Type | boolean |
+| Default | false |
+| Effective | Hot reload |
+
+- tier_ttl_in_ms
+
+| Name | tier_ttl_in_ms |
+| ----------- | ------------------------------------------------------------ |
+| Description | Default TTL of each tier. When the age of data exceeds this threshold, the data is migrated to the next tier. |
+| Type | long |
+| Default | -1 |
+| Effective | Restart required. |
+
+- max_object_file_size_in_byte
+
+| Name | max_object_file_size_in_byte |
+| ----------- | ------------------------------------------------------------ |
+| Description | Maximum size limit for a single object file (supported since V2.0.8). |
+| Type | long |
+| Default | 4294967296 (4 GB in bytes) |
+| Effective | Hot reload |
+
+- restrict_object_limit
+
+| Name | restrict_object_limit |
+| ----------- | ------------------------------------------------------------ |
+| Description | Controls naming restrictions for the `OBJECT` type (supported since V2.0.8). By default there are no special restrictions on table names, column names, or device identifiers. When set to `true` and the table contains `OBJECT` columns, the following restrictions apply:<br>1. Naming rules: values in TAG columns, table names, and field names must not be `.` or `..`; the character sequences `./` and `.\` are prohibited, otherwise metadata creation will fail; names containing characters unsupported by the filesystem will cause write errors.<br>2. Case sensitivity: if the underlying filesystem is case-insensitive, device identifiers such as `'d1'` and `'D1'` are treated as identical; creating similar identifiers may overwrite `OBJECT` data files, leading to data corruption.<br>3. Storage path: the actual storage path format is `${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin` |
+| Type | boolean |
+| Default | false |
+| Effective | Modify before the first startup. |
+
+### 4.20 Compaction Configurations
+
+- enable_seq_space_compaction
+
+| Name | enable_seq_space_compaction |
+| ----------- | ------------------------------------------------------------ |
+| Description | Sequence space compaction: compacts only sequence files. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- enable_unseq_space_compaction
+
+| Name | enable_unseq_space_compaction |
+| ----------- | ------------------------------------------------------------ |
+| Description | Unsequence space compaction: compacts only unsequence files. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- enable_cross_space_compaction
+
+| Name | enable_cross_space_compaction |
+| ----------- | ------------------------------------------------------------ |
+| Description | Cross space compaction: compacts unsequence files into the overlapped sequence files. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- enable_auto_repair_compaction
+
+| Name | enable_auto_repair_compaction |
+| ----------- | ------------------------------------------------------------ |
+| Description | Whether to automatically repair unsorted files by compaction. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- cross_selector
+
+| Name | cross_selector |
+| ----------- | ------------------------------------------------------------ |
+| Description | The selector of cross space compaction tasks. |
+| Type | String |
+| Default | rewrite |
+| Effective | Restart required. |
+
+- cross_performer
+
+| Name | cross_performer |
+| ----------- | ------------------------------------------------------------ |
+| Description | The compaction performer of cross space compaction tasks. Options: read_point, fast. |
+| Type | String |
+| Default | fast |
+| Effective | Hot reload |
+
+- inner_seq_selector
+
+| Name | inner_seq_selector |
+| ----------- | ------------------------------------------------------------ |
+| Description | The selector of inner sequence space compaction tasks. Options: size_tiered_single_target, size_tiered_multi_target. |
+| Type | String |
+| Default | size_tiered_multi_target |
+| Effective | Hot reload |
+
+- inner_seq_performer
+
+| Name | inner_seq_performer |
+| ----------- | ------------------------------------------------------------ |
+| Description | The performer of inner sequence space compaction tasks. Options: read_chunk, fast. |
+| Type | String |
+| Default | read_chunk |
+| Effective | Hot reload |
+
+- inner_unseq_selector
+
+| Name | inner_unseq_selector |
+| ----------- | ------------------------------------------------------------ |
+| Description | The selector of inner unsequence space compaction tasks. Options: size_tiered_single_target, size_tiered_multi_target. |
+| Type | String |
+| Default | size_tiered_multi_target |
+| Effective | Hot reload |
+
+- inner_unseq_performer
+
+| Name | inner_unseq_performer |
+| ----------- | ------------------------------------------------------------ |
+| Description | The performer of inner unsequence space compaction tasks. Options: read_point, fast. |
+| Type | String |
+| Default | fast |
+| Effective | Hot reload |
+
+- compaction_priority
+
+| Name | compaction_priority |
+| ----------- | ------------------------------------------------------------ |
+| Description | The priority of compaction execution. INNER_CROSS: prioritize inner space compaction, reducing the number of files first. CROSS_INNER: prioritize cross space compaction, eliminating unsequence files first. BALANCE: alternate between the two compaction types. |
+| Type | String |
+| Default | INNER_CROSS |
+| Effective | Restart required. |
+
+- candidate_compaction_task_queue_size
+
+| Name | candidate_compaction_task_queue_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | The size of the candidate compaction task queue. |
+| Type | int32 |
+| Default | 50 |
+| Effective | Restart required. |
+
+- target_compaction_file_size
+
+| Name | target_compaction_file_size |
+| ----------- | ------------------------------------------------------------ |
+| Description | This parameter is used in two places: 1. The target TsFile size of inner space compaction. 2. The candidate size of sequence TsFiles in cross space compaction, which must be smaller than target_compaction_file_size * 1.5. In most cases the target file size of cross compaction does not exceed this threshold, and when it does, it will not be much larger. |
+| Type | Int64 |
+| Default | 2147483648 |
+| Effective | Hot reload |
+
+- inner_compaction_total_file_size_threshold
+
+| Name | inner_compaction_total_file_size_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | The total file size limit in inner space compaction. |
+| Type | int64 |
+| Default | 10737418240 |
+| Effective | Hot reload |
+
+- inner_compaction_total_file_num_threshold
+
+| Name | inner_compaction_total_file_num_threshold |
+| ----------- | ------------------------------------------------------------ |
+| Description | The total file number limit in inner space compaction. |
+| Type | int32 |
+| Default | 100 |
+| Effective | Hot reload |
+
+- inner_compaction_total_file_num_threshold
+
+| Name | inner_compaction_total_file_num_threshold |
+| ----------- | ------------- |
+| Description | The total file num limit in inner space compaction. |
+| Type | int32 |
+| Default | 100 |
+| Effective | Hot reload |
+
+- max_level_gap_in_inner_compaction
+
+| Name | max_level_gap_in_inner_compaction |
+| ----------- | ------------- |
+| Description | The max level gap in inner compaction selection |
+| Type | int32 |
+| Default | 2 |
+| Effective | Hot reload |
+
+- target_chunk_size
+
+| Name | target_chunk_size |
+| ----------- | ------------- |
+| Description | The target chunk size in flushing and compaction. If the size of a timeseries in a memtable exceeds this, the data will be flushed to multiple chunks. |
+| Type | Int64 |
+| Default | 1600000 |
+| Effective | Restart required. |
+
+- target_chunk_point_num
+
+| Name | target_chunk_point_num |
+| ----------- | ------------- |
+| Description | The target number of points in one chunk in flushing and compaction. If the point number of a timeseries in a memtable exceeds this, the data will be flushed to multiple chunks. |
+| Type | Int64 |
+| Default | 100000 |
+| Effective | Restart required. |
+
+- chunk_size_lower_bound_in_compaction
+
+| Name | chunk_size_lower_bound_in_compaction |
+| ----------- | ------------- |
+| Description | If the chunk size is lower than this threshold, it will be deserialized into points |
+| Type | Int64 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- chunk_point_num_lower_bound_in_compaction
+
+| Name | chunk_point_num_lower_bound_in_compaction |
+| ----------- | ------------- |
+| Description | If the chunk point num is lower than this threshold, it will be deserialized into points |
+| Type | Int64 |
+| Default | 100 |
+| Effective | Restart required. |
+
+- inner_compaction_candidate_file_num
+
+| Name | inner_compaction_candidate_file_num |
+| ----------- | ------------- |
+| Description | The file num requirement when selecting inner space compaction candidate files |
+| Type | int32 |
+| Default | 30 |
+| Effective | Hot reload |
+
+- max_cross_compaction_candidate_file_num
+
+| Name | max_cross_compaction_candidate_file_num |
+| ----------- | ------------- |
+| Description | The max file num when selecting cross space compaction candidate files |
+| Type | int32 |
+| Default | 500 |
+| Effective | Hot reload |
+
+- max_cross_compaction_candidate_file_size
+
+| Name | max_cross_compaction_candidate_file_size |
+| ----------- | ------------- |
+| Description | The max total size when selecting cross space compaction candidate files |
+| Type | Int64 |
+| Default | 5368709120 |
+| Effective | Hot reload |
+
+- min_cross_compaction_unseq_file_level
+
+| Name | min_cross_compaction_unseq_file_level |
+| ----------- | ------------- |
+| Description | The min inner compaction level of an unsequence file that can be selected as a candidate |
+| Type | int32 |
+| Default | 1 |
+| Effective | Hot reload |
+
+- compaction_thread_count
+
+| Name | compaction_thread_count |
+| ----------- | ------------- |
+| Description | How many threads will be set up to perform compaction |
+| Type | int32 |
+| Default | 10 |
+| Effective | Hot reload |
+
+- compaction_max_aligned_series_num_in_one_batch
+
+| Name | compaction_max_aligned_series_num_in_one_batch |
+| ----------- | ------------- |
+| Description | How many chunks will be compacted in one batch in aligned series compaction |
+| Type | int32 |
+| Default | 10 |
+| Effective | Hot reload |
+
+- compaction_schedule_interval_in_ms
+
+| Name | compaction_schedule_interval_in_ms |
+| ----------- | ------------- |
+| Description | The interval of compaction task scheduling |
+| Type | Int64 |
+| Default | 60000 |
+| Effective | Restart required. |
+
+- compaction_write_throughput_mb_per_sec
+
+| Name | compaction_write_throughput_mb_per_sec |
+| ----------- | ------------- |
+| Description | The limit of write throughput that merge can reach per second |
+| Type | int32 |
+| Default | 16 |
+| Effective | Restart required. |
+
+- compaction_read_throughput_mb_per_sec
+
+| Name | compaction_read_throughput_mb_per_sec |
+| ----------- | ------------- |
+| Description | The limit of read throughput that merge can reach per second |
+| Type | int32 |
+| Default | 0 |
+| Effective | Hot reload |
+
+- compaction_read_operation_per_sec
+
+| Name | compaction_read_operation_per_sec |
+| ----------- | ------------- |
+| Description | The limit of read operations that merge can reach per second |
+| Type | int32 |
+| Default | 0 |
+| Effective | Hot reload |
+
+- sub_compaction_thread_count
+
+| Name | sub_compaction_thread_count |
+| ----------- | ------------- |
+| Description | The number of sub compaction threads to be set up to perform compaction. |
+| Type | int32 |
+| Default | 4 |
+| Effective | Hot reload |
+
+- inner_compaction_task_selection_disk_redundancy
+
+| Name | inner_compaction_task_selection_disk_redundancy |
+| ----------- | ------------- |
+| Description | Redundancy value of disk availability, only used for inner compaction. |
+| Type | double |
+| Default | 0.05 |
+| Effective | Hot reload |
+
+- inner_compaction_task_selection_mods_file_threshold
+
+| Name | inner_compaction_task_selection_mods_file_threshold |
+| ----------- | ------------- |
+| Description | Mods file size threshold, only used for inner compaction. |
+| Type | long |
+| Default | 131072 |
+| Effective | Hot reload |
+
+- compaction_schedule_thread_num
+
+| Name | compaction_schedule_thread_num |
+| ----------- | ------------- |
+| Description | The number of threads to be set up to select compaction tasks. |
+| Type | int32 |
+| Default | 4 |
+| Effective | Hot reload |
+
+### 4.21 Write Ahead Log Configuration
+
+- wal_mode
+
+| Name | wal_mode |
+| ----------- | ------------- |
+| Description | The WAL write mode. DISABLE: the system disables the WAL. SYNC: the system submits the WAL synchronously; a write request does not return until its WAL is fsynced to disk successfully. ASYNC: the system submits the WAL asynchronously; a write request returns immediately regardless of whether its WAL has been fsynced to disk successfully. |
+| Type | String |
+| Default | ASYNC |
+| Effective | Restart required. |
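+
+As a sketch of the durability/latency trade-off (the values are for illustration only): switching to SYNC makes every write wait for an fsync of its WAL record:
+
+```properties
+# Every write request blocks until its WAL is fsynced to disk
+wal_mode=SYNC
+# In SYNC mode, wait up to 3 ms before calling fsync so writes can batch
+wal_sync_mode_fsync_delay_in_ms=3
+```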
+
+- max_wal_nodes_num
+
+| Name | max_wal_nodes_num |
+| ----------- | ------------- |
+| Description | Each WAL node corresponds to one WAL directory. The default value 0 means the number is determined by the system and lies in the range [data region num / 2, data region num]. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Restart required. |
+
+- wal_async_mode_fsync_delay_in_ms
+
+| Name | wal_async_mode_fsync_delay_in_ms |
+| ----------- | ------------- |
+| Description | Duration a WAL flush operation will wait before calling fsync in async mode |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Hot reload |
+
+- wal_sync_mode_fsync_delay_in_ms
+
+| Name | wal_sync_mode_fsync_delay_in_ms |
+| ----------- | ------------- |
+| Description | Duration a WAL flush operation will wait before calling fsync in sync mode |
+| Type | int32 |
+| Default | 3 |
+| Effective | Hot reload |
+
+- wal_buffer_size_in_byte
+
+| Name | wal_buffer_size_in_byte |
+| ----------- | ------------- |
+| Description | Buffer size of each WAL node |
+| Type | int32 |
+| Default | 33554432 |
+| Effective | Restart required. |
+
+- wal_buffer_queue_capacity
+
+| Name | wal_buffer_queue_capacity |
+| ----------- | ------------- |
+| Description | Buffer capacity of each WAL queue |
+| Type | int32 |
+| Default | 500 |
+| Effective | Restart required. |
+
+- wal_file_size_threshold_in_byte
+
+| Name | wal_file_size_threshold_in_byte |
+| ----------- | ------------- |
+| Description | Size threshold of each WAL file |
+| Type | int32 |
+| Default | 31457280 |
+| Effective | Hot reload |
+
+- wal_min_effective_info_ratio
+
+| Name | wal_min_effective_info_ratio |
+| ----------- | ------------- |
+| Description | Minimum ratio of effective information in WAL files |
+| Type | double |
+| Default | 0.1 |
+| Effective | Hot reload |
+
+- wal_memtable_snapshot_threshold_in_byte
+
+| Name | wal_memtable_snapshot_threshold_in_byte |
+| ----------- | ------------- |
+| Description | MemTable size threshold for triggering a MemTable snapshot in the WAL |
+| Type | int64 |
+| Default | 8388608 |
+| Effective | Hot reload |
+
+- max_wal_memtable_snapshot_num
+
+| Name | max_wal_memtable_snapshot_num |
+| ----------- | ------------- |
+| Description | A MemTable's max snapshot number in the WAL |
+| Type | int32 |
+| Default | 1 |
+| Effective | Hot reload |
+
+- delete_wal_files_period_in_ms
+
+| Name | delete_wal_files_period_in_ms |
+| ----------- | ------------- |
+| Description | The period at which outdated WAL files are deleted |
+| Type | int64 |
+| Default | 20000 |
+| Effective | Hot reload |
+
+- wal_throttle_threshold_in_byte
+
+| Name | wal_throttle_threshold_in_byte |
+| ----------- | ------------- |
+| Description | The minimum size of WAL files when throttling down in IoTConsensus |
+| Type | long |
+| Default | 53687091200 |
+| Effective | Hot reload |
+
+- iot_consensus_cache_window_time_in_ms
+
+| Name | iot_consensus_cache_window_time_in_ms |
+| ----------- | ------------- |
+| Description | Maximum wait time of the write cache in IoTConsensus |
+| Type | long |
+| Default | -1 |
+| Effective | Hot reload |
+
+- enable_wal_compression
+
+| Name | enable_wal_compression |
+| ----------- | ------------- |
+| Description | Enable Write Ahead Log compression. |
+| Type | boolean |
+| Default | true |
+| Effective | Hot reload |
+
+### 4.22 IoTConsensus Configuration
+
+- data_region_iot_max_log_entries_num_per_batch
+
+| Name | data_region_iot_max_log_entries_num_per_batch |
+| ----------- | ------------- |
+| Description | The maximum number of log entries in an IoTConsensus batch |
+| Type | int32 |
+| Default | 1024 |
+| Effective | Restart required. |
+
+- data_region_iot_max_size_per_batch
+
+| Name | data_region_iot_max_size_per_batch |
+| ----------- | ------------- |
+| Description | The maximum size of an IoTConsensus batch |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- data_region_iot_max_pending_batches_num
+
+| Name | data_region_iot_max_pending_batches_num |
+| ----------- | ------------- |
+| Description | The maximum number of pending batches in IoTConsensus |
+| Type | int32 |
+| Default | 5 |
+| Effective | Restart required. |
+
+- data_region_iot_max_memory_ratio_for_queue
+
+| Name | data_region_iot_max_memory_ratio_for_queue |
+| ----------- | ------------- |
+| Description | The maximum memory ratio for the queue in IoTConsensus |
+| Type | double |
+| Default | 0.6 |
+| Effective | Restart required. |
+
+- region_migration_speed_limit_bytes_per_second
+
+| Name | region_migration_speed_limit_bytes_per_second |
+| ----------- | ------------- |
+| Description | The maximum transfer size in bytes per second for region migration |
+| Type | long |
+| Default | 33554432 |
+| Effective | Restart required. |
+
+### 4.23 TsFile Configurations
+
+- group_size_in_byte
+
+| Name | group_size_in_byte |
+| ----------- | ------------- |
+| Description | The maximum number of bytes written to disk each time the data in memory is written to disk |
+| Type | int32 |
+| Default | 134217728 |
+| Effective | Hot reload |
+
+- page_size_in_byte
+
+| Name | page_size_in_byte |
+| ----------- | ------------- |
+| Description | The memory size for each series writer to pack a page; the default value is 64 KB |
+| Type | int32 |
+| Default | 65536 |
+| Effective | Hot reload |
+
+- max_number_of_points_in_page
+
+| Name | max_number_of_points_in_page |
+| ----------- | ------------- |
+| Description | The maximum number of data points in a page |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Hot reload |
+
+- pattern_matching_threshold
+
+| Name | pattern_matching_threshold |
+| ----------- | ------------- |
+| Description | The threshold for pattern matching in regex |
+| Type | int32 |
+| Default | 1000000 |
+| Effective | Hot reload |
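+
+To illustrate how these write-path sizing parameters nest (the values below are the defaults, shown only for orientation): a memory-to-disk write is bounded by `group_size_in_byte`, each series writer packs pages of at most `page_size_in_byte`, and a page holds at most `max_number_of_points_in_page` points:
+
+```properties
+# At most 128 MB written to disk per flush of in-memory data
+group_size_in_byte=134217728
+# 64 KB page buffer per series writer
+page_size_in_byte=65536
+# At most 10000 data points per page
+max_number_of_points_in_page=10000
+```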
+
+- float_precision
+
+| Name | float_precision |
+| ----------- | ------------- |
+| Description | Floating-point precision of query results. Only effective for RLE and TS_2DIFF encodings. Due to the limitation of machine precision, some values may not be interpreted strictly. |
+| Type | int32 |
+| Default | 2 |
+| Effective | Hot reload |
+
+- value_encoder
+
+| Name | value_encoder |
+| ----------- | ------------- |
+| Description | Encoder of value series. The default value is PLAIN; for INT32 and INT64 data types, TS_2DIFF, RLE (run-length encoding), GORILLA, and ZIGZAG are also supported. |
+| Type | String |
+| Default | PLAIN |
+| Effective | Hot reload |
+
+- compressor
+
+| Name | compressor |
+| ----------- | ------------- |
+| Description | Data compression method; supports UNCOMPRESSED, SNAPPY, ZSTD, LZMA2, and LZ4. It is also used as the default compressor of the time column in aligned timeseries. |
+| Type | String |
+| Default | LZ4 |
+| Effective | Hot reload |
+
+- encrypt_flag
+
+| Name | encrypt_flag |
+| ----------- | ------------- |
+| Description | Enable data encryption |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- encrypt_type
+
+| Name | encrypt_type |
+| ----------- | ------------- |
+| Description | The method of data encryption |
+| Type | String |
+| Default | org.apache.tsfile.encrypt.UNENCRYPTED |
+| Effective | Restart required. |
+
+- encrypt_key_path
+
+| Name | encrypt_key_path |
+| ----------- | ------------- |
+| Description | The path of the key for data encryption |
+| Type | String |
+| Default | None |
+| Effective | Restart required. |
+
+### 4.24 Authorization Configuration
+
+- authorizer_provider_class
+
+| Name | authorizer_provider_class |
+| ----------- | ------------- |
+| Description | Which class serves authorization. |
+| Type | String |
+| Default | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer |
+| Effective | Restart required. |
+| Other values | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer |
+
+- openID_url
+
+| Name | openID_url |
+| ----------- | ------------- |
+| Description | The URL of the openID server. If OpenIdAuthorizer is enabled, openID_url must be set. |
+| Type | String (an HTTP link) |
+| Default | None |
+| Effective | Restart required. |
+
+- iotdb_server_encrypt_decrypt_provider
+
+| Name | iotdb_server_encrypt_decrypt_provider |
+| ----------- | ------------- |
+| Description | Encryption provider class |
+| Type | String |
+| Default | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt |
+| Effective | Modify before the first startup. |
+
+- iotdb_server_encrypt_decrypt_provider_parameter
+
+| Name | iotdb_server_encrypt_decrypt_provider_parameter |
+| ----------- | ------------- |
+| Description | Encryption provider class parameter |
+| Type | String |
+| Default | None |
+| Effective | Modify before the first startup. |
+
+- author_cache_size
+
+| Name | author_cache_size |
+| ----------- | ------------- |
+| Description | Cache size of users and roles |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- author_cache_expire_time
+
+| Name | author_cache_expire_time |
+| ----------- | ------------- |
+| Description | Cache expire time of users and roles |
+| Type | int32 |
+| Default | 30 |
+| Effective | Restart required. |
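+
+For example, switching to OpenID-based authorization combines the two parameters above (the URL below is a placeholder, not a real endpoint):
+
+```properties
+# Use the OpenID authorizer instead of the default local-file authorizer
+authorizer_provider_class=org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer
+# Required whenever OpenIdAuthorizer is enabled
+openID_url=http://openid.example.com/auth
+```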
+
+### 4.25 UDF Configuration
+
+- udf_initial_byte_array_length_for_memory_control
+
+| Name | udf_initial_byte_array_length_for_memory_control |
+| ----------- | ------------- |
+| Description | Used to estimate the memory usage of text fields in a UDF query. It is recommended to set this value slightly larger than the average length of all text records. |
+| Type | int32 |
+| Default | 48 |
+| Effective | Restart required. |
+
+- udf_memory_budget_in_mb
+
+| Name | udf_memory_budget_in_mb |
+| ----------- | ------------- |
+| Description | How much memory may be used in ONE UDF query (in MB). The upper limit is 20% of the memory allocated for reads. |
+| Type | Float |
+| Default | 30.0 |
+| Effective | Restart required. |
+
+- udf_reader_transformer_collector_memory_proportion
+
+| Name | udf_reader_transformer_collector_memory_proportion |
+| ----------- | ------------- |
+| Description | UDF memory allocation ratio. The parameter takes the form a:b:c, where a, b, and c are integers. |
+| Type | String |
+| Default | 1:1:1 |
+| Effective | Restart required. |
+
+- udf_lib_dir
+
+| Name | udf_lib_dir |
+| ----------- | ------------- |
+| Description | The UDF lib directory |
+| Type | String |
+| Default | ext/udf(Windows:ext\\udf) |
+| Effective | Restart required. |
+
+### 4.26 Trigger Configuration
+
+- trigger_lib_dir
+
+| Name | trigger_lib_dir |
+| ----------- | ------------- |
+| Description | The trigger lib directory |
+| Type | String |
+| Default | ext/trigger |
+| Effective | Restart required. |
+
+- stateful_trigger_retry_num_when_not_found
+
+| Name | stateful_trigger_retry_num_when_not_found |
+| ----------- | ------------- |
+| Description | How many times to retry finding an instance of a stateful trigger on DataNodes |
+| Type | Int32 |
+| Default | 3 |
+| Effective | Restart required. |
+
+### 4.27 Select-Into Configuration
+
+- into_operation_buffer_size_in_byte
+
+| Name | into_operation_buffer_size_in_byte |
+| ----------- | ------------- |
+| Description | The maximum memory occupied by the data to be written when executing select-into statements. |
+| Type | long |
+| Default | 104857600 |
+| Effective | Hot reload |
+
+- select_into_insert_tablet_plan_row_limit
+
+| Name | select_into_insert_tablet_plan_row_limit |
+| ----------- | ------------- |
+| Description | The maximum number of rows that can be processed in one insert-tablet plan when executing select-into statements. |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Hot reload |
+
+- into_operation_execution_thread_count
+
+| Name | into_operation_execution_thread_count |
+| ----------- | ------------- |
+| Description | The number of threads in the thread pool that executes insert-tablet tasks |
+| Type | int32 |
+| Default | 2 |
+| Effective | Restart required. |
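+
+A sketch bounding select-into resource usage with the defaults documented above:
+
+```properties
+# Cap buffered data at 100 MB while a select-into statement runs
+into_operation_buffer_size_in_byte=104857600
+# Emit at most 10000 rows per insert-tablet plan
+select_into_insert_tablet_plan_row_limit=10000
+```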
+
+### 4.28 Continuous Query Configuration
+
+- continuous_query_submit_thread_count
+
+| Name | continuous_query_submit_thread_count |
+| ----------- | ------------- |
+| Description | The number of threads in the scheduled thread pool that periodically submits continuous query tasks |
+| Type | int32 |
+| Default | 2 |
+| Effective | Restart required. |
+
+- continuous_query_min_every_interval_in_ms
+
+| Name | continuous_query_min_every_interval_in_ms |
+| ----------- | ------------- |
+| Description | The minimum value of the continuous query execution interval |
+| Type | long (duration) |
+| Default | 1000 |
+| Effective | Restart required. |
+
+### 4.29 Pipe Configuration
+
+- pipe_lib_dir
+
+| Name | pipe_lib_dir |
+| ----------- | ------------- |
+| Description | The pipe lib directory. |
+| Type | string |
+| Default | ext/pipe |
+| Effective | Cannot be modified |
+
+- pipe_subtask_executor_max_thread_num
+
+| Name | pipe_subtask_executor_max_thread_num |
+| ----------- | ------------- |
+| Description | The maximum number of threads that can be used to execute pipe subtasks in PipeSubtaskExecutor. The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)). |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+- pipe_sink_timeout_ms
+
+| Name | pipe_sink_timeout_ms |
+| ----------- | ------------- |
+| Description | The connection timeout (in milliseconds) for the thrift client. |
+| Type | int |
+| Default | 900000 |
+| Effective | Restart required. |
+
+- pipe_sink_selector_number
+
+| Name | pipe_sink_selector_number |
+| ----------- | ------------- |
+| Description | The maximum number of selectors that can be used in the sink. It is recommended to set this value to less than or equal to pipe_sink_max_client_number. |
+| Type | int |
+| Default | 4 |
+| Effective | Restart required. |
+
+- pipe_sink_max_client_number
+
+| Name | pipe_sink_max_client_number |
+| ----------- | ------------- |
+| Description | The maximum number of clients that can be used in the sink. |
+| Type | int |
+| Default | 16 |
+| Effective | Restart required. |
+
+- pipe_air_gap_receiver_enabled
+
+| Name | pipe_air_gap_receiver_enabled |
+| ----------- | ------------- |
+| Description | Whether to enable receiving pipe data through an air gap. The receiver can only return 0 or 1 in TCP mode to indicate whether the data was received successfully. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- pipe_air_gap_receiver_port
+
+| Name | pipe_air_gap_receiver_port |
+| ----------- | ------------- |
+| Description | The port on which the server receives pipe data through an air gap. |
+| Type | int |
+| Default | 9780 |
+| Effective | Restart required. |
+
+- pipe_all_sinks_rate_limit_bytes_per_second
+
+| Name | pipe_all_sinks_rate_limit_bytes_per_second |
+| ----------- | ------------- |
+| Description | The total number of bytes that all pipe sinks can transfer per second. A value less than or equal to 0 means no limit; the default is -1, which means no limit. |
+| Type | double |
+| Default | -1 |
+| Effective | Hot reload |
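+
+As an illustration, throttling the combined throughput of all pipe sinks to about 10 MB/s (the value is arbitrary, chosen only for demonstration):
+
+```properties
+# Combined limit across all pipe sinks; <= 0 disables the limit
+pipe_all_sinks_rate_limit_bytes_per_second=10485760
+```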
+
+### 4.30 RatisConsensus Configuration
+
+- config_node_ratis_log_appender_buffer_size_max
+
+| Name | config_node_ratis_log_appender_buffer_size_max |
+| ----------- | ------------- |
+| Description | Max payload size of a single log-sync RPC from leader to follower on ConfigNode (in bytes, 16 MB by default) |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_appender_buffer_size_max
+
+| Name | schema_region_ratis_log_appender_buffer_size_max |
+| ----------- | ------------- |
+| Description | Max payload size of a single log-sync RPC from leader to follower on SchemaRegion (in bytes, 16 MB by default) |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- data_region_ratis_log_appender_buffer_size_max
+
+| Name | data_region_ratis_log_appender_buffer_size_max |
+| ----------- | ------------- |
+| Description | Max payload size of a single log-sync RPC from leader to follower on DataRegion (in bytes, 16 MB by default) |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart required. |
+
+- config_node_ratis_snapshot_trigger_threshold
+
+| Name | config_node_ratis_snapshot_trigger_threshold |
+| ----------- | ------------- |
+| Description | Max number of logs to trigger a snapshot of ConfigNode |
+| Type | int32 |
+| Default | 400000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_snapshot_trigger_threshold
+
+| Name | schema_region_ratis_snapshot_trigger_threshold |
+| ----------- | ------------- |
+| Description | Max number of logs to trigger a snapshot of SchemaRegion |
+| Type | int32 |
+| Default | 400000 |
+| Effective | Restart required. |
+
+- data_region_ratis_snapshot_trigger_threshold
+
+| Name | data_region_ratis_snapshot_trigger_threshold |
+| ----------- | ------------- |
+| Description | Max number of logs to trigger a snapshot of DataRegion |
+| Type | int32 |
+| Default | 400000 |
+| Effective | Restart required. |
+
+- config_node_ratis_log_unsafe_flush_enable
+
+| Name | config_node_ratis_log_unsafe_flush_enable |
+| ----------- | ------------- |
+| Description | Whether ConfigNode is allowed to flush Raft logs asynchronously |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_unsafe_flush_enable
+
+| Name | schema_region_ratis_log_unsafe_flush_enable |
+| ----------- | ------------- |
+| Description | Whether SchemaRegion is allowed to flush Raft logs asynchronously |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- data_region_ratis_log_unsafe_flush_enable
+
+| Name | data_region_ratis_log_unsafe_flush_enable |
+| ----------- | ------------- |
+| Description | Whether DataRegion is allowed to flush Raft logs asynchronously |
+| Type | boolean |
+| Default | false |
+| Effective | Restart required. |
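+
+For orientation, snapshotting and flush safety are tuned per consensus group. A sketch that snapshots the DataRegion Raft log twice as often as the default while keeping synchronous flushes (the halved threshold is illustrative):
+
+```properties
+# Snapshot the DataRegion after 200000 log entries instead of the default 400000
+data_region_ratis_snapshot_trigger_threshold=200000
+# Keep the safe, synchronous Raft log flush
+data_region_ratis_log_unsafe_flush_enable=false
+```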
+
+- config_node_ratis_log_segment_size_max_in_byte
+
+| Name | config_node_ratis_log_segment_size_max_in_byte |
+| ----------- | ------------- |
+| Description | Max capacity of a Raft log segment file of ConfigNode (in bytes, 24 MB by default) |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_segment_size_max_in_byte
+
+| Name | schema_region_ratis_log_segment_size_max_in_byte |
+| ----------- | ------------- |
+| Description | Max capacity of a Raft log segment file of SchemaRegion (in bytes, 24 MB by default) |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- data_region_ratis_log_segment_size_max_in_byte
+
+| Name | data_region_ratis_log_segment_size_max_in_byte |
+| ----------- | ------------- |
+| Description | Max capacity of a Raft log segment file of DataRegion (in bytes, 24 MB by default) |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- config_node_simple_consensus_log_segment_size_max_in_byte
+
+| Name | config_node_simple_consensus_log_segment_size_max_in_byte |
+| ----------- | ------------- |
+| Description | Max capacity of a simple log segment file of ConfigNode (in bytes, 24 MB by default) |
+| Type | int32 |
+| Default | 25165824 |
+| Effective | Restart required. |
+
+- config_node_ratis_grpc_flow_control_window
+
+| Name | config_node_ratis_grpc_flow_control_window |
+| ----------- | ------------- |
+| Description | ConfigNode flow control window for the Ratis gRPC log appender |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+- schema_region_ratis_grpc_flow_control_window
+
+| Name | schema_region_ratis_grpc_flow_control_window |
+| ----------- | ------------- |
+| Description | Schema region flow control window for the Ratis gRPC log appender |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+- data_region_ratis_grpc_flow_control_window
+
+| Name | data_region_ratis_grpc_flow_control_window |
+| ----------- | ------------- |
+| Description | Data region flow control window for the Ratis gRPC log appender |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+- config_node_ratis_grpc_leader_outstanding_appends_max
+
+| Name | config_node_ratis_grpc_leader_outstanding_appends_max |
+| ----------- | ------------- |
+| Description | ConfigNode gRPC pipeline concurrency threshold |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- schema_region_ratis_grpc_leader_outstanding_appends_max
+
+| Name | schema_region_ratis_grpc_leader_outstanding_appends_max |
+| ----------- | ------------- |
+| Description | Schema region gRPC pipeline concurrency threshold |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- data_region_ratis_grpc_leader_outstanding_appends_max
+
+| Name | data_region_ratis_grpc_leader_outstanding_appends_max |
+| ----------- | ------------- |
+| Description | Data region gRPC pipeline concurrency threshold |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- config_node_ratis_log_force_sync_num
+
+| Name | config_node_ratis_log_force_sync_num |
+| ----------- | ------------- |
+| Description | config node fsync threshold |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_force_sync_num
+
+| Name | schema_region_ratis_log_force_sync_num |
+| ----------- | ------------- |
+| Description | schema region fsync threshold |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- data_region_ratis_log_force_sync_num
+
+| Name | data_region_ratis_log_force_sync_num |
+| ----------- | ------------- |
+| Description | data region fsync threshold |
+| Type | int32 |
+| Default | 128 |
+| Effective | Restart required. |
+
+- config_node_ratis_rpc_leader_election_timeout_min_ms
+
+| Name | config_node_ratis_rpc_leader_election_timeout_min_ms |
+| ----------- | ------------- |
+| Description | confignode leader min election timeout |
+| Type | int32 |
+| Default | 2000ms |
+| Effective | Restart required. |
+
+- schema_region_ratis_rpc_leader_election_timeout_min_ms
+
+| Name | schema_region_ratis_rpc_leader_election_timeout_min_ms |
+| ----------- | ------------- |
+| Description | schema region leader min election timeout |
+| Type | int32 |
+| Default | 2000ms |
+| Effective | Restart required. |
+
+- data_region_ratis_rpc_leader_election_timeout_min_ms
+
+| Name | data_region_ratis_rpc_leader_election_timeout_min_ms |
+| ----------- | ------------- |
+| Description | data region leader min election timeout |
+| Type | int32 |
+| Default | 2000ms |
+| Effective | Restart required. |
+
+- config_node_ratis_rpc_leader_election_timeout_max_ms
+
+| Name | config_node_ratis_rpc_leader_election_timeout_max_ms |
+| ----------- | ------------- |
+| Description | confignode leader max election timeout |
+| Type | int32 |
+| Default | 4000ms |
+| Effective | Restart required. |
+
+- schema_region_ratis_rpc_leader_election_timeout_max_ms
+
+| Name | schema_region_ratis_rpc_leader_election_timeout_max_ms |
+| ----------- | ------------- |
+| Description | schema region leader max election timeout |
+| Type | int32 |
+| Default | 4000ms |
+| Effective | Restart required. |
+
+- data_region_ratis_rpc_leader_election_timeout_max_ms
+
+| Name | data_region_ratis_rpc_leader_election_timeout_max_ms |
+| ----------- | ------------- |
+| Description | data region leader max election timeout |
+| Type | int32 |
+| Default | 4000ms |
+| Effective | Restart required. |
+
+- config_node_ratis_request_timeout_ms
+
+| Name | config_node_ratis_request_timeout_ms |
+| ----------- | ------------- |
+| Description | confignode ratis client retry threshold |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_request_timeout_ms
+
+| Name | schema_region_ratis_request_timeout_ms |
+| ----------- | ------------- |
+| Description | schema region ratis client retry threshold |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- data_region_ratis_request_timeout_ms
+
+| Name | data_region_ratis_request_timeout_ms |
+| ----------- | ------------- |
+| Description | data region ratis client retry threshold |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- config_node_ratis_max_retry_attempts
+
+| Name | config_node_ratis_max_retry_attempts |
+| ----------- | ------------- |
+| Description | confignode ratis client retry times |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- config_node_ratis_initial_sleep_time_ms
+
+| Name | config_node_ratis_initial_sleep_time_ms |
+| ----------- | ------------- |
+| Description | confignode ratis client initial sleep time |
+| Type | int32 |
+| Default | 100ms |
+| Effective | Restart required. |
+
+- config_node_ratis_max_sleep_time_ms
+
+| Name | config_node_ratis_max_sleep_time_ms |
+| ----------- | ------------- |
+| Description | confignode ratis client max retry sleep time |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_max_retry_attempts
+
+| Name | schema_region_ratis_max_retry_attempts |
+| ----------- | ------------- |
+| Description | schema region ratis client max retry times |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- schema_region_ratis_initial_sleep_time_ms
+
+| Name | schema_region_ratis_initial_sleep_time_ms |
+| ----------- | ------------- |
+| Description | schema region ratis client init sleep time |
+| Type | int32 |
+| Default | 100ms |
+| Effective | Restart required. |
+
+- schema_region_ratis_max_sleep_time_ms
+
+| Name | schema_region_ratis_max_sleep_time_ms |
+| ----------- | ------------- |
+| Description | schema region ratis client max sleep time |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- data_region_ratis_max_retry_attempts
+
+| Name | data_region_ratis_max_retry_attempts |
+| ----------- | ------------- |
+| Description | data region ratis client max retry times |
+| Type | int32 |
+| Default | 10 |
+| Effective | Restart required. |
+
+- data_region_ratis_initial_sleep_time_ms
+
+| Name | data_region_ratis_initial_sleep_time_ms |
+| ----------- | ------------- |
+| Description | data region ratis client init sleep time |
+| Type | int32 |
+| Default | 100ms |
+| Effective | Restart required. |
+
+- data_region_ratis_max_sleep_time_ms
+
+| Name | data_region_ratis_max_sleep_time_ms |
+| ----------- | ------------- |
+| Description | data region ratis client max retry sleep time |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- ratis_first_election_timeout_min_ms
+
+| Name | ratis_first_election_timeout_min_ms |
+| ----------- | ------------- |
+| Description | Ratis first election min timeout |
+| Type | int64 |
+| Default | 50 (ms) |
+| Effective | Restart required. |
+
+- ratis_first_election_timeout_max_ms
+
+| Name | ratis_first_election_timeout_max_ms |
+| ----------- | ------------- |
+| Description | Ratis first election max timeout |
+| Type | int64 |
+| Default | 150 (ms) |
+| Effective | Restart required. |
+
+- config_node_ratis_preserve_logs_num_when_purge
+
+| Name | config_node_ratis_preserve_logs_num_when_purge |
+| ----------- | ------------- |
+| Description | ConfigNode preserves a certain number of logs when taking a snapshot and purging |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- schema_region_ratis_preserve_logs_num_when_purge
+
+| Name | schema_region_ratis_preserve_logs_num_when_purge |
+| ----------- | ------------- |
+| Description | SchemaRegion preserves a certain number of logs when taking a snapshot and purging |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- data_region_ratis_preserve_logs_num_when_purge
+
+| Name | data_region_ratis_preserve_logs_num_when_purge |
+| ----------- | ------------- |
+| Description | DataRegion preserves a certain number of logs when taking a snapshot and purging |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart required. |
+
+- config_node_ratis_log_max_size
+
+| Name | config_node_ratis_log_max_size |
+| ----------- | ------------- |
+| Description | config node Raft log disk size control |
+| Type | int64 |
+| Default | 2147483648 (2GB) |
+| Effective | Restart required. |
+
+- schema_region_ratis_log_max_size
+
+| Name | schema_region_ratis_log_max_size |
+| ----------- | ------------- |
+| Description | schema region Raft log disk size control |
+| Type | int64 |
+| Default | 2147483648 (2GB) |
+| Effective | Restart required. |
+
+- data_region_ratis_log_max_size
+
+| Name | data_region_ratis_log_max_size |
+| ----------- | ------------- |
+| Description | data region Raft log disk size control |
+| Type | int64 |
+| Default | 21474836480 (20GB) |
+| Effective | Restart required. |
+
+- config_node_ratis_periodic_snapshot_interval
+
+| Name | config_node_ratis_periodic_snapshot_interval |
+| ----------- | ------------- |
+| Description | config node Raft periodic snapshot interval |
+| Type | int64 |
+| Default | 86400 (s) |
+| Effective | Restart required. |
+
+- schema_region_ratis_periodic_snapshot_interval
+
+| Name | schema_region_ratis_periodic_snapshot_interval |
+| ----------- | ------------- |
+| Description | schema region Raft periodic snapshot interval |
+| Type | int64 |
+| Default | 86400 (s) |
+| Effective | Restart required. |
+
+- data_region_ratis_periodic_snapshot_interval
+
+| Name | data_region_ratis_periodic_snapshot_interval |
+| ----------- | ------------- |
+| Description | data region Raft periodic snapshot interval |
+| Type | int64 |
+| Default | 86400 (s) |
+| Effective | Restart required. |
+
+### 4.31 IoTConsensusV2 Configuration
+
+- iot_consensus_v2_pipeline_size
+
+| Name | iot_consensus_v2_pipeline_size |
+| ----------- | ------------- |
+| Description | Default event buffer size for the connector and receiver in IoTConsensusV2 |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+- iot_consensus_v2_mode
+
+| Name | iot_consensus_v2_mode |
+| ----------- | ------------- |
+| Description | IoTConsensusV2 mode. |
+| Type | String |
+| Default | batch |
+| Effective | Restart required. |
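+
+A sketch of the two IoTConsensusV2 knobs side by side (the values shown are the defaults listed above):
+
+```properties
+# Event buffer size shared by connector and receiver
+iot_consensus_v2_pipeline_size=5
+# Replication mode
+iot_consensus_v2_mode=batch
+```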
+
+### 4.32 Procedure Configuration
+
+- procedure_core_worker_thread_count
+
+| Name | procedure_core_worker_thread_count |
+| ----------- | ------------- |
+| Description | Default number of procedure worker threads |
+| Type | int32 |
+| Default | 4 |
+| Effective | Restart required. |
+
+- procedure_completed_clean_interval
+
+| Name | procedure_completed_clean_interval |
+| ----------- | ------------- |
+| Description | Time interval at which completed procedures are cleaned up, in seconds |
+| Type | int32 |
+| Default | 30(s) |
+| Effective | Restart required. |
+
+- procedure_completed_evict_ttl
+
+| Name | procedure_completed_evict_ttl |
+| ----------- | ------------- |
+| Description | Default TTL of completed procedures, in seconds |
+| Type | int32 |
+| Default | 60(s) |
+| Effective | Restart required. |
+
+### 4.33 MQTT Broker Configuration
+
+- enable_mqtt_service
+
+| Name | enable_mqtt_service |
+| ----------- | ------------- |
+| Description | Whether to enable the MQTT service. |
+| Type | Boolean |
+| Default | false |
+| Effective | Hot reload |
+
+- mqtt_host
+
+| Name | mqtt_host |
+| ----------- | ------------- |
+| Description | The MQTT service binding host. |
+| Type | String |
+| Default | 127.0.0.1 |
+| Effective | Hot reload |
+
+- mqtt_port
+
+| Name | mqtt_port |
+| ----------- | ------------- |
+| Description | The MQTT service binding port. |
+| Type | int32 |
+| Default | 1883 |
+| Effective | Hot reload |
+
+- mqtt_handler_pool_size
+
+| Name | mqtt_handler_pool_size |
+| ----------- | ------------- |
+| Description | The handler pool size for handling MQTT messages. |
+| Type | int32 |
+| Default | 1 |
+| Effective | Hot reload |
+
+- mqtt_payload_formatter
+
+| Name | mqtt_payload_formatter |
+| ----------- | ------------- |
+| Description | The MQTT message payload formatter. |
+| Type | String |
+| Default | json |
+| Effective | Hot reload |
+
+- mqtt_max_message_size
+
+| Name | mqtt_max_message_size |
+| ----------- | ------------- |
+| Description | Max length of an MQTT message in bytes |
+| Type | int32 |
+| Default | 1048576 |
+| Effective | Hot reload |
+
+### 4.34 Audit Log Configuration
+
+- enable_audit_log
+
+| Name | enable_audit_log |
+| ----------- | ------------- |
+| Description | Whether to enable the audit log. |
+| Type | Boolean |
+| Default | false |
+| Effective | Restart required. |
+
+- audit_log_storage
+
+| Name | audit_log_storage |
+| ----------- | ------------- |
+| Description | Output location of audit logs |
+| Type | String |
+| Default | IOTDB,LOGGER |
+| Effective | Restart required. |
+
+- audit_log_operation
+
+| Name | audit_log_operation |
+| ----------- | ------------- |
+| Description | Which operation categories are audited: DML operations on data, DDL operations on schema, and QUERY operations on data and schema |
+| Type | String |
+| Default | DML,DDL,QUERY |
+| Effective | Restart required. |
+
+- enable_audit_log_for_native_insert_api
+
+| Name | enable_audit_log_for_native_insert_api |
+| ----------- | ------------- |
+| Description | Whether the native write API records audit logs |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
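+
+For example (a sketch, not a recommendation), auditing only schema changes and storing the records in IoTDB itself:
+
+```properties
+enable_audit_log=true
+# Store audit entries in IoTDB rather than in log files
+audit_log_storage=IOTDB
+# Audit DDL operations only
+audit_log_operation=DDL
+```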
+
+### 4.35 White List Configuration
+
+- enable_white_list
+
+| Name | enable_white_list |
+| ----------- | ------------- |
+| Description | Whether to enable the white list |
+| Type | Boolean |
+| Default | false |
+| Effective | Hot reload |
+
+### 4.36 IoTDB-AI Configuration
+
+- model_inference_execution_thread_count
+
+| Name | model_inference_execution_thread_count |
+| ----------- | ------------- |
+| Description | The number of threads that can be used for model inference operations. |
+| Type | int |
+| Default | 5 |
+| Effective | Restart required. |
+
+### 4.37 Load TsFile Configuration
+
+- load_clean_up_task_execution_delay_time_seconds
+
+| Name | load_clean_up_task_execution_delay_time_seconds |
+| ----------- | ------------- |
+| Description | The delay after which the load clean-up task removes unsuccessfully loaded TsFiles. |
+| Type | int |
+| Default | 1800 |
+| Effective | Hot reload |
+
+- load_write_throughput_bytes_per_second
+
+| Name | load_write_throughput_bytes_per_second |
+| ----------- | ------------- |
+| Description | The maximum bytes per second of disk write throughput when loading TsFiles. |
+| Type | int |
+| Default | -1 |
+| Effective | Hot reload |
+
+- load_active_listening_enable
+
+| Name | load_active_listening_enable |
+| ----------- | ------------- |
+| Description | Whether to enable the active listening mode for TsFile loading. |
+| Type | Boolean |
+| Default | true |
+| Effective | Hot reload |
+
+- load_active_listening_dirs
+
+| Name | load_active_listening_dirs |
+| ----------- | ------------- |
+| Description | The directories actively listened to for TsFile loading. Multiple directories should be separated by ','. |
+| Type | String |
+| Default | ext/load/pending |
+| Effective | Hot reload |
+
+- load_active_listening_fail_dir
+
+| Name | load_active_listening_fail_dir |
+| ----------- | ------------- |
+| Description | The directory where TsFiles are moved if the active listening mode fails to load them. |
+| Type | String |
+| Default | ext/load/failed |
+| Effective | Hot reload |
+
+- load_active_listening_max_thread_num
+
+| Name | load_active_listening_max_thread_num |
+| ----------- | ------------- |
+| Description | The maximum number of threads that can be used to load TsFiles actively. When this parameter is commented out or set to a value <= 0, the CPU core number is used. |
+| Type | Long |
+| Default | 0 |
+| Effective | Restart required. |
+
+- load_active_listening_check_interval_seconds
+
+| Name | load_active_listening_check_interval_seconds |
+| ----------- | ------------- |
+| Description | The interval, in seconds, at which the active listening mode checks the directories specified in `load_active_listening_dirs`. |
+| Type | Long |
+| Default | 5 |
+| Effective | Restart required. |
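+
+A sketch of active listening with the default directories (the paths are the documented defaults and act as placeholders):
+
+```properties
+load_active_listening_enable=true
+# TsFiles dropped here are loaded automatically; ',' separates multiple dirs
+load_active_listening_dirs=ext/load/pending
+# TsFiles that fail to load are moved here
+load_active_listening_fail_dir=ext/load/failed
+```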
+
+- last_cache_operation_on_load
+
+| Name | last_cache_operation_on_load |
+| ----------- | ------------- |
+| Description | The operation performed on the LastCache when a TsFile is successfully loaded. UPDATE: use the data in the TsFile to update the LastCache; UPDATE_NO_BLOB: similar to UPDATE, but invalidates the LastCache for blob series; CLEAN_DEVICE: invalidate the LastCache of devices contained in the TsFile; CLEAN_ALL: clean the whole LastCache. |
+| Type | String |
+| Default | UPDATE_NO_BLOB |
+| Effective | Restart required. |
+
+- cache_last_values_for_load
+
+| Name | cache_last_values_for_load |
+| ----------- | ------------- |
+| Description | Whether to cache last values before loading a TsFile. Only effective when `last_cache_operation_on_load=UPDATE_NO_BLOB` or `last_cache_operation_on_load=UPDATE`. When set to true, blob series will be ignored even with `last_cache_operation_on_load=UPDATE`. Enabling this increases the memory footprint while loading TsFiles. |
+| Type | Boolean |
+| Default | true |
+| Effective | Restart required. |
+
+- cache_last_values_memory_budget_in_byte
+
+| Name | cache_last_values_memory_budget_in_byte |
+| ----------- | ------------- |
+| Description | When `cache_last_values_for_load=true`, the maximum memory that can be used to cache last values. If this value is exceeded, the cached values are abandoned and last values are read from the TsFile in a streaming manner. |
+| Type | int32 |
+| Default | 4194304 |
+| Effective | Restart required. |
+
+### 4.38 Dispatch Retry Configuration
+
+- write_request_remote_dispatch_max_retry_duration_in_ms
+
+| Name | write_request_remote_dispatch_max_retry_duration_in_ms |
+| ----------- | ------------- |
+| Description | The maximum retrying time for remote dispatch of write requests, in milliseconds. |
+| Type | Long |
+| Default | 60000 |
+| Effective | Hot reload |
+
+- enable_retry_for_unknown_error
+
+| Name | enable_retry_for_unknown_error |
+| ----------- | ------------- |
+| Description | Whether to retry on unknown errors. |
+| Type | boolean |
+| Default | false |
+| Effective | Hot reload |
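+
+A closing sketch combining the two dispatch retry settings above (the values shown are the defaults):
+
+```properties
+# Stop retrying remote dispatch of a write request after 60 s
+write_request_remote_dispatch_max_retry_duration_in_ms=60000
+# Do not retry on unknown errors
+enable_retry_for_unknown_error=false
+```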
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Reference/System-Tables_timecho.md b/src/UserGuide/latest-Table/Reference/System-Tables_timecho.md
index e7b43683a..7bb37e58c 100644
--- a/src/UserGuide/latest-Table/Reference/System-Tables_timecho.md
+++ b/src/UserGuide/latest-Table/Reference/System-Tables_timecho.md
@@ -371,7 +371,7 @@ IoTDB> select * from information_schema.views
 
 ### 2.11 MODELS Table
 
-> This system table is available starting from version V 2.0.5 and has been discontinued since version V 2.0.8-beta.
+> This system table is available starting from version V 2.0.5 and has been discontinued since version V 2.0.8.
 
 * Contains information about all models in the database.
 * The table structure is as follows:
@@ -596,7 +596,7 @@ IoTDB> select * from information_schema.data_nodes
 
 ### 2.18 CONNECTIONS Table
 
-> This system table is available starting from version V 2.0.8-beta
+> This system table is available starting from version V 2.0.8
 
 * Contains all connections in the cluster.
 * The table structure is as follows:
@@ -623,7 +623,7 @@ IoTDB> select * from information_schema.connections;
 
 ### 2.19 CURRENT_QUERIES Table
 
-> This system table is available starting from version V 2.0.8-beta
+> This system table is available starting from version V 2.0.8
 
 * Contains all queries whose execution end time falls within the range `[now() - query_cost_stat_window, now())`, including currently executing queries. The `query_cost_stat_window` parameter represents the query cost statistics window. Its default value is 0 and can be configured via the `iotdb-system.properties` configuration file.
 * The table structure is as follows:
@@ -654,7 +654,7 @@ IoTDB> select * from information_schema.current_queries;
 
 ### 2.20 QUERIES_COSTS_HISTOGRAM Table
 
-> This system table is available starting from version V 2.0.8-beta
+> This system table is available starting from version V 2.0.8
 
 * Contains a histogram of query execution times within the past `query_cost_stat_window` period (only statistics for completed SQL queries). The `query_cost_stat_window` parameter represents the query cost statistics window. Its default value is 0 and can be configured via the `iotdb-system.properties` configuration file.
 * The table structure is as follows:
diff --git a/src/UserGuide/latest-Table/SQL-Manual/Basis-Function.md b/src/UserGuide/latest-Table/SQL-Manual/Basis-Function.md
index 721776597..46a8529cd 100644
--- a/src/UserGuide/latest-Table/SQL-Manual/Basis-Function.md
+++ b/src/UserGuide/latest-Table/SQL-Manual/Basis-Function.md
@@ -1,3 +1,6 @@
+---
+redirectTo: Basis-Function_apache.html
+---
-
-# Basic Functions
-
-## 1. Comparison Functions and Operators
-
-### 1.1 Basic Comparison Operators
-
-Comparison operators are used to compare two values and return the comparison result (`true` or `false`).
-
-| Operators | Description |
-| :-------- | :----------------------- |
-| < | Less than |
-| > | Greater than |
-| <= | Less than or equal to |
-| >= | Greater than or equal to |
-| = | Equal to |
-| <> | Not equal to |
-| != | Not equal to |
-
-#### 1.1.1 Comparison rules:
-
-1. All types can be compared with themselves.
-2. Numeric types (INT32, INT64, FLOAT, DOUBLE, TIMESTAMP) can be compared with each other.
-3. Character types (STRING, TEXT) can also be compared with each other.
-4. Comparisons between types other than those mentioned above will result in an error.
-
-### 1.2 BETWEEN Operator
-
-1. The `BETWEEN` operator is used to determine whether a value falls within a specified range.
-2. The `NOT BETWEEN` operator is used to determine whether a value does not fall within a specified range.
-3. The `BETWEEN` and `NOT BETWEEN` operators can be used to evaluate any sortable type.
-4. The value, minimum, and maximum parameters for `BETWEEN` and `NOT BETWEEN` must be of the same type, otherwise an error will occur.
-
-Syntax:
-
-```SQL
- value BETWEEN min AND max
- value NOT BETWEEN min AND max
-```
-
-Example 1: BETWEEN
-
-```SQL
--- Query records where temperature is between 85.0 and 90.0
-SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0;
-```
-
-Example 2: NOT BETWEEN
-
-```SQL
--- Query records where humidity is not between 35.0 and 40.0
-SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0;
-```
-
-### 1.3 IS NULL Operator
-
-1. These operators apply to all data types.
-
-Example 1: Query records where temperature is NULL
-
-```SQL
-SELECT * FROM table1 WHERE temperature IS NULL;
-```
-
-Example 2: Query records where humidity is not NULL
-
-```SQL
-SELECT * FROM table1 WHERE humidity IS NOT NULL;
-```
-
-### 1.4 IN Operator
-
-1. The `IN` operator can be used in the `WHERE` clause to compare a column with a list of values.
-2. These values can be provided by a static array or scalar expressions.
-
-Syntax:
-
-```SQL
-... WHERE column [NOT] IN ('value1','value2', expression1)
-```
-
-Example 1: Static array: Query records where region is 'Beijing' or 'Shanghai'
-
-```SQL
-SELECT * FROM table1 WHERE region IN ('Beijing', 'Shanghai');
---Equivalent to
-SELECT * FROM table1 WHERE region = 'Beijing' OR region = 'Shanghai';
-```
-
-Example 2: Scalar expression: Query records where temperature is among specific values
-
-```SQL
-SELECT * FROM table1 WHERE temperature IN (85.0, 90.0);
-```
-
-Example 3: Query records where region is not 'Beijing' or 'Shanghai'
-
-```SQL
-SELECT * FROM table1 WHERE region NOT IN ('Beijing', 'Shanghai');
-```
-
-### 1.5 GREATEST and LEAST
-
-The `GREATEST` function returns the maximum value from a list of arguments, while the `LEAST` function returns the minimum value. The return type matches the input data type.
-
-Key Behaviors:
-1. NULL Handling: Returns NULL if all arguments are NULL.
-2. Parameter Requirements: Requires at least 2 arguments.
-3. Type Constraints: All arguments must have the same data type.
-4. Supported Types: `BOOLEAN`, `FLOAT`, `DOUBLE`, `INT32`, `INT64`, `STRING`, `TEXT`, `TIMESTAMP`, `DATE`
-
-**Syntax:**
-
-```sql
- greatest(value1, value2, ..., valueN)
- least(value1, value2, ..., valueN)
-```
-
-**Examples:**
-
-```sql
--- Retrieve the maximum value between `temperature` and `humidity` in `table2`
-SELECT GREATEST(temperature,humidity) FROM table2;
-
--- Retrieve the minimum value between `temperature` and `humidity` in `table2`
-SELECT LEAST(temperature,humidity) FROM table2;
-```
-
-## 2. Aggregate functions
-
-### 2.1 Overview
-
-1. Aggregate functions are many-to-one functions. They perform aggregate calculations on a set of values to obtain a single aggregate result.
-
-2. Except for `COUNT()`, all other aggregate functions ignore null values and return null when there are no input rows or all values are null. For example, `SUM()` returns null instead of zero, and `AVG()` does not include null values in the count.
-
-### 2.2 Supported Aggregate Functions
-
-| Function Name | Description | Allowed Input Types | Output Type |
-|:---|:---|:---|:---|
-| COUNT | Counts the number of data points. | All types | INT64 |
-| COUNT_IF | COUNT_IF(exp) counts the number of rows that satisfy a specified boolean expression. | `exp` must be a boolean expression (e.g. `count_if(temperature>20)`) | INT64 |
-| APPROX_COUNT_DISTINCT | The APPROX_COUNT_DISTINCT(x[, maxStandardError]) function provides an approximation of COUNT(DISTINCT x), returning the estimated number of distinct input values. | `x`: the target column to be calculated, supports all data types.<br>`maxStandardError` (optional): specifies the maximum standard error allowed for the function's result. Valid range is [0.0040625, 0.26]. Defaults to 0.023 if not specified. | INT64 |
-| APPROX_MOST_FREQUENT | The APPROX_MOST_FREQUENT(x, k, capacity) function is used to approximately calculate the top k most frequent elements in a dataset. It returns a JSON-formatted string where the keys are the element values and the values are their corresponding approximate frequencies. (Available since V2.0.5.1) | `x`: the column to be calculated, supporting all existing data types in IoTDB;<br>`k`: the number of top-k most frequent values to return;<br>`capacity`: the number of buckets used for computation, which relates to memory usage—a larger value reduces error but consumes more memory, while a smaller value increases error but uses less memory. | STRING |
-| SUM | Calculates the sum. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
-| AVG | Calculates the average. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
-| MAX | Finds the maximum value. | All types | Same as input type |
-| MIN | Finds the minimum value. | All types | Same as input type |
-| FIRST | Finds the value with the smallest timestamp that is not NULL. | All types | Same as input type |
-| LAST | Finds the value with the largest timestamp that is not NULL. | All types | Same as input type |
-| STDDEV | Alias for STDDEV_SAMP, calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
-| STDDEV_POP | Calculates the population standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
-| STDDEV_SAMP | Calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
-| VARIANCE | Alias for VAR_SAMP, calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
-| VAR_POP | Calculates the population variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
-| VAR_SAMP | Calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE |
-| EXTREME | Finds the value with the largest absolute value. If the largest absolute values of positive and negative values are equal, returns the positive value. | INT32 INT64 FLOAT DOUBLE | Same as input type |
-| MODE | Finds the mode. Note: 1. There is a risk of memory exception when the number of distinct values in the input sequence is too large; 2. If all elements have the same frequency, i.e., there is no mode, a random element is returned; 3. If there are multiple modes, a random mode is returned; 4. NULL values are also counted in frequency, so even if not all values in the input sequence are NULL, the final result may still be NULL. | All types | Same as input type |
-| MAX_BY | MAX_BY(x, y) finds the value of x corresponding to the maximum y in the binary input x and y. MAX_BY(time, x) returns the timestamp when x is at its maximum. | x and y can be of any type | Same as the data type of the first input x |
-| MIN_BY | MIN_BY(x, y) finds the value of x corresponding to the minimum y in the binary input x and y. MIN_BY(time, x) returns the timestamp when x is at its minimum. | x and y can be of any type | Same as the data type of the first input x |
-| FIRST_BY | FIRST_BY(x, y) finds the value of x in the same row where y is the first non-null value. | x and y can be of any type | Same as the data type of the first input x |
-| LAST_BY | LAST_BY(x, y) finds the value of x in the same row where y is the last non-null value. | x and y can be of any type | Same as the data type of the first input x |
-
-### 2.3 Examples
-
-#### 2.3.1 Example Data
-
-The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results.
-
-#### 2.3.2 Count
-
-Counts the number of rows in the entire table and the number of non-null values in the `temperature` column.
-
-```SQL
-IoTDB> select count(*), count(temperature) from table1;
-```
-
-> Note: Only the COUNT function can be used with `*`; otherwise an error will occur.
-
-The execution result is as follows:
- -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 18| 12| -+-----+-----+ -Total line number = 1 -It costs 0.834s -``` - - -#### 2.3.3 Count_if - -Count `Non-Null` `arrival_time` Records in `table2` - -```sql -select count_if(arrival_time is not null) from table2; -``` - -The execution result is as follows: - -```sql -+-----+ -|_col0| -+-----+ -| 4| -+-----+ -Total line number = 1 -It costs 0.047s -``` - -#### 2.3.4 Approx_count_distinct - -Retrieve the number of distinct values in the `temperature` column from `table1`. - -```sql -IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1; -IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1; -``` - -The execution result is as follows: - -```sql -+------+------+ -|origin|approx| -+------+------+ -| 3| 3| -+------+------+ -Total line number = 1 -It costs 0.022s -``` - -#### 2.3.5 Approx_most_frequent - -Query the ​​top 2 most frequent values​​ in the `temperature` column of `table1`. - -```sql -IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1; -``` - -The execution result is as follows: - -```sql -+-------------------+ -| topk| -+-------------------+ -|{"85.0":6,"90.0":5}| -+-------------------+ -Total line number = 1 -It costs 0.064s -``` - - -#### 2.3.6 First - -Finds the values with the smallest timestamp that are not NULL in the `temperature` and `humidity` columns. - -```SQL -IoTDB> select first(temperature), first(humidity) from table1; -``` - -The execution result is as follows: - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 90.0| 35.1| -+-----+-----+ -Total line number = 1 -It costs 0.170s -``` - -#### 2.3.7 Last - -Finds the values with the largest timestamp that are not NULL in the `temperature` and `humidity` columns. - -```SQL -IoTDB> select last(temperature), last(humidity) from table1; -``` - -The execution result is as follows: - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 90.0| 34.8| -+-----+-----+ -Total line number = 1 -It costs 0.211s -``` - -#### 2.3.8 First_by - -Finds the `time` value of the row with the smallest timestamp that is not NULL in the `temperature` column, and the `humidity` value of the row with the smallest timestamp that is not NULL in the `temperature` column. - -```SQL -IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1; -``` - -The execution result is as follows: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-26T13:37:00.000+08:00| 35.1| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.269s -``` - -#### 2.3.9 Last_by - -Queries the `time` value of the row with the largest timestamp that is not NULL in the `temperature` column, and the `humidity` value of the row with the largest timestamp that is not NULL in the `temperature` column. - -```SQL -IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1; -``` - -The execution result is as follows: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-30T14:30:00.000+08:00| 34.8| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.070s -``` - -#### 2.3.10 Max_by - -Queries the `time` value of the row where the `temperature` column is at its maximum, and the `humidity` value of the row where the `temperature` column is at its maximum. 
- -```SQL -IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; -``` - -The execution result is as follows: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-30T09:30:00.000+08:00| 35.2| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.172s -``` - -#### 2.3.11 Min_by - -Queries the `time` value of the row where the `temperature` column is at its minimum, and the `humidity` value of the row where the `temperature` column is at its minimum. - -```SQL -select min_by(time, temperature), min_by(humidity, temperature) from table1; -``` - -The execution result is as follows: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-29T10:00:00.000+08:00| null| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.244s -``` - - -## 3. Logical operators - -### 3.1 Overview - -Logical operators are used to combine conditions or negate conditions, returning a Boolean result (`true` or `false`). - -Below are the commonly used logical operators along with their descriptions: - -| Operator | Description | Example | -| :------- | :-------------------------------- | :------ | -| AND | True only if both values are true | a AND b | -| OR | True if either value is true | a OR b | -| NOT | True when the value is false | NOT a | - -### 3.2 Impact of NULL on Logical Operators - -#### 3.2.1 AND Operator - -- If one or both sides of the expression are `NULL`, the result may be `NULL`. -- If one side of the `AND` operator is `FALSE`, the expression result is `FALSE`. - -Examples: - -```SQL -NULL AND true -- null -NULL AND false -- false -NULL AND NULL -- null -``` - -#### 3.2.2 OR Operator - -- If one or both sides of the expression are `NULL`, the result may be `NULL`. -- If one side of the `OR` operator is `TRUE`, the expression result is `TRUE`. - -Examples: - -```SQL -NULL OR NULL -- null -NULL OR false -- null -NULL OR true -- true -``` - -##### 3.2.2.1 Truth Table - -The following truth table illustrates how `NULL` is handled in `AND` and `OR` operators: - -| a | b | a AND b | a OR b | -| :---- | :---- | :------ | :----- | -| TRUE | TRUE | TRUE | TRUE | -| TRUE | FALSE | FALSE | TRUE | -| TRUE | NULL | NULL | TRUE | -| FALSE | TRUE | FALSE | TRUE | -| FALSE | FALSE | FALSE | FALSE | -| FALSE | NULL | FALSE | NULL | -| NULL | TRUE | NULL | TRUE | -| NULL | FALSE | FALSE | NULL | -| NULL | NULL | NULL | NULL | - -#### 3.2.3 NOT Operator - -The logical negation of `NULL` remains `NULL`. - -Example: - -```SQL -NOT NULL -- null -``` - -##### 3.2.3.1 Truth Table - -The following truth table illustrates how `NULL` is handled in the `NOT` operator: - -| a | NOT a | -| :---- | :---- | -| TRUE | FALSE | -| FALSE | TRUE | -| NULL | NULL | - -## 4. Date and Time Functions and Operators - -### 4.1 now() -> Timestamp - -Returns the current timestamp. - -### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp - -The `date_bin` function is used for handling time data by rounding a timestamp (`Timestamp`) to the boundary of a specified time interval (`interval`). - -#### **Syntax:** - -```SQL --- Calculates the time interval starting from timestamp 0 and returns the nearest interval boundary to the specified timestamp. -date_bin(interval,source) - --- Calculates the time interval starting from the origin timestamp and returns the nearest interval boundary to the specified timestamp. 
-date_bin(interval,source,origin)
-
---Supported time units for interval:
---Years (y), months (mo), weeks (week), days (d), hours (h), minutes (M), seconds (s), milliseconds (ms), microseconds (µs), nanoseconds (ns).
---source: Must be of timestamp type.
-```
-
-#### **Parameters**:
-
-| Parameter | Description |
-| :-------- | :---------- |
-| interval | 1. Time interval. 2. Supported units: `y`, `mo`, `week`, `d`, `h`, `M`, `s`, `ms`, `µs`, `ns`. |
-| source | 1. The timestamp column or expression to be calculated. 2. Must be of timestamp type. |
-| origin | The reference timestamp. |
-
-#### 4.2.1 Syntax Rules
-
-1. If `origin` is not specified, the default reference timestamp is `1970-01-01T00:00:00Z` (Beijing time: `1970-01-01 08:00:00`).
-2. `interval` must be a non-negative number with a time unit. If `interval` is `0ms`, the function returns `source` directly without calculation.
-3. If `origin` or `source` is negative, it represents a time point before the epoch, and `date_bin` calculates and returns the corresponding time bucket.
-4. If `source` is `null`, the function returns `null`.
-5. Mixing months and non-month time units (e.g., `1 MONTH 1 DAY`) is not supported due to ambiguity.
-
-> For example, if the starting point is **April 30, 2000**, calculating `1 DAY` first and then `1 MONTH` results in **June 1, 2000**, whereas calculating `1 MONTH` first and then `1 DAY` results in **May 31, 2000**. The resulting dates are different.
-
-#### 4.2.2 Examples
-
-##### Example Data
-
-The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results.
- -#### Example 1: Without Specifying the Origin Timestamp - -```SQL -SELECT - time, - date_bin(1h,time) as time_bin -FROM - table1; -``` - -Result**:** - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.683s -``` - -#### Example 2: Specifying the Origin Timestamp - -```SQL -SELECT - time, - date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin -FROM - table1; -``` - -Result: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.056s -``` - -#### Example 3: Negative Origin - -```SQL -SELECT - time, - date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin -FROM - table1; -``` - -Result: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| 
-|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.203s -``` - -#### Example 4: Interval of 0 - -```SQL -SELECT - time, - date_bin(0ms, time) as time_bin -FROM - table1; -``` - -Result**:** - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.107s -``` - -#### Example 5: Source is NULL - -```SQL -SELECT - arrival_time, - date_bin(1h,arrival_time) as time_bin -FROM - table1; -``` - -Result: - -```Plain -+-----------------------------+-----------------------------+ -| arrival_time| time_bin| -+-----------------------------+-----------------------------+ -| null| null| -|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -| null| null| -|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| -| null| null| 
-|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00|
-|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00|
-|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00|
-|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00|
-+-----------------------------+-----------------------------+
-Total line number = 18
-It costs 0.319s
-```
-
-### 4.3 Extract Function
-
-This function is used to extract the value of a specific part of a date. (Supported from version V2.0.6)
-
-#### 4.3.1 Syntax Definition
-
-```SQL
-EXTRACT (identifier FROM expression)
-```
-
-* Parameter Description
-  * **expression**: `TIMESTAMP` type or a time constant
-  * **identifier**: The valid identifiers and corresponding return value types are shown in the table below.
-
-    | Identifier | Return Type | Return Range |
-    |------------|-------------|--------------|
-    | `YEAR` | `INT64` | `/` |
-    | `QUARTER` | `INT64` | `1-4` |
-    | `MONTH` | `INT64` | `1-12` |
-    | `WEEK` | `INT64` | `1-53` |
-    | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` |
-    | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` |
-    | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` |
-    | `HOUR` | `INT64` | `0-23` |
-    | `MINUTE` | `INT64` | `0-59` |
-    | `SECOND` | `INT64` | `0-59` |
-    | `MS` | `INT64` | `0-999` |
-    | `US` | `INT64` | `0-999` |
-    | `NS` | `INT64` | `0-999` |
-
-#### 4.3.2 Usage Example
-
-Using table1 from the [Sample Data](../Reference/Sample-Data.md) as the source data, query the average temperature for the first 12 hours of each day within a certain period.
-
-```SQL
-IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time)
-+----------+-----+
-|   fmtdate|avgtp|
-+----------+-----+
-|2024-11-28| 86.0|
-|2024-11-29| 85.0|
-|2024-11-30| 90.0|
-+----------+-----+
-Total line number = 3
-It costs 0.041s
-```
-
-Introduction to the `Format` function: [Format Function](../SQL-Manual/Basis-Function.md#_8-2-format-function)
-
-Introduction to the `Date_bin` function: [Date_bin Function](../SQL-Manual/Basis-Function.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp)
-
-## 5. Mathematical Functions and Operators
-
-### 5.1 Mathematical Operators
-
-| **Operator** | **Description** |
-| :----------- | :-------------- |
-| + | Addition |
-| - | Subtraction |
-| * | Multiplication |
-| / | Division (integer division performs truncation) |
-| % | Modulus (remainder) |
-| - | Negation (unary) |
-
-### 5.2 Mathematical Functions
-
-| Function Name | Description | Input | Output | Usage |
-|:---|:---|:---|:---|:---|
-| sin | Sine | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | sin(x) |
-| cos | Cosine | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | cos(x) |
-| tan | Tangent | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | tan(x) |
-| asin | Inverse Sine | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | asin(x) |
-| acos | Inverse Cosine | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | acos(x) |
-| atan | Inverse Tangent | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | atan(x) |
-| sinh | Hyperbolic Sine | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | sinh(x) |
-| cosh | Hyperbolic Cosine | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | cosh(x) |
-| tanh | Hyperbolic Tangent | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | tanh(x) |
-| degrees | Converts angle `x` in radians to degrees | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | degrees(x) |
-| radians | Converts angle `x` in degrees to radians | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | radians(x) |
-| abs | Absolute value | DOUBLE, FLOAT, INT64, INT32 | Same as input type | abs(x) |
-| sign | Returns the sign of `x`: if `x = 0`, returns `0`; if `x > 0`, returns `1`; if `x < 0`, returns `-1`. For DOUBLE/FLOAT inputs: if `x = NaN`, returns `NaN`; if `x = +Infinity`, returns `1.0`; if `x = -Infinity`, returns `-1.0` | DOUBLE, FLOAT, INT64, INT32 | Same as input type | sign(x) |
-| ceil | Rounds `x` up to the nearest integer | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | ceil(x) |
-| floor | Rounds `x` down to the nearest integer | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | floor(x) |
-| exp | Returns `e^x` (Euler's number raised to the power of `x`) | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | exp(x) |
-| ln | Returns the natural logarithm of `x` | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | ln(x) |
-| log10 | Returns the base 10 logarithm of `x` | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | log10(x) |
-| round | Rounds `x` to the nearest integer | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | round(x) |
-| round | Rounds `x` to `d` decimal places | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | round(x, d) |
-| sqrt | Returns the square root of `x`. | DOUBLE, FLOAT, INT64, INT32 | DOUBLE | sqrt(x) |
-| e | Returns Euler's number `e`. | | DOUBLE | e() |
-| pi | Pi (π) | | DOUBLE | pi() |
-
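-None of the functions above has a dedicated example in this section, so the following is a small illustrative query against `table1` from the [Example Data page](../Reference/Sample-Data.md); the exact values returned depend on the sample data you imported:
-
-```SQL
--- round(x, d) keeps d decimal places; abs and sqrt behave as in standard SQL
-SELECT temperature,
-       round(temperature, 1) AS rounded,
-       abs(temperature - 90.0) AS deviation,
-       sqrt(humidity) AS sqrt_humidity
-FROM table1
-LIMIT 5;
-```
-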
-## 6. Bitwise Functions
-
-> Supported from version V2.0.6
-
-Example raw data is as follows:
-
-```
-IoTDB:database1> select * from bit_table
-+-----------------------------+---------+------+-----+
-|                         time|device_id|length|width|
-+-----------------------------+---------+------+-----+
-|2025-10-29T15:59:42.957+08:00|       d1|    14|   12|
-|2025-10-29T15:58:59.399+08:00|       d3|    15|   10|
-|2025-10-29T15:59:32.769+08:00|       d2|    13|   12|
-+-----------------------------+---------+------+-----+
-
--- Table creation statement
-CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD);
-
--- Write data
-INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12);
-```
-
-### 6.1 bit\_count(num, bits)
-
-The `bit_count(num, bits)` function counts the number of 1s in the binary representation of the integer `num` under the specified bit width `bits`.
-
-#### 6.1.1 Syntax Definition
-
-```
-bit_count(num, bits) -> INT64 -- The return type is INT64
-```
-
-* Parameter Description
-  * **num:** Any integer value (INT32 or INT64)
-  * **bits:** Integer value, with a valid range of 2~64
-
-Note: An error is raised if `bits` is insufficient to represent `num` (using **two's complement signed representation**): `Argument exception, the scalar function num must be representable with the bits specified. [num] cannot be represented with [bits] bits.`
-
-* Usage Methods
-  * Two specific numbers: `bit_count(9, 64)`
-  * Column and a number: `bit_count(column1, 64)`
-  * Between two columns: `bit_count(column1, column2)`
-
-#### 6.1.2 Usage Examples
-
-```
--- Two specific numbers
-IoTDB:database1> select distinct bit_count(2,8) from bit_table
-+-----+
-|_col0|
-+-----+
-|    1|
-+-----+
--- Two specific numbers
-IoTDB:database1> select distinct bit_count(-5,8) from bit_table
-+-----+
-|_col0|
-+-----+
-|    7|
-+-----+
--- Column and a number
-IoTDB:database1> select length,bit_count(length,8) from bit_table
-+------+-----+
-|length|_col1|
-+------+-----+
-|    14|    3|
-|    15|    4|
-|    13|    3|
-+------+-----+
--- Insufficient bits
-IoTDB:database1> select length,bit_count(length,2) from bit_table
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits.
-```
-
-### 6.2 bitwise\_and(x, y)
-
-The `bitwise_and(x, y)` function performs a logical AND operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise AND result.
- -#### 6.2.1 Syntax Definition - -``` -bitwise_and(x, y) -> INT64 -- The return type is Int64 -``` - -* Parameter Description - - * ​**x, y**​: Must be integer values of data type Int32 or Int64 -* Usage Methods - - * Two specific numbers: `bitwise_and(19, 25)` - * Column and a number: `bitwise_and(column1, 25)` - * Between two columns: `bitwise_and(column1, column2)` - -#### 6.2.2 Usage Examples - -``` ---Two specific numbers -IoTDB:database1> select distinct bitwise_and(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 17| -+-----+ ---Column and a number -IoTDB:database1> select length, bitwise_and(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 8| -| 15| 9| -| 13| 9| -+------+-----+ ---Between two columns -IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 12| -| 15| 10| 10| -| 13| 12| 12| -+------+-----+-----+ -``` - -### 6.3 bitwise\_not(x) - -The `bitwise_not(x)`function performs a logical NOT operation on each bit of the integer x based on its two's complement representation, and returns the bitwise NOT operation result. - -#### 6.3.1 Syntax Definition - -``` -bitwise_not(x) -> INT64 -- The return type is Int64 -``` - -* Parameter Description - - * ​**x**​: Must be an integer value of data type Int32 or Int64 -* Usage Methods - - * Specific number: `bitwise_not(5)` - * Single column operation: `bitwise_not(column1)` - -#### 6.3.2 Usage Examples - -``` --- Specific number -IoTDB:database1> select distinct bitwise_not(5) from bit_table -+-----+ -|_col0| -+-----+ -| -6| -+-----+ --- Single column -IoTDB:database1> select length, bitwise_not(length) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| -15| -| 15| -16| -| 13| -14| -+------+-----+ -``` - -### 6.4 bitwise\_or(x, y) - -The `bitwise_or(x,y)`function performs a logical OR operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise OR operation result. - -#### 6.4.1 Syntax Definition - -``` -bitwise_or(x, y) -> INT64 -- The return type is Int64 -``` - -* Parameter Description - - * ​**x, y**​: Must be integer values of data type Int32 or Int64 -* Usage Methods - - * Two specific numbers: `bitwise_or(19, 25)` - * Column and a number: `bitwise_or(column1, 25)` - * Between two columns: `bitwise_or(column1, column2)` - -#### 6.4.2 Usage Examples - -``` --- Two specific numbers -IoTDB:database1> select distinct bitwise_or(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 27| -+-----+ --- Column and a number -IoTDB:database1> select length,bitwise_or(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 31| -| 15| 31| -| 13| 29| -+------+-----+ --- Between two columns -IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 14| -| 15| 10| 15| -| 13| 12| 13| -+------+-----+-----+ -``` - -### 6.5 bitwise\_xor(x, y) - -The `bitwise_xor(x,y)`function performs a logical XOR (exclusive OR) operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise XOR operation result. XOR rule: same bits result in 0, different bits result in 1. 
- -#### 6.5.1 Syntax Definition - -``` -bitwise_xor(x, y) -> INT64 -- The return type is Int64 -``` - -* Parameter Description - - * ​**x, y**​: Must be integer values of data type Int32 or Int64 -* Usage Methods - - * Two specific numbers: `bitwise_xor(19, 25)` - * Column and a number: `bitwise_xor(column1, 25)` - * Between two columns: `bitwise_xor(column1, column2)` - -#### 6.5.2 Usage Examples - -``` --- Two specific numbers -IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 10| -+-----+ --- Column and a number -IoTDB:database1> select length,bitwise_xor(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 23| -| 15| 22| -| 13| 20| -+------+-----+ --- Between two columns -IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 2| -| 15| 10| 5| -| 13| 12| 1| -+------+-----+-----+ -``` - -### 6.6 bitwise\_left\_shift(value, shift) - -The `bitwise_left_shift(value, shift)`function returns the result of shifting the binary representation of integer `value`left by `shift`bits. The left shift operation moves bits towards the higher-order direction, filling the vacated lower-order bits with 0s, and discarding the higher-order bits that overflow. Equivalent to: `value << shift`. - -#### 6.6.1 Syntax Definition - -``` -bitwise_left_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value -``` - -* Parameter Description - - * ​**value**​: The integer value to shift left. Must be of data type Int32 or Int64. - * ​**shift**​: The number of bits to shift. Must be of data type Int32 or Int64. -* Usage Methods - - * Two specific numbers: `bitwise_left_shift(1, 2)` - * Column and a number: `bitwise_left_shift(column1, 2)` - * Between two columns: `bitwise_left_shift(column1, column2)` - -#### 6.6.2 Usage Examples - -``` ---Two specific numbers -IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table -+-----+ -|_col0| -+-----+ -| 4| -+-----+ --- Column and a number -IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 56| -| 15| 60| -| 13| 52| -+------+-----+ --- Between two columns -IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -+------+-----+-----+ -``` - -### 6.7 bitwise\_right\_shift(value, shift) - -The `bitwise_right_shift(value, shift)`function returns the result of logically (unsigned) right shifting the binary representation of integer `value`by `shift`bits. The logical right shift operation moves bits towards the lower-order direction, filling the vacated higher-order bits with 0s, and discarding the lower-order bits that overflow. - -#### 6.7.1 Syntax Definition - -``` -bitwise_right_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value -``` - -* Parameter Description - - * ​**value**​: The integer value to shift right. Must be of data type Int32 or Int64. - * ​**shift**​: The number of bits to shift. Must be of data type Int32 or Int64. 
-* Usage Methods
-  * Two specific numbers: `bitwise_right_shift(8, 3)`
-  * Column and a number: `bitwise_right_shift(column1, 3)`
-  * Between two columns: `bitwise_right_shift(column1, column2)`
-
-#### 6.7.2 Usage Examples
-
-```
--- Two specific numbers
-IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table
-+-----+
-|_col0|
-+-----+
-|    1|
-+-----+
--- Column and a number
-IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table
-+------+-----+
-|length|_col1|
-+------+-----+
-|    14|    1|
-|    15|    1|
-|    13|    1|
-+------+-----+
--- Between two columns
-IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table
-+------+-----+-----+
-|length|width|_col2|
-+------+-----+-----+
-|    14|   12|    0|
-|    15|   10|    0|
-|    13|   12|    0|
-+------+-----+-----+
-```
-
-### 6.8 bitwise\_right\_shift\_arithmetic(value, shift)
-
-The `bitwise_right_shift_arithmetic(value, shift)` function returns the result of arithmetically right-shifting the binary representation of integer `value` by `shift` bits. The arithmetic right shift moves bits towards the lower-order direction, discards the lower-order bits that overflow, and fills the vacated higher-order bits with the sign bit (0 for positive numbers, 1 for negative numbers) to preserve the sign of the number.
-
-#### 6.8.1 Syntax Definition
-
-```
-bitwise_right_shift_arithmetic(value, shift) -> [same as value] -- The return type is the same as the data type of value
-```
-
-* Parameter Description
-  * **value**: The integer value to shift right. Must be of data type INT32 or INT64.
-  * **shift**: The number of bits to shift. Must be of data type INT32 or INT64.
-* Usage Methods
-  * Two specific numbers: `bitwise_right_shift_arithmetic(12, 2)`
-  * Column and a number: `bitwise_right_shift_arithmetic(column1, 64)`
-  * Between two columns: `bitwise_right_shift_arithmetic(column1, column2)`
-
-#### 6.8.2 Usage Examples
-
-```
--- Two specific numbers
-IoTDB:database1> select distinct bitwise_right_shift_arithmetic(12,2) from bit_table
-+-----+
-|_col0|
-+-----+
-|    3|
-+-----+
--- Column and a number
-IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table
-+------+-----+
-|length|_col1|
-+------+-----+
-|    14|    1|
-|    15|    1|
-|    13|    1|
-+------+-----+
--- Between two columns
-IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table
-+------+-----+-----+
-|length|width|_col2|
-+------+-----+-----+
-|    14|   12|    0|
-|    15|   10|    0|
-|    13|   12|    0|
-+------+-----+-----+
-```
-
-## 7. Conditional Expressions
-
-### 7.1 CASE
-
-CASE expressions come in two forms: **Simple CASE** and **Searched CASE**.
-
-#### 7.1.1 Simple CASE
-
-The simple form evaluates each value expression from left to right until it finds a match with the given expression:
-
-```SQL
-CASE expression
-    WHEN value THEN result
-    [ WHEN ... ]
-    [ ELSE result ]
-END
-```
-
-If a matching value is found, the corresponding result is returned. If no match is found, the result from the `ELSE` clause (if provided) is returned; otherwise, `NULL` is returned.
-
-Example:
-
-```SQL
-SELECT a,
-       CASE a
-           WHEN 1 THEN 'one'
-           WHEN 2 THEN 'two'
-           ELSE 'many'
-       END
-```
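-
-For a runnable variant of the abstract example above, here is a minimal sketch against `table2` from the [Example Data page](../Reference/Sample-Data.md); it assumes the BOOLEAN `status` field from the sample data, and the rows returned depend on what you imported:
-
-```SQL
--- Simple CASE: compare one expression (status) against candidate values
-SELECT time, status,
-       CASE status
-           WHEN true THEN 'running'
-           WHEN false THEN 'stopped'
-           ELSE 'unknown' -- reached when status is NULL
-       END AS state
-FROM table2
-LIMIT 5;
-```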
-#### 7.1.2 Searched CASE
-
-The searched form evaluates each Boolean condition from left to right until a `TRUE` condition is found, then returns the corresponding result:
-
-```SQL
-CASE
-    WHEN condition THEN result
-    [ WHEN ... ]
-    [ ELSE result ]
-END
-```
-
-If no condition evaluates to `TRUE`, the `ELSE` clause result (if provided) is returned; otherwise, `NULL` is returned.
-
-Example:
-
-```SQL
-SELECT a, b,
-       CASE
-           WHEN a = 1 THEN 'aaa'
-           WHEN b = 2 THEN 'bbb'
-           ELSE 'ccc'
-       END
-```
-
-### 7.2 COALESCE
-
-Returns the first non-null value from the given list of parameters.
-
-```SQL
-coalesce(value1, value2[, ...])
-```
-
-## 8. Conversion Functions
-
-### 8.1 Conversion Functions
-
-#### 8.1.1 cast(value AS type) → type
-
-Explicitly converts a value to the specified type. This can be used to convert strings (`VARCHAR`) to numeric types or numeric values to string types. Starting from V2.0.8, the OBJECT type can be explicitly cast to the STRING type.
-
-If the conversion fails, a runtime error is thrown.
-
-Example:
-
-```SQL
-SELECT *
-  FROM table1
-  WHERE CAST(time AS DATE)
-  IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE));
-```
-
-#### 8.1.2 try_cast(value AS type) → type
-
-Similar to `CAST()`. If the conversion fails, returns `NULL` instead of throwing an error.
-
-Example:
-
-```SQL
-SELECT *
-  FROM table1
-  WHERE try_cast(time AS DATE)
-  IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE));
-```
-
-### 8.2 Format Function
-
-This function generates and returns a formatted string based on a specified format string and input arguments. Similar to Java's `String.format` or C's `printf`, it allows developers to construct dynamic string templates using placeholder syntax. Predefined format specifiers in the template are replaced with the corresponding argument values, producing a complete string that adheres to specific formatting requirements.
-
-#### 8.2.1 Syntax
-
-```SQL
-format(pattern, ...args) -> STRING
-```
-
-**Parameters**
-
-* `pattern`: A format string containing static text and one or more format specifiers (e.g., `%s`, `%d`), or any expression returning a `STRING`/`TEXT` type.
-* `args`: Input arguments to replace format specifiers. Constraints:
-  * Number of arguments ≥ 1.
-  * Multiple arguments must be comma-separated (e.g., `arg1, arg2`).
-  * The number of arguments can exceed the number of specifiers in `pattern` but cannot be fewer; otherwise an exception is triggered.
-
-**Return Value**
-
-* Formatted result string of type `STRING`.
-
-#### 8.2.2 Usage Examples
-
-1. Format Floating-Point Numbers
-   ```SQL
-   IoTDB:database1> SELECT format('%.5f', humidity) FROM table1 WHERE humidity = 35.4;
-   +--------+
-   |   _col0|
-   +--------+
-   |35.40000|
-   +--------+
-   ```
-2. Format Integers
-   ```SQL
-   IoTDB:database1> SELECT format('%03d', 8) FROM table1 LIMIT 1;
-   +-----+
-   |_col0|
-   +-----+
-   |  008|
-   +-----+
-   ```
-3. Format Dates and Timestamps
-
-* Locale-Specific Date
-
-```SQL
-IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) FROM table1 LIMIT 1;
-+-----------------------+
-|                  _col0|
-+-----------------------+
-|Monday, January 1, 2024|
-+-----------------------+
-```
-
-* Remove Timezone Information
-
-```SQL
-IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1;
-+-----------------------+
-|                  _col0|
-+-----------------------+
-|2024-01-01 00:00:00.000|
-+-----------------------+
-```
-
-* Second-Level Timestamp Precision
-
-```SQL
-IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1;
-+-------------------+
-|              _col0|
-+-------------------+
-|2024-01-01 00:00:00|
-+-------------------+
-```
-
-* Date/Time Format Symbols
-
-| **Symbol** | **Description** |
-| ---------- | --------------- |
-| 'H' | 24-hour format (two digits, zero-padded), i.e. 00 - 23 |
-| 'I' | 12-hour format (two digits, zero-padded), i.e. 01 - 12 |
-| 'k' | 24-hour format (no padding), i.e. 0 - 23 |
-| 'l' | 12-hour format (no padding), i.e. 1 - 12 |
-| 'M' | Minute (two digits, zero-padded), i.e. 00 - 59 |
-| 'S' | Second (two digits, zero-padded; supports leap seconds), i.e. 00 - 60 |
-| 'L' | Millisecond (three digits, zero-padded), i.e. 000 - 999 |
-| 'N' | Nanosecond (nine digits, zero-padded), i.e. 000000000 - 999999999 |
-| 'p' | Locale-specific lowercase AM/PM marker (e.g., "am", "pm"). Prefix with `T` to force uppercase (e.g., "AM"). |
-| 'z' | RFC 822 timezone offset from GMT (e.g., `-0800`). Adjusts for daylight saving. Uses the JVM's default timezone for `long`/`Long`/`Date`. |
-| 'Z' | Timezone abbreviation (e.g., "PST"). Adjusts for daylight saving. Uses the JVM's default timezone; the Formatter's timezone overrides the argument's timezone if specified. |
-| 's' | Seconds since the Unix epoch (1970-01-01 00:00:00 UTC), i.e. Long.MIN\_VALUE/1000 to Long.MAX\_VALUE/1000 |
-| 'Q' | Milliseconds since the Unix epoch, i.e. Long.MIN\_VALUE to Long.MAX\_VALUE |
-
-* Common Date/Time Conversion Characters
-
-| **Symbol** | **Description** |
-| ---------- | --------------- |
-| 'B' | Locale-specific full month name, for example "January", "February" |
-| 'b' | Locale-specific abbreviated month name, for example "Jan", "Feb" |
-| 'h' | Same as `b` |
-| 'A' | Locale-specific full weekday name, for example "Sunday", "Monday" |
-| 'a' | Locale-specific short weekday name, for example "Sun", "Mon" |
-| 'C' | Year divided by 100 (two digits, zero-padded) |
-| 'Y' | Year (minimum 4 digits, zero-padded) |
-| 'y' | Last two digits of year (zero-padded) |
-| 'j' | Day of year (three digits, zero-padded) |
-| 'm' | Month (two digits, zero-padded) |
-| 'd' | Day of month (two digits, zero-padded) |
-| 'e' | Day of month (no padding) |
-
-4. Format Strings
-   ```SQL
-   IoTDB:database1> SELECT format('The measurement status is: %s', status) FROM table2 LIMIT 1;
-   +-------------------------------+
-   |                          _col0|
-   +-------------------------------+
-   |The measurement status is: true|
-   +-------------------------------+
-   ```
-5. Format Percentage Sign
-   ```SQL
-   IoTDB:database1> SELECT format('%s%%', 99.9) FROM table1 LIMIT 1;
-   +-----+
-   |_col0|
-   +-----+
-   |99.9%|
-   +-----+
-   ```
-
-#### 8.2.3 Format Conversion Failure Scenarios
-
-1. Type Mismatch Errors
-
-* Timestamp Type Conflict
-
-  If the format specifier includes time-related tokens (e.g., `%Y-%m-%d`) but the argument:
-
-  * Is a non-`DATE`/`TIMESTAMP` type value.
-  * Requires sub-day precision (e.g., `%H`, `%M`) but the argument is not `TIMESTAMP`.
-
-```SQL
--- Example 1
-IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float)
-
--- Example 2
-IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float)
-```
-
-* Floating-Point Type Conflict
-
-  Using `%f` with non-numeric arguments (e.g., strings or booleans):
-
-```SQL
-IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean)
-```
-
-2. Argument Count Mismatch
-
-   The number of arguments must equal or exceed the number of format specifiers.
-
-   ```SQL
-   IoTDB:database1> SELECT format('%.5f %03d', humidity) FROM table1 WHERE humidity = 35.4;
-   Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d')
-   ```
-3. Invalid Invocation Errors
-
-   Triggered if:
-
-   * Total arguments < 2 (the call must include `pattern` and at least one argument).
-   * `pattern` is not of type `STRING`/`TEXT`.
-
-```SQL
--- Example 1
-IoTDB:database1> select format('%s') from table1 limit 1
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
-
--- Example 2
-IoTDB:database1> select format(123, humidity) from table1 limit 1
-Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
-```
-
-## 9. String Functions and Operators
-
-### 9.1 String operators
-
-#### 9.1.1 || Operator
-
-The `||` operator is used for string concatenation and functions the same as the `concat` function.
-
-#### 9.1.2 LIKE Statement
-
-The `LIKE` statement is used for pattern matching. For detailed usage, refer to Pattern Matching: [LIKE](#_10-1-like).
-
-### 9.2 String Functions
-
-| Function Name | Description | Input | Output | Usage |
-|:---|:---|:---|:---|:---|
-| `length` | Returns the number of characters in a string (not byte length). | `string` (the string whose length is to be calculated) | INT32 | length(string) |
-| `upper` | Converts all letters in a string to uppercase. | string | String | upper(string) |
-| `lower` | Converts all letters in a string to lowercase. | string | String | lower(string) |
-| `trim` | Removes specified leading and/or trailing characters from a string. **Parameters:**<br>`specification` (optional): which side to trim: `BOTH` (default), `LEADING` (beginning), or `TRAILING` (end).<br>`trimcharacter` (optional): the character to be removed (default is whitespace).<br>`string`: the target string. | string | String | trim([ [ specification ] [ trimcharacter ] FROM ] string)<br>Example: `trim('!' FROM '!foo!')` returns `'foo'` |
-| `strpos` | Returns the position of the first occurrence of `subStr` in `sourceStr`. **Notes:**<br>Position starts at `1`.<br>Returns `0` if `subStr` is not found.<br>Positioning is based on characters, not byte arrays. | `sourceStr` (string to be searched), `subStr` (substring to find) | INT32 | strpos(sourceStr, subStr) |
-| `starts_with` | Checks if `sourceStr` starts with the specified `prefix`. | `sourceStr`, `prefix` | Boolean | starts_with(sourceStr, prefix) |
-| `ends_with` | Checks if `sourceStr` ends with the specified `suffix`. | `sourceStr`, `suffix` | Boolean | ends_with(sourceStr, suffix) |
-| `concat` | Concatenates `string1, string2, ..., stringN`. Equivalent to the `\|\|` operator. | `string`, `text` | String | concat(str1, str2, ...) or str1 \|\| str2 ... |
-| `strcmp` | Compares two strings lexicographically. **Returns:**<br>`-1` if `str1 < str2`<br>`0` if `str1 = str2`<br>`1` if `str1 > str2`<br>`NULL` if either `str1` or `str2` is `NULL` | `string1`, `string2` | INT32 | strcmp(str1, str2) |
-| `replace` | Removes all occurrences of `search` in `string`. | `string`, `search` | String | replace(string, search) |
-| `replace` | Replaces all occurrences of `search` in `string` with `replace`. | `string`, `search`, `replace` | String | replace(string, search, replace) |
-| `substring` | Extracts a substring from `start_index` to the end of the string. **Notes:**<br>`start_index` starts at `1`.<br>Returns `NULL` if the input is `NULL`.<br>Throws an error if `start_index` is greater than the string length. | `string`, `start_index` | String | substring(string from start_index) or substring(string, start_index) |
-| `substring` | Extracts a substring of `length` characters starting from `start_index`. **Notes:**<br>`start_index` starts at `1`.<br>Returns `NULL` if the input is `NULL`.<br>Throws an error if `start_index` is greater than the string length.<br>Throws an error if `length` is negative.<br>If `start_index + length` exceeds `int.MAX`, an overflow error may occur. | `string`, `start_index`, `length` | String | substring(string from start_index for length) or substring(string, start_index, length) |
-
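-The functions above share one pitfall worth calling out: positions are 1-based. As a quick, hedged illustration against `table1` from the [Example Data page](../Reference/Sample-Data.md) (it assumes the STRING `region` tag from the sample data; the rows returned depend on what you imported):
-
-```SQL
--- strpos returns 0 when the substring is absent; substring counts from 1
-SELECT region,
-       upper(region) AS region_upper,
-       length(region) AS name_len,
-       strpos(region, 'hai') AS pos,
-       substring(region, 1, 2) AS prefix
-FROM table1
-LIMIT 5;
-```
-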
-## 10. Pattern Matching Functions
-
-### 10.1 LIKE
-
-#### 10.1.1 Usage
-
-The `LIKE` operator is used to compare a value with a pattern. It is commonly used in the `WHERE` clause to match specific patterns within strings.
-
-#### 10.1.2 Syntax
-
-```SQL
-... column [NOT] LIKE 'pattern' ESCAPE 'character';
-```
-
-#### 10.1.3 Match rules
-
-- Matching is case-sensitive.
-- The pattern supports two wildcard characters:
-  - `_` matches any single character
-  - `%` matches zero or more characters
-
-#### 10.1.4 Notes
-
-- `LIKE` pattern matching applies to the entire string by default. To match a sequence anywhere within a string, the pattern must start and end with a percent sign.
-- To match the escape character itself, double it; for example, use `\\` to match a literal `\`.
-
-#### 10.1.5 Examples
-
-#### **Example 1: Match Strings Starting with a Specific Character**
-
-- **Description:** Find all names that start with the letter `E` (e.g., `Europe`).
-
-```SQL
-SELECT * FROM table1 WHERE continent LIKE 'E%';
-```
-
-#### **Example 2: Exclude a Specific Pattern**
-
-- **Description:** Find all names that do **not** start with the letter `E`.
-
-```SQL
-SELECT * FROM table1 WHERE continent NOT LIKE 'E%';
-```
-
-#### **Example 3: Match Strings of a Specific Length**
-
-- **Description:** Find all names that start with `A`, end with `a`, and have exactly two characters in between (e.g., `Asia`).
-
-```SQL
-SELECT * FROM table1 WHERE continent LIKE 'A__a';
-```
-
-#### **Example 4: Escape Special Characters**
-
-- **Description:** Find all names that start with `South_` (e.g., `South_America`). The underscore (`_`) is a wildcard character, so it needs to be escaped using `\`.
-
-```SQL
-SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\';
-```
-
-#### **Example 5: Match the Escape Character Itself**
-
-- **Description:** Find all names that start with `South\`. Since `\` is the escape character, it must be escaped using `\\`.
-
-```SQL
-SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\';
-```
-
-### 10.2 regexp_like
-
-#### 10.2.1 Usage
-
-Evaluates whether the regular expression pattern is present within the given string.
-
-#### 10.2.2 Syntax
-
-```SQL
-regexp_like(string, pattern);
-```
-
-#### 10.2.3 Notes
-
-- The pattern for `regexp_like` only needs to be contained within the string, and does not need to match the entire string.
-- To match the entire string, use the `^` and `$` anchors.
-- `^` signifies the "start of the string," and `$` signifies the "end of the string."
-- Regular expressions follow the Java regular-expression syntax, with the following exceptions to be aware of:
-  - Multiline mode
-    1. Enabled by: `(?m)`.
-    2. Recognizes only `\n` as the line terminator.
-    3. Does not support the `(?d)` flag, and its use is prohibited.
-  - Case-insensitive matching
-    1. Enabled by: `(?i)`.
-    2. Based on Unicode rules, it does not support context-dependent and localized matching.
-    3. Does not support the `(?u)` flag, and its use is prohibited.
-  - Character classes
-    1. Within character classes (e.g., `[A-Z123]`), `\Q` and `\E` are not supported and are treated as literals.
-  - Unicode character classes (`\p{prop}`)
-    1. Underscores in names: all underscores in names must be removed (e.g., `OldItalic` instead of `Old_Italic`).
-    2. Scripts: specify directly, without the `Is`, `script=`, or `sc=` prefixes (e.g., `\p{Hiragana}`).
-    3. Blocks: must use the `In` prefix; the `block=` or `blk=` prefixes are not supported (e.g., `\p{InMongolian}`).
-    4. Categories: specify directly, without the `Is`, `general_category=`, or `gc=` prefixes (e.g., `\p{L}`).
-    5. Binary properties: specify directly, without `Is` (e.g., `\p{NoncharacterCodePoint}`).
-
-#### 10.2.4 Examples
-
-#### Example 1: **Matching strings containing a specific pattern**
-
-```SQL
-SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true
-```
-
-- **Explanation**: Determines whether the string '1a 2b 14m' contains a substring that matches the pattern `\d+b`.
-  - `\d+` means "one or more digits".
- - `b` represents the letter b. - - In `'1a 2b 14m'`, the substring `'2b'` matches this pattern, so it returns `true`. - - -#### **Example 2: Matching the entire string** - -```SQL -SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false -``` - -- **Explanation**: Checks if the string `'1a 2b 14m'` matches the pattern `^\\d+b$` exactly. - - `\d+` means "one or more digits". - - `b` represents the letter b. - - `'1a 2b 14m'` does not match this pattern because it does not start with digits and does not end with `b`, so it returns `false`. - -## 11. Timeseries Windowing Functions - -The sample data is as follows: - -```SQL -IoTDB> SELECT * FROM bid; -+-----------------------------+--------+-----+ -| time|stock_id|price| -+-----------------------------+--------+-----+ -|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -|2021-01-01T09:15:00.000+08:00| TESL|195.0| -+-----------------------------+--------+-----+ - --- Create table statement -CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD); --- Insert data -INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0); -``` - -### 11.1 HOP - -#### 11.1.1 Function Description - -The HOP function segments data into overlapping time windows for analysis, assigning each row to all windows that overlap with its timestamp. If windows overlap (when SLIDE < SIZE), data will be duplicated across multiple windows. - -#### 11.1.2 Function Definition - -```SQL -HOP(data, timecol, size, slide[, origin]) -``` - -#### 11.1.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | --------------------------------- | ------------------------- | -| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | -| TIMECOL | Scalar | String (default: 'time') | Time column | -| SIZE | Scalar | Long integer | Window size | -| SLIDE | Scalar | Long integer | Sliding step | -| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | - - -#### 11.1.4 Returned Results - -The HOP function returns: - -* `window_start`: Window start time (inclusive) -* `window_end`: Window end time (exclusive) -* Pass-through columns: All input columns from DATA - -#### 11.1.5 Usage Example - -```SQL -IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| 
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0|
-|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0|
-+-----------------------------+-----------------------------+-----------------------------+--------+-----+
-
--- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY
-IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id;
-+-----------------------------+-----------------------------+--------+------------------+
-| window_start| window_end|stock_id| avg|
-+-----------------------------+-----------------------------+--------+------------------+
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 201.0|
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0|
-|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00| TESL| 195.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667|
-|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| AAPL|101.66666666666667|
-+-----------------------------+-----------------------------+--------+------------------+
-```
-
-### 11.2 SESSION
-
-#### 11.2.1 Function Description
-
-The SESSION function groups data into sessions based on time intervals. It compares the time gap between consecutive rows: a row whose gap from the previous row does not exceed the threshold (GAP) joins the current window, while a larger gap starts a new window.
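-
-With the sample `bid` data and `GAP => 2m`, the grouping can be traced by hand: AAPL's rows at 09:05, 09:07, and 09:09 are each exactly 2 minutes apart, which does not exceed the threshold, so they form one session, while TESL's 8-minute jump from 09:07 to 09:15 starts a new one. A minimal sketch that makes the sessions visible (built only from constructs shown in this section; the alias name is illustrative, and the full pass-through query appears in the usage example below):
-
-```SQL
-SELECT window_start, window_end, stock_id, count(*) AS rows_in_session
-FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time, TIMECOL => 'time', GAP => 2m)
-GROUP BY window_start, window_end, stock_id;
-```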
- -#### 11.2.2 Function Definition - -```SQL -SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap) -``` -#### 11.2.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | ---------------------------- | -------------------------------------- | -| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | -| TIMECOL | Scalar | String (default: 'time') | Time column name | -| GAP | Scalar | Long integer | Session gap threshold | - -#### 11.2.4 Returned Results - -The SESSION function returns: - -* `window_start`: Time of the first row in the session -* `window_end`: Time of the last row in the session -* Pass-through columns: All input columns from DATA - -#### 11.2.5 Usage Example - -```SQL -IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- Equivalent to tree mode's GROUP BY SESSION when combined with GROUP BY -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.3 VARIATION - -#### 11.3.1 Function Description - -The VARIATION function groups data based on value differences. The first row becomes the baseline for the first window. Subsequent rows are compared to the baseline—if the difference is within the threshold (DELTA), they join the current window; otherwise, a new window starts with that row as the new baseline. 
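-
-A hand trace of the baseline rule over the AAPL rows from the sample data, with `DELTA => 2.0` (a sketch; the full query appears in the usage example below):
-
-```sql
--- price 100.0 -> first row: window 0, baseline = 100.0
--- price 103.0 -> |103.0 - 100.0| = 3.0 > 2.0: new window 1, baseline = 103.0
--- price 102.0 -> |102.0 - 103.0| = 1.0 <= 2.0: stays in window 1
-```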
- -#### 11.3.2 Function Definition - -```sql -VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta) -``` - -#### 11.3.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | ---------------------------- | -------------------------------------- | -| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | -| COL | Scalar | String | Column for difference calculation | -| DELTA | Scalar | Float | Difference threshold | - -#### 11.3.4 Returned Results - -The VARIATION function returns: - -* `window_index`: Window identifier -* Pass-through columns: All input columns from DATA - -#### 11.3.5 Usage Example - -```sql -IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0); -+------------+-----------------------------+--------+-----+ -|window_index| time|stock_id|price| -+------------+-----------------------------+--------+-----+ -| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| -| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| -| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| -| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+------------+-----------------------------+--------+-----+ - --- Equivalent to tree mode's GROUP BY VARIATION when combined with GROUP BY -IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id; -+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5| -+-----------------------------+-----------------------------+--------+-----+ -``` - -### 11.4 CAPACITY - -#### 11.4.1 Function Description - -The CAPACITY function groups data into fixed-size windows, where each window contains up to SIZE rows. 
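-
-Windows are filled in row order within each partition, so the last window of a partition may hold fewer than SIZE rows; with `SIZE => 2`, TESL's three rows split into one window of two rows and one window of a single row. A minimal sketch that counts rows per window (the alias name is illustrative; the pass-through form appears in the usage example below):
-
-```sql
-SELECT window_index, stock_id, count(*) AS rows_in_window
-FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2)
-GROUP BY window_index, stock_id;
-```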
- -#### 11.4.2 Function Definition - -```sql -CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size) -``` - -#### 11.4.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | ---------------------------- | -------------------------------------- | -| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | -| SIZE | Scalar | Long integer | Window size (row count) | - -#### 11.4.4 Returned Results - -The CAPACITY function returns: - -* `window_index`: Window identifier -* Pass-through columns: All input columns from DATA - -#### 11.4.5 Usage Example - -```sql -IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2); -+------------+-----------------------------+--------+-----+ -|window_index| time|stock_id|price| -+------------+-----------------------------+--------+-----+ -| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| -| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| -| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| -| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+------------+-----------------------------+--------+-----+ - --- Equivalent to tree mode's GROUP BY COUNT when combined with GROUP BY -IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id; -+-----------------------------+-----------------------------+--------+-----+ -| start_time| end_time|stock_id| avg| -+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|101.5| -|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+--------+-----+ -``` - -### 11.5 TUMBLE - -#### 11.5.1 Function Description - -The TUMBLE function assigns each row to a non-overlapping, fixed-size time window based on a timestamp attribute. 
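-
-A tumbling window can be viewed as the special case of a hopping window whose SLIDE equals its SIZE, so that consecutive windows no longer overlap; under that reading, the two queries below should assign rows to the same windows (a sketch for comparison with the HOP section above):
-
-```sql
-SELECT * FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m);
--- should match:
-SELECT * FROM HOP(DATA => bid, TIMECOL => 'time', SLIDE => 10m, SIZE => 10m);
-```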
- -#### 11.5.2 Function Definition - -```sql -TUMBLE(data, timecol, size[, origin]) -``` -#### 11.5.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | --------------------------------- | ------------------------- | -| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | -| TIMECOL | Scalar | String (default: 'time') | Time column | -| SIZE | Scalar | Long integer (positive) | Window size | -| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | - -#### 11.5.4 Returned Results - -The TUMBLE function returns: - -* `window_start`: Window start time (inclusive) -* `window_end`: Window end time (exclusive) -* Pass-through columns: All input columns from DATA - -#### 11.5.5 Usage Example - -```SQL -IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.6 CUMULATE - -#### 11.6.1 Function Description - -The CUMULATE function creates expanding windows from an initial window, maintaining the same start time while incrementally extending the end time by STEP until reaching SIZE. Each window contains all elements within its range. For example, with a 1-hour STEP and 24-hour SIZE, daily windows would be: `[00:00, 01:00)`, `[00:00, 02:00)`, ..., `[00:00, 24:00)`. 
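-
-Because every window in a cycle shares the same start time, a row belongs to each cumulative window whose end lies beyond its timestamp. A hand trace with `STEP => 2m` and `SIZE => 10m`, matching the usage example below:
-
-```sql
--- Windows starting at 09:00: [09:00,09:02), [09:00,09:04), [09:00,09:06), [09:00,09:08), [09:00,09:10)
--- The AAPL row at 09:05 falls into the last three of these windows
-```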
- -#### 11.6.2 Function Definition - -```sql -CUMULATE(data, timecol, size, step[, origin]) -``` - -#### 11.6.3 Parameter Description - -| Parameter | Type | Attributes | Description | -| ----------- | -------- | --------------------------------- | --------------------------------------------------- | -| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | -| TIMECOL | Scalar | String (default: 'time') | Time column | -| SIZE | Scalar | Long integer (positive) | Window size (must be an integer multiple of STEP) | -| STEP | Scalar | Long integer (positive) | Expansion step | -| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | - -> Note: An error `Cumulative table function requires size must be an integral multiple of step` occurs if SIZE is not divisible by STEP. - -#### 11.6.4 Returned Results - -The CUMULATE function returns: - -* `window_start`: Window start time (inclusive) -* `window_end`: Window end time (exclusive) -* Pass-through columns: All input columns from DATA - -#### 11.6.5 Usage Example - -```sql -IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| TESL| 201.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| 
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00| TESL| 195.0|
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00| TESL| 195.0|
-|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00| AAPL| 100.0|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| AAPL| 101.5|
-|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667|
-+-----------------------------+-----------------------------+--------+------------------+
-```
diff --git a/src/UserGuide/latest-Table/SQL-Manual/Basis-Function_apache.md b/src/UserGuide/latest-Table/SQL-Manual/Basis-Function_apache.md
new file mode 100644
index 000000000..c6e11f5c9
--- /dev/null
+++ b/src/UserGuide/latest-Table/SQL-Manual/Basis-Function_apache.md
@@ -0,0 +1,2036 @@
+
+
+# Basic Functions
+
+## 1. Comparison Functions and Operators
+
+### 1.1 Basic Comparison Operators
+
+Comparison operators are used to compare two values and return the comparison result (`true` or `false`).
+
+| Operators | Description |
+| :-------- | :----------------------- |
+| < | Less than |
+| > | Greater than |
+| <= | Less than or equal to |
+| >= | Greater than or equal to |
+| = | Equal to |
+| <> | Not equal to |
+| != | Not equal to |
+
+#### 1.1.1 Comparison Rules
+
+1. All types can be compared with themselves.
+2. Numeric types (INT32, INT64, FLOAT, DOUBLE, TIMESTAMP) can be compared with each other.
+3. Character types (STRING, TEXT) can also be compared with each other.
+4. Comparisons between types other than those mentioned above will result in an error.
+
+### 1.2 BETWEEN Operator
+
+1. The `BETWEEN` operator is used to determine whether a value falls within a specified range.
+2. The `NOT BETWEEN` operator is used to determine whether a value does not fall within a specified range.
+3. The `BETWEEN` and `NOT BETWEEN` operators can be used to evaluate any sortable type.
+4. The value, minimum, and maximum parameters for `BETWEEN` and `NOT BETWEEN` must be of the same type, otherwise an error will occur.
+
+Syntax:
+
+```SQL
+value BETWEEN min AND max
+value NOT BETWEEN min AND max
+```
+
+Example 1: BETWEEN
+
+```SQL
+-- Query records where temperature is between 85.0 and 90.0
+SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0;
+```
+
+Example 2: NOT BETWEEN
+
+```SQL
+-- Query records where humidity is not between 35.0 and 40.0
+SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0;
+```
+
+### 1.3 IS NULL Operator
+
+The `IS NULL` and `IS NOT NULL` operators apply to all data types.
+
+Example 1: Query records where temperature is NULL
+
+```SQL
+SELECT * FROM table1 WHERE temperature IS NULL;
+```
+
+Example 2: Query records where humidity is not NULL
+
+```SQL
+SELECT * FROM table1 WHERE humidity IS NOT NULL;
+```
+
+### 1.4 IN Operator
+
+1. The `IN` operator can be used in the `WHERE` clause to compare a column with a list of values.
+2. These values can be provided by a static array or scalar expressions.
+
+Syntax:
+
+```SQL
+... WHERE column [NOT] IN ('value1','value2', expression1)
+```
+
+Example 1: Static array: Query records where region is 'Beijing' or 'Shanghai'
+
+```SQL
+SELECT * FROM table1 WHERE region IN ('Beijing', 'Shanghai');
+-- Equivalent to
+SELECT * FROM table1 WHERE region = 'Beijing' OR region = 'Shanghai';
+```
+
+Example 2: Scalar expression: Query records where temperature is among specific values
+
+```SQL
+SELECT * FROM table1 WHERE temperature IN (85.0, 90.0);
+```
+
+Example 3: Query records where region is not 'Beijing' or 'Shanghai'
+
+```SQL
+SELECT * FROM table1 WHERE region NOT IN ('Beijing', 'Shanghai');
+```
+
+### 1.5 GREATEST and LEAST
+
+The `GREATEST` function returns the maximum value from a list of arguments, while the `LEAST` function returns the minimum value. The return type matches the input data type.
+
+Key Behaviors:
+1. NULL Handling: Returns NULL if all arguments are NULL.
+2. Parameter Requirements: Requires at least 2 arguments.
+3. Type Constraints: All arguments must have the same data type.
+4. Supported Types: `BOOLEAN`, `FLOAT`, `DOUBLE`, `INT32`, `INT64`, `STRING`, `TEXT`, `TIMESTAMP`, `DATE`
+
+**Syntax:**
+
+```sql
+greatest(value1, value2, ..., valueN)
+least(value1, value2, ..., valueN)
+```
+
+**Examples:**
+
+```sql
+-- Retrieve the maximum value between `temperature` and `humidity` in `table2`
+SELECT GREATEST(temperature,humidity) FROM table2;
+
+-- Retrieve the minimum value between `temperature` and `humidity` in `table2`
+SELECT LEAST(temperature,humidity) FROM table2;
+```
+
+## 2. Aggregate functions
+
+### 2.1 Overview
+
+1. Aggregate functions are many-to-one functions. They perform aggregate calculations on a set of values to obtain a single aggregate result.
+
+2. All aggregate functions ignore null values and, with the exception of `COUNT()`, return null when there are no input rows or all values are null. For example, `SUM()` returns null instead of zero, and `AVG()` does not include null values in the count.
+
+### 2.2 Supported Aggregate Functions
+
+| Function Name | Description | Allowed Input Types | Output Type |
+|:---|:---|:---|:---|
+| COUNT | Counts the number of data points. | All types | INT64 |
+| COUNT_IF | COUNT_IF(exp) counts the number of rows that satisfy a specified boolean expression. | `exp` must be a boolean expression (e.g., `count_if(temperature>20)`) | INT64 |
+| APPROX_COUNT_DISTINCT | The APPROX_COUNT_DISTINCT(x[, maxStandardError]) function provides an approximation of COUNT(DISTINCT x), returning the estimated number of distinct input values. | `x`: The target column to be calculated, supports all data types.<br>`maxStandardError` (optional): Specifies the maximum standard error allowed for the function's result. Valid range is [0.0040625, 0.26]. Defaults to 0.023 if not specified. | INT64 |
+| APPROX_MOST_FREQUENT | The APPROX_MOST_FREQUENT(x, k, capacity) function is used to approximately calculate the top k most frequent elements in a dataset. It returns a JSON-formatted string where the keys are the element values and the values are their corresponding approximate frequencies. (Available since V2.0.5.1) | `x`: The column to be calculated, supporting all existing data types in IoTDB;<br>`k`: The number of top-k most frequent values to return;<br>
`capacity`: The number of buckets used for computation, which relates to memory usage—a larger value reduces error but consumes more memory, while a smaller value increases error but uses less memory. | STRING | +| SUM | Calculates the sum. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| AVG | Calculates the average. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| MAX | Finds the maximum value. | All types | Same as input type | +| MIN | Finds the minimum value. | All types | Same as input type | +| FIRST | Finds the value with the smallest timestamp that is not NULL. | All types | Same as input type | +| LAST | Finds the value with the largest timestamp that is not NULL. | All types | Same as input type | +| STDDEV | Alias for STDDEV_SAMP, calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_POP | Calculates the population standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_SAMP | Calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VARIANCE | Alias for VAR_SAMP, calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_POP | Calculates the population variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_SAMP | Calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| EXTREME | Finds the value with the largest absolute value. If the largest absolute values of positive and negative values are equal, returns the positive value. | INT32 INT64 FLOAT DOUBLE | Same as input type | +| MODE | Finds the mode. Note: 1. There is a risk of memory exception when the number of distinct values in the input sequence is too large; 2. If all elements have the same frequency, i.e., there is no mode, a random element is returned; 3. If there are multiple modes, a random mode is returned; 4. NULL values are also counted in frequency, so even if not all values in the input sequence are NULL, the final result may still be NULL. | All types | Same as input type | +| MAX_BY | MAX_BY(x, y) finds the value of x corresponding to the maximum y in the binary input x and y. MAX_BY(time, x) returns the timestamp when x is at its maximum. | x and y can be of any type | Same as the data type of the first input x | +| MIN_BY | MIN_BY(x, y) finds the value of x corresponding to the minimum y in the binary input x and y. MIN_BY(time, x) returns the timestamp when x is at its minimum. | x and y can be of any type | Same as the data type of the first input x | +| FIRST_BY | FIRST_BY(x, y) finds the value of x in the same row when y is the first non-null value. | x and y can be of any type | Same as the data type of the first input x | +| LAST_BY | LAST_BY(x, y) finds the value of x in the same row when y is the last non-null value. | x and y can be of any type | Same as the data type of the first input x | + + +### 2.3 Examples + +#### 2.3.1 Example Data + +The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results. + +#### 2.3.2 Count + +Counts the number of rows in the entire table and the number of non-null values in the `temperature` column. + +```SQL +IoTDB> select count(*), count(temperature) from table1; +``` + +The execution result is as follows: + +> Note: Only the COUNT function can be used with *, otherwise an error will occur. 
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+| 18| 12|
++-----+-----+
+Total line number = 1
+It costs 0.834s
+```
+
+#### 2.3.3 Count_if
+
+Counts the non-null `arrival_time` records in `table2`.
+
+```sql
+select count_if(arrival_time is not null) from table2;
+```
+
+The execution result is as follows:
+
+```sql
++-----+
+|_col0|
++-----+
+| 4|
++-----+
+Total line number = 1
+It costs 0.047s
+```
+
+#### 2.3.4 Approx_count_distinct
+
+Retrieve the number of distinct values in the `temperature` column from `table1`.
+
+```sql
+IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1;
+IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1;
+```
+
+The execution result is as follows:
+
+```sql
++------+------+
+|origin|approx|
++------+------+
+| 3| 3|
++------+------+
+Total line number = 1
+It costs 0.022s
+```
+
+#### 2.3.5 Approx_most_frequent
+
+Query the top 2 most frequent values in the `temperature` column of `table1`.
+
+```sql
+IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1;
+```
+
+The execution result is as follows:
+
+```sql
++-------------------+
+| topk|
++-------------------+
+|{"85.0":6,"90.0":5}|
++-------------------+
+Total line number = 1
+It costs 0.064s
+```
+
+#### 2.3.6 First
+
+Finds, in the `temperature` and `humidity` columns, the non-NULL value with the smallest timestamp.
+
+```SQL
+IoTDB> select first(temperature), first(humidity) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+| 90.0| 35.1|
++-----+-----+
+Total line number = 1
+It costs 0.170s
+```
+
+#### 2.3.7 Last
+
+Finds, in the `temperature` and `humidity` columns, the non-NULL value with the largest timestamp.
+
+```SQL
+IoTDB> select last(temperature), last(humidity) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+| 90.0| 34.8|
++-----+-----+
+Total line number = 1
+It costs 0.211s
+```
+
+#### 2.3.8 First_by
+
+Finds the `time` value and the `humidity` value of the row in which the `temperature` column takes its first (smallest-timestamp) non-NULL value.
+
+```SQL
+IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----------------------------+-----+
+| _col0|_col1|
++-----------------------------+-----+
+|2024-11-26T13:37:00.000+08:00| 35.1|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.269s
+```
+
+#### 2.3.9 Last_by
+
+Queries the `time` value and the `humidity` value of the row in which the `temperature` column takes its last (largest-timestamp) non-NULL value.
+
+```SQL
+IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1;
+```
+
+The execution result is as follows:
+
+```SQL
++-----------------------------+-----+
+| _col0|_col1|
++-----------------------------+-----+
+|2024-11-30T14:30:00.000+08:00| 34.8|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.070s
+```
+
+#### 2.3.10 Max_by
+
+Queries the `time` value and the `humidity` value of the row where the `temperature` column reaches its maximum.
+ +```SQL +IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-30T09:30:00.000+08:00| 35.2| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.172s +``` + +#### 2.3.11 Min_by + +Queries the `time` value of the row where the `temperature` column is at its minimum, and the `humidity` value of the row where the `temperature` column is at its minimum. + +```SQL +select min_by(time, temperature), min_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-29T10:00:00.000+08:00| null| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.244s +``` + + +## 3. Logical operators + +### 3.1 Overview + +Logical operators are used to combine conditions or negate conditions, returning a Boolean result (`true` or `false`). + +Below are the commonly used logical operators along with their descriptions: + +| Operator | Description | Example | +| :------- | :-------------------------------- | :------ | +| AND | True only if both values are true | a AND b | +| OR | True if either value is true | a OR b | +| NOT | True when the value is false | NOT a | + +### 3.2 Impact of NULL on Logical Operators + +#### 3.2.1 AND Operator + +- If one or both sides of the expression are `NULL`, the result may be `NULL`. +- If one side of the `AND` operator is `FALSE`, the expression result is `FALSE`. + +Examples: + +```SQL +NULL AND true -- null +NULL AND false -- false +NULL AND NULL -- null +``` + +#### 3.2.2 OR Operator + +- If one or both sides of the expression are `NULL`, the result may be `NULL`. +- If one side of the `OR` operator is `TRUE`, the expression result is `TRUE`. + +Examples: + +```SQL +NULL OR NULL -- null +NULL OR false -- null +NULL OR true -- true +``` + +##### 3.2.2.1 Truth Table + +The following truth table illustrates how `NULL` is handled in `AND` and `OR` operators: + +| a | b | a AND b | a OR b | +| :---- | :---- | :------ | :----- | +| TRUE | TRUE | TRUE | TRUE | +| TRUE | FALSE | FALSE | TRUE | +| TRUE | NULL | NULL | TRUE | +| FALSE | TRUE | FALSE | TRUE | +| FALSE | FALSE | FALSE | FALSE | +| FALSE | NULL | FALSE | NULL | +| NULL | TRUE | NULL | TRUE | +| NULL | FALSE | FALSE | NULL | +| NULL | NULL | NULL | NULL | + +#### 3.2.3 NOT Operator + +The logical negation of `NULL` remains `NULL`. + +Example: + +```SQL +NOT NULL -- null +``` + +##### 3.2.3.1 Truth Table + +The following truth table illustrates how `NULL` is handled in the `NOT` operator: + +| a | NOT a | +| :---- | :---- | +| TRUE | FALSE | +| FALSE | TRUE | +| NULL | NULL | + +## 4. Date and Time Functions and Operators + +### 4.1 now() -> Timestamp + +Returns the current timestamp. + +### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp + +The `date_bin` function is used for handling time data by rounding a timestamp (`Timestamp`) to the boundary of a specified time interval (`interval`). + +#### **Syntax:** + +```SQL +-- Calculates the time interval starting from timestamp 0 and returns the nearest interval boundary to the specified timestamp. +date_bin(interval,source) + +-- Calculates the time interval starting from the origin timestamp and returns the nearest interval boundary to the specified timestamp. 
+date_bin(interval,source,origin)
+
+-- Supported time units for interval:
+-- Years (y), months (mo), weeks (week), days (d), hours (h), minutes (M), seconds (s), milliseconds (ms), microseconds (µs), nanoseconds (ns).
+-- source: Must be of timestamp type.
+```
+
+#### **Parameters**:
+
+| Parameter | Description |
+| :-------- | :----------------------------------------------------------- |
+| interval | 1. Time interval 2. Supported units: `y`, `mo`, `week`, `d`, `h`, `M`, `s`, `ms`, `µs`, `ns`. |
+| source | 1. The timestamp column or expression to be calculated. 2. Must be of timestamp type. |
+| origin | The reference timestamp. |
+
+#### 4.2.1 Syntax Rules
+
+For fixed-length intervals, the function returns the largest window boundary that does not exceed `source`, i.e., `origin + floor((source - origin) / interval) * interval`.
+
+1. If `origin` is not specified, the default reference timestamp is `1970-01-01T00:00:00Z` (Beijing time: `1970-01-01 08:00:00`).
+2. `interval` must be a non-negative number with a time unit. If `interval` is `0ms`, the function returns `source` directly without calculation.
+3. If `origin` or `source` is negative, it represents a time point before the epoch. `date_bin` will calculate and return the relevant time period.
+4. If `source` is `null`, the function returns `null`.
+5. Mixing months and non-month time units (e.g., `1 MONTH 1 DAY`) is not supported due to ambiguity.
+
+> For example, if the starting point is **April 30, 2000**, calculating `1 DAY` first and then `1 MONTH` results in **June 1, 2000**, whereas calculating `1 MONTH` first and then `1 DAY` results in **May 31, 2000**. The resulting dates are different.
+
+#### 4.2.2 Examples
+
+##### Example Data
+
+The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results.
+ +#### Example 1: Without Specifying the Origin Timestamp + +```SQL +SELECT + time, + date_bin(1h,time) as time_bin +FROM + table1; +``` + +Result**:** + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.683s +``` + +#### Example 2: Specifying the Origin Timestamp + +```SQL +SELECT + time, + date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.056s +``` + +#### Example 3: Negative Origin + +```SQL +SELECT + time, + date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| 
+|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.203s +``` + +#### Example 4: Interval of 0 + +```SQL +SELECT + time, + date_bin(0ms, time) as time_bin +FROM + table1; +``` + +Result**:** + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.107s +``` + +#### Example 5: Source is NULL + +```SQL +SELECT + arrival_time, + date_bin(1h,arrival_time) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| arrival_time| time_bin| ++-----------------------------+-----------------------------+ +| null| null| +|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +| null| null| +|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| +| null| null| 
+|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00|
+|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00|
+|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00|
+|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00|
++-----------------------------+-----------------------------+
+Total line number = 18
+It costs 0.319s
+```
+
+### 4.3 Extract Function
+
+This function is used to extract the value of a specific part of a date. (Supported from version V2.0.6)
+
+#### 4.3.1 Syntax Definition
+
+```SQL
+EXTRACT (identifier FROM expression)
+```
+
+* Parameter Description
+  * **expression**: `TIMESTAMP` type or a time constant
+  * **identifier**: The valid ranges and corresponding return value types are shown in the table below.
+
+  | Valid Range | Return Type | Return Range |
+  |:---|:---|:---|
+  | `YEAR` | `INT64` | `/` |
+  | `QUARTER` | `INT64` | `1-4` |
+  | `MONTH` | `INT64` | `1-12` |
+  | `WEEK` | `INT64` | `1-53` |
+  | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` |
+  | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` |
+  | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` |
+  | `HOUR` | `INT64` | `0-23` |
+  | `MINUTE` | `INT64` | `0-59` |
+  | `SECOND` | `INT64` | `0-59` |
+  | `MS` | `INT64` | `0-999` |
+  | `US` | `INT64` | `0-999` |
+  | `NS` | `INT64` | `0-999` |
+
+#### 4.3.2 Usage Example
+
+Using table1 from the [Sample Data](../Reference/Sample-Data.md) as the source data, query the average temperature for hours 0 through 12 of each day within a given period.
+
+```SQL
+IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time)
++----------+-----+
+| fmtdate|avgtp|
++----------+-----+
+|2024-11-28| 86.0|
+|2024-11-29| 85.0|
+|2024-11-30| 90.0|
++----------+-----+
+Total line number = 3
+It costs 0.041s
+```
+
+Introduction to the `Format` function: [Format Function](../SQL-Manual/Basis-Function_apache.md#_8-2-format-function)
+
+Introduction to the `Date_bin` function: [Date_bin Function](../SQL-Manual/Basis-Function_apache.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp)
+
+
+## 5. Mathematical Functions and Operators
+
+### 5.1 Mathematical Operators
+
+| **Operator** | **Description** |
+| :----------- | :---------------------------------------------- |
+| + | Addition |
+| - | Subtraction |
+| * | Multiplication |
+| / | Division (integer division performs truncation) |
+| % | Modulus (remainder) |
+| - | Negation (unary) |
+
+### 5.2 Mathematical functions
+
+| Function Name | Description | Input | Output | Usage |
+|:---|:---|:---|:---|:---|
+| sin | Sine | double, float, INT64, INT32 | double | sin(x) |
+| cos | Cosine | double, float, INT64, INT32 | double | cos(x) |
+| tan | Tangent | double, float, INT64, INT32 | double | tan(x) |
+| asin | Inverse Sine | double, float, INT64, INT32 | double | asin(x) |
+| acos | Inverse Cosine | double, float, INT64, INT32 | double | acos(x) |
+| atan | Inverse Tangent | double, float, INT64, INT32 | double | atan(x) |
+| sinh | Hyperbolic Sine | double, float, INT64, INT32 | double | sinh(x) |
+| cosh | Hyperbolic Cosine | double, float, INT64, INT32 | double | cosh(x) |
+| tanh | Hyperbolic Tangent | double, float, INT64, INT32 | double | tanh(x) |
+| degrees | Converts angle `x` in radians to degrees | double, float, INT64, INT32 | double | degrees(x) |
+| radians | Converts angle `x` in degrees to radians | double, float, INT64, INT32 | double | radians(x) |
+| abs | Absolute Value | double, float, INT64, INT32 | Same as input type | abs(x) |
+| sign | Returns the sign of `x`: - If `x = 0`, returns `0` - If `x > 0`, returns `1` - If `x < 0`, returns `-1` For `double/float` inputs: - If `x = NaN`, returns `NaN` - If `x = +Infinity`, returns `1.0` - If `x = -Infinity`, returns `-1.0` | double, float, INT64, INT32 | Same as input type | sign(x) |
+| ceil | Rounds `x` up to the nearest integer | double, float, INT64, INT32 | double | ceil(x) |
+| floor | Rounds `x` down to the nearest integer | double, float, INT64, INT32 | double | floor(x) |
+| exp | Returns `e^x` (Euler's number raised to the power of `x`) | double, float, INT64, INT32 | double | exp(x) |
+| ln | Returns the natural logarithm of `x` | double, float, INT64, INT32 | double | ln(x) |
+| log10 | Returns the base 10 logarithm of `x` | double, float, INT64, INT32 | double | log10(x) |
+| round | Rounds `x` to the nearest integer | double, float, INT64, INT32 | double | round(x) |
+| round | Rounds `x` to `d` decimal places | double, float, INT64, INT32 | double | round(x, d) |
+| sqrt | Returns the square root of `x` | double, float, INT64, INT32 | double | sqrt(x) |
+| e | Returns Euler's number `e` | | double | e() |
+| pi | Pi (π) | | double | pi() |
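+
+These functions compose like ordinary scalar expressions. A minimal sketch against `table1` from the [Example Data page](../Reference/Sample-Data.md) (output omitted; the alias names are illustrative):
+
+```SQL
+-- Round each temperature to one decimal place and compute its absolute deviation from 90
+SELECT time, round(temperature, 1) AS rounded, abs(temperature - 90) AS deviation
+FROM table1;
+```
+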
+## 6. Bitwise Functions
+
+> Supported from version V2.0.6
+
+Example raw data is as follows:
+
+```
+IoTDB:database1> select * from bit_table
++-----------------------------+---------+------+-----+
+| time|device_id|length|width|
++-----------------------------+---------+------+-----+
+|2025-10-29T15:59:42.957+08:00| d1| 14| 12|
+|2025-10-29T15:58:59.399+08:00| d3| 15| 10|
+|2025-10-29T15:59:32.769+08:00| d2| 13| 12|
++-----------------------------+---------+------+-----+
+
+-- Table creation statement
+CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD);
+
+-- Write data
+INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12);
+```
+
+### 6.1 bit\_count(num, bits)
+
+The `bit_count(num, bits)` function is used to count the number of 1s in the binary representation of the integer `num` under the specified bit width `bits`.
+
+#### 6.1.1 Syntax Definition
+
+```
+bit_count(num, bits) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+  * **num:** Any integer value (int32 or int64)
+  * **bits:** Integer value, with a valid range of 2\~64
+
+Note: An error will be raised if the number of `bits` is insufficient to represent `num` (using **two's complement signed representation**): `Argument exception, the scalar function num must be representable with the bits specified. [num] cannot be represented with [bits] bits.`
+
+* Usage Methods
+  * Two specific numbers: `bit_count(9, 64)`
+  * Column and a number: `bit_count(column1, 64)`
+  * Between two columns: `bit_count(column1, column2)`
+
+#### 6.1.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bit_count(2,8) from bit_table
++-----+
+|_col0|
++-----+
+| 1|
++-----+
+-- Two specific numbers
+IoTDB:database1> select distinct bit_count(-5,8) from bit_table
++-----+
+|_col0|
++-----+
+| 7|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bit_count(length,8) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+| 14| 3|
+| 15| 4|
+| 13| 3|
++------+-----+
+-- Insufficient bits
+IoTDB:database1> select length,bit_count(length,2) from bit_table
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits.
+```
+
+### 6.2 bitwise\_and(x, y)
+
+The `bitwise_and(x, y)` function performs a logical AND operation on each bit of the two integers x and y based on their two's complement representation, and returns the bitwise AND result.
+
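+Because AND keeps only the bits set in both operands, it can serve as a bit mask; for instance, masking with 1 isolates the lowest bit, which indicates parity. A sketch against the sample `bit_table` (expected values derived by hand; the formal syntax follows below):
+
+```SQL
+-- length 14 -> 0, 15 -> 1, 13 -> 1: the low bit tells whether length is odd
+SELECT length, bitwise_and(length, 1) FROM bit_table;
+```
+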
+#### 6.2.1 Syntax Definition
+
+```
+bitwise_and(x, y) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+  * **x, y**: Must be integer values of data type Int32 or Int64
+* Usage Methods
+  * Two specific numbers: `bitwise_and(19, 25)`
+  * Column and a number: `bitwise_and(column1, 25)`
+  * Between two columns: `bitwise_and(column1, column2)`
+
+#### 6.2.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_and(19,25) from bit_table
++-----+
+|_col0|
++-----+
+| 17|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_and(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+| 14| 8|
+| 15| 9|
+| 13| 9|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+| 14| 12| 12|
+| 15| 10| 10|
+| 13| 12| 12|
++------+-----+-----+
+```
+
+### 6.3 bitwise\_not(x)
+
+The `bitwise_not(x)` function performs a logical NOT operation on each bit of the integer x based on its two's complement representation, and returns the bitwise NOT result.
+
+#### 6.3.1 Syntax Definition
+
+```
+bitwise_not(x) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+  * **x**: Must be an integer value of data type Int32 or Int64
+* Usage Methods
+  * Specific number: `bitwise_not(5)`
+  * Single column operation: `bitwise_not(column1)`
+
+#### 6.3.2 Usage Examples
+
+```
+-- Specific number
+IoTDB:database1> select distinct bitwise_not(5) from bit_table
++-----+
+|_col0|
++-----+
+| -6|
++-----+
+-- Single column
+IoTDB:database1> select length, bitwise_not(length) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+| 14| -15|
+| 15| -16|
+| 13| -14|
++------+-----+
+```
+
+### 6.4 bitwise\_or(x, y)
+
+The `bitwise_or(x, y)` function performs a logical OR operation on each bit of the two integers x and y based on their two's complement representation, and returns the bitwise OR result.
+
+#### 6.4.1 Syntax Definition
+
+```
+bitwise_or(x, y) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+  * **x, y**: Must be integer values of data type Int32 or Int64
+* Usage Methods
+  * Two specific numbers: `bitwise_or(19, 25)`
+  * Column and a number: `bitwise_or(column1, 25)`
+  * Between two columns: `bitwise_or(column1, column2)`
+
+#### 6.4.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_or(19,25) from bit_table
++-----+
+|_col0|
++-----+
+| 27|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bitwise_or(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+| 14| 31|
+| 15| 31|
+| 13| 29|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+| 14| 12| 14|
+| 15| 10| 15|
+| 13| 12| 13|
++------+-----+-----+
+```
+
+### 6.5 bitwise\_xor(x, y)
+
+The `bitwise_xor(x, y)` function performs a logical XOR (exclusive OR) operation on each bit of the two integers x and y based on their two's complement representation, and returns the bitwise XOR result. XOR rule: identical bits yield 0, differing bits yield 1.
+
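+XOR is its own inverse: applying the same operand twice restores the original value, which is why XOR is commonly used to toggle selected bits. A sketch reusing the numbers from the examples below (19 XOR 25 = 10, and 10 XOR 25 = 19):
+
+```SQL
+-- Applying XOR with 25 twice returns the original 19
+SELECT DISTINCT bitwise_xor(bitwise_xor(19, 25), 25) FROM bit_table;
+```
+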
+#### 6.5.1 Syntax Definition
+
+```
+bitwise_xor(x, y) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+  * **x, y**: Must be integer values of data type Int32 or Int64
+* Usage Methods
+  * Two specific numbers: `bitwise_xor(19, 25)`
+  * Column and a number: `bitwise_xor(column1, 25)`
+  * Between two columns: `bitwise_xor(column1, column2)`
+
+#### 6.5.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table
++-----+
+|_col0|
++-----+
+| 10|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bitwise_xor(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+| 14| 23|
+| 15| 22|
+| 13| 20|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+| 14| 12| 2|
+| 15| 10| 5|
+| 13| 12| 1|
++------+-----+-----+
+```
+
+### 6.6 bitwise\_left\_shift(value, shift)
+
+The `bitwise_left_shift(value, shift)` function returns the result of shifting the binary representation of the integer `value` left by `shift` bits. The left shift operation moves bits towards the higher-order direction, filling the vacated lower-order bits with 0s and discarding the higher-order bits that overflow. Equivalent to: `value << shift`.
+
+#### 6.6.1 Syntax Definition
+
+```
+bitwise_left_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value
+```
+
+* Parameter Description
+  * **value**: The integer value to shift left. Must be of data type Int32 or Int64.
+  * **shift**: The number of bits to shift. Must be of data type Int32 or Int64.
+* Usage Methods
+  * Two specific numbers: `bitwise_left_shift(1, 2)`
+  * Column and a number: `bitwise_left_shift(column1, 2)`
+  * Between two columns: `bitwise_left_shift(column1, column2)`
+
+#### 6.6.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table
++-----+
+|_col0|
++-----+
+| 4|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+| 14| 56|
+| 15| 60|
+| 13| 52|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+| 14| 12| 0|
+| 15| 10| 0|
+| 13| 12| 0|
++------+-----+-----+
+```
+
+### 6.7 bitwise\_right\_shift(value, shift)
+
+The `bitwise_right_shift(value, shift)` function returns the result of logically (unsigned) right shifting the binary representation of the integer `value` by `shift` bits. The logical right shift operation moves bits towards the lower-order direction, filling the vacated higher-order bits with 0s and discarding the lower-order bits that overflow.
+
+#### 6.7.1 Syntax Definition
+
+```
+bitwise_right_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value
+```
+
+* Parameter Description
+  * **value**: The integer value to shift right. Must be of data type Int32 or Int64.
+  * **shift**: The number of bits to shift. Must be of data type Int32 or Int64.
+* Usage Methods + + * Two specific numbers: `bitwise_right_shift(8, 3)` + * Column and a number: `bitwise_right_shift(column1, 3)` + * Between two columns: `bitwise_right_shift(column1, column2)` + +#### 6.7.2 Usage Examples + +``` +--Two specific numbers +IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table ++-----+ +|_col0| ++-----+ +| 1| ++-----+ +--Column and a number +IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--Between two columns +IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| +``` + +### 6.8 bitwise\_right\_shift\_arithmetic(value, shift) + +The `bitwise_right_shift_arithmetic(value, shift)`function returns the result of arithmetically right shifting the binary representation of integer `value`by `shift`bits. The arithmetic right shift operation moves bits towards the lower-order direction, discarding the lower-order bits that overflow, and filling the vacated higher-order bits with the sign bit (0 for positive numbers, 1 for negative numbers) to preserve the sign of the number. + +#### 6.8.1 Syntax Definition + +``` +bitwise_right_shift_arithmetic(value, shift) -> [same as value]-- The return type is the same as the data type of value +``` + +* Parameter Description + + * ​**value**​: The integer value to shift right. Must be of data type Int32 or Int64. + * ​**shift**​: The number of bits to shift. Must be of data type Int32 or Int64. +* Usage Methods: + + * Two specific numbers: `bitwise_right_shift_arithmetic(12, 2)` + * Column and a number: `bitwise_right_shift_arithmetic(column1, 64)` + * Between two columns: `bitwise_right_shift_arithmetic(column1, column2)` + +#### 6.8.2 Usage Examples + +``` +--Two specific numbers +IoTDB:database1> select distinct bitwise_right_shift_arithmetic(12,2) from bit_table ++-----+ +|_col0| ++-----+ +| 3| ++-----+ +-- Column and a number +IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--Between two columns +IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| ++------+-----+-----+ +``` + + +## 7. Conditional Expressions + +### 7.1 CASE + +CASE expressions come in two forms: **Simple CASE** and **Searched CASE**. + +#### 7.1.1 Simple CASE + +The simple form evaluates each value expression from left to right until it finds a match with the given expression: + +```SQL +CASE expression + WHEN value THEN result + [ WHEN ... ] + [ ELSE result ] +END +``` + +If a matching value is found, the corresponding result is returned. If no match is found, the result from the `ELSE` clause (if provided) is returned; otherwise, `NULL` is returned. + +Example: + +```SQL +SELECT a, + CASE a + WHEN 1 THEN 'one' + WHEN 2 THEN 'two' + ELSE 'many' + END +``` + +#### 7.1.2 Searched CASE + +The searched form evaluates each Boolean condition from left to right until a `TRUE` condition is found, then returns the corresponding result: + +```SQL +CASE + WHEN condition THEN result + [ WHEN ... 
] + [ ELSE result ] +END +``` + +If no condition evaluates to `TRUE`, the `ELSE` clause result (if provided) is returned; otherwise, `NULL` is returned. + +Example: + +```SQL +SELECT a, b, + CASE + WHEN a = 1 THEN 'aaa' + WHEN b = 2 THEN 'bbb' + ELSE 'ccc' + END +``` + +### 7.2 COALESCE + +Returns the first non-null value from the given list of parameters. + +```SQL +coalesce(value1, value2[, ...]) +``` + +## 8. Conversion Functions + +### 8.1 Conversion Functions + +#### 8.1.1 cast(value AS type) → type + +Explicitly converts a value to the specified type. This can be used to convert strings (`VARCHAR`) to numeric types or numeric values to string types. Starting from V2.0.8-beta, OBJECT type can be explicitly cast to STRING type. + +If the conversion fails, a runtime error is thrown. + +Example: + +```SQL +SELECT * + FROM table1 + WHERE CAST(time AS DATE) + IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE)); +``` + +#### 8.1.2 try_cast(value AS type) → type + +Similar to `CAST()`. If the conversion fails, returns `NULL` instead of throwing an error. + +Example: + +```SQL +SELECT * + FROM table1 + WHERE try_cast(time AS DATE) + IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE)); +``` + +### 8.2 Format Function + +This function generates and returns a formatted string based on a specified format string and input arguments. Similar to Java’s `String.format` or C’s `printf`, it allows developers to construct dynamic string templates using placeholder syntax. Predefined format specifiers in the template are replaced precisely with corresponding argument values, producing a complete string that adheres to specific formatting requirements. + +#### 8.2.1 Syntax + +```SQL +format(pattern, ...args) -> STRING +``` + +**Parameters** + +* `pattern`: A format string containing static text and one or more format specifiers (e.g., `%s`, `%d`), or any expression returning a `STRING`/`TEXT` type. +* `args`: Input arguments to replace format specifiers. Constraints: + * Number of arguments ≥ 1. + * Multiple arguments must be comma-separated (e.g., `arg1, arg2`). + * Total arguments can exceed the number of specifiers in `pattern` but cannot be fewer, otherwise an exception is triggered. + +**Return Value** + +* Formatted result string of type `STRING`. + +#### 8.2.2 Usage Examples + +1. Format Floating-Point Numbers + ```SQL + IoTDB:database1> SELECT format('%.5f', humidity) FROM table1 WHERE humidity = 35.4; + +--------+ + | _col0| + +--------+ + |35.40000| + +--------+ + ``` +2. Format Integers + ```SQL + IoTDB:database1> SELECT format('%03d', 8) FROM table1 LIMIT 1; + +-----+ + |_col0| + +-----+ + | 008| + +-----+ + ``` +3. 
Format Dates and Timestamps + +* Locale-Specific Date + +```SQL +IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) FROM table1 LIMIT 1; ++--------------------+ +| _col0| ++--------------------+ +|Monday, January 1, 2024| ++--------------------+ +``` + +* Remove Timezone Information + +```SQL +IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1; ++-----------------------+ +| _col0| ++-----------------------+ +|2024-01-01 00:00:00.000| ++-----------------------+ +``` + +* Second-Level Timestamp Precision + +```SQL +IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1; ++-------------------+ +| _col0| ++-------------------+ +|2024-01-01 00:00:00| ++-------------------+ +``` + +* Date/Time Format Symbols + +| **Symbol** | **​ Description** | +| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 'H' | 24-hour format (two digits, zero-padded), i.e. 00 - 23 | +| 'I' | 12-hour format (two digits, zero-padded), i.e. 01 - 12 | +| 'k' | 24-hour format (no padding), i.e. 0 - 23 | +| 'l' | 12-hour format (no padding), i.e. 1 - 12 | +| 'M' | Minute (two digits, zero-padded), i.e. 00 - 59 | +| 'S' | Second (two digits, zero-padded; supports leap seconds), i.e. 00 - 60 | +| 'L' | Millisecond (three digits, zero-padded), i.e. 000 - 999 | +| 'N' | Nanosecond (nine digits, zero-padded), i.e. 000000000 - 999999999。 | +| 'p' | Locale-specific lowercase AM/PM marker (e.g., "am", "pm"). Prefix with`T`to force uppercase (e.g., "AM"). | +| 'z' | RFC 822 timezone offset from GMT (e.g.,`-0800`). Adjusts for daylight saving. Uses the JVM's default timezone for`long`/`Long`/`Date`. | +| 'Z' | Timezone abbreviation (e.g., "PST"). Adjusts for daylight saving. Uses the JVM's default timezone; Formatter's timezone overrides the argument's timezone if specified. | +| 's' | Seconds since Unix epoch (1970-01-01 00:00:00 UTC), i.e. Long.MIN\_VALUE/1000 to Long.MAX\_VALUE/1000。 | +| 'Q' | Milliseconds since Unix epoch, i.e. Long.MIN\_VALUE 至 Long.MAX\_VALUE。 | + +* Common Date/Time Conversion Characters + +| **Symbol** | **​ Description** | +| ---------------- | -------------------------------------------------------------------- | +| 'B' | Locale-specific full month name, for example "January", "February" | +| 'b' | Locale-specific abbreviated month name, for example "Jan", "Feb" | +| 'h' | Same as`b` | +| 'A' | Locale-specific full weekday name, for example "Sunday", "Monday" | +| 'a' | Locale-specific short weekday name, for example "Sun", "Mon" | +| 'C' | Year divided by 100 (two digits, zero-padded) | +| 'Y' | Year (minimum 4 digits, zero-padded) | +| 'y' | Last two digits of year (zero-padded) | +| 'j' | Day of year (three digits, zero-padded) | +| 'm' | Month (two digits, zero-padded) | +| 'd' | Day of month (two digits, zero-padded) | +| 'e' | Day of month (no padding) | + +4. Format Strings + ```SQL + IoTDB:database1> SELECT format('The measurement status is: %s', status) FROM table2 LIMIT 1; + +-------------------------------+ + | _col0| + +-------------------------------+ + |The measurement status is: true| + +-------------------------------+ + ``` +5. 
Format Percentage Sign + ```SQL + IoTDB:database1> SELECT format('%s%%', 99.9) FROM table1 LIMIT 1; + +-----+ + |_col0| + +-----+ + |99.9%| + +-----+ + ``` + +#### 8.2.3 Format Conversion Failure Scenarios + +1. Type Mismatch Errors + +* Timestamp Type Conflict + + If the format specifier includes time-related tokens (e.g., `%Y-%m-%d`) but the argument: + + * Is a non-`DATE`/`TIMESTAMP` type value. ◦ + * Requires sub-day precision (e.g., `%H`, `%M`) but the argument is not `TIMESTAMP`. + +```SQL +-- Example 1 +IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float) + +-- Example 2 +IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float) +``` + +* Floating-Point Type Conflict + + Using `%f` with non-numeric arguments (e.g., strings or booleans): + +```SQL +IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean) +``` + +2. Argument Count Mismatch + The number of arguments must equal or exceed the number of format specifiers. + + ```SQL + IoTDB:database1> SELECT format('%.5f %03d', humidity) FROM table1 WHERE humidity = 35.4; + Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d') + ``` +3. Invalid Invocation Errors + + Triggered if: + + * Total arguments < 2 (must include `pattern` and at least one argument).• + * `pattern` is not of type `STRING`/`TEXT`. + +```SQL +-- Example 1 +IoTDB:database1> select format('%s') from table1 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. + +--Example 2 +IoTDB:database1> select format(123, humidity) from table1 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. +``` + + +## 9. String Functions and Operators + +### 9.1 String operators + +#### 9.1.1 || Operator + +The `||` operator is used for string concatenation and functions the same as the `concat` function. + +#### 9.1.2 LIKE Statement + + The `LIKE` statement is used for pattern matching. For detailed usage, refer to Pattern Matching:[LIKE](#1-like-运算符). + +### 9.2 String Functions + +| Function Name | Description | Input | Output | Usage | +| :------------ |:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------| :------ | :----------------------------------------------------------- | +| `length` | Returns the number of characters in a string (not byte length). 
| `string` (the string whose length is to be calculated) | INT32 | length(string) | +| `upper` | Converts all letters in a string to uppercase. | string | String | upper(string) | +| `lower` | Converts all letters in a string to lowercase. | string | String | lower(string) | +| `trim` | Removes specified leading and/or trailing characters from a string. **Parameters:** - `specification` (optional): Specifies which side to trim: - `BOTH`: Removes characters from both sides (default). - `LEADING`: Removes characters from the beginning. - `TRAILING`: Removes characters from the end. - `trimcharacter` (optional): Character to be removed (default is whitespace). - `string`: The target string. | string | String | trim([ [ specification ] [ trimcharacter ] FROM ] string) Example:`trim('!' FROM '!foo!');` —— `'foo'` | +| `strpos` | Returns the position of the first occurrence of `subStr` in `sourceStr`. **Notes:** - Position starts at `1`. - Returns `0` if `subStr` is not found. - Positioning is based on characters, not byte arrays. | `sourceStr` (string to be searched), `subStr` (substring to find) | INT32 | strpos(sourceStr, subStr) | +| `starts_with` | Checks if `sourceStr` starts with the specified `prefix`. | `sourceStr`, `prefix` | Boolean | starts_with(sourceStr, prefix) | +| `ends_with` | Checks if `sourceStr` ends with the specified `suffix`. | `sourceStr`, `suffix` | Boolean | ends_with(sourceStr, suffix) | +| `concat` | Concatenates `string1, string2, ..., stringN`. Equivalent to the `\|\|` operator. | `string`, `text` | String | concat(str1, str2, ...) or str1 \|\| str2 ... | +| `strcmp` | Compares two strings lexicographically. **Returns:** - `-1` if `str1 < str2` - `0` if `str1 = str2` - `1` if `str1 > str2` - `NULL` if either `str1` or `str2` is `NULL` | `string1`, `string2` | INT32 | strcmp(str1, str2) | +| `replace` | Removes all occurrences of `search` in `string`. | `string`, `search` | String | replace(string, search) | +| `replace` | Replaces all occurrences of `search` in `string` with `replace`. | `string`, `search`, `replace` | String | replace(string, search, replace) | +| `substring` | Extracts a substring from `start_index` to the end of the string. **Notes:** - `start_index` starts at `1`. - Returns `NULL` if input is `NULL`. - Throws an error if `start_index` is greater than string length. | `string`, `start_index` | String | substring(string from start_index)or substring(string, start_index) | +| `substring` | Extracts a substring of `length` characters starting from `start_index`. **Notes:** - `start_index` starts at `1`. - Returns `NULL` if input is `NULL`. - Throws an error if `start_index` is greater than string length. - Throws an error if `length` is negative. - If `start_index + length` exceeds `int.MAX`, an overflow error may occur. | `string`, `start_index`, `length` | String | substring(string from start_index for length) or substring(string, start_index, length) | + +## 10. Pattern Matching Functions + +### 10.1 LIKE + +#### 10.1.1 Usage + +The `LIKE `operator is used to compare a value with a pattern. It is commonly used in the `WHERE `clause to match specific patterns within strings. + +#### 10.1.2 Syntax + +```SQL +... column [NOT] LIKE 'pattern' ESCAPE 'character'; +``` + +#### 10.1.3 Match rules + +- Matching characters is case-sensitive +- The pattern supports two wildcard characters: + - `_` matches any single character + - `%` matches zero or more characters + +#### 10.1.4 Notes + +- `LIKE` pattern matching applies to the entire string by default. 
Therefore, if it's desired to match a sequence anywhere within a string, the pattern must start and end with a percent sign. +- To match the escape character itself, double it (e.g., `\\` to match `\`). For example, you can use `\\` to match for `\`. + +#### 10.1.5 Examples + +#### **Example 1: Match Strings Starting with a Specific Character** + +- **Description:** Find all names that start with the letter `E` (e.g., `Europe`). + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'E%'; +``` + +#### **Example 2: Exclude a Specific Pattern** + +- **Description:** Find all names that do **not** start with the letter `E`. + +```SQL +SELECT * FROM table1 WHERE continent NOT LIKE 'E%'; +``` + +#### **Example 3: Match Strings of a Specific Length** + +- **Description:** Find all names that start with `A`, end with `a`, and have exactly two characters in between (e.g., `Asia`). + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'A__a'; +``` + +#### **Example 4: Escape Special Characters** + +- **Description:** Find all names that start with `South_` (e.g., `South_America`). The underscore (`_`) is a wildcard character, so it needs to be escaped using `\`. + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\'; +``` + +#### **Example 5: Match the Escape Character Itself** + +- **Description:** Find all names that start with 'South\'. Since `\` is the escape character, it must be escaped using `\\`. + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\'; +``` + +### 10.2 regexp_like + +#### 10.2.1 Usage + +Evaluates whether the regular expression pattern is present within the given string. + +#### 10.2.2 Syntax + +```SQL +regexp_like(string, pattern); +``` + +#### 10.2.3 Notes + +- The pattern for `regexp_like` only needs to be contained within the string, and does not need to match the entire string. +- To match the entire string, use the `^` and `$` anchors. +- `^` signifies the "start of the string," and `$` signifies the "end of the string." +- Regular expressions use the Java-defined regular syntax, but there are the following exceptions to be aware of: + - Multiline mode + 1. Enabled by: `(?m)`. + 2. Recognizes only `\n` as the line terminator. + 3. Does not support the `(?d)` flag, and its use is prohibited. + - Case-insensitive matching + 1. Enabled by: `(?i)`. + 2. Based on Unicode rules, it does not support context-dependent and localized matching. + 3. Does not support the `(?u)` flag, and its use is prohibited. + - Character classes + 1. Within character classes (e.g., `[A-Z123]`), `\Q` and `\E` are not supported and are treated as literals. + - Unicode character classes (`\p{prop}`) + 1. Underscores in names: All underscores in names must be removed (e.g., `OldItalic `instead of `Old_Italic`). + 2. Scripts: Specify directly, without the need for `Is`, `script=`, or `sc=` prefixes (e.g., `\p{Hiragana}`). + 3. Blocks: Must use the `In` prefix, `block=` or `blk=` prefixes are not supported (e.g., `\p{InMongolian}`). + 4. Categories: Specify directly, without the need for `Is`, `general_category=`, or `gc=` prefixes (e.g., `\p{L}`). + 5. Binary properties: Specify directly, without `Is` (e.g., `\p{NoncharacterCodePoint}`). + +#### 10.2.4 Examples + +#### Example 1: **Matching strings containing a specific pattern** + +```SQL +SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true +``` + +- **Explanation**: Determines whether the string '1a 2b 14m' contains a substring that matches the pattern `\d+b`. + - `\d+` means "one or more digits". 
+ - `b` represents the letter b. + - In `'1a 2b 14m'`, the substring `'2b'` matches this pattern, so it returns `true`. + + +#### **Example 2: Matching the entire string** + +```SQL +SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false +``` + +- **Explanation**: Checks if the string `'1a 2b 14m'` matches the pattern `^\\d+b$` exactly. + - `\d+` means "one or more digits". + - `b` represents the letter b. + - `'1a 2b 14m'` does not match this pattern because it does not start with digits and does not end with `b`, so it returns `false`. + +## 11. Timeseries Windowing Functions + +The sample data is as follows: + +```SQL +IoTDB> SELECT * FROM bid; ++-----------------------------+--------+-----+ +| time|stock_id|price| ++-----------------------------+--------+-----+ +|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:15:00.000+08:00| TESL|195.0| ++-----------------------------+--------+-----+ + +-- Create table statement +CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD); +-- Insert data +INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0); +``` + +### 11.1 HOP + +#### 11.1.1 Function Description + +The HOP function segments data into overlapping time windows for analysis, assigning each row to all windows that overlap with its timestamp. If windows overlap (when SLIDE < SIZE), data will be duplicated across multiple windows. + +#### 11.1.2 Function Definition + +```SQL +HOP(data, timecol, size, slide[, origin]) +``` + +#### 11.1.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | --------------------------------- | ------------------------- | +| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | +| TIMECOL | Scalar | String (default: 'time') | Time column | +| SIZE | Scalar | Long integer | Window size | +| SLIDE | Scalar | Long integer | Sliding step | +| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | + + +#### 11.1.4 Returned Results + +The HOP function returns: + +* `window_start`: Window start time (inclusive) +* `window_end`: Window end time (exclusive) +* Pass-through columns: All input columns from DATA + +#### 11.1.5 Usage Example + +```SQL +IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| 
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 201.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.2 SESSION + +#### 11.2.1 Function Description + +The SESSION function groups data into sessions based on time intervals. It checks the time gap between consecutive rows—rows with gaps smaller than the threshold (GAP) are grouped into the current window, while larger gaps trigger a new window. 
+ +#### 11.2.2 Function Definition + +```SQL +SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap) +``` +#### 11.2.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| TIMECOL | Scalar | String (default: 'time') | Time column name | +| GAP | Scalar | Long integer | Session gap threshold | + +#### 11.2.4 Returned Results + +The SESSION function returns: + +* `window_start`: Time of the first row in the session +* `window_end`: Time of the last row in the session +* Pass-through columns: All input columns from DATA + +#### 11.2.5 Usage Example + +```SQL +IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY SESSION when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.3 VARIATION + +#### 11.3.1 Function Description + +The VARIATION function groups data based on value differences. The first row becomes the baseline for the first window. Subsequent rows are compared to the baseline—if the difference is within the threshold (DELTA), they join the current window; otherwise, a new window starts with that row as the new baseline. 
+ +#### 11.3.2 Function Definition + +```sql +VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta) +``` + +#### 11.3.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| COL | Scalar | String | Column for difference calculation | +| DELTA | Scalar | Float | Difference threshold | + +#### 11.3.4 Returned Results + +The VARIATION function returns: + +* `window_index`: Window identifier +* Pass-through columns: All input columns from DATA + +#### 11.3.5 Usage Example + +```sql +IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY VARIATION when combined with GROUP BY +IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id; ++-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.4 CAPACITY + +#### 11.4.1 Function Description + +The CAPACITY function groups data into fixed-size windows, where each window contains up to SIZE rows. 
+ +#### 11.4.2 Function Definition + +```sql +CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size) +``` + +#### 11.4.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| SIZE | Scalar | Long integer | Window size (row count) | + +#### 11.4.4 Returned Results + +The CAPACITY function returns: + +* `window_index`: Window identifier +* Pass-through columns: All input columns from DATA + +#### 11.4.5 Usage Example + +```sql +IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY COUNT when combined with GROUP BY +IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id; ++-----------------------------+-----------------------------+--------+-----+ +| start_time| end_time|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|101.5| +|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.5 TUMBLE + +#### 11.5.1 Function Description + +The TUMBLE function assigns each row to a non-overlapping, fixed-size time window based on a timestamp attribute. 
+ +#### 11.5.2 Function Definition + +```sql +TUMBLE(data, timecol, size[, origin]) +``` +#### 11.5.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | --------------------------------- | ------------------------- | +| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | +| TIMECOL | Scalar | String (default: 'time') | Time column | +| SIZE | Scalar | Long integer (positive) | Window size | +| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | + +#### 11.5.4 Returned Results + +The TUMBLE function returns: + +* `window_start`: Window start time (inclusive) +* `window_end`: Window end time (exclusive) +* Pass-through columns: All input columns from DATA + +#### 11.5.5 Usage Example + +```SQL +IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.6 CUMULATE + +#### 11.6.1 Function Description + +The CUMULATE function creates expanding windows from an initial window, maintaining the same start time while incrementally extending the end time by STEP until reaching SIZE. Each window contains all elements within its range. For example, with a 1-hour STEP and 24-hour SIZE, daily windows would be: `[00:00, 01:00)`, `[00:00, 02:00)`, ..., `[00:00, 24:00)`. 
+ +#### 11.6.2 Function Definition + +```sql +CUMULATE(data, timecol, size, step[, origin]) +``` + +#### 11.6.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | --------------------------------- | --------------------------------------------------- | +| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | +| TIMECOL | Scalar | String (default: 'time') | Time column | +| SIZE | Scalar | Long integer (positive) | Window size (must be an integer multiple of STEP) | +| STEP | Scalar | Long integer (positive) | Expansion step | +| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | + +> Note: An error `Cumulative table function requires size must be an integral multiple of step` occurs if SIZE is not divisible by STEP. + +#### 11.6.4 Returned Results + +The CUMULATE function returns: + +* `window_start`: Window start time (inclusive) +* `window_end`: Window end time (exclusive) +* Pass-through columns: All input columns from DATA + +#### 11.6.5 Usage Example + +```sql +IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| TESL| 201.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| 
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00| TESL| 195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00| TESL| 195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00| AAPL| 100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| AAPL| 101.5| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` diff --git a/src/UserGuide/latest-Table/SQL-Manual/Basis-Function_timecho.md b/src/UserGuide/latest-Table/SQL-Manual/Basis-Function_timecho.md new file mode 100644 index 000000000..7f354dc89 --- /dev/null +++ b/src/UserGuide/latest-Table/SQL-Manual/Basis-Function_timecho.md @@ -0,0 +1,2037 @@ + + + +# Basic Functions + +## 1. Comparison Functions and Operators + +### 1.1 Basic Comparison Operators + +Comparison operators are used to compare two values and return the comparison result (`true` or `false`). + +| Operators | Description | +| :-------- | :----------------------- | +| < | Less than | +| > | Greater than | +| <= | Less than or equal to | +| >= | Greater than or equal to | +| = | Equal to | +| <> | Not equal to | +| != | Not equal to | + +#### 1.1.1 Comparison rules: + +1. All types can be compared with themselves. +2. Numeric types (INT32, INT64, FLOAT, DOUBLE, TIMESTAMP) can be compared with each other. +3. Character types (STRING, TEXT) can also be compared with each other. +4. Comparisons between types other than those mentioned above will result in an error. + +### 1.2 BETWEEN Operator + +1. The `BETWEEN `operator is used to determine whether a value falls within a specified range. +2. The `NOT BETWEEN` operator is used to determine whether a value does not fall within a specified range. +3. The `BETWEEN` and `NOT BETWEEN` operators can be used to evaluate any sortable type. +4. The value, minimum, and maximum parameters for `BETWEEN` and `NOT BETWEEN` must be of the same type, otherwise an error will occur. + +Syntax: + +```SQL + value BETWEEN min AND max: + value NOT BETWEEN min AND max: +``` + +Example 1 :BETWEEN + +```SQL +-- Query records where temperature is between 85.0 and 90.0 +SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0; +``` + +Example 2 : NOT BETWEEN + +``` +-- Query records where humidity is not between 35.0 and 40.0 +SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0; +``` + +### 1.3 IS NULL Operator + +1. These operators apply to all data types. + +Example 1: Query records where temperature is NULL + +```SQL +SELECT * FROM table1 WHERE temperature IS NULL; +``` + +Example 2: Query records where humidity is not NULL + +```SQL +SELECT * FROM table1 WHERE humidity IS NOT NULL; +``` + +### 1.4 IN Operator + +1. The `IN` operator can be used in the `WHERE `clause to compare a column with a list of values. +2. These values can be provided by a static array or scalar expressions. + +Syntax: + +```SQL +... 
WHERE column [NOT] IN ('value1','value2', expression1) +``` + +Example 1: Static array: Query records where region is 'Beijing' or 'Shanghai' + +```SQL +SELECT * FROM table1 WHERE region IN ('Beijing', 'Shanghai'); +--Equivalent to +SELECT * FROM region WHERE name = 'Beijing' OR name = 'Shanghai'; +``` + +Example 2: Scalar expression: Query records where temperature is among specific values + +```SQL +SELECT * FROM table1 WHERE temperature IN (85.0, 90.0); +``` + +Example 3: Query records where region is not 'Beijing' or 'Shanghai' + +```SQL +SELECT * FROM table1 WHERE region NOT IN ('Beijing', 'Shanghai'); +``` + +### 1.5 GREATEST and LEAST + +The `GREATEST` function returns the maximum value from a list of arguments, while the `LEAST` function returns the minimum value. The return type matches the input data type. + +Key Behaviors: +1. NULL Handling: Returns NULL if all arguments are NULL. +2. Parameter Requirements: Requires at least 2 arguments. +3. Type Constraints: All arguments must have the same data type. +4. Supported Types: `BOOLEAN`、`FLOAT`、`DOUBLE`、`INT32`、`INT64`、`STRING`、`TEXT`、`TIMESTAMP`、`DATE` + +**Syntax:** + +```sql + greatest(value1, value2, ..., valueN) + least(value1, value2, ..., valueN) +``` + +**Examples:** + +```sql +-- Retrieve the maximum value between `temperature` and `humidity` in `table2` +SELECT GREATEST(temperature,humidity) FROM table2; + +-- Retrieve the minimum value between `temperature` and `humidity` in `table2` +SELECT LEAST(temperature,humidity) FROM table2; +``` + +## 2. Aggregate functions + +### 2.1 Overview + +1. Aggregate functions are many-to-one functions. They perform aggregate calculations on a set of values to obtain a single aggregate result. + +2. Except for `COUNT()`, all other aggregate functions ignore null values and return null when there are no input rows or all values are null. For example, `SUM()` returns null instead of zero, and `AVG()` does not include null values in the count. + +### 2.2 Supported Aggregate Functions + +| Function Name | Description | Allowed Input Types | Output Type | +|:-----------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------| +| COUNT | Counts the number of data points. | All types | INT64 | +| COUNT_IF | COUNT_IF(exp) counts the number of rows that satisfy a specified boolean expression. | `exp` must be a boolean expression,(e.g. `count_if(temperature>20)`) | INT64 | +| APPROX_COUNT_DISTINCT | The APPROX_COUNT_DISTINCT(x[, maxStandardError]) function provides an approximation of COUNT(DISTINCT x), returning the estimated number of distinct input values. | `x`: The target column to be calculated, supports all data types.
`maxStandardError` (optional): Specifies the maximum standard error allowed for the function's result. Valid range is [0.0040625, 0.26]. Defaults to 0.023 if not specified. | INT64 | +| APPROX_MOST_FREQUENT | The APPROX_MOST_FREQUENT(x, k, capacity) function is used to approximately calculate the top k most frequent elements in a dataset. It returns a JSON-formatted string where the keys are the element values and the values are their corresponding approximate frequencies. (Available since V2.0.5.1) | `x` : The column to be calculated, supporting all existing data types in IoTDB;
`k`: The number of top-k most frequent values to return;
`capacity`: The number of buckets used for computation, which relates to memory usage—a larger value reduces error but consumes more memory, while a smaller value increases error but uses less memory. | STRING | +| SUM | Calculates the sum. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| AVG | Calculates the average. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| MAX | Finds the maximum value. | All types | Same as input type | +| MIN | Finds the minimum value. | All types | Same as input type | +| FIRST | Finds the value with the smallest timestamp that is not NULL. | All types | Same as input type | +| LAST | Finds the value with the largest timestamp that is not NULL. | All types | Same as input type | +| STDDEV | Alias for STDDEV_SAMP, calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_POP | Calculates the population standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_SAMP | Calculates the sample standard deviation. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VARIANCE | Alias for VAR_SAMP, calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_POP | Calculates the population variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_SAMP | Calculates the sample variance. | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| EXTREME | Finds the value with the largest absolute value. If the largest absolute values of positive and negative values are equal, returns the positive value. | INT32 INT64 FLOAT DOUBLE | Same as input type | +| MODE | Finds the mode. Note: 1. There is a risk of memory exception when the number of distinct values in the input sequence is too large; 2. If all elements have the same frequency, i.e., there is no mode, a random element is returned; 3. If there are multiple modes, a random mode is returned; 4. NULL values are also counted in frequency, so even if not all values in the input sequence are NULL, the final result may still be NULL. | All types | Same as input type | +| MAX_BY | MAX_BY(x, y) finds the value of x corresponding to the maximum y in the binary input x and y. MAX_BY(time, x) returns the timestamp when x is at its maximum. | x and y can be of any type | Same as the data type of the first input x | +| MIN_BY | MIN_BY(x, y) finds the value of x corresponding to the minimum y in the binary input x and y. MIN_BY(time, x) returns the timestamp when x is at its minimum. | x and y can be of any type | Same as the data type of the first input x | +| FIRST_BY | FIRST_BY(x, y) finds the value of x in the same row when y is the first non-null value. | x and y can be of any type | Same as the data type of the first input x | +| LAST_BY | LAST_BY(x, y) finds the value of x in the same row when y is the last non-null value. | x and y can be of any type | Same as the data type of the first input x | + + +### 2.3 Examples + +#### 2.3.1 Example Data + +The [Example Data page](../Reference/Sample-Data.md) contains SQL statements for building table structures and inserting data. Download and execute these statements in the IoTDB CLI to import the data into IoTDB. You can use this data to test and execute the SQL statements in the examples and obtain the corresponding results. + +#### 2.3.2 Count + +Counts the number of rows in the entire table and the number of non-null values in the `temperature` column. + +```SQL +IoTDB> select count(*), count(temperature) from table1; +``` + +The execution result is as follows: + +> Note: Only the COUNT function can be used with *, otherwise an error will occur. 
+ +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 18| 12| ++-----+-----+ +Total line number = 1 +It costs 0.834s +``` + + +#### 2.3.3 Count_if + +Count `Non-Null` `arrival_time` Records in `table2` + +```sql +select count_if(arrival_time is not null) from table2; +``` + +The execution result is as follows: + +```sql ++-----+ +|_col0| ++-----+ +| 4| ++-----+ +Total line number = 1 +It costs 0.047s +``` + +#### 2.3.4 Approx_count_distinct + +Retrieve the number of distinct values in the `temperature` column from `table1`. + +```sql +IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1; +IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1; +``` + +The execution result is as follows: + +```sql ++------+------+ +|origin|approx| ++------+------+ +| 3| 3| ++------+------+ +Total line number = 1 +It costs 0.022s +``` + +#### 2.3.5 Approx_most_frequent + +Query the ​​top 2 most frequent values​​ in the `temperature` column of `table1`. + +```sql +IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1; +``` + +The execution result is as follows: + +```sql ++-------------------+ +| topk| ++-------------------+ +|{"85.0":6,"90.0":5}| ++-------------------+ +Total line number = 1 +It costs 0.064s +``` + + +#### 2.3.6 First + +Finds the values with the smallest timestamp that are not NULL in the `temperature` and `humidity` columns. + +```SQL +IoTDB> select first(temperature), first(humidity) from table1; +``` + +The execution result is as follows: + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 90.0| 35.1| ++-----+-----+ +Total line number = 1 +It costs 0.170s +``` + +#### 2.3.7 Last + +Finds the values with the largest timestamp that are not NULL in the `temperature` and `humidity` columns. + +```SQL +IoTDB> select last(temperature), last(humidity) from table1; +``` + +The execution result is as follows: + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 90.0| 34.8| ++-----+-----+ +Total line number = 1 +It costs 0.211s +``` + +#### 2.3.8 First_by + +Finds the `time` value of the row with the smallest timestamp that is not NULL in the `temperature` column, and the `humidity` value of the row with the smallest timestamp that is not NULL in the `temperature` column. + +```SQL +IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-26T13:37:00.000+08:00| 35.1| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.269s +``` + +#### 2.3.9 Last_by + +Queries the `time` value of the row with the largest timestamp that is not NULL in the `temperature` column, and the `humidity` value of the row with the largest timestamp that is not NULL in the `temperature` column. + +```SQL +IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-30T14:30:00.000+08:00| 34.8| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.070s +``` + +#### 2.3.10 Max_by + +Queries the `time` value of the row where the `temperature` column is at its maximum, and the `humidity` value of the row where the `temperature` column is at its maximum. 
+ +```SQL +IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-30T09:30:00.000+08:00| 35.2| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.172s +``` + +#### 2.3.11 Min_by + +Queries the `time` value of the row where the `temperature` column is at its minimum, and the `humidity` value of the row where the `temperature` column is at its minimum. + +```SQL +select min_by(time, temperature), min_by(humidity, temperature) from table1; +``` + +The execution result is as follows: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-29T10:00:00.000+08:00| null| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.244s +``` + + +## 3. Logical operators + +### 3.1 Overview + +Logical operators are used to combine conditions or negate conditions, returning a Boolean result (`true` or `false`). + +Below are the commonly used logical operators along with their descriptions: + +| Operator | Description | Example | +| :------- | :-------------------------------- | :------ | +| AND | True only if both values are true | a AND b | +| OR | True if either value is true | a OR b | +| NOT | True when the value is false | NOT a | + +### 3.2 Impact of NULL on Logical Operators + +#### 3.2.1 AND Operator + +- If one or both sides of the expression are `NULL`, the result may be `NULL`. +- If one side of the `AND` operator is `FALSE`, the expression result is `FALSE`. + +Examples: + +```SQL +NULL AND true -- null +NULL AND false -- false +NULL AND NULL -- null +``` + +#### 3.2.2 OR Operator + +- If one or both sides of the expression are `NULL`, the result may be `NULL`. +- If one side of the `OR` operator is `TRUE`, the expression result is `TRUE`. + +Examples: + +```SQL +NULL OR NULL -- null +NULL OR false -- null +NULL OR true -- true +``` + +##### 3.2.2.1 Truth Table + +The following truth table illustrates how `NULL` is handled in `AND` and `OR` operators: + +| a | b | a AND b | a OR b | +| :---- | :---- | :------ | :----- | +| TRUE | TRUE | TRUE | TRUE | +| TRUE | FALSE | FALSE | TRUE | +| TRUE | NULL | NULL | TRUE | +| FALSE | TRUE | FALSE | TRUE | +| FALSE | FALSE | FALSE | FALSE | +| FALSE | NULL | FALSE | NULL | +| NULL | TRUE | NULL | TRUE | +| NULL | FALSE | FALSE | NULL | +| NULL | NULL | NULL | NULL | + +#### 3.2.3 NOT Operator + +The logical negation of `NULL` remains `NULL`. + +Example: + +```SQL +NOT NULL -- null +``` + +##### 3.2.3.1 Truth Table + +The following truth table illustrates how `NULL` is handled in the `NOT` operator: + +| a | NOT a | +| :---- | :---- | +| TRUE | FALSE | +| FALSE | TRUE | +| NULL | NULL | + +## 4. Date and Time Functions and Operators + +### 4.1 now() -> Timestamp + +Returns the current timestamp. + +### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp + +The `date_bin` function is used for handling time data by rounding a timestamp (`Timestamp`) to the boundary of a specified time interval (`interval`). + +#### **Syntax:** + +```SQL +-- Calculates the time interval starting from timestamp 0 and returns the nearest interval boundary to the specified timestamp. +date_bin(interval,source) + +-- Calculates the time interval starting from the origin timestamp and returns the nearest interval boundary to the specified timestamp. 
+date_bin(interval,source,origin)
+
+-- Supported time units for interval:
+-- Years (y), months (mo), weeks (week), days (d), hours (h), minutes (M), seconds (s), milliseconds (ms), microseconds (µs), nanoseconds (ns).
+-- source: Must be of timestamp type.
+```
+
+#### **Parameters**:
+
+| Parameter | Description |
+| :-------- | :----------------------------------------------------------- |
+| interval  | The time interval. Supported units: `y`, `mo`, `week`, `d`, `h`, `M`, `s`, `ms`, `µs`, `ns`. |
+| source    | The timestamp column or expression to be binned. Must be of timestamp type. |
+| origin    | The reference timestamp. |
+
+#### 4.2.1 Syntax Rules
+
+1. If `origin` is not specified, the default reference timestamp is `1970-01-01T00:00:00Z` (Beijing time: `1970-01-01 08:00:00`).
+2. `interval` must be a non-negative number with a time unit. If `interval` is `0ms`, the function returns `source` directly without calculation.
+3. If `origin` or `source` is negative, it represents a time point before the epoch. `date_bin` calculates and returns the corresponding time bin.
+4. If `source` is `null`, the function returns `null`.
+5. Mixing months and non-month time units (e.g., `1 MONTH 1 DAY`) is not supported due to ambiguity.
+
+> For example, if the starting point is **April 30, 2000**, calculating `1 DAY` first and then `1 MONTH` results in **June 1, 2000**, whereas calculating `1 MONTH` first and then `1 DAY` results in **May 31, 2000**. The resulting dates are different.
+
+#### 4.2.2 Examples
+
+##### Example Data
+
+The [Example Data page](../Reference/Sample-Data.md) contains the SQL statements for creating the table structures and inserting the data. Download and execute them in the IoTDB CLI to import the sample data, which can then be used to run the example SQL statements below and reproduce their results.
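+
+Conceptually, for non-month units the returned boundary can be read as `origin + floor((source - origin) / interval) * interval`. As an illustration (not executable SQL), tracing one row from Example 1 below:
+
+```Plain
+source   = 2024-11-27T16:38:00+08:00
+origin   = 1970-01-01T00:00:00Z   (default)
+interval = 1h
+-- (source - origin) is a whole number of hours plus 38 minutes,
+-- so flooring to the hour boundary yields:
+date_bin(1h, source) = 2024-11-27T16:00:00+08:00
+```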
+ +#### Example 1: Without Specifying the Origin Timestamp + +```SQL +SELECT + time, + date_bin(1h,time) as time_bin +FROM + table1; +``` + +Result**:** + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.683s +``` + +#### Example 2: Specifying the Origin Timestamp + +```SQL +SELECT + time, + date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.056s +``` + +#### Example 3: Negative Origin + +```SQL +SELECT + time, + date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| 
+|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.203s +``` + +#### Example 4: Interval of 0 + +```SQL +SELECT + time, + date_bin(0ms, time) as time_bin +FROM + table1; +``` + +Result**:** + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.107s +``` + +#### Example 5: Source is NULL + +```SQL +SELECT + arrival_time, + date_bin(1h,arrival_time) as time_bin +FROM + table1; +``` + +Result: + +```Plain ++-----------------------------+-----------------------------+ +| arrival_time| time_bin| ++-----------------------------+-----------------------------+ +| null| null| +|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +| null| null| +|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| +| null| null| 
+|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00|
+|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00|
+|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00|
+|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00|
++-----------------------------+-----------------------------+
+Total line number = 18
+It costs 0.319s
+```
+
+### 4.3 Extract Function
+
+This function extracts the value of a specific part of a date. (Supported from version V2.0.6)
+
+#### 4.3.1 Syntax Definition
+
+```SQL
+EXTRACT (identifier FROM expression)
+```
+
+* Parameter Description
+  * **expression**: `TIMESTAMP` type or a time constant
+  * **identifier**: The valid ranges and corresponding return value types are shown in the table below.
+
+    | Valid Range          | Return Type   | Return Range       |
+    |----------------------|---------------|--------------------|
+    | `YEAR`               | `INT64`       | `/`                |
+    | `QUARTER`            | `INT64`       | `1-4`              |
+    | `MONTH`              | `INT64`       | `1-12`             |
+    | `WEEK`               | `INT64`       | `1-53`             |
+    | `DAY_OF_MONTH (DAY)` | `INT64`       | `1-31`             |
+    | `DAY_OF_WEEK (DOW)`  | `INT64`       | `1-7`              |
+    | `DAY_OF_YEAR (DOY)`  | `INT64`       | `1-366`            |
+    | `HOUR`               | `INT64`       | `0-23`             |
+    | `MINUTE`             | `INT64`       | `0-59`             |
+    | `SECOND`             | `INT64`       | `0-59`             |
+    | `MS`                 | `INT64`       | `0-999`            |
+    | `US`                 | `INT64`       | `0-999`            |
+    | `NS`                 | `INT64`       | `0-999`            |
+
+
+#### 4.3.2 Usage Example
+
+Using table1 from the [Sample Data](../Reference/Sample-Data.md) as the source data, query the average temperature for the first 12 hours of each day within a certain period.
+
+```SQL
+IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time)
++----------+-----+
+|   fmtdate|avgtp|
++----------+-----+
+|2024-11-28| 86.0|
+|2024-11-29| 85.0|
+|2024-11-30| 90.0|
++----------+-----+
+Total line number = 3
+It costs 0.041s
+```
+
+Introduction to the `Format` function: [Format Function](../SQL-Manual/Basis-Function_timecho.md#_7-2-format-function)
+
+Introduction to the `Date_bin` function: [Date_bin Function](../SQL-Manual/Basis-Function_timecho.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp)
+
+
+## 5. 
Mathematical Functions and Operators + +### 5.1 Mathematical Operators + +| **Operator** | **Description** | +| :----------- | :---------------------------------------------- | +| + | Addition | +| - | Subtraction | +| * | Multiplication | +| / | Division (integer division performs truncation) | +| % | Modulus (remainder) | +| - | Negation | + +### 5.2 Mathematical functions + +| Function Name | Description | Input | Output | Usage | +|:--------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------------------------|:-------------------| :--------- | +| sin | Sine | double, float, INT64, INT32 | double | sin(x) | +| cos | Cosine | double, float, INT64, INT32 | double | cos(x) | +| tan | Tangent | double, float, INT64, INT32 | double | tan(x) | +| asin | Inverse Sine | double, float, INT64, INT32 | double | asin(x) | +| acos | Inverse Cosine | double, float, INT64, INT32 | double | acos(x) | +| atan | Inverse Tangent | double, float, INT64, INT32 | double | atan(x) | +| sinh | Hyperbolic Sine | double, float, INT64, INT32 | double | sinh(x) | +| cosh | Hyperbolic Cosine | double, float, INT64, INT32 | double | cosh(x) | +| tanh | Hyperbolic Tangent | double, float, INT64, INT32 | double | tanh(x) | +| degrees | Converts angle `x` in radians to degrees | double, float, INT64, INT32 | double | degrees(x) | +| radians | Radian Conversion from Degrees | double, float, INT64, INT32 | double | radians(x) | +| abs | Absolute Value | double, float, INT64, INT32 | Same as input type | abs(x) | +| sign | Returns the sign of `x`: - If `x = 0`, returns `0` - If `x > 0`, returns `1` - If `x < 0`, returns `-1` For `double/float` inputs: - If `x = NaN`, returns `NaN` - If `x = +Infinity`, returns `1.0` - If `x = -Infinity`, returns `-1.0` | double, float, INT64, INT32 | Same as input type | sign(x) | +| ceil | Rounds `x` up to the nearest integer | double, float, INT64, INT32 | double | ceil(x) | +| floor | Rounds `x` down to the nearest integer | double, float, INT64, INT32 | double | floor(x) | +| exp | Returns `e^x` (Euler's number raised to the power of `x`) | double, float, INT64, INT32 | double | exp(x) | +| ln | Returns the natural logarithm of `x` | double, float, INT64, INT32 | double | ln(x) | +| log10 | Returns the base 10 logarithm of `x` | double, float, INT64, INT32 | double | log10(x) | +| round | Rounds `x` to the nearest integer | double, float, INT64, INT32 | double | round(x) | +| round | Rounds `x` to `d` decimal places | double, float, INT64, INT32 | double | round(x, d) | +| sqrt | Returns the square root of `x`. | double, float, INT64, INT32 | double | sqrt(x) | +| e | Returns Euler’s number `e`. | | double | e() | +| pi | Pi (π) | | double | pi() | + +## 6. 
Bitwise Functions
+
+> Supported from version V2.0.6
+
+Example raw data is as follows:
+
+```
+IoTDB:database1> select * from bit_table
++-----------------------------+---------+------+-----+
+|                         time|device_id|length|width|
++-----------------------------+---------+------+-----+
+|2025-10-29T15:59:42.957+08:00|       d1|    14|   12|
+|2025-10-29T15:58:59.399+08:00|       d3|    15|   10|
+|2025-10-29T15:59:32.769+08:00|       d2|    13|   12|
++-----------------------------+---------+------+-----+
+
+-- Table creation statement
+CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD);
+
+-- Write data
+INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12);
+```
+
+### 6.1 bit\_count(num, bits)
+
+The `bit_count(num, bits)` function is used to count the number of 1s in the binary representation of the integer `num` under the specified bit width `bits`.
+
+#### 6.1.1 Syntax Definition
+
+```
+bit_count(num, bits) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+
+  * **num:** Any integer value (Int32 or Int64)
+  * **bits:** Integer value, with a valid range of 2\~64
+
+Note: An error will be raised if the number of `bits` is insufficient to represent `num` (using **two's complement signed representation**): `Argument exception, the scalar function num must be representable with the bits specified. [num] cannot be represented with [bits] bits.`
+
+* Usage Methods
+
+  * Two specific numbers: `bit_count(9, 64)`
+  * Column and a number: `bit_count(column1, 64)`
+  * Between two columns: `bit_count(column1, column2)`
+
+#### 6.1.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bit_count(2,8) from bit_table
++-----+
+|_col0|
++-----+
+|    1|
++-----+
+-- Two specific numbers
+IoTDB:database1> select distinct bit_count(-5,8) from bit_table
++-----+
+|_col0|
++-----+
+|    7|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bit_count(length,8) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    3|
+|    15|    4|
+|    13|    3|
++------+-----+
+-- Insufficient bits
+IoTDB:database1> select length,bit_count(length,2) from bit_table
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits.
+```
+
+### 6.2 bitwise\_and(x, y)
+
+The `bitwise_and(x, y)` function performs a logical AND operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise AND operation result.
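+
+For instance, `bitwise_and(19, 25)` compares the two operands bit by bit (this is the same computation shown in the examples of 6.2.2 below):
+
+```
+  19 = 10011
+  25 = 11001
+ AND = 10001 = 17
+```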
+
+#### 6.2.1 Syntax Definition
+
+```
+bitwise_and(x, y) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+
+  * **x, y**: Must be integer values of data type Int32 or Int64
+* Usage Methods
+
+  * Two specific numbers: `bitwise_and(19, 25)`
+  * Column and a number: `bitwise_and(column1, 25)`
+  * Between two columns: `bitwise_and(column1, column2)`
+
+#### 6.2.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_and(19,25) from bit_table
++-----+
+|_col0|
++-----+
+|   17|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_and(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    8|
+|    15|    9|
+|    13|    9|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|   12|
+|    15|   10|   10|
+|    13|   12|   12|
++------+-----+-----+
+```
+
+### 6.3 bitwise\_not(x)
+
+The `bitwise_not(x)` function performs a logical NOT operation on each bit of the integer x based on its two's complement representation, and returns the bitwise NOT operation result.
+
+#### 6.3.1 Syntax Definition
+
+```
+bitwise_not(x) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+
+  * **x**: Must be an integer value of data type Int32 or Int64
+* Usage Methods
+
+  * Specific number: `bitwise_not(5)`
+  * Single column operation: `bitwise_not(column1)`
+
+#### 6.3.2 Usage Examples
+
+```
+-- Specific number
+IoTDB:database1> select distinct bitwise_not(5) from bit_table
++-----+
+|_col0|
++-----+
+|   -6|
++-----+
+-- Single column
+IoTDB:database1> select length, bitwise_not(length) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|  -15|
+|    15|  -16|
+|    13|  -14|
++------+-----+
+```
+
+### 6.4 bitwise\_or(x, y)
+
+The `bitwise_or(x, y)` function performs a logical OR operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise OR operation result.
+
+#### 6.4.1 Syntax Definition
+
+```
+bitwise_or(x, y) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+
+  * **x, y**: Must be integer values of data type Int32 or Int64
+* Usage Methods
+
+  * Two specific numbers: `bitwise_or(19, 25)`
+  * Column and a number: `bitwise_or(column1, 25)`
+  * Between two columns: `bitwise_or(column1, column2)`
+
+#### 6.4.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_or(19,25) from bit_table
++-----+
+|_col0|
++-----+
+|   27|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bitwise_or(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|   31|
+|    15|   31|
+|    13|   29|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|   14|
+|    15|   10|   15|
+|    13|   12|   13|
++------+-----+-----+
+```
+
+### 6.5 bitwise\_xor(x, y)
+
+The `bitwise_xor(x, y)` function performs a logical XOR (exclusive OR) operation on each bit of two integers x and y based on their two's complement representation, and returns the bitwise XOR operation result. XOR rule: same bits result in 0, different bits result in 1.
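+
+For instance, `bitwise_xor(19, 25)` compares the two operands bit by bit (this is the same computation shown in the examples of 6.5.2 below):
+
+```
+  19 = 10011
+  25 = 11001
+ XOR = 01010 = 10
+```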
+
+#### 6.5.1 Syntax Definition
+
+```
+bitwise_xor(x, y) -> INT64 -- The return type is Int64
+```
+
+* Parameter Description
+
+  * **x, y**: Must be integer values of data type Int32 or Int64
+* Usage Methods
+
+  * Two specific numbers: `bitwise_xor(19, 25)`
+  * Column and a number: `bitwise_xor(column1, 25)`
+  * Between two columns: `bitwise_xor(column1, column2)`
+
+#### 6.5.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table
++-----+
+|_col0|
++-----+
+|   10|
++-----+
+-- Column and a number
+IoTDB:database1> select length,bitwise_xor(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|   23|
+|    15|   22|
+|    13|   20|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    2|
+|    15|   10|    5|
+|    13|   12|    1|
++------+-----+-----+
+```
+
+### 6.6 bitwise\_left\_shift(value, shift)
+
+The `bitwise_left_shift(value, shift)` function returns the result of shifting the binary representation of integer `value` left by `shift` bits. The left shift operation moves bits towards the higher-order direction, filling the vacated lower-order bits with 0s, and discarding the higher-order bits that overflow. Equivalent to: `value << shift`.
+
+#### 6.6.1 Syntax Definition
+
+```
+bitwise_left_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value
+```
+
+* Parameter Description
+
+  * **value**: The integer value to shift left. Must be of data type Int32 or Int64.
+  * **shift**: The number of bits to shift. Must be of data type Int32 or Int64.
+* Usage Methods
+
+  * Two specific numbers: `bitwise_left_shift(1, 2)`
+  * Column and a number: `bitwise_left_shift(column1, 2)`
+  * Between two columns: `bitwise_left_shift(column1, column2)`
+
+#### 6.6.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table
++-----+
+|_col0|
++-----+
+|    4|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|   56|
+|    15|   60|
+|    13|   52|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    0|
+|    15|   10|    0|
+|    13|   12|    0|
++------+-----+-----+
+```
+
+### 6.7 bitwise\_right\_shift(value, shift)
+
+The `bitwise_right_shift(value, shift)` function returns the result of logically (unsigned) right shifting the binary representation of integer `value` by `shift` bits. The logical right shift operation moves bits towards the lower-order direction, filling the vacated higher-order bits with 0s, and discarding the lower-order bits that overflow.
+
+#### 6.7.1 Syntax Definition
+
+```
+bitwise_right_shift(value, shift) -> [same as value] -- The return type is the same as the data type of value
+```
+
+* Parameter Description
+
+  * **value**: The integer value to shift right. Must be of data type Int32 or Int64.
+  * **shift**: The number of bits to shift. Must be of data type Int32 or Int64.
+* Usage Methods
+
+  * Two specific numbers: `bitwise_right_shift(8, 3)`
+  * Column and a number: `bitwise_right_shift(column1, 3)`
+  * Between two columns: `bitwise_right_shift(column1, column2)`
+
+#### 6.7.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table
++-----+
+|_col0|
++-----+
+|    1|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    1|
+|    15|    1|
+|    13|    1|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    0|
+|    15|   10|    0|
+|    13|   12|    0|
++------+-----+-----+
+```
+
+### 6.8 bitwise\_right\_shift\_arithmetic(value, shift)
+
+The `bitwise_right_shift_arithmetic(value, shift)` function returns the result of arithmetically right shifting the binary representation of integer `value` by `shift` bits. The arithmetic right shift operation moves bits towards the lower-order direction, discarding the lower-order bits that overflow, and filling the vacated higher-order bits with the sign bit (0 for positive numbers, 1 for negative numbers) to preserve the sign of the number.
+
+#### 6.8.1 Syntax Definition
+
+```
+bitwise_right_shift_arithmetic(value, shift) -> [same as value] -- The return type is the same as the data type of value
+```
+
+* Parameter Description
+
+  * **value**: The integer value to shift right. Must be of data type Int32 or Int64.
+  * **shift**: The number of bits to shift. Must be of data type Int32 or Int64.
+* Usage Methods
+
+  * Two specific numbers: `bitwise_right_shift_arithmetic(12, 2)`
+  * Column and a number: `bitwise_right_shift_arithmetic(column1, 64)`
+  * Between two columns: `bitwise_right_shift_arithmetic(column1, column2)`
+
+#### 6.8.2 Usage Examples
+
+```
+-- Two specific numbers
+IoTDB:database1> select distinct bitwise_right_shift_arithmetic(12,2) from bit_table
++-----+
+|_col0|
++-----+
+|    3|
++-----+
+-- Column and a number
+IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    1|
+|    15|    1|
+|    13|    1|
++------+-----+
+-- Between two columns
+IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    0|
+|    15|   10|    0|
+|    13|   12|    0|
++------+-----+-----+
+```
+
+
+## 7. Conditional Expressions
+
+### 7.1 CASE
+
+CASE expressions come in two forms: **Simple CASE** and **Searched CASE**.
+
+#### 7.1.1 Simple CASE
+
+The simple form evaluates each value expression from left to right until it finds a match with the given expression:
+
+```SQL
+CASE expression
+    WHEN value THEN result
+    [ WHEN ... ]
+    [ ELSE result ]
+END
+```
+
+If a matching value is found, the corresponding result is returned. If no match is found, the result from the `ELSE` clause (if provided) is returned; otherwise, `NULL` is returned.
+
+Example:
+
+```SQL
+SELECT a,
+       CASE a
+           WHEN 1 THEN 'one'
+           WHEN 2 THEN 'two'
+           ELSE 'many'
+       END
+```
+
+#### 7.1.2 Searched CASE
+
+The searched form evaluates each Boolean condition from left to right until a `TRUE` condition is found, then returns the corresponding result:
+
+```SQL
+CASE
+    WHEN condition THEN result
+    [ WHEN ... 
] + [ ELSE result ] +END +``` + +If no condition evaluates to `TRUE`, the `ELSE` clause result (if provided) is returned; otherwise, `NULL` is returned. + +Example: + +```SQL +SELECT a, b, + CASE + WHEN a = 1 THEN 'aaa' + WHEN b = 2 THEN 'bbb' + ELSE 'ccc' + END +``` + +### 7.2 COALESCE + +Returns the first non-null value from the given list of parameters. + +```SQL +coalesce(value1, value2[, ...]) +``` + +## 8. Conversion Functions + +### 8.1 Conversion Functions + +#### 8.1.1 cast(value AS type) → type + +Explicitly converts a value to the specified type. This can be used to convert strings (`VARCHAR`) to numeric types or numeric values to string types. Starting from V2.0.8, OBJECT type can be explicitly cast to STRING type. + +If the conversion fails, a runtime error is thrown. + +Example: + +```SQL +SELECT * + FROM table1 + WHERE CAST(time AS DATE) + IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE)); +``` + +#### 8.1.2 try_cast(value AS type) → type + +Similar to `CAST()`. If the conversion fails, returns `NULL` instead of throwing an error. + +Example: + +```SQL +SELECT * + FROM table1 + WHERE try_cast(time AS DATE) + IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE)); +``` + +### 8.2 Format Function + +This function generates and returns a formatted string based on a specified format string and input arguments. Similar to Java’s `String.format` or C’s `printf`, it allows developers to construct dynamic string templates using placeholder syntax. Predefined format specifiers in the template are replaced precisely with corresponding argument values, producing a complete string that adheres to specific formatting requirements. + +#### 8.2.1 Syntax + +```SQL +format(pattern, ...args) -> STRING +``` + +**Parameters** + +* `pattern`: A format string containing static text and one or more format specifiers (e.g., `%s`, `%d`), or any expression returning a `STRING`/`TEXT` type. +* `args`: Input arguments to replace format specifiers. Constraints: + * Number of arguments ≥ 1. + * Multiple arguments must be comma-separated (e.g., `arg1, arg2`). + * Total arguments can exceed the number of specifiers in `pattern` but cannot be fewer, otherwise an exception is triggered. + +**Return Value** + +* Formatted result string of type `STRING`. + +#### 8.2.2 Usage Examples + +1. Format Floating-Point Numbers + ```SQL + IoTDB:database1> SELECT format('%.5f', humidity) FROM table1 WHERE humidity = 35.4; + +--------+ + | _col0| + +--------+ + |35.40000| + +--------+ + ``` +2. Format Integers + ```SQL + IoTDB:database1> SELECT format('%03d', 8) FROM table1 LIMIT 1; + +-----+ + |_col0| + +-----+ + | 008| + +-----+ + ``` +3. 
Format Dates and Timestamps
+
+* Locale-Specific Date
+
+```SQL
+IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) FROM table1 LIMIT 1;
++-----------------------+
+|                  _col0|
++-----------------------+
+|Monday, January 1, 2024|
++-----------------------+
+```
+
+* Remove Timezone Information
+
+```SQL
+IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1;
++-----------------------+
+|                  _col0|
++-----------------------+
+|2024-01-01 00:00:00.000|
++-----------------------+
+```
+
+* Second-Level Timestamp Precision
+
+```SQL
+IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) FROM table1 LIMIT 1;
++-------------------+
+|              _col0|
++-------------------+
+|2024-01-01 00:00:00|
++-------------------+
+```
+
+* Date/Time Format Symbols
+
+| **Symbol** | **Description** |
+| ---------- | --------------- |
+| 'H' | 24-hour format (two digits, zero-padded), i.e. 00 - 23 |
+| 'I' | 12-hour format (two digits, zero-padded), i.e. 01 - 12 |
+| 'k' | 24-hour format (no padding), i.e. 0 - 23 |
+| 'l' | 12-hour format (no padding), i.e. 1 - 12 |
+| 'M' | Minute (two digits, zero-padded), i.e. 00 - 59 |
+| 'S' | Second (two digits, zero-padded; supports leap seconds), i.e. 00 - 60 |
+| 'L' | Millisecond (three digits, zero-padded), i.e. 000 - 999 |
+| 'N' | Nanosecond (nine digits, zero-padded), i.e. 000000000 - 999999999 |
+| 'p' | Locale-specific lowercase AM/PM marker (e.g., "am", "pm"). Prefix with `T` to force uppercase (e.g., "AM"). |
+| 'z' | RFC 822 timezone offset from GMT (e.g., `-0800`). Adjusts for daylight saving. Uses the JVM's default timezone for `long`/`Long`/`Date`. |
+| 'Z' | Timezone abbreviation (e.g., "PST"). Adjusts for daylight saving. Uses the JVM's default timezone; Formatter's timezone overrides the argument's timezone if specified. |
+| 's' | Seconds since Unix epoch (1970-01-01 00:00:00 UTC), i.e. Long.MIN\_VALUE/1000 to Long.MAX\_VALUE/1000 |
+| 'Q' | Milliseconds since Unix epoch, i.e. Long.MIN\_VALUE to Long.MAX\_VALUE |
+
+* Common Date/Time Conversion Characters
+
+| **Symbol** | **Description** |
+| ---------- | --------------- |
+| 'B' | Locale-specific full month name, for example "January", "February" |
+| 'b' | Locale-specific abbreviated month name, for example "Jan", "Feb" |
+| 'h' | Same as `b` |
+| 'A' | Locale-specific full weekday name, for example "Sunday", "Monday" |
+| 'a' | Locale-specific short weekday name, for example "Sun", "Mon" |
+| 'C' | Year divided by 100 (two digits, zero-padded) |
+| 'Y' | Year (minimum 4 digits, zero-padded) |
+| 'y' | Last two digits of year (zero-padded) |
+| 'j' | Day of year (three digits, zero-padded) |
+| 'm' | Month (two digits, zero-padded) |
+| 'd' | Day of month (two digits, zero-padded) |
+| 'e' | Day of month (no padding) |
+
+4. Format Strings
+   ```SQL
+   IoTDB:database1> SELECT format('The measurement status is: %s', status) FROM table2 LIMIT 1;
+   +-------------------------------+
+   |                          _col0|
+   +-------------------------------+
+   |The measurement status is: true|
+   +-------------------------------+
+   ```
+5. 
Format Percentage Sign
+   ```SQL
+   IoTDB:database1> SELECT format('%s%%', 99.9) FROM table1 LIMIT 1;
+   +-----+
+   |_col0|
+   +-----+
+   |99.9%|
+   +-----+
+   ```
+
+#### 8.2.3 Format Conversion Failure Scenarios
+
+1. Type Mismatch Errors
+
+* Timestamp Type Conflict
+
+  If the format specifier includes time-related tokens (e.g., `%Y-%m-%d`) and the argument:
+
+  * Is not a `DATE`/`TIMESTAMP` value.
+  * Is not a `TIMESTAMP` while the specifier requires sub-day precision (e.g., `%H`, `%M`).
+
+```SQL
+-- Example 1
+IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float)
+
+-- Example 2
+IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float)
+```
+
+* Floating-Point Type Conflict
+
+  Using `%f` with non-numeric arguments (e.g., strings or booleans):
+
+```SQL
+IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean)
+```
+
+2. Argument Count Mismatch
+   The number of arguments must equal or exceed the number of format specifiers.
+
+   ```SQL
+   IoTDB:database1> SELECT format('%.5f %03d', humidity) FROM table1 WHERE humidity = 35.4;
+   Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d')
+   ```
+3. Invalid Invocation Errors
+
+   Triggered if:
+
+   * Total arguments < 2 (must include `pattern` and at least one argument).
+   * `pattern` is not of type `STRING`/`TEXT`.
+
+```SQL
+-- Example 1
+IoTDB:database1> select format('%s') from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+
+-- Example 2
+IoTDB:database1> select format(123, humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+```
+
+
+## 9. String Functions and Operators
+
+### 9.1 String Operators
+
+#### 9.1.1 || Operator
+
+The `||` operator is used for string concatenation and functions the same as the `concat` function.
+
+#### 9.1.2 LIKE Statement
+
+The `LIKE` statement is used for pattern matching. For detailed usage, refer to Pattern Matching: [LIKE](#_10-1-like).
+
+### 9.2 String Functions
+
+| Function Name | Description | Input | Output | Usage |
+| :------------ | :---------- | :---- | :----- | :---- |
+| `length` | Returns the number of characters in a string (not byte length). 
| `string` (the string whose length is to be calculated) | INT32 | length(string) | +| `upper` | Converts all letters in a string to uppercase. | string | String | upper(string) | +| `lower` | Converts all letters in a string to lowercase. | string | String | lower(string) | +| `trim` | Removes specified leading and/or trailing characters from a string. **Parameters:** - `specification` (optional): Specifies which side to trim: - `BOTH`: Removes characters from both sides (default). - `LEADING`: Removes characters from the beginning. - `TRAILING`: Removes characters from the end. - `trimcharacter` (optional): Character to be removed (default is whitespace). - `string`: The target string. | string | String | trim([ [ specification ] [ trimcharacter ] FROM ] string) Example:`trim('!' FROM '!foo!');` —— `'foo'` | +| `strpos` | Returns the position of the first occurrence of `subStr` in `sourceStr`. **Notes:** - Position starts at `1`. - Returns `0` if `subStr` is not found. - Positioning is based on characters, not byte arrays. | `sourceStr` (string to be searched), `subStr` (substring to find) | INT32 | strpos(sourceStr, subStr) | +| `starts_with` | Checks if `sourceStr` starts with the specified `prefix`. | `sourceStr`, `prefix` | Boolean | starts_with(sourceStr, prefix) | +| `ends_with` | Checks if `sourceStr` ends with the specified `suffix`. | `sourceStr`, `suffix` | Boolean | ends_with(sourceStr, suffix) | +| `concat` | Concatenates `string1, string2, ..., stringN`. Equivalent to the `\|\|` operator. | `string`, `text` | String | concat(str1, str2, ...) or str1 \|\| str2 ... | +| `strcmp` | Compares two strings lexicographically. **Returns:** - `-1` if `str1 < str2` - `0` if `str1 = str2` - `1` if `str1 > str2` - `NULL` if either `str1` or `str2` is `NULL` | `string1`, `string2` | INT32 | strcmp(str1, str2) | +| `replace` | Removes all occurrences of `search` in `string`. | `string`, `search` | String | replace(string, search) | +| `replace` | Replaces all occurrences of `search` in `string` with `replace`. | `string`, `search`, `replace` | String | replace(string, search, replace) | +| `substring` | Extracts a substring from `start_index` to the end of the string. **Notes:** - `start_index` starts at `1`. - Returns `NULL` if input is `NULL`. - Throws an error if `start_index` is greater than string length. | `string`, `start_index` | String | substring(string from start_index)or substring(string, start_index) | +| `substring` | Extracts a substring of `length` characters starting from `start_index`. **Notes:** - `start_index` starts at `1`. - Returns `NULL` if input is `NULL`. - Throws an error if `start_index` is greater than string length. - Throws an error if `length` is negative. - If `start_index + length` exceeds `int.MAX`, an overflow error may occur. | `string`, `start_index`, `length` | String | substring(string from start_index for length) or substring(string, start_index, length) | + +## 10. Pattern Matching Functions + +### 10.1 LIKE + +#### 10.1.1 Usage + +The `LIKE `operator is used to compare a value with a pattern. It is commonly used in the `WHERE `clause to match specific patterns within strings. + +#### 10.1.2 Syntax + +```SQL +... column [NOT] LIKE 'pattern' ESCAPE 'character'; +``` + +#### 10.1.3 Match rules + +- Matching characters is case-sensitive +- The pattern supports two wildcard characters: + - `_` matches any single character + - `%` matches zero or more characters + +#### 10.1.4 Notes + +- `LIKE` pattern matching applies to the entire string by default. 
Therefore, to match a sequence anywhere within a string, the pattern must start and end with a percent sign.
+- To match the escape character itself, double it (e.g., use `\\` to match `\`).
+
+#### 10.1.5 Examples
+
+#### **Example 1: Match Strings Starting with a Specific Character**
+
+- **Description:** Find all names that start with the letter `E` (e.g., `Europe`).
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'E%';
+```
+
+#### **Example 2: Exclude a Specific Pattern**
+
+- **Description:** Find all names that do **not** start with the letter `E`.
+
+```SQL
+SELECT * FROM table1 WHERE continent NOT LIKE 'E%';
+```
+
+#### **Example 3: Match Strings of a Specific Length**
+
+- **Description:** Find all names that start with `A`, end with `a`, and have exactly two characters in between (e.g., `Asia`).
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'A__a';
+```
+
+#### **Example 4: Escape Special Characters**
+
+- **Description:** Find all names that start with `South_` (e.g., `South_America`). The underscore (`_`) is a wildcard character, so it needs to be escaped using `\`.
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\';
+```
+
+#### **Example 5: Match the Escape Character Itself**
+
+- **Description:** Find all names that start with 'South\'. Since `\` is the escape character, it must be escaped using `\\`.
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\';
+```
+
+### 10.2 regexp_like
+
+#### 10.2.1 Usage
+
+Evaluates whether the regular expression pattern is present within the given string.
+
+#### 10.2.2 Syntax
+
+```SQL
+regexp_like(string, pattern);
+```
+
+#### 10.2.3 Notes
+
+- The pattern for `regexp_like` only needs to be contained within the string, and does not need to match the entire string.
+- To match the entire string, use the `^` and `$` anchors.
+- `^` signifies the "start of the string," and `$` signifies the "end of the string."
+- Regular expressions use Java's regular-expression syntax, with the following exceptions to be aware of:
+  - Multiline mode
+    1. Enabled by: `(?m)`.
+    2. Recognizes only `\n` as the line terminator.
+    3. Does not support the `(?d)` flag, and its use is prohibited.
+  - Case-insensitive matching
+    1. Enabled by: `(?i)`.
+    2. Based on Unicode rules, it does not support context-dependent and localized matching.
+    3. Does not support the `(?u)` flag, and its use is prohibited.
+  - Character classes
+    1. Within character classes (e.g., `[A-Z123]`), `\Q` and `\E` are not supported and are treated as literals.
+  - Unicode character classes (`\p{prop}`)
+    1. Underscores in names: All underscores in names must be removed (e.g., `OldItalic` instead of `Old_Italic`).
+    2. Scripts: Specify directly, without the need for `Is`, `script=`, or `sc=` prefixes (e.g., `\p{Hiragana}`).
+    3. Blocks: Must use the `In` prefix; the `block=` and `blk=` prefixes are not supported (e.g., `\p{InMongolian}`).
+    4. Categories: Specify directly, without the need for `Is`, `general_category=`, or `gc=` prefixes (e.g., `\p{L}`).
+    5. Binary properties: Specify directly, without `Is` (e.g., `\p{NoncharacterCodePoint}`).
+
+#### 10.2.4 Examples
+
+#### Example 1: **Matching strings containing a specific pattern**
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true
+```
+
+- **Explanation**: Determines whether the string '1a 2b 14m' contains a substring that matches the pattern `\d+b`.
+  - `\d+` means "one or more digits".
+  - `b` represents the letter b.
+  - In `'1a 2b 14m'`, the substring `'2b'` matches this pattern, so it returns `true`.
+
+
+#### **Example 2: Matching the entire string**
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false
+```
+
+- **Explanation**: Checks if the string `'1a 2b 14m'` matches the pattern `^\\d+b$` exactly.
+  - `\d+` means "one or more digits".
+  - `b` represents the letter b.
+  - `'1a 2b 14m'` does not match this pattern because the entire string is not just digits followed by `b` (matching fails at the letter `a`), so it returns `false`.
+
+## 11. Timeseries Windowing Functions
+
+The sample data is as follows:
+
+```SQL
+IoTDB> SELECT * FROM bid;
++-----------------------------+--------+-----+
+|                         time|stock_id|price|
++-----------------------------+--------+-----+
+|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
++-----------------------------+--------+-----+
+
+-- Create table statement
+CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD);
+-- Insert data
+INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0);
+```
+
+### 11.1 HOP
+
+#### 11.1.1 Function Description
+
+The HOP function segments data into overlapping time windows for analysis, assigning each row to all windows that overlap with its timestamp. If windows overlap (when SLIDE < SIZE), data will be duplicated across multiple windows.
+
+#### 11.1.2 Function Definition
+
+```SQL
+HOP(data, timecol, size, slide[, origin])
+```
+
+#### 11.1.3 Parameter Description
+
+| Parameter | Type | Attributes | Description |
+| ----------- | -------- | --------------------------------- | ------------------------- |
+| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table |
+| TIMECOL | Scalar | String (default: 'time') | Time column |
+| SIZE | Scalar | Long integer | Window size |
+| SLIDE | Scalar | Long integer | Sliding step |
+| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time |
+
+
+#### 11.1.4 Returned Results
+
+The HOP function returns:
+
+* `window_start`: Window start time (inclusive)
+* `window_end`: Window end time (exclusive)
+* Pass-through columns: All input columns from DATA
+
+#### 11.1.5 Usage Example
+
+```SQL
+IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0| 
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 201.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.2 SESSION + +#### 11.2.1 Function Description + +The SESSION function groups data into sessions based on time intervals. It checks the time gap between consecutive rows—rows with gaps smaller than the threshold (GAP) are grouped into the current window, while larger gaps trigger a new window. 
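+
+With the sample `bid` data and `GAP => 2m`, for example, the AAPL rows at 09:05, 09:07, and 09:09 fall into a single session, while on the TESL side the 8-minute jump from 09:07 to 09:15 exceeds the gap threshold and opens a new session, as the output in 11.2.5 shows.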
+ +#### 11.2.2 Function Definition + +```SQL +SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap) +``` +#### 11.2.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| TIMECOL | Scalar | String (default: 'time') | Time column name | +| GAP | Scalar | Long integer | Session gap threshold | + +#### 11.2.4 Returned Results + +The SESSION function returns: + +* `window_start`: Time of the first row in the session +* `window_end`: Time of the last row in the session +* Pass-through columns: All input columns from DATA + +#### 11.2.5 Usage Example + +```SQL +IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY SESSION when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.3 VARIATION + +#### 11.3.1 Function Description + +The VARIATION function groups data based on value differences. The first row becomes the baseline for the first window. Subsequent rows are compared to the baseline—if the difference is within the threshold (DELTA), they join the current window; otherwise, a new window starts with that row as the new baseline. 
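+
+With the sample `bid` data and `DELTA => 2.0`, for example, the first AAPL price 100.0 becomes the baseline; 103.0 differs from it by 3.0 (> 2.0), so it opens window 1 and becomes the new baseline, and 102.0 (difference 1.0) joins that same window, as the output in 11.3.5 shows.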
+ +#### 11.3.2 Function Definition + +```sql +VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta) +``` + +#### 11.3.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| COL | Scalar | String | Column for difference calculation | +| DELTA | Scalar | Float | Difference threshold | + +#### 11.3.4 Returned Results + +The VARIATION function returns: + +* `window_index`: Window identifier +* Pass-through columns: All input columns from DATA + +#### 11.3.5 Usage Example + +```sql +IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY VARIATION when combined with GROUP BY +IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id; ++-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.4 CAPACITY + +#### 11.4.1 Function Description + +The CAPACITY function groups data into fixed-size windows, where each window contains up to SIZE rows. 
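+
+With the sample `bid` data and `SIZE => 2`, for example, each stock's three rows split into one window of two rows followed by a window holding the remaining row, as the output in 11.4.5 shows.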
+ +#### 11.4.2 Function Definition + +```sql +CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size) +``` + +#### 11.4.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | ---------------------------- | -------------------------------------- | +| DATA | Table | SET SEMANTIC, PASS THROUGH | Input table with partition/sort keys | +| SIZE | Scalar | Long integer | Window size (row count) | + +#### 11.4.4 Returned Results + +The CAPACITY function returns: + +* `window_index`: Window identifier +* Pass-through columns: All input columns from DATA + +#### 11.4.5 Usage Example + +```sql +IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY COUNT when combined with GROUP BY +IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id; ++-----------------------------+-----------------------------+--------+-----+ +| start_time| end_time|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|101.5| +|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.5 TUMBLE + +#### 11.5.1 Function Description + +The TUMBLE function assigns each row to a non-overlapping, fixed-size time window based on a timestamp attribute. 
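+
+Window assignment can be read as the same boundary rule used by `date_bin`: a row with time `t` lands in the window starting at `ORIGIN + floor((t - ORIGIN) / SIZE) * SIZE`. With `SIZE => 10m`, the 09:15 TESL row therefore falls into `[09:10, 09:20)`, as the output in 11.5.5 shows.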
+
+#### 11.5.2 Function Definition
+
+```sql
+TUMBLE(data, timecol, size[, origin])
+```
+
+#### 11.5.3 Parameter Description
+
+| Parameter   | Type     | Attributes                        | Description               |
+| ----------- | -------- | --------------------------------- | ------------------------- |
+| DATA        | Table    | ROW SEMANTIC, PASS THROUGH        | Input table               |
+| TIMECOL     | Scalar   | String (default: 'time')          | Time column               |
+| SIZE        | Scalar   | Long integer (positive)           | Window size               |
+| ORIGIN      | Scalar   | Timestamp (default: Unix epoch)   | First window start time   |
+
+#### 11.5.4 Returned Results
+
+The TUMBLE function returns:
+
+* `window_start`: Window start time (inclusive)
+* `window_end`: Window end time (exclusive)
+* Pass-through columns: All input columns from DATA
+
+#### 11.5.5 Usage Example
+
+```sql
+IoTDB> SELECT * FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
+
+### 11.6 CUMULATE
+
+#### 11.6.1 Function Description
+
+The CUMULATE function creates expanding windows from an initial window, maintaining the same start time while incrementally extending the end time by STEP until reaching SIZE. Each window contains all elements within its range. For example, with a 1-hour STEP and 24-hour SIZE, daily windows would be: `[00:00, 01:00)`, `[00:00, 02:00)`, ..., `[00:00, 24:00)`.
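+
+Assuming the same origin-based alignment as TUMBLE, the windows a row belongs to can be sketched as follows (a conceptual sketch, not IoTDB's literal implementation; consistent with the example output in 11.6.5):
+
+```sql
+-- Conceptual sketch only: for a row with timestamp t,
+-- base = ORIGIN + floor((t - ORIGIN) / SIZE) * SIZE
+-- the row appears in every window [base, base + k * STEP)
+-- for k = 1 .. SIZE / STEP where base + k * STEP > t.
+```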
+ +#### 11.6.2 Function Definition + +```sql +CUMULATE(data, timecol, size, step[, origin]) +``` + +#### 11.6.3 Parameter Description + +| Parameter | Type | Attributes | Description | +| ----------- | -------- | --------------------------------- | --------------------------------------------------- | +| DATA | Table | ROW SEMANTIC, PASS THROUGH | Input table | +| TIMECOL | Scalar | String (default: 'time') | Time column | +| SIZE | Scalar | Long integer (positive) | Window size (must be an integer multiple of STEP) | +| STEP | Scalar | Long integer (positive) | Expansion step | +| ORIGIN | Scalar | Timestamp (default: Unix epoch) | First window start time | + +> Note: An error `Cumulative table function requires size must be an integral multiple of step` occurs if SIZE is not divisible by STEP. + +#### 11.6.4 Returned Results + +The CUMULATE function returns: + +* `window_start`: Window start time (inclusive) +* `window_end`: Window end time (exclusive) +* Pass-through columns: All input columns from DATA + +#### 11.6.5 Usage Example + +```sql +IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- Equivalent to tree mode's GROUP BY TIME when combined with GROUP BY +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| TESL| 201.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| 
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00| TESL| 195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00| TESL| 195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00| AAPL| 100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| AAPL| 101.5| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` diff --git a/src/UserGuide/latest-Table/SQL-Manual/Featured-Functions_timecho.md b/src/UserGuide/latest-Table/SQL-Manual/Featured-Functions_timecho.md index 715fcba68..97cefa7a3 100644 --- a/src/UserGuide/latest-Table/SQL-Manual/Featured-Functions_timecho.md +++ b/src/UserGuide/latest-Table/SQL-Manual/Featured-Functions_timecho.md @@ -823,7 +823,7 @@ Result: **Description**: Reads binary content from an `OBJECT` type column and returns a `BLOB` type (raw binary data of the object). -> Supported since V2.0.8-beta +> Supported since V2.0.8 **Syntax:** ```SQL diff --git a/src/UserGuide/latest-Table/SQL-Manual/Fill-Clause.md b/src/UserGuide/latest-Table/SQL-Manual/Fill-Clause.md index fa7a7f49f..ed6c776bb 100644 --- a/src/UserGuide/latest-Table/SQL-Manual/Fill-Clause.md +++ b/src/UserGuide/latest-Table/SQL-Manual/Fill-Clause.md @@ -67,7 +67,7 @@ intervalField IoTDB supports the following three methods to fill NULL values: -1. **PREVIOUS Fill:** Uses the most recent non-NULL value from the same column to fill NULL values. Starting from V2.0.8-beta, only this method supports the OBJECT type. +1. **PREVIOUS Fill:** Uses the most recent non-NULL value from the same column to fill NULL values. Starting from V2.0.8, only this method supports the OBJECT type. 2. **LINEAR Fill:** Applies linear interpolation using the nearest previous and next non-NULL values in the same column. 3. **CONSTANT Fill:** Fills NULL values with a specified constant. diff --git a/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements.md b/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements.md index 36808b379..35fee992c 100644 --- a/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements.md +++ b/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements.md @@ -1,3 +1,6 @@ +--- +redirectTo: SQL-Maintenance-Statements_apache.html +--- - -# Management Statements - -## 1. 
Status Inspection - -### 1.1 View Current Tree/Table Mode - -**Syntax:** - -```SQL -showCurrentSqlDialectStatement - : SHOW CURRENT_SQL_DIALECT - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CURRENT_SQL_DIALECT -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TABLE| -+-----------------+ -``` - -### 1.2 View Current User - -**Syntax:** - -```SQL -showCurrentUserStatement - : SHOW CURRENT_USER - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CURRENT_USER -+-----------+ -|CurrentUser| -+-----------+ -| root| -+-----------+ -``` - -### 1.3 View Connected Database - -**Syntax:** - -```SQL -showCurrentDatabaseStatement - : SHOW CURRENT_DATABASE - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CURRENT_DATABASE; -+---------------+ -|CurrentDatabase| -+---------------+ -| null| -+---------------+ - -IoTDB> USE test; - -IoTDB> SHOW CURRENT_DATABASE; -+---------------+ -|CurrentDatabase| -+---------------+ -| test| -+---------------+ -``` - -### 1.4 View Cluster Version - -**Syntax:** - -```SQL -showVersionStatement - : SHOW VERSION - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW VERSION -+-------+---------+ -|Version|BuildInfo| -+-------+---------+ -|2.0.1.2| 1ca4008| -+-------+---------+ -``` - -### 1.5 View Key Cluster Parameters - -**Syntax:** - -```SQL -showVariablesStatement - : SHOW VARIABLES - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW VARIABLES -+----------------------------------+-----------------------------------------------------------------+ -| Variable| Value| -+----------------------------------+-----------------------------------------------------------------+ -| ClusterName| defaultCluster| -| DataReplicationFactor| 1| -| SchemaReplicationFactor| 1| -| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| -|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| TimePartitionOrigin| 0| -| TimePartitionInterval| 604800000| -| ReadConsistencyLevel| strong| -| SchemaRegionPerDataNode| 1| -| DataRegionPerDataNode| 0| -| SeriesSlotNum| 1000| -| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| -| DiskSpaceWarningThreshold| 0.05| -| TimestampPrecision| ms| -+----------------------------------+-----------------------------------------------------------------+ -``` - -### 1.6 View Cluster ID - -**Syntax:** - -```SQL -showClusterIdStatement - : SHOW (CLUSTERID | CLUSTER_ID) - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CLUSTER_ID -+------------------------------------+ -| ClusterId| -+------------------------------------+ -|40163007-9ec1-4455-aa36-8055d740fcda| -``` - -### 1.7 View Server Time - -Shows time of the DataNode server directly connected to client - -**Syntax:** - -```SQL -showCurrentTimestampStatement - : SHOW CURRENT_TIMESTAMP - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW CURRENT_TIMESTAMP -+-----------------------------+ -| CurrentTimestamp| -+-----------------------------+ -|2025-02-17T11:11:52.987+08:00| -+-----------------------------+ -``` - - -### 1.8 View Region Information - -**Description**: Displays regions' information of the current cluster. 
- -**Syntax**: - -```SQL -showRegionsStatement - : SHOW REGIONS - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW REGIONS -``` - -**Result**: - -```SQL -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | -| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| -| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -``` - -### 1.9 View Available Nodes - -**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state. - -> This feature is supported starting from v2.0.8-beta. - -**Syntax**: - -```SQL -showAvailableUrlsStatement - : SHOW AVAILABLE URLS - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW AVAILABLE URLS -``` - -**Result**: - -```SQL -+----------+-------+ -|RpcAddress|RpcPort| -+----------+-------+ -| 0.0.0.0| 6667| -+----------+-------+ -``` - - -## 2. Status Configuration - -### 2.1 Set Connection Tree/Table Mode - -**Syntax:** - -```SQL -SET SQL_DIALECT EQ (TABLE | TREE) -``` - -**Example:** - -```SQL -IoTDB> SET SQL_DIALECT=TABLE -IoTDB> SHOW CURRENT_SQL_DIALECT -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TABLE| -+-----------------+ -``` - -### 2.2 Update Configuration Items - -**Syntax:** - -```SQL -setConfigurationStatement - : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? - ; - -propertyAssignments - : property (',' property)* - ; - -property - : identifier EQ propertyValue - ; - -propertyValue - : DEFAULT - | expression - ; -``` - -**Example:** - -```SQL -IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; -``` - -### 2.3 Load Manually Modified Configuration - -**Syntax:** - -```SQL -loadConfigurationStatement - : LOAD CONFIGURATION localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> LOAD CONFIGURATION ON LOCAL; -``` - -### 2.4 Set System Status - -**Syntax:** - -```SQL -setSystemStatusStatement - : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> SET SYSTEM TO READONLY ON CLUSTER; -``` - -## 3. Data Management - -### 3.1 Flush Memory Table to Disk - -**Syntax:** - -```SQL -flushStatement - : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? - ; - -booleanValue - : TRUE | FALSE - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> FLUSH test_db TRUE ON LOCAL; -``` - -### 3.2 Clear DataNode Cache - -**Syntax:** - -```SQL -clearCacheStatement - : CLEAR clearCacheOptions? CACHE localOrClusterMode? 
- ; - -clearCacheOptions - : ATTRIBUTE - | QUERY - | ALL - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> CLEAR ALL CACHE ON LOCAL; -``` - -## 4. Data Repair - -### 4.1 Start Background TsFile Repair - -**Syntax:** - -```SQL -startRepairDataStatement - : START REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> START REPAIR DATA ON CLUSTER; -``` - -### 4.2 Pause TsFile Repair - -**Syntax:** - -```SQL -stopRepairDataStatement - : STOP REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Example:** - -```SQL -IoTDB> STOP REPAIR DATA ON CLUSTER; -``` - -## 5. Query Operations - -### 5.1 View Active Queries - -**Syntax:** - -```SQL -showQueriesStatement - : SHOW (QUERIES | QUERY PROCESSLIST) - (WHERE where=booleanExpression)? - (ORDER BY sortItem (',' sortItem)*)? - limitOffsetClause - ; -``` - -**Example:** - -```SQL -IoTDB> SHOW QUERIES WHERE elapsed_time > 30 -+-----------------------+-----------------------------+-----------+------------+------------+----+ -| query_id| start_time|datanode_id|elapsed_time| statement|user| -+-----------------------+-----------------------------+-----------+------------+------------+----+ -|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00| 1| 32.283|show queries|root| -+-----------------------+-----------------------------+-----------+------------+------------+----+ -``` - -### 5.2 Terminate Queries - -**Syntax:** - -```SQL -killQueryStatement - : KILL (QUERY queryId=string | ALL QUERIES) - ; -``` - -**Example:** - -```SQL -IoTDB> KILL QUERY 20250108_101015_00000_1; -- teminate specific query -IoTDB> KILL ALL QUERIES; -- teminate all query -``` - -### 5.3 Query Performance Analysis - -#### 5.3.1 View Execution Plan - -**Syntax:** - -```SQL -EXPLAIN -``` - -Detailed syntax reference: [EXPLAIN STATEMENT](../User-Manual/Query-Performance-Analysis.md#_1-explain-statement) - -**Example:** - -```SQL -IoTDB> explain select * from t1 -+-----------------------------------------------------------------------------------------------+ -| distribution plan| -+-----------------------------------------------------------------------------------------------+ -| ┌─────────────────────────────────────────────┐ | -| │OutputNode-4 │ | -| │OutputColumns-[time, device_id, type, speed] │ | -| │OutputSymbols: [time, device_id, type, speed]│ | -| └─────────────────────────────────────────────┘ | -| │ | -| │ | -| ┌─────────────────────────────────────────────┐ | -| │Collect-21 │ | -| │OutputSymbols: [time, device_id, type, speed]│ | -| └─────────────────────────────────────────────┘ | -| ┌───────────────────────┴───────────────────────┐ | -| │ │ | -|┌─────────────────────────────────────────────┐ ┌───────────┐ | -|│TableScan-19 │ │Exchange-28│ | -|│QualifiedTableName: test.t1 │ └───────────┘ | -|│OutputSymbols: [time, device_id, type, speed]│ │ | -|│DeviceNumber: 1 │ │ | -|│ScanOrder: ASC │ ┌─────────────────────────────────────────────┐| -|│PushDownOffset: 0 │ │TableScan-20 │| -|│PushDownLimit: 0 │ │QualifiedTableName: test.t1 │| -|│PushDownLimitToEachDevice: false │ │OutputSymbols: [time, device_id, type, speed]│| -|│RegionId: 2 │ │DeviceNumber: 1 │| -|└─────────────────────────────────────────────┘ │ScanOrder: ASC │| -| │PushDownOffset: 0 │| -| │PushDownLimit: 0 │| -| │PushDownLimitToEachDevice: false │| -| │RegionId: 1 │| -| └─────────────────────────────────────────────┘| 
-+-----------------------------------------------------------------------------------------------+ -``` - -#### 5.3.2 Analyze Query Performance - -**Syntax:** - -```SQL -EXPLAIN ANALYZE [VERBOSE] -``` - -Detailed syntax reference: [EXPLAIN ANALYZE STATEMENT](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-statement) - -**Example:** - -```SQL -IoTDB> explain analyze verbose select * from t1 -+-----------------------------------------------------------------------------------------------+ -| Explain Analyze| -+-----------------------------------------------------------------------------------------------+ -|Analyze Cost: 38.860 ms | -|Fetch Partition Cost: 9.888 ms | -|Fetch Schema Cost: 54.046 ms | -|Logical Plan Cost: 10.102 ms | -|Logical Optimization Cost: 17.396 ms | -|Distribution Plan Cost: 2.508 ms | -|Dispatch Cost: 22.126 ms | -|Fragment Instances Count: 2 | -| | -|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| -| Total Wall Time: 18 ms | -| Cost of initDataQuerySource: 6.153 ms | -| Seq File(unclosed): 1, Seq File(closed): 0 | -| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | -| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | -| Query Statistics: | -| loadBloomFilterFromCacheCount: 0 | -| loadBloomFilterFromDiskCount: 0 | -| loadBloomFilterActualIOSize: 0 | -| loadBloomFilterTime: 0.000 | -| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | -| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | -| loadTimeSeriesMetadataFromCacheCount: 0 | -| loadTimeSeriesMetadataFromDiskCount: 0 | -| loadTimeSeriesMetadataActualIOSize: 0 | -| constructAlignedChunkReadersMemCount: 1 | -| constructAlignedChunkReadersMemTime: 0.294 | -| loadChunkFromCacheCount: 0 | -| loadChunkFromDiskCount: 0 | -| loadChunkActualIOSize: 0 | -| pageReadersDecodeAlignedMemCount: 1 | -| pageReadersDecodeAlignedMemTime: 0.047 | -| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | -| CPU Time: 5.523 ms | -| output: 2 rows | -| HasNext() Called Count: 6 | -| Next() Called Count: 5 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 31]: CollectNode(CollectOperator) | -| CPU Time: 5.512 ms | -| output: 2 rows | -| HasNext() Called Count: 6 | -| Next() Called Count: 5 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 29]: TableScanNode(TableScanOperator) | -| CPU Time: 5.439 ms | -| output: 1 rows | -| HasNext() Called Count: 3 -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| DeviceNumber: 1 | -| CurrentDeviceIndex: 0 | -| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | -| CPU Time: 0.053 ms | -| output: 1 rows | -| HasNext() Called Count: 2 | -| Next() Called Count: 1 | -| Estimated Memory Size: : 131072 | -| | -|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| -| Total Wall Time: 13 ms | -| Cost of initDataQuerySource: 5.725 ms | -| Seq File(unclosed): 1, Seq File(closed): 0 | -| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | -| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | -| Query Statistics: | -| loadBloomFilterFromCacheCount: 0 | -| loadBloomFilterFromDiskCount: 0 | -| loadBloomFilterActualIOSize: 0 | -| loadBloomFilterTime: 0.000 | -| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | -| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | -| loadTimeSeriesMetadataFromCacheCount: 0 | -| loadTimeSeriesMetadataFromDiskCount: 0 | -| loadTimeSeriesMetadataActualIOSize: 0 | -| constructAlignedChunkReadersMemCount: 1 | -| constructAlignedChunkReadersMemTime: 
0.001 | -| loadChunkFromCacheCount: 0 | -| loadChunkFromDiskCount: 0 | -| loadChunkActualIOSize: 0 | -| pageReadersDecodeAlignedMemCount: 1 | -| pageReadersDecodeAlignedMemTime: 0.007 | -| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | -| CPU Time: 0.270 ms | -| output: 1 rows | -| HasNext() Called Count: 3 | -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 30]: TableScanNode(TableScanOperator) | -| CPU Time: 0.250 ms | -| output: 1 rows | -| HasNext() Called Count: 3 | -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| DeviceNumber: 1 | -| CurrentDeviceIndex: 0 | -+-----------------------------------------------------------------------------------------------+ -``` diff --git a/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_apache.md b/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_apache.md new file mode 100644 index 000000000..36808b379 --- /dev/null +++ b/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_apache.md @@ -0,0 +1,652 @@ + + +# Management Statements + +## 1. Status Inspection + +### 1.1 View Current Tree/Table Mode + +**Syntax:** + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 1.2 View Current User + +**Syntax:** + +```SQL +showCurrentUserStatement + : SHOW CURRENT_USER + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_USER ++-----------+ +|CurrentUser| ++-----------+ +| root| ++-----------+ +``` + +### 1.3 View Connected Database + +**Syntax:** + +```SQL +showCurrentDatabaseStatement + : SHOW CURRENT_DATABASE + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| null| ++---------------+ + +IoTDB> USE test; + +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| test| ++---------------+ +``` + +### 1.4 View Cluster Version + +**Syntax:** + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW VERSION ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.5 View Key Cluster Parameters + +**Syntax:** + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +**Example:** + +```SQL +IoTDB> SHOW VARIABLES ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.6 View Cluster ID + +**Syntax:** + 
+```SQL
+showClusterIdStatement
+    : SHOW (CLUSTERID | CLUSTER_ID)
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW CLUSTER_ID
++------------------------------------+
+|                           ClusterId|
++------------------------------------+
+|40163007-9ec1-4455-aa36-8055d740fcda|
++------------------------------------+
+```
+
+### 1.7 View Server Time
+
+Shows the time of the DataNode server to which the client is directly connected.
+
+**Syntax:**
+
+```SQL
+showCurrentTimestampStatement
+    : SHOW CURRENT_TIMESTAMP
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW CURRENT_TIMESTAMP
++-----------------------------+
+|             CurrentTimestamp|
++-----------------------------+
+|2025-02-17T11:11:52.987+08:00|
++-----------------------------+
+```
+
+
+### 1.8 View Region Information
+
+**Description**: Displays information about the regions of the current cluster.
+
+**Syntax**:
+
+```SQL
+showRegionsStatement
+    : SHOW REGIONS
+    ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW REGIONS
+```
+
+**Result**:
+
+```SQL
++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|RegionId|        Type| Status|  Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress|  Role|             CreateTime|TsFileSize|
++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|       6|SchemaRegion|Running|tcollector|          670|          0|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.194|          |
+|       7|  DataRegion|Running|tcollector|          335|        335|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB|
+|       8|  DataRegion|Running|tcollector|          335|        335|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB|
++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+```
+
+### 1.9 View Available Nodes
+
+**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state.
+
+> This feature is supported starting from v2.0.8-beta.
+
+**Syntax**:
+
+```SQL
+showAvailableUrlsStatement
+    : SHOW AVAILABLE URLS
+    ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW AVAILABLE URLS
+```
+
+**Result**:
+
+```SQL
++----------+-------+
+|RpcAddress|RpcPort|
++----------+-------+
+|   0.0.0.0|   6667|
++----------+-------+
+```
+
+
+## 2. Status Configuration
+
+### 2.1 Set Connection Tree/Table Mode
+
+**Syntax:**
+
+```SQL
+SET SQL_DIALECT EQ (TABLE | TREE)
+```
+
+**Example:**
+
+```SQL
+IoTDB> SET SQL_DIALECT=TABLE
+IoTDB> SHOW CURRENT_SQL_DIALECT
++-----------------+
+|CurrentSqlDialect|
++-----------------+
+|            TABLE|
++-----------------+
+```
+
+### 2.2 Update Configuration Items
+
+**Syntax:**
+
+```SQL
+setConfigurationStatement
+    : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)?
+    ;
+
+propertyAssignments
+    : property (',' property)*
+    ;
+
+property
+    : identifier EQ propertyValue
+    ;
+
+propertyValue
+    : DEFAULT
+    | expression
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1;
+```
+
+### 2.3 Load Manually Modified Configuration
+
+**Syntax:**
+
+```SQL
+loadConfigurationStatement
+    : LOAD CONFIGURATION localOrClusterMode?
+ ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> LOAD CONFIGURATION ON LOCAL; +``` + +### 2.4 Set System Status + +**Syntax:** + +```SQL +setSystemStatusStatement + : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> SET SYSTEM TO READONLY ON CLUSTER; +``` + +## 3. Data Management + +### 3.1 Flush Memory Table to Disk + +**Syntax:** + +```SQL +flushStatement + : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? + ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> FLUSH test_db TRUE ON LOCAL; +``` + +### 3.2 Clear DataNode Cache + +**Syntax:** + +```SQL +clearCacheStatement + : CLEAR clearCacheOptions? CACHE localOrClusterMode? + ; + +clearCacheOptions + : ATTRIBUTE + | QUERY + | ALL + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> CLEAR ALL CACHE ON LOCAL; +``` + +## 4. Data Repair + +### 4.1 Start Background TsFile Repair + +**Syntax:** + +```SQL +startRepairDataStatement + : START REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> START REPAIR DATA ON CLUSTER; +``` + +### 4.2 Pause TsFile Repair + +**Syntax:** + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. Query Operations + +### 5.1 View Active Queries + +**Syntax:** + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)? 
+        limitOffsetClause
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW QUERIES WHERE elapsed_time > 30
++-----------------------+-----------------------------+-----------+------------+------------+----+
+|               query_id|                   start_time|datanode_id|elapsed_time|   statement|user|
++-----------------------+-----------------------------+-----------+------------+------------+----+
+|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00|          1|      32.283|show queries|root|
++-----------------------+-----------------------------+-----------+------------+------------+----+
+```
+
+### 5.2 Terminate Queries
+
+**Syntax:**
+
+```SQL
+killQueryStatement
+    : KILL (QUERY queryId=string | ALL QUERIES)
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> KILL QUERY 20250108_101015_00000_1; -- terminate a specific query
+IoTDB> KILL ALL QUERIES; -- terminate all queries
+```
+
+### 5.3 Query Performance Analysis
+
+#### 5.3.1 View Execution Plan
+
+**Syntax:**
+
+```SQL
+EXPLAIN
+```
+
+Detailed syntax reference: [EXPLAIN STATEMENT](../User-Manual/Query-Performance-Analysis.md#_1-explain-statement)
+
+**Example:**
+
+```SQL
+IoTDB> explain select * from t1
++-----------------------------------------------------------------------------------------------+
+|                                                                              distribution plan|
++-----------------------------------------------------------------------------------------------+
+|                        ┌─────────────────────────────────────────────┐                        |
+|                        │OutputNode-4                                 │                        |
+|                        │OutputColumns-[time, device_id, type, speed] │                        |
+|                        │OutputSymbols: [time, device_id, type, speed]│                        |
+|                        └─────────────────────────────────────────────┘                        |
+|                                               │                                               |
+|                                               │                                               |
+|                        ┌─────────────────────────────────────────────┐                        |
+|                        │Collect-21                                   │                        |
+|                        │OutputSymbols: [time, device_id, type, speed]│                        |
+|                        └─────────────────────────────────────────────┘                        |
+|                       ┌───────────────────────┴───────────────────────┐                       |
+|                       │                                               │                       |
+|┌─────────────────────────────────────────────┐                  ┌───────────┐                 |
+|│TableScan-19                                 │                  │Exchange-28│                 |
+|│QualifiedTableName: test.t1                  │                  └───────────┘                 |
+|│OutputSymbols: [time, device_id, type, speed]│                        │                       |
+|│DeviceNumber: 1                              │                        │                       |
+|│ScanOrder: ASC                               │ ┌─────────────────────────────────────────────┐|
+|│PushDownOffset: 0                            │ │TableScan-20                                 │|
+|│PushDownLimit: 0                             │ │QualifiedTableName: test.t1                  │|
+|│PushDownLimitToEachDevice: false             │ │OutputSymbols: [time, device_id, type, speed]│|
+|│RegionId: 2                                  │ │DeviceNumber: 1                              │|
+|└─────────────────────────────────────────────┘ │ScanOrder: ASC                               │|
+|                                                │PushDownOffset: 0                            │|
+|                                                │PushDownLimit: 0                             │|
+|                                                │PushDownLimitToEachDevice: false             │|
+|                                                │RegionId: 1                                  │|
+|                                                └─────────────────────────────────────────────┘|
++-----------------------------------------------------------------------------------------------+
+```
+
+#### 5.3.2 Analyze Query Performance
+
+**Syntax:**
+
+```SQL
+EXPLAIN ANALYZE [VERBOSE]
+```
+
+Detailed syntax reference: [EXPLAIN ANALYZE STATEMENT](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-statement)
+
+**Example:**
+
+```SQL
+IoTDB> explain analyze verbose select * from t1
++-----------------------------------------------------------------------------------------------+
+|                                                                                Explain Analyze|
++-----------------------------------------------------------------------------------------------+
+|Analyze Cost: 38.860 ms                                                                        |
+|Fetch Partition Cost: 9.888 ms                                                                 |
+|Fetch Schema Cost: 54.046 ms                                                                   |
+|Logical Plan Cost: 10.102 ms                                                                   |
+|Logical Optimization Cost: 17.396 ms                                                           |
+|Distribution Plan Cost: 2.508 ms                                                               |
+|Dispatch Cost: 22.126 ms                                                                       |
+|Fragment Instances Count: 2                                                                    |
+|                                                                                               |
+|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 
2][State: FINISHED]| +| Total Wall Time: 18 ms | +| Cost of initDataQuerySource: 6.153 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.294 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.047 | +| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 5.523 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 31]: CollectNode(CollectOperator) | +| CPU Time: 5.512 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 29]: TableScanNode(TableScanOperator) | +| CPU Time: 5.439 ms | +| output: 1 rows | +| HasNext() Called Count: 3 +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | +| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | +| CPU Time: 0.053 ms | +| output: 1 rows | +| HasNext() Called Count: 2 | +| Next() Called Count: 1 | +| Estimated Memory Size: : 131072 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| +| Total Wall Time: 13 ms | +| Cost of initDataQuerySource: 5.725 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.001 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.007 | +| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 0.270 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 30]: TableScanNode(TableScanOperator) | +| CPU Time: 0.250 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | ++-----------------------------------------------------------------------------------------------+ +``` diff --git a/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md b/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md new file mode 100644 index 000000000..8b975d660 --- /dev/null +++ 
b/src/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md
@@ -0,0 +1,652 @@
+
+
+# Management Statements
+
+## 1. Status Inspection
+
+### 1.1 View Current Tree/Table Mode
+
+**Syntax:**
+
+```SQL
+showCurrentSqlDialectStatement
+    : SHOW CURRENT_SQL_DIALECT
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW CURRENT_SQL_DIALECT
++-----------------+
+|CurrentSqlDialect|
++-----------------+
+|            TABLE|
++-----------------+
+```
+
+### 1.2 View Current User
+
+**Syntax:**
+
+```SQL
+showCurrentUserStatement
+    : SHOW CURRENT_USER
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW CURRENT_USER
++-----------+
+|CurrentUser|
++-----------+
+|       root|
++-----------+
+```
+
+### 1.3 View Connected Database
+
+**Syntax:**
+
+```SQL
+showCurrentDatabaseStatement
+    : SHOW CURRENT_DATABASE
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW CURRENT_DATABASE;
++---------------+
+|CurrentDatabase|
++---------------+
+|           null|
++---------------+
+
+IoTDB> USE test;
+
+IoTDB> SHOW CURRENT_DATABASE;
++---------------+
+|CurrentDatabase|
++---------------+
+|           test|
++---------------+
+```
+
+### 1.4 View Cluster Version
+
+**Syntax:**
+
+```SQL
+showVersionStatement
+    : SHOW VERSION
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW VERSION
++-------+---------+
+|Version|BuildInfo|
++-------+---------+
+|2.0.1.2|  1ca4008|
++-------+---------+
+```
+
+### 1.5 View Key Cluster Parameters
+
+**Syntax:**
+
+```SQL
+showVariablesStatement
+    : SHOW VARIABLES
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW VARIABLES
++----------------------------------+-----------------------------------------------------------------+
+|                          Variable|                                                            Value|
++----------------------------------+-----------------------------------------------------------------+
+|                       ClusterName|                                                   defaultCluster|
+|             DataReplicationFactor|                                                                1|
+|           SchemaReplicationFactor|                                                                1|
+|  DataRegionConsensusProtocolClass|                     org.apache.iotdb.consensus.iot.IoTConsensus|
+|SchemaRegionConsensusProtocolClass|                 org.apache.iotdb.consensus.ratis.RatisConsensus|
+|  ConfigNodeConsensusProtocolClass|                 org.apache.iotdb.consensus.ratis.RatisConsensus|
+|               TimePartitionOrigin|                                                                0|
+|             TimePartitionInterval|                                                        604800000|
+|              ReadConsistencyLevel|                                                           strong|
+|           SchemaRegionPerDataNode|                                                                1|
+|             DataRegionPerDataNode|                                                                0|
+|                     SeriesSlotNum|                                                             1000|
+|           SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor|
+|         DiskSpaceWarningThreshold|                                                             0.05|
+|                TimestampPrecision|                                                               ms|
++----------------------------------+-----------------------------------------------------------------+
+```
+
+### 1.6 View Cluster ID
+
+**Syntax:**
+
+```SQL
+showClusterIdStatement
+    : SHOW (CLUSTERID | CLUSTER_ID)
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW CLUSTER_ID
++------------------------------------+
+|                           ClusterId|
++------------------------------------+
+|40163007-9ec1-4455-aa36-8055d740fcda|
++------------------------------------+
+```
+
+### 1.7 View Server Time
+
+Shows the time of the DataNode server to which the client is directly connected.
+
+**Syntax:**
+
+```SQL
+showCurrentTimestampStatement
+    : SHOW CURRENT_TIMESTAMP
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW CURRENT_TIMESTAMP
++-----------------------------+
+|             CurrentTimestamp|
++-----------------------------+
+|2025-02-17T11:11:52.987+08:00|
++-----------------------------+
+```
+
+
+### 1.8 View Region Information
+
+**Description**: Displays information about the regions of the current cluster.
+ +**Syntax**: + +```SQL +showRegionsStatement + : SHOW REGIONS + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW REGIONS +``` + +**Result**: + +```SQL ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | +| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| +| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +``` + +### 1.9 View Available Nodes + +**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state. + +> This feature is supported starting from v2.0.8. + +**Syntax**: + +```SQL +showAvailableUrlsStatement + : SHOW AVAILABLE URLS + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW AVAILABLE URLS +``` + +**Result**: + +```SQL ++----------+-------+ +|RpcAddress|RpcPort| ++----------+-------+ +| 0.0.0.0| 6667| ++----------+-------+ +``` + + +## 2. Status Configuration + +### 2.1 Set Connection Tree/Table Mode + +**Syntax:** + +```SQL +SET SQL_DIALECT EQ (TABLE | TREE) +``` + +**Example:** + +```SQL +IoTDB> SET SQL_DIALECT=TABLE +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 2.2 Update Configuration Items + +**Syntax:** + +```SQL +setConfigurationStatement + : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? + ; + +propertyAssignments + : property (',' property)* + ; + +property + : identifier EQ propertyValue + ; + +propertyValue + : DEFAULT + | expression + ; +``` + +**Example:** + +```SQL +IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; +``` + +### 2.3 Load Manually Modified Configuration + +**Syntax:** + +```SQL +loadConfigurationStatement + : LOAD CONFIGURATION localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> LOAD CONFIGURATION ON LOCAL; +``` + +### 2.4 Set System Status + +**Syntax:** + +```SQL +setSystemStatusStatement + : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> SET SYSTEM TO READONLY ON CLUSTER; +``` + +## 3. Data Management + +### 3.1 Flush Memory Table to Disk + +**Syntax:** + +```SQL +flushStatement + : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? + ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Example:** + +```SQL +IoTDB> FLUSH test_db TRUE ON LOCAL; +``` + +### 3.2 Clear DataNode Cache + +**Syntax:** + +```SQL +clearCacheStatement + : CLEAR clearCacheOptions? CACHE localOrClusterMode? 
+    ;
+
+clearCacheOptions
+    : ATTRIBUTE
+    | QUERY
+    | ALL
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> CLEAR ALL CACHE ON LOCAL;
+```
+
+## 4. Data Repair
+
+### 4.1 Start Background TsFile Repair
+
+**Syntax:**
+
+```SQL
+startRepairDataStatement
+    : START REPAIR DATA localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> START REPAIR DATA ON CLUSTER;
+```
+
+### 4.2 Pause TsFile Repair
+
+**Syntax:**
+
+```SQL
+stopRepairDataStatement
+    : STOP REPAIR DATA localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> STOP REPAIR DATA ON CLUSTER;
+```
+
+## 5. Query Operations
+
+### 5.1 View Active Queries
+
+**Syntax:**
+
+```SQL
+showQueriesStatement
+    : SHOW (QUERIES | QUERY PROCESSLIST)
+        (WHERE where=booleanExpression)?
+        (ORDER BY sortItem (',' sortItem)*)?
+        limitOffsetClause
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> SHOW QUERIES WHERE elapsed_time > 30
++-----------------------+-----------------------------+-----------+------------+------------+----+
+|               query_id|                   start_time|datanode_id|elapsed_time|   statement|user|
++-----------------------+-----------------------------+-----------+------------+------------+----+
+|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00|          1|      32.283|show queries|root|
++-----------------------+-----------------------------+-----------+------------+------------+----+
+```
+
+### 5.2 Terminate Queries
+
+**Syntax:**
+
+```SQL
+killQueryStatement
+    : KILL (QUERY queryId=string | ALL QUERIES)
+    ;
+```
+
+**Example:**
+
+```SQL
+IoTDB> KILL QUERY 20250108_101015_00000_1; -- terminate a specific query
+IoTDB> KILL ALL QUERIES; -- terminate all queries
+```
+
+### 5.3 Query Performance Analysis
+
+#### 5.3.1 View Execution Plan
+
+**Syntax:**
+
+```SQL
+EXPLAIN
+```
+
+Detailed syntax reference: [EXPLAIN STATEMENT](../User-Manual/Query-Performance-Analysis.md#_1-explain-statement)
+
+**Example:**
+
+```SQL
+IoTDB> explain select * from t1
++-----------------------------------------------------------------------------------------------+
+|                                                                              distribution plan|
++-----------------------------------------------------------------------------------------------+
+|                        ┌─────────────────────────────────────────────┐                        |
+|                        │OutputNode-4                                 │                        |
+|                        │OutputColumns-[time, device_id, type, speed] │                        |
+|                        │OutputSymbols: [time, device_id, type, speed]│                        |
+|                        └─────────────────────────────────────────────┘                        |
+|                                               │                                               |
+|                                               │                                               |
+|                        ┌─────────────────────────────────────────────┐                        |
+|                        │Collect-21                                   │                        |
+|                        │OutputSymbols: [time, device_id, type, speed]│                        |
+|                        └─────────────────────────────────────────────┘                        |
+|                       ┌───────────────────────┴───────────────────────┐                       |
+|                       │                                               │                       |
+|┌─────────────────────────────────────────────┐                  ┌───────────┐                 |
+|│TableScan-19                                 │                  │Exchange-28│                 |
+|│QualifiedTableName: test.t1                  │                  └───────────┘                 |
+|│OutputSymbols: [time, device_id, type, speed]│                        │                       |
+|│DeviceNumber: 1                              │                        │                       |
+|│ScanOrder: ASC                               │ ┌─────────────────────────────────────────────┐|
+|│PushDownOffset: 0                            │ │TableScan-20                                 │|
+|│PushDownLimit: 0                             │ │QualifiedTableName: test.t1                  │|
+|│PushDownLimitToEachDevice: false             │ │OutputSymbols: [time, device_id, type, speed]│|
+|│RegionId: 2                                  │ │DeviceNumber: 1                              │|
+|└─────────────────────────────────────────────┘ │ScanOrder: ASC                               │|
+|                                                │PushDownOffset: 0                            │|
+|                                                │PushDownLimit: 0                             │|
+|                                                │PushDownLimitToEachDevice: false             │|
+|                                                │RegionId: 1                                  │|
+|                                                └─────────────────────────────────────────────┘|
++-----------------------------------------------------------------------------------------------+ +``` + +#### 5.3.2 Analyze Query Performance + +**Syntax:** + +```SQL +EXPLAIN ANALYZE [VERBOSE] +``` + +Detailed syntax reference: [EXPLAIN ANALYZE STATEMENT](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-statement) + +**Example:** + +```SQL +IoTDB> explain analyze verbose select * from t1 ++-----------------------------------------------------------------------------------------------+ +| Explain Analyze| ++-----------------------------------------------------------------------------------------------+ +|Analyze Cost: 38.860 ms | +|Fetch Partition Cost: 9.888 ms | +|Fetch Schema Cost: 54.046 ms | +|Logical Plan Cost: 10.102 ms | +|Logical Optimization Cost: 17.396 ms | +|Distribution Plan Cost: 2.508 ms | +|Dispatch Cost: 22.126 ms | +|Fragment Instances Count: 2 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| +| Total Wall Time: 18 ms | +| Cost of initDataQuerySource: 6.153 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.294 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.047 | +| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 5.523 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 31]: CollectNode(CollectOperator) | +| CPU Time: 5.512 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 29]: TableScanNode(TableScanOperator) | +| CPU Time: 5.439 ms | +| output: 1 rows | +| HasNext() Called Count: 3 +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | +| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | +| CPU Time: 0.053 ms | +| output: 1 rows | +| HasNext() Called Count: 2 | +| Next() Called Count: 1 | +| Estimated Memory Size: : 131072 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| +| Total Wall Time: 13 ms | +| Cost of initDataQuerySource: 5.725 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 
0.001 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.007 | +| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 0.270 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 30]: TableScanNode(TableScanOperator) | +| CPU Time: 0.250 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | ++-----------------------------------------------------------------------------------------------+ +``` diff --git a/src/UserGuide/latest-Table/SQL-Manual/Select-Clause.md b/src/UserGuide/latest-Table/SQL-Manual/Select-Clause.md index 57abbe5de..1a6da4ae1 100644 --- a/src/UserGuide/latest-Table/SQL-Manual/Select-Clause.md +++ b/src/UserGuide/latest-Table/SQL-Manual/Select-Clause.md @@ -1,3 +1,6 @@ +--- +redirectTo: Select-Clause_apache.html +--- - -# SELECT Clauses - -**SELECT Clause** specifies the columns included in the query results. - -## 1. Syntax Overview - -```sql -SELECT setQuantifier? selectItem (',' selectItem)* - -selectItem - : expression (AS? identifier)? #selectSingle - | tableName '.' ASTERISK (AS columnAliases)? #selectAll - | ASTERISK #selectAll - ; -setQuantifier - : DISTINCT - | ALL - ; -``` - -- It supports aggregate functions (e.g., `SUM`, `AVG`, `COUNT`) and window functions, logically executed last in the query process. -- DISTINCT Keyword: `SELECT DISTINCT column_name` ensures that the values in the query results are unique, removing duplicates. -- COLUMNS Function: The COLUMNS function is supported in the SELECT clause for column filtering. It can be combined with expressions, allowing the expression's logic to apply to all columns selected by the function. - -## 2. Detailed Syntax: - -Each `selectItem` can take one of the following forms: - -1. **Expression**: `expression [[AS] column_alias]` defines a single output column and optionally assigns an alias. -2. **All Columns from a Relation**: `relation.*` selects all columns from a specified relation. Column aliases are not allowed in this case. -3. **All Columns in the Result Set**: `*` selects all columns returned by the query. Column aliases are not allowed. - -Usage scenarios for DISTINCT: - -1. **SELECT Statement**: Use DISTINCT in the SELECT statement to remove duplicate items from the query results. - -2. **Aggregate Functions**: When used with aggregate functions, DISTINCT only processes non-duplicate rows in the input dataset. - -3. **AGROUP BY Clause**: Use ALL and DISTINCT quantifiers in the GROUP BY clause to determine whether each duplicate grouping set produces distinct output rows. - -`COLUMNS` Function: - -1. **`COLUMNS(*)`**: Matches all columns and supports combining with expressions. -2. **`COLUMNS(regexStr) ? AS identifier`**: Regular expression matching - - Selects columns whose names match the specified regular expression `(regexStr)` and supports combining with expressions. - - Allows renaming columns by referencing groups captured by the regular expression. If `AS` is omitted, the original column name is displayed in the format `_coln_original_name` (where `n` is the column’s position in the result table). - - Renaming Syntax: - - Use parentheses () in regexStr to define capture groups. - - Reference captured groups in identifier using `'$index'`. 
- - Note: The identifier must be enclosed in double quotes if it contains special characters like `$`. - -## 3. Example Data - - -The [Example Data page](../Reference/Sample-Data.md)page provides SQL statements to construct table schemas and insert data. By downloading and executing these statements in the IoTDB CLI, you can import the data into IoTDB. This data can be used to test and run the example SQL queries included in this documentation, allowing you to reproduce the described results. - -### 3.1 Selection List - -#### 3.1.1 Star Expression - -The asterisk (`*`) selects all columns in a table. Note that it cannot be used with most functions, except for cases like `COUNT(*)`. - -**Example**: Selecting all columns from a table. - - -```sql -SELECT * FROM table1; -``` - -Results: - -```sql -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| -|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| -|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| -|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| -|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -Total line number = 18 -It costs 0.653s -``` - -#### 3.1.2 Aggregate Functions - -Aggregate functions summarize multiple rows into a single value. When aggregate functions are present in the `SELECT` clause, the query is treated as an **aggregate query**. All expressions in the query must either be part of an aggregate function or specified in the [GROUP BY clause](../SQL-Manual/GroupBy-Clause.md). - -**Example 1**: Total number of rows in a table. 
- -```sql -SELECT count(*) FROM table1; -``` - -Results: - -```sql -+-----+ -|_col0| -+-----+ -| 18| -+-----+ -Total line number = 1 -It costs 0.091s -``` - -**Example 2**: Total rows grouped by region. - -```sql -SELECT region, count(*) - FROM table1 - GROUP BY region; -``` - -Results: - -```sql -+------+-----+ -|region|_col1| -+------+-----+ -| 上海| 9| -| 北京| 9| -+------+-----+ -Total line number = 2 -It costs 0.071s -``` - -#### 3.1.3 Aliases - -The `AS` keyword assigns an alias to selected columns, improving readability by overriding existing column names. - -**Example 1**: Original table. - - -```sql -IoTDB> SELECT * FROM table1; -``` - -Results: - -```sql -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| -|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| -|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| -|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| -|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -Total line number = 18 -It costs 0.653s -``` - -**Example 2**: Assigning an alias to a single column. - -```sql -IoTDB> SELECT device_id - AS device - FROM table1; -``` - -Results: - -```sql -+------+ -|device| -+------+ -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -+------+ -Total line number = 18 -It costs 0.053s -``` - -**Example 3:** Assigning aliases to all columns. 
- -```sql -IoTDB> SELECT table1.* - AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) - FROM table1; -``` - -Results: - -```sql -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| -|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| -|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| -|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| -|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| -|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| -|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -Total line number = 18 -It costs 0.189s -``` - -#### 3.1.4 Object Type Query - -> Supported since V2.0.8-beta - -**Example 1: Directly querying Object type data** - -```sql -IoTDB:database1> SELECT s1 FROM table1 WHERE device_id = 'tag1' -+------------+ -| s1| -+------------+ -|(Object) 5 B| -+------------+ -Total line number = 1 -It costs 0.428s -``` - -**Example 2: Retrieving raw content of Object type data using `read_object` function** - -```sql -IoTDB:database1> SELECT read_object(s1) FROM table1 WHERE device_id = 'tag1' -+------------+ -| _col0| -+------------+ -|0x696f746462| -+------------+ -Total line number = 1 -It costs 0.188s -``` - - -### 3.2 Columns Function - -1. 
Without combining expressions -```sql --- Query data from columns whose names start with 'm' -IoTDB:database1> select columns('^m.*') from table1 limit 5 -+--------+-----------+ -|model_id|maintenance| -+--------+-----------+ -| E| 180| -| E| 180| -| C| 90| -| C| 90| -| C| 90| -+--------+-----------+ - - --- Query columns whose names start with 'o' - throw an exception if no columns match -IoTDB:database1> select columns('^o.*') from table1 limit 5 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' - - --- Query data from columns whose names start with 'm' and rename them with 'series_' prefix -IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 -+---------------+------------------+ -|series_model_id|series_maintenance| -+---------------+------------------+ -| E| 180| -| E| 180| -| C| 90| -| C| 90| -| C| 90| -+---------------+------------------+ -``` - -2. With Expression Combination - -- Single COLUMNS Function -```sql --- Query the minimum value of all columns -IoTDB:database1> select min(columns(*)) from table1 -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -``` - -- Multiple COLUMNS Functions in Same Expression - -> Usage Restriction: When multiple COLUMNS functions appear in the same expression, their parameters must be identical. - -```sql --- Query the sum of minimum and maximum values for columns starting with 'h' -IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 -+--------------+ -|_col0_humidity| -+--------------+ -| 79.899994| -+--------------+ - --- Error Case: Non-Identical COLUMNS Functions -IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported -``` - -- Multiple COLUMNS Functions in Different Expressions - -```sql --- Query minimum of 'h'-columns and maximum of 'h'-columns separately -IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 -+--------------+--------------+ -|_col0_humidity|_col1_humidity| -+--------------+--------------+ -| 34.8| 45.1| -+--------------+--------------+ - --- Query minimum of 'h'-columns and maximum of 'te'-columns -IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 -+--------------+-----------------+ -|_col0_humidity|_col1_temperature| -+--------------+-----------------+ -| 34.8| 90.0| -+--------------+-----------------+ -``` - -3. 
In Where Clause - -```sql --- Query data where all 'h'-columns must be > 40 (equivalent to) -IoTDB:database1> select * from table1 where columns('^h.*') > 40 -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ - ---Alternative syntax -IoTDB:database1> select * from table1 where humidity > 40 -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -``` - -## 4. Column Order in the Result Set - -- **Column Order**: The order of columns in the result set matches the order specified in the `SELECT` clause. -- **Multi-column Expressions**: If a selection expression produces multiple columns, their order follows the order in the source relation.p. \ No newline at end of file diff --git a/src/UserGuide/latest-Table/SQL-Manual/Select-Clause_apache.md b/src/UserGuide/latest-Table/SQL-Manual/Select-Clause_apache.md new file mode 100644 index 000000000..57abbe5de --- /dev/null +++ b/src/UserGuide/latest-Table/SQL-Manual/Select-Clause_apache.md @@ -0,0 +1,421 @@ + + +# SELECT Clauses + +**SELECT Clause** specifies the columns included in the query results. + +## 1. Syntax Overview + +```sql +SELECT setQuantifier? selectItem (',' selectItem)* + +selectItem + : expression (AS? identifier)? #selectSingle + | tableName '.' ASTERISK (AS columnAliases)? #selectAll + | ASTERISK #selectAll + ; +setQuantifier + : DISTINCT + | ALL + ; +``` + +- It supports aggregate functions (e.g., `SUM`, `AVG`, `COUNT`) and window functions, logically executed last in the query process. +- DISTINCT Keyword: `SELECT DISTINCT column_name` ensures that the values in the query results are unique, removing duplicates. +- COLUMNS Function: The COLUMNS function is supported in the SELECT clause for column filtering. It can be combined with expressions, allowing the expression's logic to apply to all columns selected by the function. + +## 2. Detailed Syntax: + +Each `selectItem` can take one of the following forms: + +1. **Expression**: `expression [[AS] column_alias]` defines a single output column and optionally assigns an alias. +2. 
**All Columns from a Relation**: `relation.*` selects all columns from a specified relation. Column aliases are not allowed in this case. +3. **All Columns in the Result Set**: `*` selects all columns returned by the query. Column aliases are not allowed. + +Usage scenarios for DISTINCT: + +1. **SELECT Statement**: Use DISTINCT in the SELECT statement to remove duplicate items from the query results. + +2. **Aggregate Functions**: When used with aggregate functions, DISTINCT only processes non-duplicate rows in the input dataset. + +3. **GROUP BY Clause**: Use ALL and DISTINCT quantifiers in the GROUP BY clause to determine whether each duplicate grouping set produces distinct output rows. + +`COLUMNS` Function: + +1. **`COLUMNS(*)`**: Matches all columns and supports combining with expressions. +2. **`COLUMNS(regexStr) ? AS identifier`**: Regular expression matching + - Selects columns whose names match the specified regular expression `(regexStr)` and supports combining with expressions. + - Allows renaming columns by referencing groups captured by the regular expression. If `AS` is omitted, the original column name is displayed in the format `_coln_original_name` (where `n` is the column’s position in the result table). + - Renaming Syntax: + - Use parentheses `()` in `regexStr` to define capture groups. + - Reference captured groups in `identifier` using `'$index'`. + - Note: The identifier must be enclosed in double quotes if it contains special characters like `$`. + +## 3. Example Data + + +The [Example Data page](../Reference/Sample-Data.md) provides SQL statements to construct table schemas and insert data. By downloading and executing these statements in the IoTDB CLI, you can import the data into IoTDB. This data can be used to test and run the example SQL queries included in this documentation, allowing you to reproduce the described results. + +### 3.1 Selection List + +#### 3.1.1 Star Expression + +The asterisk (`*`) selects all columns in a table. Note that it cannot be used with most functions, except for cases like `COUNT(*)`. + +**Example**: Selecting all columns from a table.
+ + +```sql +SELECT * FROM table1; +``` + +Results: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +#### 3.1.2 Aggregate Functions + +Aggregate functions summarize multiple rows into a single value. When aggregate functions are present in the `SELECT` clause, the query is treated as an **aggregate query**. All expressions in the query must either be part of an aggregate function or specified in the [GROUP BY clause](../SQL-Manual/GroupBy-Clause.md). + +**Example 1**: Total number of rows in a table. + +```sql +SELECT count(*) FROM table1; +``` + +Results: + +```sql ++-----+ +|_col0| ++-----+ +| 18| ++-----+ +Total line number = 1 +It costs 0.091s +``` + +**Example 2**: Total rows grouped by region. + +```sql +SELECT region, count(*) + FROM table1 + GROUP BY region; +``` + +Results: + +```sql ++------+-----+ +|region|_col1| ++------+-----+ +| 上海| 9| +| 北京| 9| ++------+-----+ +Total line number = 2 +It costs 0.071s +``` + +#### 3.1.3 Aliases + +The `AS` keyword assigns an alias to selected columns, improving readability by overriding existing column names. + +**Example 1**: Original table. 
+ + +```sql +IoTDB> SELECT * FROM table1; +``` + +Results: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +**Example 2**: Assigning an alias to a single column. + +```sql +IoTDB> SELECT device_id + AS device + FROM table1; +``` + +Results: + +```sql ++------+ +|device| ++------+ +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| ++------+ +Total line number = 18 +It costs 0.053s +``` + +**Example 3:** Assigning aliases to all columns. 
+ +```sql +IoTDB> SELECT table1.* + AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) + FROM table1; +``` + +Results: + +```sql ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| +|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| +|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| +|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| +|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| +|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| +|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +Total line number = 18 +It costs 0.189s +``` + +#### 3.1.4 Object Type Query + +> Supported since V2.0.8-beta + +**Example 1: Directly querying Object type data** + +```sql +IoTDB:database1> SELECT s1 FROM table1 WHERE device_id = 'tag1' ++------------+ +| s1| ++------------+ +|(Object) 5 B| ++------------+ +Total line number = 1 +It costs 0.428s +``` + +**Example 2: Retrieving raw content of Object type data using `read_object` function** + +```sql +IoTDB:database1> SELECT read_object(s1) FROM table1 WHERE device_id = 'tag1' ++------------+ +| _col0| ++------------+ +|0x696f746462| ++------------+ +Total line number = 1 +It costs 0.188s +``` + + +### 3.2 Columns Function + +1. 
Without combining expressions +```sql +-- Query data from columns whose names start with 'm' +IoTDB:database1> select columns('^m.*') from table1 limit 5 ++--------+-----------+ +|model_id|maintenance| ++--------+-----------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++--------+-----------+ + + +-- Query columns whose names start with 'o' - throw an exception if no columns match +IoTDB:database1> select columns('^o.*') from table1 limit 5 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' + + +-- Query data from columns whose names start with 'm' and rename them with 'series_' prefix +IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 ++---------------+------------------+ +|series_model_id|series_maintenance| ++---------------+------------------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++---------------+------------------+ +``` + +2. With Expression Combination + +- Single COLUMNS Function +```sql +-- Query the minimum value of all columns +IoTDB:database1> select min(columns(*)) from table1 ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +``` + +- Multiple COLUMNS Functions in Same Expression + +> Usage Restriction: When multiple COLUMNS functions appear in the same expression, their parameters must be identical. + +```sql +-- Query the sum of minimum and maximum values for columns starting with 'h' +IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 ++--------------+ +|_col0_humidity| ++--------------+ +| 79.899994| ++--------------+ + +-- Error Case: Non-Identical COLUMNS Functions +IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported +``` + +- Multiple COLUMNS Functions in Different Expressions + +```sql +-- Query minimum of 'h'-columns and maximum of 'h'-columns separately +IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 ++--------------+--------------+ +|_col0_humidity|_col1_humidity| ++--------------+--------------+ +| 34.8| 45.1| ++--------------+--------------+ + +-- Query minimum of 'h'-columns and maximum of 'te'-columns +IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 ++--------------+-----------------+ +|_col0_humidity|_col1_temperature| ++--------------+-----------------+ +| 34.8| 90.0| ++--------------+-----------------+ +``` + +3. 
In Where Clause + +```sql +-- Query data where all 'h'-columns must be > 40 (equivalent to the alternative syntax below) +IoTDB:database1> select * from table1 where columns('^h.*') > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|  上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null| +|2024-11-28T09:00:00.000+08:00|  上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null| +|2024-11-28T11:00:00.000+08:00|  上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ + +-- Alternative syntax +IoTDB:database1> select * from table1 where humidity > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|  上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null| +|2024-11-28T09:00:00.000+08:00|  上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null| +|2024-11-28T11:00:00.000+08:00|  上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +``` + +## 4. Column Order in the Result Set + +- **Column Order**: The order of columns in the result set matches the order specified in the `SELECT` clause. +- **Multi-column Expressions**: If a selection expression produces multiple columns, their order follows the order in the source relation. \ No newline at end of file diff --git a/src/UserGuide/latest-Table/SQL-Manual/Select-Clause_timecho.md b/src/UserGuide/latest-Table/SQL-Manual/Select-Clause_timecho.md new file mode 100644 index 000000000..75c5cd5c1 --- /dev/null +++ b/src/UserGuide/latest-Table/SQL-Manual/Select-Clause_timecho.md @@ -0,0 +1,421 @@ + + +# SELECT Clauses + +**SELECT Clause** specifies the columns included in the query results. + +## 1. Syntax Overview + +```sql +SELECT setQuantifier? selectItem (',' selectItem)* + +selectItem + : expression (AS? identifier)? #selectSingle + | tableName '.' ASTERISK (AS columnAliases)? #selectAll + | ASTERISK #selectAll + ; +setQuantifier + : DISTINCT + | ALL + ; +``` + +- It supports aggregate functions (e.g., `SUM`, `AVG`, `COUNT`) and window functions, logically executed last in the query process. +- DISTINCT Keyword: `SELECT DISTINCT column_name` ensures that the values in the query results are unique, removing duplicates. +- COLUMNS Function: The COLUMNS function is supported in the SELECT clause for column filtering. It can be combined with expressions, allowing the expression's logic to apply to all columns selected by the function. + +## 2. Detailed Syntax: + +Each `selectItem` can take one of the following forms: + +1. **Expression**: `expression [[AS] column_alias]` defines a single output column and optionally assigns an alias. +2.
**All Columns from a Relation**: `relation.*` selects all columns from a specified relation. Column aliases are not allowed in this case. +3. **All Columns in the Result Set**: `*` selects all columns returned by the query. Column aliases are not allowed. + +Usage scenarios for DISTINCT: + +1. **SELECT Statement**: Use DISTINCT in the SELECT statement to remove duplicate items from the query results. + +2. **Aggregate Functions**: When used with aggregate functions, DISTINCT only processes non-duplicate rows in the input dataset. + +3. **GROUP BY Clause**: Use ALL and DISTINCT quantifiers in the GROUP BY clause to determine whether each duplicate grouping set produces distinct output rows. + +`COLUMNS` Function: + +1. **`COLUMNS(*)`**: Matches all columns and supports combining with expressions. +2. **`COLUMNS(regexStr) ? AS identifier`**: Regular expression matching + - Selects columns whose names match the specified regular expression `(regexStr)` and supports combining with expressions. + - Allows renaming columns by referencing groups captured by the regular expression. If `AS` is omitted, the original column name is displayed in the format `_coln_original_name` (where `n` is the column’s position in the result table). + - Renaming Syntax: + - Use parentheses `()` in `regexStr` to define capture groups. + - Reference captured groups in `identifier` using `'$index'`. + - Note: The identifier must be enclosed in double quotes if it contains special characters like `$`. + +## 3. Example Data + + +The [Example Data page](../Reference/Sample-Data.md) provides SQL statements to construct table schemas and insert data. By downloading and executing these statements in the IoTDB CLI, you can import the data into IoTDB. This data can be used to test and run the example SQL queries included in this documentation, allowing you to reproduce the described results. + +### 3.1 Selection List + +#### 3.1.1 Star Expression + +The asterisk (`*`) selects all columns in a table. Note that it cannot be used with most functions, except for cases like `COUNT(*)`. + +**Example**: Selecting all columns from a table.
+ + +```sql +SELECT * FROM table1; +``` + +Results: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +#### 3.1.2 Aggregate Functions + +Aggregate functions summarize multiple rows into a single value. When aggregate functions are present in the `SELECT` clause, the query is treated as an **aggregate query**. All expressions in the query must either be part of an aggregate function or specified in the [GROUP BY clause](../SQL-Manual/GroupBy-Clause.md). + +**Example 1**: Total number of rows in a table. + +```sql +SELECT count(*) FROM table1; +``` + +Results: + +```sql ++-----+ +|_col0| ++-----+ +| 18| ++-----+ +Total line number = 1 +It costs 0.091s +``` + +**Example 2**: Total rows grouped by region. + +```sql +SELECT region, count(*) + FROM table1 + GROUP BY region; +``` + +Results: + +```sql ++------+-----+ +|region|_col1| ++------+-----+ +| 上海| 9| +| 北京| 9| ++------+-----+ +Total line number = 2 +It costs 0.071s +``` + +#### 3.1.3 Aliases + +The `AS` keyword assigns an alias to selected columns, improving readability by overriding existing column names. + +**Example 1**: Original table. 
+ + +```sql +IoTDB> SELECT * FROM table1; +``` + +Results: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +**Example 2**: Assigning an alias to a single column. + +```sql +IoTDB> SELECT device_id + AS device + FROM table1; +``` + +Results: + +```sql ++------+ +|device| ++------+ +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| ++------+ +Total line number = 18 +It costs 0.053s +``` + +**Example 3:** Assigning aliases to all columns. 
+ +```sql +IoTDB> SELECT table1.* + AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) + FROM table1; +``` + +Results: + +```sql ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| +|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| +|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| +|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| +|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| +|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| +|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +Total line number = 18 +It costs 0.189s +``` + +#### 3.1.4 Object Type Query + +> Supported since V2.0.8 + +**Example 1: Directly querying Object type data** + +```sql +IoTDB:database1> SELECT s1 FROM table1 WHERE device_id = 'tag1' ++------------+ +| s1| ++------------+ +|(Object) 5 B| ++------------+ +Total line number = 1 +It costs 0.428s +``` + +**Example 2: Retrieving raw content of Object type data using `read_object` function** + +```sql +IoTDB:database1> SELECT read_object(s1) FROM table1 WHERE device_id = 'tag1' ++------------+ +| _col0| ++------------+ +|0x696f746462| ++------------+ +Total line number = 1 +It costs 0.188s +``` + + +### 3.2 Columns Function + +1. 
Without combining expressions +```sql +-- Query data from columns whose names start with 'm' +IoTDB:database1> select columns('^m.*') from table1 limit 5 ++--------+-----------+ +|model_id|maintenance| ++--------+-----------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++--------+-----------+ + + +-- Query columns whose names start with 'o' - throw an exception if no columns match +IoTDB:database1> select columns('^o.*') from table1 limit 5 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' + + +-- Query data from columns whose names start with 'm' and rename them with 'series_' prefix +IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 ++---------------+------------------+ +|series_model_id|series_maintenance| ++---------------+------------------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++---------------+------------------+ +``` + +2. With Expression Combination + +- Single COLUMNS Function +```sql +-- Query the minimum value of all columns +IoTDB:database1> select min(columns(*)) from table1 ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +``` + +- Multiple COLUMNS Functions in Same Expression + +> Usage Restriction: When multiple COLUMNS functions appear in the same expression, their parameters must be identical. + +```sql +-- Query the sum of minimum and maximum values for columns starting with 'h' +IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 ++--------------+ +|_col0_humidity| ++--------------+ +| 79.899994| ++--------------+ + +-- Error Case: Non-Identical COLUMNS Functions +IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported +``` + +- Multiple COLUMNS Functions in Different Expressions + +```sql +-- Query minimum of 'h'-columns and maximum of 'h'-columns separately +IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 ++--------------+--------------+ +|_col0_humidity|_col1_humidity| ++--------------+--------------+ +| 34.8| 45.1| ++--------------+--------------+ + +-- Query minimum of 'h'-columns and maximum of 'te'-columns +IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 ++--------------+-----------------+ +|_col0_humidity|_col1_temperature| ++--------------+-----------------+ +| 34.8| 90.0| ++--------------+-----------------+ +``` + +3. 
In Where Clause + +```sql +-- Query data where all 'h'-columns must be > 40 (equivalent to the alternative syntax below) +IoTDB:database1> select * from table1 where columns('^h.*') > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|  上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null| +|2024-11-28T09:00:00.000+08:00|  上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null| +|2024-11-28T11:00:00.000+08:00|  上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ + +-- Alternative syntax +IoTDB:database1> select * from table1 where humidity > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|  上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null| +|2024-11-28T09:00:00.000+08:00|  上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null| +|2024-11-28T11:00:00.000+08:00|  上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +``` + +## 4. Column Order in the Result Set + +- **Column Order**: The order of columns in the result set matches the order specified in the `SELECT` clause. +- **Multi-column Expressions**: If a selection expression produces multiple columns, their order follows the order in the source relation. \ No newline at end of file diff --git a/src/UserGuide/latest-Table/SQL-Manual/overview_apache.md b/src/UserGuide/latest-Table/SQL-Manual/overview_apache.md index 0ef5cbc2e..e6b98ca16 100644 --- a/src/UserGuide/latest-Table/SQL-Manual/overview_apache.md +++ b/src/UserGuide/latest-Table/SQL-Manual/overview_apache.md @@ -37,7 +37,7 @@ SELECT ⟨select_list⟩ The IoTDB table model query syntax supports the following clauses: -- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause.md) +- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause_apache.md) - **FROM Clause**: Indicates the data source for the query, which can be a single table, multiple tables joined using the `JOIN` clause, or a subquery. Details: [FROM & JOIN Clause](../SQL-Manual/From-Join-Clause.md) - **WHERE Clause**: Filters rows based on specific conditions. Logically executed immediately after the `FROM` clause. Details: [WHERE Clause](../SQL-Manual/Where-Clause.md) - **GROUP BY Clause**: Used for aggregating data, specifying the columns for grouping.
Details: [GROUP BY Clause](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/UserGuide/latest-Table/SQL-Manual/overview_timecho.md b/src/UserGuide/latest-Table/SQL-Manual/overview_timecho.md index 19afdc1b8..d564f44c6 100644 --- a/src/UserGuide/latest-Table/SQL-Manual/overview_timecho.md +++ b/src/UserGuide/latest-Table/SQL-Manual/overview_timecho.md @@ -38,7 +38,7 @@ SELECT ⟨select_list⟩ The IoTDB table model query syntax supports the following clauses: -- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause.md) +- **SELECT Clause**: Specifies the columns to be included in the result. Details: [SELECT Clause](../SQL-Manual/Select-Clause_timecho.md) - **FROM Clause**: Indicates the data source for the query, which can be a single table, multiple tables joined using the `JOIN` clause, or a subquery. Details: [FROM & JOIN Clause](../SQL-Manual/From-Join-Clause.md) - **WHERE Clause**: Filters rows based on specific conditions. Logically executed immediately after the `FROM` clause. Details: [WHERE Clause](../SQL-Manual/Where-Clause.md) - **GROUP BY Clause**: Used for aggregating data, specifying the columns for grouping. Details: [GROUP BY Clause](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/UserGuide/latest-Table/Tools-System/Data-Export-Tool_timecho.md b/src/UserGuide/latest-Table/Tools-System/Data-Export-Tool_timecho.md index 9e5645b07..f2c8a7d2a 100644 --- a/src/UserGuide/latest-Table/Tools-System/Data-Export-Tool_timecho.md +++ b/src/UserGuide/latest-Table/Tools-System/Data-Export-Tool_timecho.md @@ -43,8 +43,8 @@ The data export tool `export-data.sh/bat` is located in the `tools` directory an |`-end_time` |`--end_time` | The end time of the data to be exported only takes effect when `-sql_dialect` is set to the table type. If `-q` is specified, this parameter will not take effect.| No | - | | `-t` | `--target` | Target directory for the output files. If the path does not exist, it will be created. | ​**Yes** | - | | `-pfn` | `--prefix_file_name` | Prefix for the exported file names. For example, `abc` will generate files like `abc_0.tsfile`, `abc_1.tsfile`. | No | `dump_0.tsfile` | -| `-q` | `--query` | SQL query command to execute. Starting from v2.0.8-beta, semicolons in SQL statements are automatically removed, and query execution proceeds normally. | No | - | -| `-timeout` | `--query_timeout` | Query timeout in milliseconds (ms). | No | `-1` (before v2.0.8-beta)
`Long.MAX_VALUE` (v2.0.8-beta and later)
(Range: `-1~Long.MAX_VALUE`) | +| `-q` | `--query` | SQL query command to execute. Starting from v2.0.8, semicolons in SQL statements are automatically removed, and query execution proceeds normally. | No | - | +| `-timeout` | `--query_timeout` | Query timeout in milliseconds (ms). | No | `-1` (before v2.0.8)
`Long.MAX_VALUE` (v2.0.8 and later)
(Range: `-1~Long.MAX_VALUE`) | | `-help` | `--help` | Display help information. | No | - | ### 2.2 CSV Format diff --git a/src/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md b/src/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md index 00f790aff..f29a1fa78 100644 --- a/src/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md +++ b/src/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md @@ -31,7 +31,7 @@ Audit logs provide a documented record of database activities. Through the audit * Configurable audit log retention periods using TTL (time-based rolling) and SpaceTL (space-based rolling) * Default encryption storage for audit logs -> Note: This feature is available from version V2.0.8-beta onwards. +> Note: This feature is available from version V2.0.8 onwards. ## 2. Configuration Parameters diff --git a/src/UserGuide/latest-Table/User-Manual/Black-White-List_timecho.md b/src/UserGuide/latest-Table/User-Manual/Black-White-List_timecho.md index dceab5681..3aa3cb94a 100644 --- a/src/UserGuide/latest-Table/User-Manual/Black-White-List_timecho.md +++ b/src/UserGuide/latest-Table/User-Manual/Black-White-List_timecho.md @@ -39,7 +39,7 @@ Administrators can enable/disable the whitelist function and add, modify, or del * Edit the configuration file `iotdb‑system.properties`. * Use the `set configuration` statement. - * Table model reference: [set configuration](../SQL-Manual/SQL-Maintenance-Statements.md#_2-2-update-configuration-items) + * Table model reference: [set configuration](../SQL-Manual/SQL-Maintenance-Statements_timecho.md#_2-2-update-configuration-items) Related parameters are as follows: @@ -60,7 +60,7 @@ Administrators can enable/disable the blacklist function and add, modify, or del * Edit the configuration file `iotdb‑system.properties`. * Use the `set configuration`statement. - * Table model reference:[set configuration](../SQL-Manual/SQL-Maintenance-Statements.md#_2-2-update-configuration-items) + * Table model reference:[set configuration](../SQL-Manual/SQL-Maintenance-Statements_timecho.md#_2-2-update-configuration-items) Related parameters are as follows: diff --git a/src/UserGuide/latest-Table/User-Manual/Maintenance-commands_timecho.md b/src/UserGuide/latest-Table/User-Manual/Maintenance-commands_timecho.md index 27a0b101a..02185325c 100644 --- a/src/UserGuide/latest-Table/User-Manual/Maintenance-commands_timecho.md +++ b/src/UserGuide/latest-Table/User-Manual/Maintenance-commands_timecho.md @@ -341,7 +341,7 @@ IoTDB> SHOW REGIONS **Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state. -> This feature is supported starting from v2.0.8-beta. +> This feature is supported starting from v2.0.8. **Syntax**: diff --git a/src/UserGuide/latest/API/Programming-OPC-UA_timecho.md b/src/UserGuide/latest/API/Programming-OPC-UA_timecho.md index 1af814e5d..149f989e1 100644 --- a/src/UserGuide/latest/API/Programming-OPC-UA_timecho.md +++ b/src/UserGuide/latest/API/Programming-OPC-UA_timecho.md @@ -27,7 +27,7 @@ This document describes two independent operational modes for IoTDB's integratio * **Mode 1: Data Subscription Service (IoTDB as OPC UA Server)**: IoTDB starts an embedded OPC UA server to passively allow external clients (e.g., UAExpert) to connect and subscribe to its internal data. This is the traditional usage. 
* **Mode 2: Data Push (IoTDB as OPC UA Client)**: IoTDB acts as a client to actively synchronize data and metadata to one or more independently deployed external OPC UA servers. - > Note: This mode is supported starting from V2.0.8-beta. + > Note: This mode is supported starting from V2.0.8. **Note: Modes are mutually exclusive** When the Pipe configuration specifies the `node-urls` parameter (Mode 2), IoTDB will **not** start the embedded OPC UA server (Mode 1). These two modes **cannot be used simultaneously** within the same Pipe. @@ -61,22 +61,22 @@ CREATE PIPE p1 #### 2.1.2 Parameters -| **Parameter** | **Description** | **Value Range** | **Required** | **Default Value** | -| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |-------------------------------------------------------------------------------------------------------------------------------------| -------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| sink | OPC UA SINK | String: opc-ua-sink | Required | | -| sink.opcua.model | OPC UA operational mode | String: client-server / pub-sub | Optional | client-server | -| sink.opcua.tcp.port | OPC UA TCP port | Integer: [0, 65536] | Optional | 12686 | -| sink.opcua.https.port | OPC UA HTTPS port | Integer: [0, 65536] | Optional | 8443 | -| sink.opcua.security.dir | OPC UA key and certificate directory | String: Path (supports absolute/relative paths) | Optional | 1. `opc_security` folder under IoTDB's DataNode conf directory `/`. 2. User home directory's `iotdb_opc_security` folder `/` if no IoTDB conf directory exists (e.g., when starting DataNode in IDEA) | -| opcua.security-policy | Security policy used for OPC UA connections (case-insensitive). Multiple policies can be configured and separated by commas. After configuring one policy, clients can only connect using that policy. Default implementation supports `None` and `Basic256Sha256`. Should be set to a non-`None` policy by default. `None` policy is only for debugging (convenient but insecure; not recommended for production). Note: Supported since V2.0.8-beta, only for client-server mode. | String (security level increases):`None`,`Basic128Rsa15`,`Basic256`,`Basic256Sha256`,`Aes128_Sha256_RsaOaep`,`Aes256_Sha256_RsaPss` | Optional | `Basic256Sha256,Aes128_Sha256_RsaOaep,Aes256_Sha256_RsaPss` | -| sink.opcua.enable-anonymous-access | Whether OPC UA allows anonymous access | Boolean | Optional | true | -| sink.user | User (OPC UA allowed user) | String | Optional | root | -| sink.password | Password (OPC UA allowed password) | String | Optional | TimechoDB@2021 (Default was 'root' before V2.0.6.x) | -| opcua.with-quality | Whether OPC UA publishes data in value + quality mode. When enabled, system processes data as follows:1. Both value and quality present → Push directly to OPC UA Server.2. 
Only value present → Quality automatically filled as UNCERTAIN (default, configurable).3. Only quality present → Ignore write (no processing).4. Non-value/quality fields present → Ignore data and log warning (configurable log frequency to avoid high-frequency interference).5. Quality type restriction: Only boolean type supported (true = GOOD, false = BAD).**Note**: Supported since V2.0.8-beta, only for client-server mode | Boolean | Optional | false | -| opcua.value-name | Effective when `with-quality` = true, specifies the name of the value point. **Note**: Supported since V2.0.8-beta, only for client-server mode | String | Optional | value | -| opcua.quality-name | Effective when `with-quality` = true, specifies the name of the quality point. **Note**: Supported since V2.0.8-beta, only for client-server mode | String | Optional | quality | -| opcua.default-quality | When no quality is provided, specify `GOOD`/`UNCERTAIN`/`BAD` via SQL parameter. **Note**: Supported since V2.0.8-beta, only for client-server mode | String: `GOOD`/`UNCERTAIN`/`BAD` | Optional | `UNCERTAIN` | -| opcua.timeout-seconds | Client connection timeout in seconds (effective only when IoTDB acts as client). **Note**: Supported since V2.0.8-beta, only for client-server mode | Long | Optional | 10L | +| **Parameter** | **Description** | **Value Range** | **Required** | **Default Value** | +| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |-------------------------------------------------------------------------------------------------------------------------------------| -------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| sink | OPC UA SINK | String: opc-ua-sink | Required | | +| sink.opcua.model | OPC UA operational mode | String: client-server / pub-sub | Optional | client-server | +| sink.opcua.tcp.port | OPC UA TCP port | Integer: [0, 65536] | Optional | 12686 | +| sink.opcua.https.port | OPC UA HTTPS port | Integer: [0, 65536] | Optional | 8443 | +| sink.opcua.security.dir | OPC UA key and certificate directory | String: Path (supports absolute/relative paths) | Optional | 1. `opc_security` folder under IoTDB's DataNode conf directory `/`. 2. User home directory's `iotdb_opc_security` folder `/` if no IoTDB conf directory exists (e.g., when starting DataNode in IDEA) | +| opcua.security-policy | Security policy used for OPC UA connections (case-insensitive). Multiple policies can be configured and separated by commas. After configuring one policy, clients can only connect using that policy. Default implementation supports `None` and `Basic256Sha256`. Should be set to a non-`None` policy by default. `None` policy is only for debugging (convenient but insecure; not recommended for production). Note: Supported since V2.0.8, only for client-server mode. 
| String (security level increases):`None`,`Basic128Rsa15`,`Basic256`,`Basic256Sha256`,`Aes128_Sha256_RsaOaep`,`Aes256_Sha256_RsaPss` | Optional | `Basic256Sha256,Aes128_Sha256_RsaOaep,Aes256_Sha256_RsaPss` | +| sink.opcua.enable-anonymous-access | Whether OPC UA allows anonymous access | Boolean | Optional | true | +| sink.user | User (OPC UA allowed user) | String | Optional | root | +| sink.password | Password (OPC UA allowed password) | String | Optional | TimechoDB@2021 (Default was 'root' before V2.0.6.x) | +| opcua.with-quality | Whether OPC UA publishes data in value + quality mode. When enabled, system processes data as follows:1. Both value and quality present → Push directly to OPC UA Server.2. Only value present → Quality automatically filled as UNCERTAIN (default, configurable).3. Only quality present → Ignore write (no processing).4. Non-value/quality fields present → Ignore data and log warning (configurable log frequency to avoid high-frequency interference).5. Quality type restriction: Only boolean type supported (true = GOOD, false = BAD).**Note**: Supported since V2.0.8, only for client-server mode | Boolean | Optional | false | +| opcua.value-name | Effective when `with-quality` = true, specifies the name of the value point. **Note**: Supported since V2.0.8, only for client-server mode | String | Optional | value | +| opcua.quality-name | Effective when `with-quality` = true, specifies the name of the quality point. **Note**: Supported since V2.0.8, only for client-server mode | String | Optional | quality | +| opcua.default-quality | When no quality is provided, specify `GOOD`/`UNCERTAIN`/`BAD` via SQL parameter. **Note**: Supported since V2.0.8, only for client-server mode | String: `GOOD`/`UNCERTAIN`/`BAD` | Optional | `UNCERTAIN` | +| opcua.timeout-seconds | Client connection timeout in seconds (effective only when IoTDB acts as client). **Note**: Supported since V2.0.8, only for client-server mode | Long | Optional | 10L | #### 2.1.3 Example @@ -229,7 +229,7 @@ CREATE PIPE p1 > **Parameter Naming Note**: All parameters support omitting `opcua.` prefix (e.g., `node-urls` and `opcua.node-urls` are equivalent). > -> **Support Note**: All `opcua.` parameters are supported starting from V2.0.8-beta, and only for `client-server` mode. +> **Support Note**: All `opcua.` parameters are supported starting from V2.0.8, and only for `client-server` mode. #### 3.1.3 Example diff --git a/src/UserGuide/latest/Basic-Concept/Query-Data_apache.md b/src/UserGuide/latest/Basic-Concept/Query-Data_apache.md index 106795a1d..3f50f61cf 100644 --- a/src/UserGuide/latest/Basic-Concept/Query-Data_apache.md +++ b/src/UserGuide/latest/Basic-Concept/Query-Data_apache.md @@ -2948,7 +2948,7 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root #### Other points to note - For general aggregation queries, the timestamp is meaningless, and the convention is to use 0 to store. -- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type.md#Data Type Compatibility). +- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type.md). - When the target time series does not exist, the system automatically creates it (including the database). 
- When the queried time series does not exist, or the queried sequence does not have data, the target time series will not be created automatically. diff --git a/src/UserGuide/latest/Basic-Concept/Query-Data_timecho.md b/src/UserGuide/latest/Basic-Concept/Query-Data_timecho.md index 3f51a3045..c9a38b5fd 100644 --- a/src/UserGuide/latest/Basic-Concept/Query-Data_timecho.md +++ b/src/UserGuide/latest/Basic-Concept/Query-Data_timecho.md @@ -2948,7 +2948,7 @@ This statement specifies that `root.sg_copy.d1` is an unaligned device and `root #### Other points to note - For general aggregation queries, the timestamp is meaningless, and the convention is to use 0 to store. -- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type.md#Data Type Compatibility). +- When the target time-series exists, the data type of the source column and the target time-series must be compatible. About data type compatibility, see the document [Data Type](../Background-knowledge/Data-Type.md). - When the target time series does not exist, the system automatically creates it (including the database). - When the queried time series does not exist, or the queried sequence does not have data, the target time series will not be created automatically. diff --git a/src/UserGuide/latest/Tools-System/Data-Export-Tool_timecho.md b/src/UserGuide/latest/Tools-System/Data-Export-Tool_timecho.md index 46b154282..dd0191367 100644 --- a/src/UserGuide/latest/Tools-System/Data-Export-Tool_timecho.md +++ b/src/UserGuide/latest/Tools-System/Data-Export-Tool_timecho.md @@ -37,8 +37,8 @@ The data export tool, export-data.sh (Unix/OS X) or export-data.bat (Windows), l | `-pw` | `--password` | Password for authentication. | No | `TimechoDB@2021`(Before V2.0.6 it is `root` ) | | `-t` | `--target` | Target directory for the output files. If the path does not exist, it will be created. | ​**Yes** | - | | `-pfn` | `--prefix_file_name` | Prefix for the exported file names. For example, `abc` will generate files like `abc_0.tsfile`, `abc_1.tsfile`. | No | `dump_0.tsfile` | -| `-q` | `--query` | SQL query command to execute. Starting from v2.0.8-beta, semicolons in SQL statements are automatically removed, and query execution proceeds normally. | No | - | -| `-timeout` | `--query_timeout` | Query timeout in milliseconds (ms). | No | `-1` (before v2.0.8-beta)
`Long.MAX_VALUE` (v2.0.8-beta and later)
(Range: `-1~Long.MAX_VALUE`) | +| `-q` | `--query` | SQL query command to execute. Starting from v2.0.8, semicolons in SQL statements are automatically removed, and query execution proceeds normally. | No | - | +| `-timeout` | `--query_timeout` | Query timeout in milliseconds (ms). | No | `-1` (before v2.0.8)
`Long.MAX_VALUE` (v2.0.8 and later)
(Range: `-1~Long.MAX_VALUE`) | | `-help` | `--help` | Display help information. | No | - | ### 2.2 CSV Format diff --git a/src/UserGuide/latest/User-Manual/Audit-Log_timecho.md b/src/UserGuide/latest/User-Manual/Audit-Log_timecho.md index 1a0b7d933..89b599690 100644 --- a/src/UserGuide/latest/User-Manual/Audit-Log_timecho.md +++ b/src/UserGuide/latest/User-Manual/Audit-Log_timecho.md @@ -30,7 +30,7 @@ Audit logs provide a documented record of database activities. Through the audit * Ability to configure audit log file retention periods using TTL (time-based rolling) and SpaceTL (space-based rolling) * Audit logs are encrypted by default -> Note: This feature is available from version V2.0.8-beta onwards. +> Note: This feature is available from version V2.0.8 onwards. ## 2. Configuration Parameters diff --git a/src/UserGuide/latest/User-Manual/Maintenance-commands.md b/src/UserGuide/latest/User-Manual/Maintenance-commands.md index e72ce52fe..fc3ebe560 100644 --- a/src/UserGuide/latest/User-Manual/Maintenance-commands.md +++ b/src/UserGuide/latest/User-Manual/Maintenance-commands.md @@ -1,3 +1,6 @@ +--- +redirectTo: Maintenance-commands_apache.html +--- -# Maintenance Statement - -## 1. Status Checking - -### 1.1 Viewing the Connected Model - -**Description**: Returns the current SQL dialect mode (`Tree` or `Table`). - -**Syntax**: - -```SQL -showCurrentSqlDialectStatement - : SHOW CURRENT_SQL_DIALECT - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW CURRENT_SQL_DIALECT; -``` - -**Result:** - -```SQL -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TREE| -+-----------------+ -``` - -### 1.2 Viewing the Cluster Version - -**Description**: Returns the current cluster version. - -**Syntax**: - -```SQL -showVersionStatement - : SHOW VERSION - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW VERSION; -``` - -**Result**: - -```Plain -+-------+---------+ -|Version|BuildInfo| -+-------+---------+ -|2.0.1.2| 1ca4008| -+-------+---------+ -``` - -### 1.3 Viewing Cluster Key Parameters - -**Description**: Returns key parameters of the current cluster. - -**Syntax**: - -```SQL -showVariablesStatement - : SHOW VARIABLES - ; -``` - -Key Parameters: - -1. **ClusterName**: The name of the current cluster. -2. **DataReplicationFactor**: Number of data replicas per DataRegion. -3. **SchemaReplicationFactor**: Number of schema replicas per SchemaRegion. -4. **DataRegionConsensusProtocolClass**: Consensus protocol class for DataRegions. -5. **SchemaRegionConsensusProtocolClass**: Consensus protocol class for SchemaRegions. -6. **ConfigNodeConsensusProtocolClass**: Consensus protocol class for ConfigNodes. -7. **TimePartitionOrigin**: The starting timestamp of database time partitions. -8. **TimePartitionInterval**: The interval of database time partitions (in milliseconds). -9. **ReadConsistencyLevel**: The consistency level for read operations. -10. **SchemaRegionPerDataNode**: Number of SchemaRegions per DataNode. -11. **DataRegionPerDataNode**: Number of DataRegions per DataNode. -12. **SeriesSlotNum**: Number of SeriesSlots per DataRegion. -13. **SeriesSlotExecutorClass**: Implementation class for SeriesSlots. -14. **DiskSpaceWarningThreshold**: Disk space warning threshold (in percentage). -15. **TimestampPrecision**: Timestamp precision. 
- -**Example**: - -```SQL -IoTDB> SHOW VARIABLES; -``` - -**Result**: - -```Plain -+----------------------------------+-----------------------------------------------------------------+ -| Variable| Value| -+----------------------------------+-----------------------------------------------------------------+ -| ClusterName| defaultCluster| -| DataReplicationFactor| 1| -| SchemaReplicationFactor| 1| -| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| -|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| TimePartitionOrigin| 0| -| TimePartitionInterval| 604800000| -| ReadConsistencyLevel| strong| -| SchemaRegionPerDataNode| 1| -| DataRegionPerDataNode| 0| -| SeriesSlotNum| 1000| -| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| -| DiskSpaceWarningThreshold| 0.05| -| TimestampPrecision| ms| -+----------------------------------+-----------------------------------------------------------------+ -``` - -### 1.4 Viewing the Current Timestamp of Database - -**Description**: Returns the current timestamp of the database. - -**Syntax**: - -```SQL -showCurrentTimestampStatement - : SHOW CURRENT_TIMESTAMP - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW CURRENT_TIMESTAMP; -``` - -**Result**: - -```Plain -+-----------------------------+ -| CurrentTimestamp| -+-----------------------------+ -|2025-02-17T11:11:52.987+08:00| -+-----------------------------+ -``` - -### 1.5 Viewing Executing Queries - -**Description**: Displays information about all currently executing queries. - -**Syntax**: - -```SQL -showQueriesStatement - : SHOW (QUERIES | QUERY PROCESSLIST) - (WHERE where=booleanExpression)? - (ORDER BY sortItem (',' sortItem)*)? - limitOffsetClause - ; -``` - -**Parameters**: - -1. **WHERE Clause**: Filters the result set based on specified conditions. -2. **ORDER BY Clause**: Sorts the result set based on specified columns. -3. **limitOffsetClause**: Limits the number of rows returned. - 1. Format: `LIMIT , `. - -**Columns in QUERIES Table**: - -- **time**: Timestamp when the query started. -- **queryid**: Unique ID of the query. -- **datanodeid**: ID of the DataNode executing the query. -- **elapsedtime**: Time elapsed since the query started (in seconds). -- **statement**: The SQL statement being executed. - -**Example**: - -```SQL -IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003 -``` - -**Result**: - -```SQL -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -| Time| QueryId|DataNodeId|ElapsedTime| Statement| -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1| 1| 0.006|SHOW QUERIES WHERE elapsedtime > 0.003| -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -``` - -### 1.6 Viewing Region Information - -**Description**: Displays regions' information of the current cluster. 
- -**Syntax**: - -```SQL -showRegionsStatement - : SHOW REGIONS - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW REGIONS -``` - -**Result**: - -```SQL -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -| 9|SchemaRegion|Running|root.__system| 21| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.555| | -| 10| DataRegion|Running|root.__system| 21| 21| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.556| 8.27 KB| -| 65|SchemaRegion|Running| root.ln| 1| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.113| | -| 66| DataRegion|Running| root.ln| 1| 1| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.425| 524 B| -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -``` - -### 1.7 Viewing Available Nodes - -**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state. - -> This feature is supported starting from v2.0.8-beta. - -**Syntax**: - -```SQL -showAvailableUrlsStatement - : SHOW AVAILABLE URLS - ; -``` - -**Example**: - -```SQL -IoTDB> SHOW AVAILABLE URLS -``` - -**Result**: - -```SQL -+----------+-------+ -|RpcAddress|RpcPort| -+----------+-------+ -| 0.0.0.0| 6667| -+----------+-------+ -``` - - -## 2. Status Setting - -### 2.1 Setting the Connected Model - -**Description**: Sets the current SQL dialect mode to `Tree` or `Table` which can be used in both tree and table modes. - -**Syntax**: - -```SQL -SET SQL_DIALECT = (TABLE | TREE); -``` - -**Example**: - -```SQL -IoTDB> SET SQL_DIALECT=TREE; -IoTDB> SHOW CURRENT_SQL_DIALECT; -``` - -**Result**: - -```SQL -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TREE| -+-----------------+ -``` - -### 2.2 Updating Configuration Items - -**Description**: Updates configuration items. Changes take effect immediately without restarting if the items support hot modification. - -**Syntax**: - -```SQL -setConfigurationStatement - : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? - ; - -propertyAssignments - : property (',' property)* - ; - -property - : identifier EQ propertyValue - ; - -propertyValue - : DEFAULT - | expression - ; -``` - -**Parameters**: - -1. **propertyAssignments**: A list of properties to update. - 1. Format: `property (',' property)*`. - 2. Values: - - `DEFAULT`: Resets the configuration to its default value. - - `expression`: A specific value (must be a string). -2. **ON INTEGER_VALUE** **(Optional):** Specifies the node ID to update. - 1. If not specified or set to a negative value, updates all ConfigNodes and DataNodes. - -**Example**: - -```SQL -IoTDB> SET CONFIGURATION ‘disk_space_warning_threshold’='0.05',‘heartbeat_interval_in_ms’='1000' ON 1; -``` - -### 2.3 Loading Manually Modified Configuration Files - -**Description**: Loads manually modified configuration files and hot-loads the changes. Configuration items that support hot modification take effect immediately. 
- -**Syntax**: - -```SQL -loadConfigurationStatement - : LOAD CONFIGURATION localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **localOrClusterMode** **(Optional):** - 1. Specifies the scope of configuration loading. - 2. Default: `CLUSTER`. - 3. Values: - - `LOCAL`: Loads configuration only on the DataNode directly connected to the client. - - `CLUSTER`: Loads configuration on all DataNodes in the cluster. - -**Example**: - -```SQL -IoTDB> LOAD CONFIGURATION ON LOCAL; -``` - -### 2.4 Setting the System Status - -**Description**: Sets the system status to either `READONLY` or `RUNNING`. - -**Syntax**: - -```SQL -setSystemStatusStatement - : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **RUNNING |** **READONLY**: - 1. **RUNNING**: Sets the system to running mode, allowing both read and write operations. - 2. **READONLY**: Sets the system to read-only mode, allowing only read operations and prohibiting writes. -2. **localOrClusterMode** **(Optional):** - 1. **LOCAL**: Applies the status change only to the DataNode directly connected to the client. - 2. **CLUSTER**: Applies the status change to all DataNodes in the cluster. - 3. **Default**: `ON CLUSTER`. - -**Example**: - -```SQL -IoTDB> SET SYSTEM TO READONLY ON CLUSTER; -``` - -## 3. Data Management - -### 3.1 Flushing Data from Memory to Disk - -**Description**: Flushes data from the memory table to disk. - -**Syntax**: - -```SQL -flushStatement - : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? - ; - -booleanValue - : TRUE | FALSE - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **identifier** **(Optional):** - 1. Specifies the name of the path to flush. - 2. If not specified, all paths are flushed. - 3. **Multiple Paths**: Multiple path names can be specified, separated by commas (e.g., `FLUSH root.ln, root.lnm.**`). -2. **booleanValue** **(****Optional****)**: - 1. Specifies the type of data to flush. - 2. **TRUE**: Flushes only the sequential memory table. - 3. **FALSE**: Flushes only the unsequential MemTable. - 4. **Default**: Flushes both sequential and unsequential memory tables. -3. **localOrClusterMode** **(****Optional****)**: - 1. **ON LOCAL**: Flushes only the memory tables on the DataNode directly connected to the client. - 2. **ON CLUSTER**: Flushes memory tables on all DataNodes in the cluster. - 3. **Default:** `ON CLUSTER`. - -**Example**: - -```SQL -IoTDB> FLUSH root.ln TRUE ON LOCAL; -``` - -## 4. Data Repair - -### 4.1 Starting Background Scan and Repair of TsFiles - -**Description**: Starts a background task to scan and repair TsFiles, fixing issues such as timestamp disorder within data files. - -**Syntax**: - -```SQL -startRepairDataStatement - : START REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **localOrClusterMode(Optional)**: - 1. **ON LOCAL**: Executes the repair task only on the DataNode directly connected to the client. - 2. **ON CLUSTER**: Executes the repair task on all DataNodes in the cluster. - 3. **Default:** `ON CLUSTER`. - -**Example**: - -```SQL -IoTDB> START REPAIR DATA ON CLUSTER; -``` - -### 4.2 Pausing Background TsFile Repair Task - -**Description**: Pauses the background repair task. The paused task can be resumed by executing the `START REPAIR DATA` command again. 
- -**Syntax**: - -```SQL -stopRepairDataStatement - : STOP REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**Parameters**: - -1. **localOrClusterMode** **(Optional):** - 1. **ON LOCAL**: Executes the pause command only on the DataNode directly connected to the client. - 2. **ON CLUSTER**: Executes the pause command on all DataNodes in the cluster. - 3. **Default:** `ON CLUSTER`. - -**Example**: - -```SQL -IoTDB> STOP REPAIR DATA ON CLUSTER; -``` - -## 5. Query Termination - -### 5.1 Terminating Queries - -**Description**: Terminates one or more running queries. - -**Syntax**: - -```SQL -killQueryStatement - : KILL (QUERY queryId=string | ALL QUERIES) - ; -``` - -**Parameters**: - -1. **QUERY** **queryId:** Specifies the ID of the query to terminate. - -- To obtain the `queryId`, use the `SHOW QUERIES` command. - -2. **ALL QUERIES:** Terminates all currently running queries. - -**Example**: - -Terminate a specific query: - -```SQL -IoTDB> KILL QUERY 20250108_101015_00000_1; -``` - -Terminate all queries: - -```SQL -IoTDB> KILL ALL QUERIES; -``` \ No newline at end of file +--> \ No newline at end of file diff --git a/src/UserGuide/latest/User-Manual/Maintenance-commands_apache.md b/src/UserGuide/latest/User-Manual/Maintenance-commands_apache.md new file mode 100644 index 000000000..e72ce52fe --- /dev/null +++ b/src/UserGuide/latest/User-Manual/Maintenance-commands_apache.md @@ -0,0 +1,548 @@ + +# Maintenance Statement + +## 1. Status Checking + +### 1.1 Viewing the Connected Model + +**Description**: Returns the current SQL dialect mode (`Tree` or `Table`). + +**Syntax**: + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT; +``` + +**Result:** + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 1.2 Viewing the Cluster Version + +**Description**: Returns the current cluster version. + +**Syntax**: + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW VERSION; +``` + +**Result**: + +```Plain ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.3 Viewing Cluster Key Parameters + +**Description**: Returns key parameters of the current cluster. + +**Syntax**: + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +Key Parameters: + +1. **ClusterName**: The name of the current cluster. +2. **DataReplicationFactor**: Number of data replicas per DataRegion. +3. **SchemaReplicationFactor**: Number of schema replicas per SchemaRegion. +4. **DataRegionConsensusProtocolClass**: Consensus protocol class for DataRegions. +5. **SchemaRegionConsensusProtocolClass**: Consensus protocol class for SchemaRegions. +6. **ConfigNodeConsensusProtocolClass**: Consensus protocol class for ConfigNodes. +7. **TimePartitionOrigin**: The starting timestamp of database time partitions. +8. **TimePartitionInterval**: The interval of database time partitions (in milliseconds). +9. **ReadConsistencyLevel**: The consistency level for read operations. +10. **SchemaRegionPerDataNode**: Number of SchemaRegions per DataNode. +11. **DataRegionPerDataNode**: Number of DataRegions per DataNode. +12. **SeriesSlotNum**: Number of SeriesSlots per DataRegion. +13. **SeriesSlotExecutorClass**: Implementation class for SeriesSlots. +14. **DiskSpaceWarningThreshold**: Disk space warning threshold (in percentage). +15. 
**TimestampPrecision**: Timestamp precision. + +**Example**: + +```SQL +IoTDB> SHOW VARIABLES; +``` + +**Result**: + +```Plain ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.4 Viewing the Current Timestamp of Database + +**Description**: Returns the current timestamp of the database. + +**Syntax**: + +```SQL +showCurrentTimestampStatement + : SHOW CURRENT_TIMESTAMP + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW CURRENT_TIMESTAMP; +``` + +**Result**: + +```Plain ++-----------------------------+ +| CurrentTimestamp| ++-----------------------------+ +|2025-02-17T11:11:52.987+08:00| ++-----------------------------+ +``` + +### 1.5 Viewing Executing Queries + +**Description**: Displays information about all currently executing queries. + +**Syntax**: + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)? + limitOffsetClause + ; +``` + +**Parameters**: + +1. **WHERE Clause**: Filters the result set based on specified conditions. +2. **ORDER BY Clause**: Sorts the result set based on specified columns. +3. **limitOffsetClause**: Limits the number of rows returned. + 1. Format: `LIMIT , `. + +**Columns in QUERIES Table**: + +- **time**: Timestamp when the query started. +- **queryid**: Unique ID of the query. +- **datanodeid**: ID of the DataNode executing the query. +- **elapsedtime**: Time elapsed since the query started (in seconds). +- **statement**: The SQL statement being executed. + +**Example**: + +```SQL +IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003 +``` + +**Result**: + +```SQL ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +| Time| QueryId|DataNodeId|ElapsedTime| Statement| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1| 1| 0.006|SHOW QUERIES WHERE elapsedtime > 0.003| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +``` + +### 1.6 Viewing Region Information + +**Description**: Displays regions' information of the current cluster. 
+
+**Syntax**:
+
+```SQL
+showRegionsStatement
+ : SHOW REGIONS
+ ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW REGIONS
+```
+
+**Result**:
+
+```SQL
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+| 9|SchemaRegion|Running|root.__system| 21| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.555| |
+| 10| DataRegion|Running|root.__system| 21| 21| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.556| 8.27 KB|
+| 65|SchemaRegion|Running| root.ln| 1| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.113| |
+| 66| DataRegion|Running| root.ln| 1| 1| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.425| 524 B|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+```
+
+### 1.7 Viewing Available Nodes
+
+**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state.
+
+> This feature is supported starting from v2.0.8-beta.
+
+**Syntax**:
+
+```SQL
+showAvailableUrlsStatement
+ : SHOW AVAILABLE URLS
+ ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW AVAILABLE URLS
+```
+
+**Result**:
+
+```SQL
++----------+-------+
+|RpcAddress|RpcPort|
++----------+-------+
+| 0.0.0.0| 6667|
++----------+-------+
+```
+
+
+## 2. Status Setting
+
+### 2.1 Setting the Connected Model
+
+**Description**: Sets the current SQL dialect mode to `Tree` or `Table`. This statement can be executed in both tree and table modes.
+
+**Syntax**:
+
+```SQL
+SET SQL_DIALECT = (TABLE | TREE);
+```
+
+**Example**:
+
+```SQL
+IoTDB> SET SQL_DIALECT=TREE;
+IoTDB> SHOW CURRENT_SQL_DIALECT;
+```
+
+**Result**:
+
+```SQL
++-----------------+
+|CurrentSqlDialect|
++-----------------+
+| TREE|
++-----------------+
+```
+
+### 2.2 Updating Configuration Items
+
+**Description**: Updates configuration items. Changes take effect immediately without restarting if the items support hot modification.
+
+**Syntax**:
+
+```SQL
+setConfigurationStatement
+ : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)?
+ ;
+
+propertyAssignments
+ : property (',' property)*
+ ;
+
+property
+ : identifier EQ propertyValue
+ ;
+
+propertyValue
+ : DEFAULT
+ | expression
+ ;
+```
+
+**Parameters**:
+
+1. **propertyAssignments**: A list of properties to update.
+ 1. Format: `property (',' property)*`.
+ 2. Values:
+ - `DEFAULT`: Resets the configuration to its default value.
+ - `expression`: A specific value (must be a string).
+2. **ON INTEGER_VALUE** (Optional): Specifies the node ID to update.
+ 1. If not specified or set to a negative value, updates all ConfigNodes and DataNodes.
+
+**Example**:
+
+```SQL
+IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1;
+```
+
+### 2.3 Loading Manually Modified Configuration Files
+
+**Description**: Loads manually modified configuration files and hot-loads the changes. Configuration items that support hot modification take effect immediately.
+
+**Syntax**:
+
+```SQL
+loadConfigurationStatement
+ : LOAD CONFIGURATION localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**Parameters**:
+
+1. **localOrClusterMode** (Optional):
+ 1. Specifies the scope of configuration loading.
+ 2. Default: `CLUSTER`.
+ 3. Values:
+ - `LOCAL`: Loads configuration only on the DataNode directly connected to the client.
+ - `CLUSTER`: Loads configuration on all DataNodes in the cluster.
+
+**Example**:
+
+```SQL
+IoTDB> LOAD CONFIGURATION ON LOCAL;
+```
+
+### 2.4 Setting the System Status
+
+**Description**: Sets the system status to either `READONLY` or `RUNNING`.
+
+**Syntax**:
+
+```SQL
+setSystemStatusStatement
+ : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**Parameters**:
+
+1. **RUNNING | READONLY**:
+ 1. **RUNNING**: Sets the system to running mode, allowing both read and write operations.
+ 2. **READONLY**: Sets the system to read-only mode, allowing only read operations and prohibiting writes.
+2. **localOrClusterMode** (Optional):
+ 1. **LOCAL**: Applies the status change only to the DataNode directly connected to the client.
+ 2. **CLUSTER**: Applies the status change to all DataNodes in the cluster.
+ 3. **Default**: `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> SET SYSTEM TO READONLY ON CLUSTER;
+```
+
+## 3. Data Management
+
+### 3.1 Flushing Data from Memory to Disk
+
+**Description**: Flushes data from the memory table to disk.
+
+**Syntax**:
+
+```SQL
+flushStatement
+ : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode?
+ ;
+
+booleanValue
+ : TRUE | FALSE
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**Parameters**:
+
+1. **identifier** (Optional):
+ 1. Specifies the name of the path to flush.
+ 2. If not specified, all paths are flushed.
+ 3. **Multiple Paths**: Multiple path names can be specified, separated by commas (e.g., `FLUSH root.ln, root.lnm.**`).
+2. **booleanValue** (Optional):
+ 1. Specifies the type of data to flush.
+ 2. **TRUE**: Flushes only the sequential memory table.
+ 3. **FALSE**: Flushes only the unsequential memory table.
+ 4. **Default**: Flushes both sequential and unsequential memory tables.
+3. **localOrClusterMode** (Optional):
+ 1. **ON LOCAL**: Flushes only the memory tables on the DataNode directly connected to the client.
+ 2. **ON CLUSTER**: Flushes memory tables on all DataNodes in the cluster.
+ 3. **Default**: `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> FLUSH root.ln TRUE ON LOCAL;
+```
+
+## 4. Data Repair
+
+### 4.1 Starting Background Scan and Repair of TsFiles
+
+**Description**: Starts a background task to scan and repair TsFiles, fixing issues such as timestamp disorder within data files.
+
+**Syntax**:
+
+```SQL
+startRepairDataStatement
+ : START REPAIR DATA localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**Parameters**:
+
+1. **localOrClusterMode** (Optional):
+ 1. **ON LOCAL**: Executes the repair task only on the DataNode directly connected to the client.
+ 2. **ON CLUSTER**: Executes the repair task on all DataNodes in the cluster.
+ 3. **Default**: `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> START REPAIR DATA ON CLUSTER;
+```
+
+### 4.2 Pausing Background TsFile Repair Task
+
+**Description**: Pauses the background repair task. The paused task can be resumed by executing the `START REPAIR DATA` command again.
+ +**Syntax**: + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Parameters**: + +1. **localOrClusterMode** **(Optional):** + 1. **ON LOCAL**: Executes the pause command only on the DataNode directly connected to the client. + 2. **ON CLUSTER**: Executes the pause command on all DataNodes in the cluster. + 3. **Default:** `ON CLUSTER`. + +**Example**: + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. Query Termination + +### 5.1 Terminating Queries + +**Description**: Terminates one or more running queries. + +**Syntax**: + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**Parameters**: + +1. **QUERY** **queryId:** Specifies the ID of the query to terminate. + +- To obtain the `queryId`, use the `SHOW QUERIES` command. + +2. **ALL QUERIES:** Terminates all currently running queries. + +**Example**: + +Terminate a specific query: + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; +``` + +Terminate all queries: + +```SQL +IoTDB> KILL ALL QUERIES; +``` \ No newline at end of file diff --git a/src/UserGuide/latest/User-Manual/Maintenance-commands_timecho.md b/src/UserGuide/latest/User-Manual/Maintenance-commands_timecho.md new file mode 100644 index 000000000..78ab71b7f --- /dev/null +++ b/src/UserGuide/latest/User-Manual/Maintenance-commands_timecho.md @@ -0,0 +1,548 @@ + +# Maintenance Statement + +## 1. Status Checking + +### 1.1 Viewing the Connected Model + +**Description**: Returns the current SQL dialect mode (`Tree` or `Table`). + +**Syntax**: + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT; +``` + +**Result:** + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 1.2 Viewing the Cluster Version + +**Description**: Returns the current cluster version. + +**Syntax**: + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW VERSION; +``` + +**Result**: + +```Plain ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.3 Viewing Cluster Key Parameters + +**Description**: Returns key parameters of the current cluster. + +**Syntax**: + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +Key Parameters: + +1. **ClusterName**: The name of the current cluster. +2. **DataReplicationFactor**: Number of data replicas per DataRegion. +3. **SchemaReplicationFactor**: Number of schema replicas per SchemaRegion. +4. **DataRegionConsensusProtocolClass**: Consensus protocol class for DataRegions. +5. **SchemaRegionConsensusProtocolClass**: Consensus protocol class for SchemaRegions. +6. **ConfigNodeConsensusProtocolClass**: Consensus protocol class for ConfigNodes. +7. **TimePartitionOrigin**: The starting timestamp of database time partitions. +8. **TimePartitionInterval**: The interval of database time partitions (in milliseconds). +9. **ReadConsistencyLevel**: The consistency level for read operations. +10. **SchemaRegionPerDataNode**: Number of SchemaRegions per DataNode. +11. **DataRegionPerDataNode**: Number of DataRegions per DataNode. +12. **SeriesSlotNum**: Number of SeriesSlots per DataRegion. +13. **SeriesSlotExecutorClass**: Implementation class for SeriesSlots. +14. **DiskSpaceWarningThreshold**: Disk space warning threshold (in percentage). +15. 
**TimestampPrecision**: Timestamp precision. + +**Example**: + +```SQL +IoTDB> SHOW VARIABLES; +``` + +**Result**: + +```Plain ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.4 Viewing the Current Timestamp of Database + +**Description**: Returns the current timestamp of the database. + +**Syntax**: + +```SQL +showCurrentTimestampStatement + : SHOW CURRENT_TIMESTAMP + ; +``` + +**Example**: + +```SQL +IoTDB> SHOW CURRENT_TIMESTAMP; +``` + +**Result**: + +```Plain ++-----------------------------+ +| CurrentTimestamp| ++-----------------------------+ +|2025-02-17T11:11:52.987+08:00| ++-----------------------------+ +``` + +### 1.5 Viewing Executing Queries + +**Description**: Displays information about all currently executing queries. + +**Syntax**: + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)? + limitOffsetClause + ; +``` + +**Parameters**: + +1. **WHERE Clause**: Filters the result set based on specified conditions. +2. **ORDER BY Clause**: Sorts the result set based on specified columns. +3. **limitOffsetClause**: Limits the number of rows returned. + 1. Format: `LIMIT , `. + +**Columns in QUERIES Table**: + +- **time**: Timestamp when the query started. +- **queryid**: Unique ID of the query. +- **datanodeid**: ID of the DataNode executing the query. +- **elapsedtime**: Time elapsed since the query started (in seconds). +- **statement**: The SQL statement being executed. + +**Example**: + +```SQL +IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003 +``` + +**Result**: + +```SQL ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +| Time| QueryId|DataNodeId|ElapsedTime| Statement| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1| 1| 0.006|SHOW QUERIES WHERE elapsedtime > 0.003| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +``` + +### 1.6 Viewing Region Information + +**Description**: Displays regions' information of the current cluster. 
+
+**Syntax**:
+
+```SQL
+showRegionsStatement
+ : SHOW REGIONS
+ ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW REGIONS
+```
+
+**Result**:
+
+```SQL
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+| 9|SchemaRegion|Running|root.__system| 21| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.555| |
+| 10| DataRegion|Running|root.__system| 21| 21| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.556| 8.27 KB|
+| 65|SchemaRegion|Running| root.ln| 1| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.113| |
+| 66| DataRegion|Running| root.ln| 1| 1| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.425| 524 B|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+```
+
+### 1.7 Viewing Available Nodes
+
+**Description**: Returns the RPC addresses and ports of all available DataNodes in the current cluster. Note: A DataNode is considered "available" if it is not in the REMOVING state.
+
+> This feature is supported starting from v2.0.8.
+
+**Syntax**:
+
+```SQL
+showAvailableUrlsStatement
+ : SHOW AVAILABLE URLS
+ ;
+```
+
+**Example**:
+
+```SQL
+IoTDB> SHOW AVAILABLE URLS
+```
+
+**Result**:
+
+```SQL
++----------+-------+
+|RpcAddress|RpcPort|
++----------+-------+
+| 0.0.0.0| 6667|
++----------+-------+
+```
+
+
+## 2. Status Setting
+
+### 2.1 Setting the Connected Model
+
+**Description**: Sets the current SQL dialect mode to `Tree` or `Table`. This statement can be executed in both tree and table modes.
+
+**Syntax**:
+
+```SQL
+SET SQL_DIALECT = (TABLE | TREE);
+```
+
+**Example**:
+
+```SQL
+IoTDB> SET SQL_DIALECT=TREE;
+IoTDB> SHOW CURRENT_SQL_DIALECT;
+```
+
+**Result**:
+
+```SQL
++-----------------+
+|CurrentSqlDialect|
++-----------------+
+| TREE|
++-----------------+
+```
+
+### 2.2 Updating Configuration Items
+
+**Description**: Updates configuration items. Changes take effect immediately without restarting if the items support hot modification.
+
+**Syntax**:
+
+```SQL
+setConfigurationStatement
+ : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)?
+ ;
+
+propertyAssignments
+ : property (',' property)*
+ ;
+
+property
+ : identifier EQ propertyValue
+ ;
+
+propertyValue
+ : DEFAULT
+ | expression
+ ;
+```
+
+**Parameters**:
+
+1. **propertyAssignments**: A list of properties to update.
+ 1. Format: `property (',' property)*`.
+ 2. Values:
+ - `DEFAULT`: Resets the configuration to its default value.
+ - `expression`: A specific value (must be a string).
+2. **ON INTEGER_VALUE** (Optional): Specifies the node ID to update.
+ 1. If not specified or set to a negative value, updates all ConfigNodes and DataNodes.
+
+**Example**:
+
+```SQL
+IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1;
+```
+
+### 2.3 Loading Manually Modified Configuration Files
+
+**Description**: Loads manually modified configuration files and hot-loads the changes. Configuration items that support hot modification take effect immediately.
+
+**Syntax**:
+
+```SQL
+loadConfigurationStatement
+ : LOAD CONFIGURATION localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**Parameters**:
+
+1. **localOrClusterMode** (Optional):
+ 1. Specifies the scope of configuration loading.
+ 2. Default: `CLUSTER`.
+ 3. Values:
+ - `LOCAL`: Loads configuration only on the DataNode directly connected to the client.
+ - `CLUSTER`: Loads configuration on all DataNodes in the cluster.
+
+**Example**:
+
+```SQL
+IoTDB> LOAD CONFIGURATION ON LOCAL;
+```
+
+### 2.4 Setting the System Status
+
+**Description**: Sets the system status to either `READONLY` or `RUNNING`.
+
+**Syntax**:
+
+```SQL
+setSystemStatusStatement
+ : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**Parameters**:
+
+1. **RUNNING | READONLY**:
+ 1. **RUNNING**: Sets the system to running mode, allowing both read and write operations.
+ 2. **READONLY**: Sets the system to read-only mode, allowing only read operations and prohibiting writes.
+2. **localOrClusterMode** (Optional):
+ 1. **LOCAL**: Applies the status change only to the DataNode directly connected to the client.
+ 2. **CLUSTER**: Applies the status change to all DataNodes in the cluster.
+ 3. **Default**: `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> SET SYSTEM TO READONLY ON CLUSTER;
+```
+
+## 3. Data Management
+
+### 3.1 Flushing Data from Memory to Disk
+
+**Description**: Flushes data from the memory table to disk.
+
+**Syntax**:
+
+```SQL
+flushStatement
+ : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode?
+ ;
+
+booleanValue
+ : TRUE | FALSE
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**Parameters**:
+
+1. **identifier** (Optional):
+ 1. Specifies the name of the path to flush.
+ 2. If not specified, all paths are flushed.
+ 3. **Multiple Paths**: Multiple path names can be specified, separated by commas (e.g., `FLUSH root.ln, root.lnm.**`).
+2. **booleanValue** (Optional):
+ 1. Specifies the type of data to flush.
+ 2. **TRUE**: Flushes only the sequential memory table.
+ 3. **FALSE**: Flushes only the unsequential memory table.
+ 4. **Default**: Flushes both sequential and unsequential memory tables.
+3. **localOrClusterMode** (Optional):
+ 1. **ON LOCAL**: Flushes only the memory tables on the DataNode directly connected to the client.
+ 2. **ON CLUSTER**: Flushes memory tables on all DataNodes in the cluster.
+ 3. **Default**: `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> FLUSH root.ln TRUE ON LOCAL;
+```
+
+## 4. Data Repair
+
+### 4.1 Starting Background Scan and Repair of TsFiles
+
+**Description**: Starts a background task to scan and repair TsFiles, fixing issues such as timestamp disorder within data files.
+
+**Syntax**:
+
+```SQL
+startRepairDataStatement
+ : START REPAIR DATA localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**Parameters**:
+
+1. **localOrClusterMode** (Optional):
+ 1. **ON LOCAL**: Executes the repair task only on the DataNode directly connected to the client.
+ 2. **ON CLUSTER**: Executes the repair task on all DataNodes in the cluster.
+ 3. **Default**: `ON CLUSTER`.
+
+**Example**:
+
+```SQL
+IoTDB> START REPAIR DATA ON CLUSTER;
+```
+
+### 4.2 Pausing Background TsFile Repair Task
+
+**Description**: Pauses the background repair task. The paused task can be resumed by executing the `START REPAIR DATA` command again.
+ +**Syntax**: + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**Parameters**: + +1. **localOrClusterMode** **(Optional):** + 1. **ON LOCAL**: Executes the pause command only on the DataNode directly connected to the client. + 2. **ON CLUSTER**: Executes the pause command on all DataNodes in the cluster. + 3. **Default:** `ON CLUSTER`. + +**Example**: + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. Query Termination + +### 5.1 Terminating Queries + +**Description**: Terminates one or more running queries. + +**Syntax**: + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**Parameters**: + +1. **QUERY** **queryId:** Specifies the ID of the query to terminate. + +- To obtain the `queryId`, use the `SHOW QUERIES` command. + +2. **ALL QUERIES:** Terminates all currently running queries. + +**Example**: + +Terminate a specific query: + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; +``` + +Terminate all queries: + +```SQL +IoTDB> KILL ALL QUERIES; +``` \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/API/Programming-Java-Native-API_timecho.md b/src/zh/UserGuide/Master/Table/API/Programming-Java-Native-API_timecho.md index e71d71f94..bb3134ee6 100644 --- a/src/zh/UserGuide/Master/Table/API/Programming-Java-Native-API_timecho.md +++ b/src/zh/UserGuide/Master/Table/API/Programming-Java-Native-API_timecho.md @@ -68,7 +68,7 @@ ITableSession接口定义了与IoTDB交互的基本操作,可以执行数据 **关于 Object 数据类型的说明:** -自 V2.0.8-beta 起,`iTableSession.insert(Tablet tablet)`接口支持将单个 Object 类文件拆成多段后按顺序分段写入。当 Tablet 数据结构中列数据类型为 **`TSDataType.Object`​ ​**时,需要使用如下方法向 Tablet 填值。 +自 V2.0.8 起,`iTableSession.insert(Tablet tablet)`接口支持将单个 Object 类文件拆成多段后按顺序分段写入。当 Tablet 数据结构中列数据类型为 **`TSDataType.Object`​ ​**时,需要使用如下方法向 Tablet 填值。 ```Java /* diff --git a/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type.md b/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type.md index 1f2347c2c..49bc408ab 100644 --- a/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type.md +++ b/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type.md @@ -1,3 +1,6 @@ +--- +redirectTo: Data-Type_apache.html +--- - -# 数据类型 - -## 1. 基本数据类型 - -IoTDB 支持以下十种数据类型: - -* BOOLEAN(布尔值) -* INT32(整型) -* INT64(长整型) -* FLOAT(单精度浮点数) -* DOUBLE(双精度浮点数) -* TEXT(长字符串) -* STRING(字符串) -* BLOB(大二进制对象) -* OBJECT(大二进制对象) - > V2.0.8-beta 版本起支持 -* TIMESTAMP(时间戳) -* DATE(日期) - -其中: -1. STRING 和 TEXT 类型的区别在于,STRING 类型具有更多的统计信息,能够用于优化值过滤查询。TEXT 类型适合用于存储长字符串。 -2. OBJECT 和 BLOB 类型的区别如下: - - | | **OBJECT** | **BLOB** | - | ---------------------- |-------------------------------------------------------------------------------------------------------------------------| -------------------------------------------- | - | 写放大(越低越好) | 低(写放大系数永远为 1) | 高(写放大系数为 2 + 合并次数) | - | 空间放大(越低越好) | 低(merge & release on write) | 高(merge on read and release on compact) | - | 查询结果 | 默认查询 OBJECT 列时,返回结果如`(Object) XX.XX KB)`。
真实 OBJECT 数据存储路径位于:`${data_dir}/object_data`,可通过 `READ_OBJECT` 函数读取其真实内容 | 直接返回真实的二进制内容 | - - -### 1.1 浮点数精度配置 - -对于 **FLOAT** 与 **DOUBLE** 类型的序列,如果编码方式采用 `RLE`或 `TS_2DIFF`,可以在创建序列时通过 `MAX_POINT_NUMBER` 属性指定浮点数的小数点后位数。 - -例如, -```sql -CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; -``` - -若不指定,系统会按照配置文件 `iotdb-system.properties` 中的 [float_precision](../Reference/System-Config-Manual.md) 项配置(默认为 2 位)。 - -### 1.2 数据类型兼容性 - -当写入数据的类型与序列注册的数据类型不一致时, -- 如果序列数据类型不兼容写入数据类型,系统会给出错误提示。 -- 如果序列数据类型兼容写入数据类型,系统会进行数据类型的自动转换,将写入的数据类型更正为注册序列的类型。 - -各数据类型的兼容情况如下表所示: - -| 序列数据类型 | 支持的写入数据类型 | -|-----------|------------------------------------| -| BOOLEAN | BOOLEAN | -| INT32 | INT32 | -| INT64 | INT32 INT64 TIMESTAMP | -| FLOAT | INT32 FLOAT | -| DOUBLE | INT32 INT64 FLOAT DOUBLE TIMESTAMP | -| TEXT | TEXT STRING | -| STRING | TEXT STRING | -| BLOB | TEXT STRING BLOB | -| OBJECT | OBJECT | -| TIMESTAMP | INT32 INT64 TIMESTAMP | -| DATE | DATE | - -## 2. 时间戳类型 - -时间戳是一个数据到来的时间点,其中包括绝对时间戳和相对时间戳。 - -### 2.1 绝对时间戳 - -IOTDB 中绝对时间戳分为二种,一种为 LONG 类型,一种为 DATETIME 类型(包含 DATETIME-INPUT, DATETIME-DISPLAY 两个小类)。 - -在用户在输入时间戳时,可以使用 LONG 类型的时间戳或 DATETIME-INPUT 类型的时间戳,其中 DATETIME-INPUT 类型的时间戳支持格式如表所示: - -
- -**DATETIME-INPUT 类型支持格式** - - -| format | -| :--------------------------- | -| yyyy-MM-dd HH:mm:ss | -| yyyy/MM/dd HH:mm:ss | -| yyyy.MM.dd HH:mm:ss | -| yyyy-MM-dd HH:mm:ssZZ | -| yyyy/MM/dd HH:mm:ssZZ | -| yyyy.MM.dd HH:mm:ssZZ | -| yyyy/MM/dd HH:mm:ss.SSS | -| yyyy-MM-dd HH:mm:ss.SSS | -| yyyy.MM.dd HH:mm:ss.SSS | -| yyyy-MM-dd HH:mm:ss.SSSZZ | -| yyyy/MM/dd HH:mm:ss.SSSZZ | -| yyyy.MM.dd HH:mm:ss.SSSZZ | -| ISO8601 standard time format | - - -
- - -IoTDB 在显示时间戳时可以支持 LONG 类型以及 DATETIME-DISPLAY 类型,其中 DATETIME-DISPLAY 类型可以支持用户自定义时间格式。自定义时间格式的语法如表所示: - -
- -**DATETIME-DISPLAY 自定义时间格式的语法** - - -| Symbol | Meaning | Presentation | Examples | -| :----: | :-------------------------: | :----------: | :--------------------------------: | -| G | era | era | era | -| C | century of era (>=0) | number | 20 | -| Y | year of era (>=0) | year | 1996 | -| | | | | -| x | weekyear | year | 1996 | -| w | week of weekyear | number | 27 | -| e | day of week | number | 2 | -| E | day of week | text | Tuesday; Tue | -| | | | | -| y | year | year | 1996 | -| D | day of year | number | 189 | -| M | month of year | month | July; Jul; 07 | -| d | day of month | number | 10 | -| | | | | -| a | halfday of day | text | PM | -| K | hour of halfday (0~11) | number | 0 | -| h | clockhour of halfday (1~12) | number | 12 | -| | | | | -| H | hour of day (0~23) | number | 0 | -| k | clockhour of day (1~24) | number | 24 | -| m | minute of hour | number | 30 | -| s | second of minute | number | 55 | -| S | fraction of second | millis | 978 | -| | | | | -| z | time zone | text | Pacific Standard Time; PST | -| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | -| | | | | -| ' | escape for text | delimiter | | -| '' | single quote | literal | ' | - -
- -### 2.2 相对时间戳 - - 相对时间是指与服务器时间```now()```和```DATETIME```类型时间相差一定时间间隔的时间。 - 形式化定义为: - - ``` - Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ - RelativeTime = (now() | DATETIME) ((+|-) Duration)+ - ``` - -
- - **The syntax of the duration unit** - - - | Symbol | Meaning | Presentation | Examples | - | :----: | :---------: | :----------------------: | :------: | - | y | year | 1y=365 days | 1y | - | mo | month | 1mo=30 days | 1mo | - | w | week | 1w=7 days | 1w | - | d | day | 1d=1 day | 1d | - | | | | | - | h | hour | 1h=3600 seconds | 1h | - | m | minute | 1m=60 seconds | 1m | - | s | second | 1s=1 second | 1s | - | | | | | - | ms | millisecond | 1ms=1000_000 nanoseconds | 1ms | - | us | microsecond | 1us=1000 nanoseconds | 1us | - | ns | nanosecond | 1ns=1 nanosecond | 1ns | - -
-
- 例子:
-
- ```
- now() - 1d2h //比服务器时间早 1 天 2 小时的时间
- now() - 1w //比服务器时间早 1 周的时间
- ```
-
- > 注意:'+'和'-'的左右两边必须有空格
\ No newline at end of file
diff --git a/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type_apache.md b/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type_apache.md
new file mode 100644
index 000000000..1ee9da48d
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type_apache.md
@@ -0,0 +1,200 @@
+
+
+# 数据类型
+
+## 1. 基本数据类型
+
+IoTDB 支持以下十一种数据类型:
+
+* BOOLEAN(布尔值)
+* INT32(整型)
+* INT64(长整型)
+* FLOAT(单精度浮点数)
+* DOUBLE(双精度浮点数)
+* TEXT(长字符串)
+* STRING(字符串)
+* BLOB(大二进制对象)
+* OBJECT(大二进制对象)
+  > V2.0.8-beta 版本起支持
+* TIMESTAMP(时间戳)
+* DATE(日期)
+
+其中:
+1. STRING 和 TEXT 类型的区别在于,STRING 类型具有更多的统计信息,能够用于优化值过滤查询。TEXT 类型适合用于存储长字符串。
+2. OBJECT 和 BLOB 类型的区别如下:
+
+ | | **OBJECT** | **BLOB** |
+ | ---------------------- | ------------------------------ | -------------------------------------------- |
+ | 写放大(越低越好) | 低(写放大系数永远为 1) | 高(写放大系数为 2 + 合并次数) |
+ | 空间放大(越低越好) | 低(merge & release on write) | 高(merge on read and release on compact) |
+ | 查询结果 | 默认查询 OBJECT 列时,返回结果如 `(Object) XX.XX KB`。<br>真实 OBJECT 数据存储路径位于:`${data_dir}/object_data`,可通过 `READ_OBJECT` 函数读取其真实内容 | 直接返回真实的二进制内容 |
+
+
+### 1.1 浮点数精度配置
+
+对于 **FLOAT** 与 **DOUBLE** 类型的序列,如果编码方式采用 `RLE` 或 `TS_2DIFF`,可以在创建序列时通过 `MAX_POINT_NUMBER` 属性指定浮点数的小数点后位数。
+
+例如,
+```sql
+CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2';
+```
+
+若不指定,系统会按照配置文件 `iotdb-system.properties` 中的 [float_precision](../Reference/System-Config-Manual_apache.md) 项配置(默认为 2 位)。
+
+### 1.2 数据类型兼容性
+
+当写入数据的类型与序列注册的数据类型不一致时,
+- 如果序列数据类型不兼容写入数据类型,系统会给出错误提示。
+- 如果序列数据类型兼容写入数据类型,系统会进行数据类型的自动转换,将写入的数据类型更正为注册序列的类型。
+
+各数据类型的兼容情况如下表所示:
+
+| 序列数据类型 | 支持的写入数据类型 |
+|-----------|------------------------------------|
+| BOOLEAN | BOOLEAN |
+| INT32 | INT32 |
+| INT64 | INT32 INT64 TIMESTAMP |
+| FLOAT | INT32 FLOAT |
+| DOUBLE | INT32 INT64 FLOAT DOUBLE TIMESTAMP |
+| TEXT | TEXT STRING |
+| STRING | TEXT STRING |
+| BLOB | TEXT STRING BLOB |
+| OBJECT | OBJECT |
+| TIMESTAMP | INT32 INT64 TIMESTAMP |
+| DATE | DATE |
+
+## 2. 时间戳类型
+
+时间戳是一条数据到来的时间点,分为绝对时间戳和相对时间戳。
+
+### 2.1 绝对时间戳
+
+IoTDB 中绝对时间戳分为两种,一种为 LONG 类型,一种为 DATETIME 类型(包含 DATETIME-INPUT、DATETIME-DISPLAY 两个小类)。
+
+用户在输入时间戳时,可以使用 LONG 类型的时间戳或 DATETIME-INPUT 类型的时间戳,其中 DATETIME-INPUT 类型的时间戳支持格式如表所示:
+
+
+**DATETIME-INPUT 类型支持格式**
+
+
+| format                       |
+| :--------------------------- |
+| yyyy-MM-dd HH:mm:ss          |
+| yyyy/MM/dd HH:mm:ss          |
+| yyyy.MM.dd HH:mm:ss          |
+| yyyy-MM-dd HH:mm:ssZZ        |
+| yyyy/MM/dd HH:mm:ssZZ        |
+| yyyy.MM.dd HH:mm:ssZZ        |
+| yyyy-MM-dd HH:mm:ss.SSS      |
+| yyyy/MM/dd HH:mm:ss.SSS      |
+| yyyy.MM.dd HH:mm:ss.SSS      |
+| yyyy-MM-dd HH:mm:ss.SSSZZ    |
+| yyyy/MM/dd HH:mm:ss.SSSZZ    |
+| yyyy.MM.dd HH:mm:ss.SSSZZ    |
+| ISO8601 standard time format |
+
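+
+以下是一个使用 DATETIME-INPUT 格式时间戳写入数据的简单示意(其中表 table1 及列 temperature 仅为假设示例,并非本文档约定的示例数据,实际请以自身建模为准):
+
+```sql
+-- 时间戳使用 yyyy-MM-dd HH:mm:ss 格式
+INSERT INTO table1(time, temperature) VALUES ('2025-01-01 12:00:00', 25.0);
+```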
+
+
+IoTDB 在显示时间戳时支持 LONG 类型以及 DATETIME-DISPLAY 类型,其中 DATETIME-DISPLAY 类型支持用户自定义时间格式。自定义时间格式的语法如表所示:
+
+
+**DATETIME-DISPLAY 自定义时间格式的语法**
+
+
+| Symbol | Meaning | Presentation | Examples |
+| :----: | :-------------------------: | :----------: | :--------------------------------: |
+| G | era | era | era |
+| C | century of era (>=0) | number | 20 |
+| Y | year of era (>=0) | year | 1996 |
+| | | | |
+| x | weekyear | year | 1996 |
+| w | week of weekyear | number | 27 |
+| e | day of week | number | 2 |
+| E | day of week | text | Tuesday; Tue |
+| | | | |
+| y | year | year | 1996 |
+| D | day of year | number | 189 |
+| M | month of year | month | July; Jul; 07 |
+| d | day of month | number | 10 |
+| | | | |
+| a | halfday of day | text | PM |
+| K | hour of halfday (0~11) | number | 0 |
+| h | clockhour of halfday (1~12) | number | 12 |
+| | | | |
+| H | hour of day (0~23) | number | 0 |
+| k | clockhour of day (1~24) | number | 24 |
+| m | minute of hour | number | 30 |
+| s | second of minute | number | 55 |
+| S | fraction of second | millis | 978 |
+| | | | |
+| z | time zone | text | Pacific Standard Time; PST |
+| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles |
+| | | | |
+| ' | escape for text | delimiter | |
+| '' | single quote | literal | ' |
+
+
+### 2.2 相对时间戳
+
+ 相对时间是指与服务器时间 `now()` 或 `DATETIME` 类型时间相差一定时间间隔的时间。
+ 形式化定义为:
+
+ ```
+ Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+
+ RelativeTime = (now() | DATETIME) ((+|-) Duration)+
+ ```
+
+
+ **The syntax of the duration unit**
+
+
+ | Symbol | Meaning | Presentation | Examples |
+ | :----: | :---------: | :----------------------: | :------: |
+ | y | year | 1y=365 days | 1y |
+ | mo | month | 1mo=30 days | 1mo |
+ | w | week | 1w=7 days | 1w |
+ | d | day | 1d=1 day | 1d |
+ | | | | |
+ | h | hour | 1h=3600 seconds | 1h |
+ | m | minute | 1m=60 seconds | 1m |
+ | s | second | 1s=1 second | 1s |
+ | | | | |
+ | ms | millisecond | 1ms=1000000 nanoseconds | 1ms |
+ | us | microsecond | 1us=1000 nanoseconds | 1us |
+ | ns | nanosecond | 1ns=1 nanosecond | 1ns |
+
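+
+ 相对时间可以用在查询的时间过滤条件中,下面是一个简单示意(其中表 table1 仅为假设示例;相对时间表达式能否直接用于 SQL 过滤条件,请以实际版本支持为准):
+
+ ```sql
+ -- 查询服务器当前时间之前 1 天 2 小时以内的数据
+ SELECT * FROM table1 WHERE time >= now() - 1d2h;
+ ```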
+
+ 例子:
+
+ ```
+ now() - 1d2h //比服务器时间早 1 天 2 小时的时间
+ now() - 1w //比服务器时间早 1 周的时间
+ ```
+
+ > 注意:'+'和'-'的左右两边必须有空格
\ No newline at end of file
diff --git a/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type_timecho.md b/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type_timecho.md
new file mode 100644
index 000000000..be817a3f1
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Type_timecho.md
@@ -0,0 +1,200 @@
+
+
+# 数据类型
+
+## 1. 基本数据类型
+
+IoTDB 支持以下十一种数据类型:
+
+* BOOLEAN(布尔值)
+* INT32(整型)
+* INT64(长整型)
+* FLOAT(单精度浮点数)
+* DOUBLE(双精度浮点数)
+* TEXT(长字符串)
+* STRING(字符串)
+* BLOB(大二进制对象)
+* OBJECT(大二进制对象)
+  > V2.0.8 版本起支持
+* TIMESTAMP(时间戳)
+* DATE(日期)
+
+其中:
+1. STRING 和 TEXT 类型的区别在于,STRING 类型具有更多的统计信息,能够用于优化值过滤查询。TEXT 类型适合用于存储长字符串。
+2. OBJECT 和 BLOB 类型的区别如下:
+
+ | | **OBJECT** | **BLOB** |
+ | ---------------------- | ------------------------------ | -------------------------------------------- |
+ | 写放大(越低越好) | 低(写放大系数永远为 1) | 高(写放大系数为 2 + 合并次数) |
+ | 空间放大(越低越好) | 低(merge & release on write) | 高(merge on read and release on compact) |
+ | 查询结果 | 默认查询 OBJECT 列时,返回结果如 `(Object) XX.XX KB`。<br>真实 OBJECT 数据存储路径位于:`${data_dir}/object_data`,可通过 `READ_OBJECT` 函数读取其真实内容 | 直接返回真实的二进制内容 |
+
+
+### 1.1 浮点数精度配置
+
+对于 **FLOAT** 与 **DOUBLE** 类型的序列,如果编码方式采用 `RLE` 或 `TS_2DIFF`,可以在创建序列时通过 `MAX_POINT_NUMBER` 属性指定浮点数的小数点后位数。
+
+例如,
+```sql
+CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2';
+```
+
+若不指定,系统会按照配置文件 `iotdb-system.properties` 中的 [float_precision](../Reference/System-Config-Manual_timecho.md) 项配置(默认为 2 位)。
+
+### 1.2 数据类型兼容性
+
+当写入数据的类型与序列注册的数据类型不一致时,
+- 如果序列数据类型不兼容写入数据类型,系统会给出错误提示。
+- 如果序列数据类型兼容写入数据类型,系统会进行数据类型的自动转换,将写入的数据类型更正为注册序列的类型。
+
+各数据类型的兼容情况如下表所示:
+
+| 序列数据类型 | 支持的写入数据类型 |
+|-----------|------------------------------------|
+| BOOLEAN | BOOLEAN |
+| INT32 | INT32 |
+| INT64 | INT32 INT64 TIMESTAMP |
+| FLOAT | INT32 FLOAT |
+| DOUBLE | INT32 INT64 FLOAT DOUBLE TIMESTAMP |
+| TEXT | TEXT STRING |
+| STRING | TEXT STRING |
+| BLOB | TEXT STRING BLOB |
+| OBJECT | OBJECT |
+| TIMESTAMP | INT32 INT64 TIMESTAMP |
+| DATE | DATE |
+
+## 2. 时间戳类型
+
+时间戳是一条数据到来的时间点,分为绝对时间戳和相对时间戳。
+
+### 2.1 绝对时间戳
+
+IoTDB 中绝对时间戳分为两种,一种为 LONG 类型,一种为 DATETIME 类型(包含 DATETIME-INPUT、DATETIME-DISPLAY 两个小类)。
+
+用户在输入时间戳时,可以使用 LONG 类型的时间戳或 DATETIME-INPUT 类型的时间戳,其中 DATETIME-INPUT 类型的时间戳支持格式如表所示:
+
+
+**DATETIME-INPUT 类型支持格式**
+
+
+| format                       |
+| :--------------------------- |
+| yyyy-MM-dd HH:mm:ss          |
+| yyyy/MM/dd HH:mm:ss          |
+| yyyy.MM.dd HH:mm:ss          |
+| yyyy-MM-dd HH:mm:ssZZ        |
+| yyyy/MM/dd HH:mm:ssZZ        |
+| yyyy.MM.dd HH:mm:ssZZ        |
+| yyyy-MM-dd HH:mm:ss.SSS      |
+| yyyy/MM/dd HH:mm:ss.SSS      |
+| yyyy.MM.dd HH:mm:ss.SSS      |
+| yyyy-MM-dd HH:mm:ss.SSSZZ    |
+| yyyy/MM/dd HH:mm:ss.SSSZZ    |
+| yyyy.MM.dd HH:mm:ss.SSSZZ    |
+| ISO8601 standard time format |
+
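+
+以下是一个使用 DATETIME-INPUT 格式时间戳写入数据的简单示意(其中表 table1 及列 temperature 仅为假设示例,并非本文档约定的示例数据,实际请以自身建模为准):
+
+```sql
+-- 时间戳使用 yyyy-MM-dd HH:mm:ss 格式
+INSERT INTO table1(time, temperature) VALUES ('2025-01-01 12:00:00', 25.0);
+```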
+
+
+IoTDB 在显示时间戳时支持 LONG 类型以及 DATETIME-DISPLAY 类型,其中 DATETIME-DISPLAY 类型支持用户自定义时间格式。自定义时间格式的语法如表所示:
+
+
+**DATETIME-DISPLAY 自定义时间格式的语法**
+
+
+| Symbol | Meaning | Presentation | Examples |
+| :----: | :-------------------------: | :----------: | :--------------------------------: |
+| G | era | era | era |
+| C | century of era (>=0) | number | 20 |
+| Y | year of era (>=0) | year | 1996 |
+| | | | |
+| x | weekyear | year | 1996 |
+| w | week of weekyear | number | 27 |
+| e | day of week | number | 2 |
+| E | day of week | text | Tuesday; Tue |
+| | | | |
+| y | year | year | 1996 |
+| D | day of year | number | 189 |
+| M | month of year | month | July; Jul; 07 |
+| d | day of month | number | 10 |
+| | | | |
+| a | halfday of day | text | PM |
+| K | hour of halfday (0~11) | number | 0 |
+| h | clockhour of halfday (1~12) | number | 12 |
+| | | | |
+| H | hour of day (0~23) | number | 0 |
+| k | clockhour of day (1~24) | number | 24 |
+| m | minute of hour | number | 30 |
+| s | second of minute | number | 55 |
+| S | fraction of second | millis | 978 |
+| | | | |
+| z | time zone | text | Pacific Standard Time; PST |
+| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles |
+| | | | |
+| ' | escape for text | delimiter | |
+| '' | single quote | literal | ' |
+
+
+### 2.2 相对时间戳
+
+ 相对时间是指与服务器时间 `now()` 或 `DATETIME` 类型时间相差一定时间间隔的时间。
+ 形式化定义为:
+
+ ```
+ Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+
+ RelativeTime = (now() | DATETIME) ((+|-) Duration)+
+ ```
+
+
+ **The syntax of the duration unit**
+
+
+ | Symbol | Meaning | Presentation | Examples |
+ | :----: | :---------: | :----------------------: | :------: |
+ | y | year | 1y=365 days | 1y |
+ | mo | month | 1mo=30 days | 1mo |
+ | w | week | 1w=7 days | 1w |
+ | d | day | 1d=1 day | 1d |
+ | | | | |
+ | h | hour | 1h=3600 seconds | 1h |
+ | m | minute | 1m=60 seconds | 1m |
+ | s | second | 1s=1 second | 1s |
+ | | | | |
+ | ms | millisecond | 1ms=1000000 nanoseconds | 1ms |
+ | us | microsecond | 1us=1000 nanoseconds | 1us |
+ | ns | nanosecond | 1ns=1 nanosecond | 1ns |
+
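+
+ 相对时间可以用在查询的时间过滤条件中,下面是一个简单示意(其中表 table1 仅为假设示例;相对时间表达式能否直接用于 SQL 过滤条件,请以实际版本支持为准):
+
+ ```sql
+ -- 查询服务器当前时间之前 1 天 2 小时以内的数据
+ SELECT * FROM table1 WHERE time >= now() - 1d2h;
+ ```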
+ + 例子: + + ``` + now() - 1d2h //比服务器时间早 1 天 2 小时的时间 + now() - 1w //比服务器时间早 1 周的时间 + ``` + + > 注意:'+'和'-'的左右两边必须有空格 \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data_apache.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data_apache.md index 811f816d5..f43389f74 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data_apache.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data_apache.md @@ -37,7 +37,7 @@ SELECT ⟨select_list⟩ IoTDB 查询语法提供以下子句: -- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause.md) +- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause_apache.md) - FROM 子句:指出查询的数据源,可以是单个表、多个通过 `JOIN` 子句连接的表,或者是一个子查询。详细语法见:[FROM & JOIN 子句](../SQL-Manual/From-Join-Clause.md) - WHERE 子句:用于过滤数据,只选择满足特定条件的数据行。这个子句在逻辑上紧跟在 FROM 子句之后执行。详细语法见:[WHERE 子句](../SQL-Manual/Where-Clause.md) - GROUP BY 子句:当需要对数据进行聚合时使用,指定了用于分组的列。详细语法见:[GROUP BY 子句](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data_timecho.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data_timecho.md index 53309d6aa..48c47bc3d 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data_timecho.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data_timecho.md @@ -38,7 +38,7 @@ SELECT ⟨select_list⟩ IoTDB 查询语法提供以下子句: -- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause.md) +- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause_timecho.md) - FROM 子句:指出查询的数据源,可以是单个表、多个通过 `JOIN` 子句连接的表,或者是一个子查询。详细语法见:[FROM & JOIN 子句](../SQL-Manual/From-Join-Clause.md) - WHERE 子句:用于过滤数据,只选择满足特定条件的数据行。这个子句在逻辑上紧跟在 FROM 子句之后执行。详细语法见:[WHERE 子句](../SQL-Manual/Where-Clause.md) - GROUP BY 子句:当需要对数据进行聚合时使用,指定了用于分组的列。详细语法见:[GROUP BY 子句](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md index f5190f8ed..6e7bb9e8f 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Write-Updata-Data_timecho.md @@ -293,7 +293,7 @@ It costs 0.014s 自为了避免单个 Object 过大导致写入请求过大,Object 类型的值支持拆分后按顺序分段写入。SQL 中需要使用 `to_object(isEOF, offset, content)` 函数进行值填充。 -> V2.0.8-beta 版本起支持 +> V2.0.8 版本起支持 **语法:** diff --git a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources_apache.md b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources_apache.md index 5ca9b5462..2766e74a6 100644 --- a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources_apache.md +++ b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources_apache.md @@ -146,7 +146,7 @@ -> 提供灵活的内存配置选项,用户可在datanode-env文件中进行调整,详细信息和配置指南请参见 [datanode-env](../Reference/System-Config-Manual.md#_2-2-datanode-env-sh-bat) +> 提供灵活的内存配置选项,用户可在datanode-env文件中进行调整,详细信息和配置指南请参见 [datanode-env](../Reference/System-Config-Manual_apache.md#_2-2-datanode-env-sh-bat) ## 3. 
存储(磁盘) ### 3.1 存储空间 diff --git a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources_timecho.md b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources_timecho.md index 0934dbcd7..4bf223c79 100644 --- a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources_timecho.md +++ b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Database-Resources_timecho.md @@ -146,7 +146,7 @@ -> 提供灵活的内存配置选项,用户可在datanode-env文件中进行调整,详细信息和配置指南请参见 [datanode-env](../Reference/System-Config-Manual.md#_2-2-datanode-env-sh-bat) +> 提供灵活的内存配置选项,用户可在datanode-env文件中进行调整,详细信息和配置指南请参见 [datanode-env](../Reference/System-Config-Manual_timecho.md#_2-2-datanode-env-sh-bat) ## 3. 存储(磁盘) ### 3.1 存储空间 diff --git a/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual.md b/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual.md index 672829976..660b55b42 100644 --- a/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual.md +++ b/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual.md @@ -1,3 +1,6 @@ +--- +redirectTo: System-Config-Manual_apache.html +--- - -# 配置参数 - -IoTDB 配置文件位于 IoTDB 安装目录:`conf`文件夹下。 - -- `confignode-env.sh/bat`:环境配置项的配置文件,可以配置 ConfigNode 的内存大小。 -- `datanode-env.sh/bat`:环境配置项的配置文件,可以配置 DataNode 的内存大小。 -- `iotdb-system.properties`:IoTDB 的配置文件。 -- `iotdb-system.properties.template`:IoTDB 的配置文件模版。 - -## 1. 修改配置: - -在 `iotdb-system.properties` 文件中已存在的参数可以直接进行修改。对于那些在 `iotdb-system.properties` 中未列出的参数,可以从 `iotdb-system.properties.template` 配置文件模板中找到相应的参数,然后将其复制到 `iotdb-system.properties` 文件中进行修改。 - -### 1.1 改后生效方式 - -不同的配置参数有不同的生效方式,分为以下三种: - -- 仅允许在第一次启动服务前修改: 在第一次启动 ConfigNode/DataNode 后即禁止修改,修改会导致 ConfigNode/DataNode 无法启动。 -- 重启服务生效: ConfigNode/DataNode 启动后仍可修改,但需要重启 ConfigNode/DataNode 后才生效。 -- 热加载: 可在 ConfigNode/DataNode 运行时修改,修改后通过 Session 或 Cli 发送 `load configuration` 或 `set configuration key1 = 'value1'` 命令(SQL)至 IoTDB 使配置生效。 - -## 2. 
环境配置项 - -### 2.1 confignode-env.sh/bat - -环境配置项主要用于对 ConfigNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。ConfigNode 启动时,此部分配置会被传给 JVM,详细配置项说明如下: - -- MEMORY_SIZE - -| 名字 | MEMORY_SIZE | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB ConfigNode 启动时分配的内存大小 | -| 类型 | String | -| 默认值 | 取决于操作系统和机器配置。默认为机器内存的十分之三,最多会被设置为 16G。 | -| 改后生效方式 | 重启服务生效 | - -- ON_HEAP_MEMORY - -| 名字 | ON_HEAP_MEMORY | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB ConfigNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | -| 类型 | String | -| 默认值 | 取决于MEMORY_SIZE的配置。 | -| 改后生效方式 | 重启服务生效 | - -- OFF_HEAP_MEMORY - -| 名字 | OFF_HEAP_MEMORY | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB ConfigNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | -| 类型 | String | -| 默认值 | 取决于MEMORY_SIZE的配置。 | -| 改后生效方式 | 重启服务生效 | - -### 2.2 datanode-env.sh/bat - -环境配置项主要用于对 DataNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。DataNode/Standalone 启动时,此部分配置会被传给 JVM,详细配置项说明如下: - -- MEMORY_SIZE - -| 名字 | MEMORY_SIZE | -| ------------ | ---------------------------------------------------- | -| 描述 | IoTDB DataNode 启动时分配的内存大小 | -| 类型 | String | -| 默认值 | 取决于操作系统和机器配置。默认为机器内存的二分之一。 | -| 改后生效方式 | 重启服务生效 | - -- ON_HEAP_MEMORY - -| 名字 | ON_HEAP_MEMORY | -| ------------ | ---------------------------------------------------------- | -| 描述 | IoTDB DataNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | -| 类型 | String | -| 默认值 | 取决于MEMORY_SIZE的配置。 | -| 改后生效方式 | 重启服务生效 | - -- OFF_HEAP_MEMORY - -| 名字 | OFF_HEAP_MEMORY | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB DataNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | -| 类型 | String | -| 默认值 | 取决于MEMORY_SIZE的配置 | -| 改后生效方式 | 重启服务生效 | - - -## 3. 
系统配置项(iotdb-system.properties.template) - -### 3.1 集群管理 - -- cluster_name - -| 名字 | cluster_name | -| -------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| 描述 | 集群名称 | -| 类型 | String | -| 默认值 | default_cluster | -| 修改方式 | CLI 中执行语句 `set configuration cluster_name = 'xxx'` (xxx为希望修改成的集群名称) | -| 注意 | 此修改通过网络分发至每个节点。在网络波动或者有节点宕机的情况下,不保证能够在全部节点修改成功。未修改成功的节点重启时无法加入集群,此时需要手动修改该节点的配置文件中的cluster_name项,再重启。正常情况下,不建议通过手动修改配置文件的方式修改集群名称,不建议通过`load configuration`的方式热加载。 | - -### 3.2 SeedConfigNode 配置 - -- cn_seed_config_node - -| 名字 | cn_seed_config_node | -| ------------ | ------------------------------------------------------------ | -| 描述 | 目标 ConfigNode 地址,ConfigNode 通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 cn_target_config_node_list | -| 类型 | String | -| 默认值 | 127.0.0.1:10710 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_seed_config_node - -| 名字 | dn_seed_config_node | -| ------------ | ------------------------------------------------------------ | -| 描述 | ConfigNode 地址,DataNode 启动时通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 dn_target_config_node_list | -| 类型 | String | -| 默认值 | 127.0.0.1:10710 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -### 3.3 Node RPC 配置 - -- cn_internal_address - -| 名字 | cn_internal_address | -| ------------ | ---------------------------- | -| 描述 | ConfigNode 集群内部地址 | -| 类型 | String | -| 默认值 | 127.0.0.1 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- cn_internal_port - -| 名字 | cn_internal_port | -| ------------ | ---------------------------- | -| 描述 | ConfigNode 集群服务监听端口 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 10710 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- cn_consensus_port - -| 名字 | cn_consensus_port | -| ------------ | ----------------------------- | -| 描述 | ConfigNode 的共识协议通信端口 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 10720 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_rpc_address - -| 名字 | dn_rpc_address | -| ------------ |----------------| -| 描述 | 客户端 RPC 服务监听地址 | -| 类型 | String | -| 默认值 | 127.0.0.1 | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_port - -| 名字 | dn_rpc_port | -| ------------ | ----------------------- | -| 描述 | Client RPC 服务监听端口 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 6667 | -| 改后生效方式 | 重启服务生效 | - -- dn_internal_address - -| 名字 | dn_internal_address | -| ------------ | ---------------------------- | -| 描述 | DataNode 内网通信地址 | -| 类型 | string | -| 默认值 | 127.0.0.1 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_internal_port - -| 名字 | dn_internal_port | -| ------------ | ---------------------------- | -| 描述 | DataNode 内网通信端口 | -| 类型 | int | -| 默认值 | 10730 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_mpp_data_exchange_port - -| 名字 | dn_mpp_data_exchange_port | -| ------------ | ---------------------------- | -| 描述 | MPP 数据交换端口 | -| 类型 | int | -| 默认值 | 10740 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_schema_region_consensus_port - -| 名字 | dn_schema_region_consensus_port | -| ------------ | ------------------------------------- | -| 描述 | DataNode 元数据副本的共识协议通信端口 | -| 类型 | int | -| 默认值 | 10750 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_data_region_consensus_port - -| 名字 | dn_data_region_consensus_port | -| ------------ | ----------------------------------- | -| 描述 | DataNode 数据副本的共识协议通信端口 | -| 类型 | int | -| 默认值 | 10760 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_join_cluster_retry_interval_ms - -| 名字 | dn_join_cluster_retry_interval_ms | -| ------------ | --------------------------------- | -| 描述 | DataNode 再次重试加入集群等待时间 | -| 类型 | long | -| 默认值 | 5000 | -| 改后生效方式 | 重启服务生效 | - -### 3.4 副本配置 - -- 
config_node_consensus_protocol_class - -| 名字 | config_node_consensus_protocol_class | -| ------------ | ------------------------------------------------ | -| 描述 | ConfigNode 副本的共识协议,仅支持 RatisConsensus | -| 类型 | String | -| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- schema_replication_factor - -| 名字 | schema_replication_factor | -| ------------ | ---------------------------------- | -| 描述 | Database 的默认元数据副本数 | -| 类型 | int32 | -| 默认值 | 1 | -| 改后生效方式 | 重启服务后对**新的 Database** 生效 | - -- schema_region_consensus_protocol_class - -| 名字 | schema_region_consensus_protocol_class | -| ------------ | ----------------------------------------------------- | -| 描述 | 元数据副本的共识协议,多副本时只能使用 RatisConsensus | -| 类型 | String | -| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- data_replication_factor - -| 名字 | data_replication_factor | -| ------------ | ---------------------------------- | -| 描述 | Database 的默认数据副本数 | -| 类型 | int32 | -| 默认值 | 1 | -| 改后生效方式 | 重启服务后对**新的 Database** 生效 | - -- data_region_consensus_protocol_class - -| 名字 | data_region_consensus_protocol_class | -| ------------ | ------------------------------------------------------------ | -| 描述 | 数据副本的共识协议,多副本时可以使用 IoTConsensus 或 RatisConsensus | -| 类型 | String | -| 默认值 | org.apache.iotdb.consensus.iot.IoTConsensus | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -### 3.5 目录配置 - -- cn_system_dir - -| 名字 | cn_system_dir | -| ------------ | ----------------------------------------------------------- | -| 描述 | ConfigNode 系统数据存储路径 | -| 类型 | String | -| 默认值 | data/confignode/system(Windows:data\\configndoe\\system) | -| 改后生效方式 | 重启服务生效 | - -- cn_consensus_dir - -| 名字 | cn_consensus_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | ConfigNode 共识协议数据存储路径 | -| 类型 | String | -| 默认值 | data/confignode/consensus(Windows:data\\configndoe\\consensus) | -| 改后生效方式 | 重启服务生效 | - -- cn_pipe_receiver_file_dir - -| 名字 | cn_pipe_receiver_file_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | ConfigNode中pipe接收者用于存储文件的目录路径。 | -| 类型 | String | -| 默认值 | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) | -| 改后生效方式 | 重启服务生效 | - -- dn_system_dir - -| 名字 | dn_system_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 元数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/system(Windows:data\\datanode\\system) | -| 改后生效方式 | 重启服务生效 | - -- dn_data_dirs - -| 名字 | dn_data_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/data(Windows:data\\datanode\\data) | -| 改后生效方式 | 重启服务生效 | - -- dn_multi_dir_strategy - -| 名字 | dn_multi_dir_strategy | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 在 data_dirs 中为 TsFile 选择目录时采用的策略。可使用简单类名或类名全称。系统提供以下三种策略:
1. SequenceStrategy:IoTDB 按顺序选择目录,依次遍历 data_dirs 中的所有目录,并不断轮循;
2. MaxDiskUsableSpaceFirstStrategy:IoTDB 优先选择 data_dirs 中对应磁盘空余空间最大的目录;
您可以通过以下方法完成用户自定义策略:
1. 继承 org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy 类并实现自身的 Strategy 方法;
2. 将实现的类的完整类名(包名加类名,UserDefineStrategyPackage)填写到该配置项;
3. 将该类 jar 包添加到工程中。 | -| 类型 | String | -| 默认值 | SequenceStrategy | -| 改后生效方式 | 热加载 | - -- dn_consensus_dir - -| 名字 | dn_consensus_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 共识层日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/consensus(Windows:data\\datanode\\consensus) | -| 改后生效方式 | 重启服务生效 | - -- dn_wal_dirs - -| 名字 | dn_wal_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 写前日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/wal(Windows:data\\datanode\\wal) | -| 改后生效方式 | 重启服务生效 | - -- dn_tracing_dir - -| 名字 | dn_tracing_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 追踪根目录路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | datanode/tracing(Windows:datanode\\tracing) | -| 改后生效方式 | 重启服务生效 | - -- dn_sync_dir - -| 名字 | dn_sync_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB sync 存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/sync(Windows:data\\datanode\\sync) | -| 改后生效方式 | 重启服务生效 | - -- sort_tmp_dir - -| 名字 | sort_tmp_dir | -| ------------ | ------------------------------------------------- | -| 描述 | 用于配置排序操作的临时目录。 | -| 类型 | String | -| 默认值 | data/datanode/tmp(Windows:data\\datanode\\tmp) | -| 改后生效方式 | 重启服务生效 | - -- dn_pipe_receiver_file_dirs - -| 名字 | dn_pipe_receiver_file_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | DataNode中pipe接收者用于存储文件的目录路径。 | -| 类型 | String | -| 默认值 | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | -| 改后生效方式 | 重启服务生效 | - -- iot_consensus_v2_receiver_file_dirs - -| 名字 | iot_consensus_v2_receiver_file_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTConsensus V2中接收者用于存储文件的目录路径。 | -| 类型 | String | -| 默认值 | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | -| 改后生效方式 | 重启服务生效 | - -- iot_consensus_v2_deletion_file_dir - -| 名字 | iot_consensus_v2_deletion_file_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTConsensus V2中删除操作用于存储文件的目录路径。 | -| 类型 | String | -| 默认值 | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | -| 改后生效方式 | 重启服务生效 | - -### 3.6 监控配置 - -- cn_metric_reporter_list - -| 名字 | cn_metric_reporter_list | -| ------------ | -------------------------------------------------- | -| 描述 | confignode中用于配置监控模块的数据需要报告的系统。 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- cn_metric_level - -| 名字 | cn_metric_level | -| ------------ | ------------------------------------------ | -| 描述 | confignode中控制监控模块收集数据的详细程度 | -| 类型 | String | -| 默认值 | IMPORTANT | -| 改后生效方式 | 重启服务生效 | - -- cn_metric_async_collect_period - -| 名字 | cn_metric_async_collect_period | -| ------------ | -------------------------------------------------- | -| 描述 | confignode中某些监控数据异步收集的周期,单位是秒。 | -| 类型 | int | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -- cn_metric_prometheus_reporter_port - -| 名字 | cn_metric_prometheus_reporter_port | -| ------------ | ------------------------------------------------------ | -| 描述 | confignode中Prometheus报告者用于监控数据报告的端口号。 | -| 类型 | int | -| 默认值 | 9091 | -| 改后生效方式 | 重启服务生效 | - -- 
dn_metric_reporter_list - -| 名字 | dn_metric_reporter_list | -| ------------ | ------------------------------------------------ | -| 描述 | DataNode中用于配置监控模块的数据需要报告的系统。 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- dn_metric_level - -| 名字 | dn_metric_level | -| ------------ | ---------------------------------------- | -| 描述 | DataNode中控制监控模块收集数据的详细程度 | -| 类型 | String | -| 默认值 | IMPORTANT | -| 改后生效方式 | 重启服务生效 | - -- dn_metric_async_collect_period - -| 名字 | dn_metric_async_collect_period | -| ------------ | ------------------------------------------------ | -| 描述 | DataNode中某些监控数据异步收集的周期,单位是秒。 | -| 类型 | int | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -- dn_metric_prometheus_reporter_port - -| 名字 | dn_metric_prometheus_reporter_port | -| ------------ | ---------------------------------------------------- | -| 描述 | DataNode中Prometheus报告者用于监控数据报告的端口号。 | -| 类型 | int | -| 默认值 | 9092 | -| 改后生效方式 | 重启服务生效 | - -- dn_metric_internal_reporter_type - -| 名字 | dn_metric_internal_reporter_type | -| ------------ | ------------------------------------------------------------ | -| 描述 | DataNode中监控模块内部报告者的种类,用于内部监控和检查数据是否已经成功写入和刷新。 | -| 类型 | String | -| 默认值 | IOTDB | -| 改后生效方式 | 重启服务生效 | - -### 3.7 SSL 配置 - -- enable_thrift_ssl - -| 名字 | enable_thrift_ssl | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当enable_thrift_ssl配置为true时,将通过dn_rpc_port使用 SSL 加密进行通信 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- enable_https - -| 名字 | enable_https | -| ------------ | ------------------------------ | -| 描述 | REST Service 是否开启 SSL 配置 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- key_store_path - -| 名字 | key_store_path | -| ------------ | -------------- | -| 描述 | ssl证书路径 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- key_store_pwd - -| 名字 | key_store_pwd | -| ------------ | ------------- | -| 描述 | ssl证书密码 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -### 3.8 连接配置 - -- cn_rpc_thrift_compression_enable - -| 名字 | cn_rpc_thrift_compression_enable | -| ------------ | -------------------------------- | -| 描述 | 是否启用 thrift 的压缩机制。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- cn_rpc_max_concurrent_client_num - -| 名字 | cn_rpc_max_concurrent_client_num | -| ------------ |---------------------------------| -| 描述 | 最大连接数。 | -| 类型 | int | -| 默认值 | 3000 | -| 改后生效方式 | 重启服务生效 | - -- cn_connection_timeout_ms - -| 名字 | cn_connection_timeout_ms | -| ------------ | ------------------------ | -| 描述 | 节点连接超时时间 | -| 类型 | int | -| 默认值 | 60000 | -| 改后生效方式 | 重启服务生效 | - -- cn_selector_thread_nums_of_client_manager - -| 名字 | cn_selector_thread_nums_of_client_manager | -| ------------ | ----------------------------------------- | -| 描述 | 客户端异步线程管理的选择器线程数量 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- cn_max_client_count_for_each_node_in_client_manager - -| 名字 | cn_max_client_count_for_each_node_in_client_manager | -| ------------ | --------------------------------------------------- | -| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | -| 类型 | int | -| 默认值 | 300 | -| 改后生效方式 | 重启服务生效 | - -- dn_session_timeout_threshold - -| 名字 | dn_session_timeout_threshold | -| ------------ | ---------------------------- | -| 描述 | 最大的会话空闲时间 | -| 类型 | int | -| 默认值 | 0 | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_thrift_compression_enable - -| 名字 | dn_rpc_thrift_compression_enable | -| ------------ | -------------------------------- | -| 描述 | 是否启用 thrift 的压缩机制 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_advanced_compression_enable - 
-| 名字 | dn_rpc_advanced_compression_enable | -| ------------ | ---------------------------------- | -| 描述 | 是否启用 thrift 的自定制压缩机制 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_selector_thread_count - -| 名字 | rpc_selector_thread_count | -| ------------ | ------------------------- | -| 描述 | rpc 选择器线程数量 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_min_concurrent_client_num - -| 名字 | rpc_min_concurrent_client_num | -| ------------ | ----------------------------- | -| 描述 | 最小连接数 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_max_concurrent_client_num - -| 名字 | dn_rpc_max_concurrent_client_num | -| ------------ |----------------------------------| -| 描述 | 最大连接数 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- dn_thrift_max_frame_size - -| 名字 | dn_thrift_max_frame_size | -| ------------ | ------------------------------------------------------ | -| 描述 | RPC 请求/响应的最大字节数 | -| 类型 | long | -| 默认值 | 536870912 (默认值512MB) | -| 改后生效方式 | 重启服务生效 | - -- dn_thrift_init_buffer_size - -| 名字 | dn_thrift_init_buffer_size | -| ------------ | -------------------------- | -| 描述 | 字节数 | -| 类型 | long | -| 默认值 | 1024 | -| 改后生效方式 | 重启服务生效 | - -- dn_connection_timeout_ms - -| 名字 | dn_connection_timeout_ms | -| ------------ | ------------------------ | -| 描述 | 节点连接超时时间 | -| 类型 | int | -| 默认值 | 60000 | -| 改后生效方式 | 重启服务生效 | - -- dn_selector_thread_count_of_client_manager - -| 名字 | dn_selector_thread_count_of_client_manager | -| ------------ | ------------------------------------------------------------ | -| 描述 | selector thread (TAsyncClientManager) nums for async thread in a clientManagerclientManager中异步线程的选择器线程(TAsyncClientManager)编号 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- dn_max_client_count_for_each_node_in_client_manager - -| 名字 | dn_max_client_count_for_each_node_in_client_manager | -| ------------ | --------------------------------------------------- | -| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | -| 类型 | int | -| 默认值 | 300 | -| 改后生效方式 | 重启服务生效 | - -### 3.9 对象存储管理 - -- remote_tsfile_cache_dirs - -| 名字 | remote_tsfile_cache_dirs | -| ------------ | ------------------------ | -| 描述 | 云端存储在本地的缓存目录 | -| 类型 | String | -| 默认值 | data/datanode/data/cache | -| 改后生效方式 | 重启服务生效 | - -- remote_tsfile_cache_page_size_in_kb - -| 名字 | remote_tsfile_cache_page_size_in_kb | -| ------------ | ----------------------------------- | -| 描述 | 云端存储在本地缓存文件的块大小 | -| 类型 | int | -| 默认值 | 20480 | -| 改后生效方式 | 重启服务生效 | - -- remote_tsfile_cache_max_disk_usage_in_mb - -| 名字 | remote_tsfile_cache_max_disk_usage_in_mb | -| ------------ | ---------------------------------------- | -| 描述 | 云端存储本地缓存的最大磁盘占用大小 | -| 类型 | long | -| 默认值 | 51200 | -| 改后生效方式 | 重启服务生效 | - -- object_storage_type - -| 名字 | object_storage_type | -| ------------ | ------------------- | -| 描述 | 云端存储类型 | -| 类型 | String | -| 默认值 | AWS_S3 | -| 改后生效方式 | 重启服务生效 | - -- object_storage_endpoint - -| 名字 | object_storage_endpoint | -| ------------ | ----------------------- | -| 描述 | 云端存储的 endpoint | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- object_storage_bucket - -| 名字 | object_storage_bucket | -| ------------ | ---------------------- | -| 描述 | 云端存储 bucket 的名称 | -| 类型 | String | -| 默认值 | iotdb_data | -| 改后生效方式 | 重启服务生效 | - -- object_storage_access_key - -| 名字 | object_storage_access_key | -| ------------ | ------------------------- | -| 描述 | 云端存储的验证信息 key | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- object_storage_access_secret - -| 名字 | 
object_storage_access_secret | -| ------------ | ---------------------------- | -| 描述 | 云端存储的验证信息 secret | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -### 3.10 多级管理 - -- dn_default_space_usage_thresholds - -| 名字 | dn_default_space_usage_thresholds | -| ------------ | ------------------------------------------------------------ | -| 描述 | 定义每个层级数据目录的最小剩余空间比例;当剩余空间少于该比例时,数据会被自动迁移至下一个层级;当最后一个层级的剩余存储空间到低于此阈值时,会将系统置为 READ_ONLY | -| 类型 | double | -| 默认值 | 0.85 | -| 改后生效方式 | 热加载 | - -- dn_tier_full_policy - -| 名字 | dn_tier_full_policy | -| ------------ | ------------------------------------------------------------ | -| 描述 | 如何处理最后一层数据,当其已用空间高于其dn_default_space_usage_threshold时。| -| 类型 | String | -| 默认值 | NULL | -| 改后生效方式 | 热加载 | - -- migrate_thread_count - -| 名字 | migrate_thread_count | -| ------------ | ---------------------------------------- | -| 描述 | DataNode数据目录中迁移操作的线程池大小。 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 热加载 | - -- tiered_storage_migrate_speed_limit_bytes_per_sec - -| 名字 | tiered_storage_migrate_speed_limit_bytes_per_sec | -| ------------ | ------------------------------------------------ | -| 描述 | 限制不同存储层级之间的数据迁移速度。 | -| 类型 | int | -| 默认值 | 10485760 | -| 改后生效方式 | 热加载 | - -### 3.11 REST服务配置 - -- enable_rest_service - -| 名字 | enable_rest_service | -| ------------ | ------------------- | -| 描述 | 是否开启Rest服务。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- rest_service_port - -| 名字 | rest_service_port | -| ------------ | ------------------ | -| 描述 | Rest服务监听端口号 | -| 类型 | int32 | -| 默认值 | 18080 | -| 改后生效方式 | 重启服务生效 | - -- enable_swagger - -| 名字 | enable_swagger | -| ------------ | --------------------------------- | -| 描述 | 是否启用swagger来展示rest接口信息 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- rest_query_default_row_size_limit - -| 名字 | rest_query_default_row_size_limit | -| ------------ | --------------------------------- | -| 描述 | 一次查询能返回的结果集最大行数 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- cache_expire_in_seconds - -| 名字 | cache_expire_in_seconds | -| ------------ | -------------------------------- | -| 描述 | 用户登录信息缓存的过期时间(秒) | -| 类型 | int32 | -| 默认值 | 28800 | -| 改后生效方式 | 重启服务生效 | - -- cache_max_num - -| 名字 | cache_max_num | -| ------------ | ------------------------ | -| 描述 | 缓存中存储的最大用户数量 | -| 类型 | int32 | -| 默认值 | 100 | -| 改后生效方式 | 重启服务生效 | - -- cache_init_num - -| 名字 | cache_init_num | -| ------------ | -------------- | -| 描述 | 缓存初始容量 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- client_auth - -| 名字 | client_auth | -| ------------ | ---------------------- | -| 描述 | 是否需要客户端身份验证 | -| 类型 | boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- trust_store_path - -| 名字 | trust_store_path | -| ------------ | ----------------------- | -| 描述 | keyStore 密码(非必填) | -| 类型 | String | -| 默认值 | "" | -| 改后生效方式 | 重启服务生效 | - -- trust_store_pwd - -| 名字 | trust_store_pwd | -| ------------ | ------------------------- | -| 描述 | trustStore 密码(非必填) | -| 类型 | String | -| 默认值 | "" | -| 改后生效方式 | 重启服务生效 | - -- idle_timeout_in_seconds - -| 名字 | idle_timeout_in_seconds | -| ------------ | ----------------------- | -| 描述 | SSL 超时时间,单位为秒 | -| 类型 | int32 | -| 默认值 | 5000 | -| 改后生效方式 | 重启服务生效 | - -### 3.12 负载均衡配置 - -- series_slot_num - -| 名字 | series_slot_num | -| ------------ | ---------------------------- | -| 描述 | 序列分区槽数 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- series_partition_executor_class - -| 名字 | series_partition_executor_class | -| ------------ | ------------------------------------------------------------ | -| 描述 
| 序列分区哈希函数 | -| 类型 | String | -| 默认值 | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- schema_region_group_extension_policy - -| 名字 | schema_region_group_extension_policy | -| ------------ | ------------------------------------ | -| 描述 | SchemaRegionGroup 的扩容策略 | -| 类型 | string | -| 默认值 | AUTO | -| 改后生效方式 | 重启服务生效 | - -- default_schema_region_group_num_per_database - -| 名字 | default_schema_region_group_num_per_database | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当选用 CUSTOM-SchemaRegionGroup 扩容策略时,此参数为每个 Database 拥有的 SchemaRegionGroup 数量;当选用 AUTO-SchemaRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 SchemaRegionGroup 数量 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_per_data_node - -| 名字 | schema_region_per_data_node | -| ------------ | -------------------------------------------------- | -| 描述 | 期望每个 DataNode 可管理的 SchemaRegion 的最大数量 | -| 类型 | double | -| 默认值 | 1.0 | -| 改后生效方式 | 重启服务生效 | - -- data_region_group_extension_policy - -| 名字 | data_region_group_extension_policy | -| ------------ | ---------------------------------- | -| 描述 | DataRegionGroup 的扩容策略 | -| 类型 | string | -| 默认值 | AUTO | -| 改后生效方式 | 重启服务生效 | - -- default_data_region_group_num_per_database - -| 名字 | default_data_region_group_per_database | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当选用 CUSTOM-DataRegionGroup 扩容策略时,此参数为每个 Database 拥有的 DataRegionGroup 数量;当选用 AUTO-DataRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 DataRegionGroup 数量 | -| 类型 | int | -| 默认值 | 2 | -| 改后生效方式 | 重启服务生效 | - -- data_region_per_data_node - -| 名字 | data_region_per_data_node | -| ------------ | ------------------------------------------------ | -| 描述 | 期望每个 DataNode 可管理的 DataRegion 的最大数量 | -| 类型 | double | -| 默认值 | CPU 核心数的一半 | -| 改后生效方式 | 重启服务生效 | - -- enable_auto_leader_balance_for_ratis_consensus - -| 名字 | enable_auto_leader_balance_for_ratis_consensus | -| ------------ | ---------------------------------------------- | -| 描述 | 是否为 Ratis 共识协议开启自动均衡 leader 策略 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -- enable_auto_leader_balance_for_iot_consensus - -| 名字 | enable_auto_leader_balance_for_iot_consensus | -| ------------ | -------------------------------------------- | -| 描述 | 是否为 IoT 共识协议开启自动均衡 leader 策略 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -### 3.13 集群管理 - -- time_partition_origin - -| 名字 | time_partition_origin | -| ------------ | ------------------------------------------------------------ | -| 描述 | Database 数据时间分区的起始点,即从哪个时间点开始计算时间分区。 | -| 类型 | Long | -| 单位 | 毫秒 | -| 默认值 | 0 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- time_partition_interval - -| 名字 | time_partition_interval | -| ------------ | ------------------------------- | -| 描述 | Database 默认的数据时间分区间隔 | -| 类型 | Long | -| 单位 | 毫秒 | -| 默认值 | 604800000 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- heartbeat_interval_in_ms - -| 名字 | heartbeat_interval_in_ms | -| ------------ | ------------------------ | -| 描述 | 集群节点间的心跳间隔 | -| 类型 | Long | -| 单位 | ms | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- disk_space_warning_threshold - -| 名字 | disk_space_warning_threshold | -| ------------ | ---------------------------- | -| 描述 | DataNode 磁盘剩余阈值 | -| 类型 | double(percentage) | -| 默认值 | 0.05 | -| 改后生效方式 | 重启服务生效 | - -### 3.14 内存控制配置 - -- datanode_memory_proportion - -| 名字 | datanode_memory_proportion | -| ------------ | ---------------------------------------------------- | -| 描述 | 存储引擎、查询引擎、元数据、共识、流处理引擎和空闲内存比例 | -| 类型 | Ratio | -| 默认值 | 
3:3:1:1:1:1 | -| 改后生效方式 | 重启服务生效 | - -- schema_memory_proportion - -| 名字 | schema_memory_proportion | -| ------------ | ------------------------------------------------------------ | -| 描述 | Schema 相关的内存如何在 SchemaRegion、SchemaCache 和 PartitionCache 之间分配 | -| 类型 | Ratio | -| 默认值 | 5:4:1 | -| 改后生效方式 | 重启服务生效 | - -- storage_engine_memory_proportion - -| 名字 | storage_engine_memory_proportion | -| ------------ | -------------------------------- | -| 描述 | 写入和合并占存储内存比例 | -| 类型 | Ratio | -| 默认值 | 8:2 | -| 改后生效方式 | 重启服务生效 | - -- write_memory_proportion - -| 名字 | write_memory_proportion | -| ------------ | -------------------------------------------- | -| 描述 | Memtable 和 TimePartitionInfo 占写入内存比例 | -| 类型 | Ratio | -| 默认值 | 19:1 | -| 改后生效方式 | 重启服务生效 | - -- primitive_array_size - -| 名字 | primitive_array_size | -| ------------ | ---------------------------------------- | -| 描述 | 数组池中的原始数组大小(每个数组的长度) | -| 类型 | int32 | -| 默认值 | 64 | -| 改后生效方式 | 重启服务生效 | - -- chunk_metadata_size_proportion - -| 名字 | chunk_metadata_size_proportion | -| ------------ | -------------------------------------------- | -| 描述 | 在数据压缩过程中,用于存储块元数据的内存比例 | -| 类型 | Double | -| 默认值 | 0.1 | -| 改后生效方式 | 重启服务生效 | - -- flush_proportion - -| 名字 | flush_proportion | -| ------------ | ------------------------------------------------------------ | -| 描述 | 调用flush disk的写入内存比例,默认0.4,若有极高的写入负载力(比如batch=1000),可以设置为低于默认值,比如0.2 | -| 类型 | Double | -| 默认值 | 0.4 | -| 改后生效方式 | 重启服务生效 | - -- buffered_arrays_memory_proportion - -| 名字 | buffered_arrays_memory_proportion | -| ------------ | --------------------------------------- | -| 描述 | 为缓冲数组分配的写入内存比例,默认为0.6 | -| 类型 | Double | -| 默认值 | 0.6 | -| 改后生效方式 | 重启服务生效 | - -- reject_proportion - -| 名字 | reject_proportion | -| ------------ | ------------------------------------------------------------ | -| 描述 | 拒绝插入的写入内存比例,默认0.8,若有极高的写入负载力(比如batch=1000)并且物理内存足够大,它可以设置为高于默认值,如0.9 | -| 类型 | Double | -| 默认值 | 0.8 | -| 改后生效方式 | 重启服务生效 | - -- device_path_cache_proportion - -| 名字 | device_path_cache_proportion | -| ------------ | --------------------------------------------------- | -| 描述 | 在内存中分配给设备路径缓存(DevicePathCache)的比例 | -| 类型 | Double | -| 默认值 | 0.05 | -| 改后生效方式 | 重启服务生效 | - -- write_memory_variation_report_proportion - -| 名字 | write_memory_variation_report_proportion | -| ------------ | ------------------------------------------------------------ | -| 描述 | 如果 DataRegion 的内存增加超过写入可用内存的一定比例,则向系统报告。默认值为0.001 | -| 类型 | Double | -| 默认值 | 0.001 | -| 改后生效方式 | 重启服务生效 | - -- check_period_when_insert_blocked - -| 名字 | check_period_when_insert_blocked | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当插入被拒绝时,等待时间(以毫秒为单位)去再次检查系统,默认为50。若插入被拒绝,读取负载低,可以设置大一些。 | -| 类型 | int32 | -| 默认值 | 50 | -| 改后生效方式 | 重启服务生效 | - -- io_task_queue_size_for_flushing - -| 名字 | io_task_queue_size_for_flushing | -| ------------ | -------------------------------- | -| 描述 | ioTaskQueue 的大小。默认值为10。 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- enable_query_memory_estimation - -| 名字 | enable_query_memory_estimation | -| ------------ | ------------------------------------------------------------ | -| 描述 | 开启后会预估每次查询的内存使用量,如果超过可用内存,会拒绝本次查询 | -| 类型 | bool | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -### 3.15 元数据引擎配置 - -- schema_engine_mode - -| 名字 | schema_engine_mode | -| ------------ | ------------------------------------------------------------ | -| 描述 | 元数据引擎的运行模式,支持 Memory 和 PBTree;PBTree 模式下支持将内存中暂时不用的序列元数据实时置换到磁盘上,需要使用时再加载进内存;此参数在集群中所有的 DataNode 上务必保持相同。 | -| 类型 | string | -| 默认值 | Memory | -| 改后生效方式 | 
仅允许在第一次启动服务前修改 | - -- partition_cache_size - -| 名字 | partition_cache_size | -| ------------ | ------------------------------ | -| 描述 | 分区信息缓存的最大缓存条目数。 | -| 类型 | Int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- sync_mlog_period_in_ms - -| 名字 | sync_mlog_period_in_ms | -| ------------ | ------------------------------------------------------------ | -| 描述 | mlog定期刷新到磁盘的周期,单位毫秒。如果该参数为0,则表示每次对元数据的更新操作都会被立即写到磁盘上。 | -| 类型 | Int64 | -| 默认值 | 100 | -| 改后生效方式 | 重启服务生效 | - -- tag_attribute_flush_interval - -| 名字 | tag_attribute_flush_interval | -| ------------ | -------------------------------------------------- | -| 描述 | 标签和属性记录的间隔数,达到此记录数量时将强制刷盘 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- tag_attribute_total_size - -| 名字 | tag_attribute_total_size | -| ------------ | ---------------------------------------- | -| 描述 | 每个时间序列标签和属性的最大持久化字节数 | -| 类型 | int32 | -| 默认值 | 700 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- max_measurement_num_of_internal_request - -| 名字 | max_measurement_num_of_internal_request | -| ------------ | ------------------------------------------------------------ | -| 描述 | 一次注册序列请求中若物理量过多,在系统内部执行时将被拆分为若干个轻量级的子请求,每个子请求中的物理量数目不超过此参数设置的最大值。 | -| 类型 | Int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- datanode_schema_cache_eviction_policy - -| 名字 | datanode_schema_cache_eviction_policy | -| ------------ | ----------------------------------------------------- | -| 描述 | 当 Schema 缓存达到其最大容量时,Schema 缓存的淘汰策略 | -| 类型 | String | -| 默认值 | FIFO | -| 改后生效方式 | 重启服务生效 | - -- cluster_timeseries_limit_threshold - -| 名字 | cluster_timeseries_limit_threshold | -| ------------ | ---------------------------------- | -| 描述 | 集群中可以创建的时间序列的最大数量 | -| 类型 | Int32 | -| 默认值 | -1 | -| 改后生效方式 | 重启服务生效 | - -- cluster_device_limit_threshold - -| 名字 | cluster_device_limit_threshold | -| ------------ | ------------------------------ | -| 描述 | 集群中可以创建的最大设备数量 | -| 类型 | Int32 | -| 默认值 | -1 | -| 改后生效方式 | 重启服务生效 | - -- database_limit_threshold - -| 名字 | database_limit_threshold | -| ------------ | ------------------------------ | -| 描述 | 集群中可以创建的最大数据库数量 | -| 类型 | Int32 | -| 默认值 | -1 | -| 改后生效方式 | 重启服务生效 | - -### 3.16 自动推断数据类型 - -- enable_auto_create_schema - -| 名字 | enable_auto_create_schema | -| ------------ | -------------------------------------- | -| 描述 | 当写入的序列不存在时,是否自动创建序列 | -| 取值 | true or false | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -- default_storage_group_level - -| 名字 | default_storage_group_level | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当写入的数据不存在且自动创建序列时,若需要创建相应的 database,将序列路径的哪一层当做 database。例如,如果我们接到一个新序列 root.sg0.d1.s2, 并且 level=1, 那么 root.sg0 被视为database(因为 root 是 level 0 层) | -| 取值 | int32 | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- boolean_string_infer_type - -| 名字 | boolean_string_infer_type | -| ------------ | ------------------------------------------ | -| 描述 | "true" 或者 "false" 字符串被推断的数据类型 | -| 取值 | BOOLEAN 或者 TEXT | -| 默认值 | BOOLEAN | -| 改后生效方式 | 热加载 | - -- integer_string_infer_type - -| 名字 | integer_string_infer_type | -| ------------ | --------------------------------- | -| 描述 | 整型字符串推断的数据类型 | -| 取值 | INT32, INT64, FLOAT, DOUBLE, TEXT | -| 默认值 | DOUBLE | -| 改后生效方式 | 热加载 | - -- floating_string_infer_type - -| 名字 | floating_string_infer_type | -| ------------ | ----------------------------- | -| 描述 | "6.7"等字符串被推断的数据类型 | -| 取值 | DOUBLE, FLOAT or TEXT | -| 默认值 | DOUBLE | -| 改后生效方式 | 热加载 | - -- nan_string_infer_type - -| 名字 | nan_string_infer_type | -| ------------ | ---------------------------- | -| 描述 | "NaN" 字符串被推断的数据类型 | -| 取值 | DOUBLE, 
FLOAT or TEXT | -| 默认值 | DOUBLE | -| 改后生效方式 | 热加载 | - -- default_boolean_encoding - -| 名字 | default_boolean_encoding | -| ------------ | ------------------------ | -| 描述 | BOOLEAN 类型编码格式 | -| 取值 | PLAIN, RLE | -| 默认值 | RLE | -| 改后生效方式 | 热加载 | - -- default_int32_encoding - -| 名字 | default_int32_encoding | -| ------------ | -------------------------------------- | -| 描述 | int32 类型编码格式 | -| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | -| 默认值 | TS_2DIFF | -| 改后生效方式 | 热加载 | - -- default_int64_encoding - -| 名字 | default_int64_encoding | -| ------------ | -------------------------------------- | -| 描述 | int64 类型编码格式 | -| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | -| 默认值 | TS_2DIFF | -| 改后生效方式 | 热加载 | - -- default_float_encoding - -| 名字 | default_float_encoding | -| ------------ | ----------------------------- | -| 描述 | float 类型编码格式 | -| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | -| 默认值 | GORILLA | -| 改后生效方式 | 热加载 | - -- default_double_encoding - -| 名字 | default_double_encoding | -| ------------ | ----------------------------- | -| 描述 | double 类型编码格式 | -| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | -| 默认值 | GORILLA | -| 改后生效方式 | 热加载 | - -- default_text_encoding - -| 名字 | default_text_encoding | -| ------------ | --------------------- | -| 描述 | text 类型编码格式 | -| 取值 | PLAIN | -| 默认值 | PLAIN | -| 改后生效方式 | 热加载 | - -* boolean_compressor - -| 名字 | boolean_compressor | -| -------------- | ----------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,BOOLEAN 数据类型的压缩方式 (V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* int32_compressor - -| 名字 | int32_compressor | -| -------------- | ------------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,INT32/DATE 数据类型的压缩方式(V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* int64_compressor - -| 名字 | int64_compressor | -| -------------- | ------------------------------------------------------------------------------ | -| 描述 | 启用自动创建模式时,INT64/TIMESTAMP 数据类型的压缩方式(V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* float_compressor - -| 名字 | float_compressor | -| -------------- | -------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,FLOAT 数据类型的压缩方式(V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* double_compressor - -| 名字 | double_compressor | -| -------------- | --------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,DOUBLE 数据类型的压缩方式(V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* text_compressor - -| 名字 | text_compressor | -| -------------- | -------------------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,TEXT/BINARY/BLOB 数据类型的压缩方式(V2.0.6 版本开始支持 ) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - - - -### 3.17 查询配置 - -- read_consistency_level - -| 名字 | read_consistency_level | -| ------------ | ------------------------------------------------------------ | -| 描述 | 查询一致性等级,取值 “strong” 时从 Leader 副本查询,取值 “weak” 时随机查询一个副本。 | -| 类型 | String | -| 默认值 | strong | -| 改后生效方式 | 重启服务生效 | - -- meta_data_cache_enable - -| 名字 | meta_data_cache_enable | -| ------------ | ------------------------------------------------------------ | -| 描述 | 是否缓存元数据(包括 BloomFilter、Chunk Metadata 和 TimeSeries Metadata。) | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -- chunk_timeseriesmeta_free_memory_proportion - -| 名字 | chunk_timeseriesmeta_free_memory_proportion | -| ------------ | 
------------------------------------------------------------ | -| 描述 | 读取内存分配比例,BloomFilterCache、ChunkCache、TimeseriesMetadataCache、数据集查询的内存和可用内存的查询。参数形式为a : b : c : d : e,其中a、b、c、d、e为整数。 例如“1 : 1 : 1 : 1 : 1” ,“1 : 100 : 200 : 300 : 400” 。 | -| 类型 | String | -| 默认值 | 1 : 100 : 200 : 300 : 400 | -| 改后生效方式 | 重启服务生效 | - -- enable_last_cache - -| 名字 | enable_last_cache | -| ------------ | ------------------ | -| 描述 | 是否开启最新点缓存 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -- mpp_data_exchange_core_pool_size - -| 名字 | mpp_data_exchange_core_pool_size | -| ------------ | -------------------------------- | -| 描述 | MPP 数据交换线程池核心线程数 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- mpp_data_exchange_max_pool_size - -| 名字 | mpp_data_exchange_max_pool_size | -| ------------ | ------------------------------- | -| 描述 | MPP 数据交换线程池最大线程数 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- mpp_data_exchange_keep_alive_time_in_ms - -| 名字 | mpp_data_exchange_keep_alive_time_in_ms | -| ------------ | --------------------------------------- | -| 描述 | MPP 数据交换最大等待时间 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- driver_task_execution_time_slice_in_ms - -| 名字 | driver_task_execution_time_slice_in_ms | -| ------------ | -------------------------------------- | -| 描述 | 单个 DriverTask 最长执行时间(ms) | -| 类型 | int32 | -| 默认值 | 200 | -| 改后生效方式 | 重启服务生效 | - -- max_tsblock_size_in_bytes - -| 名字 | max_tsblock_size_in_bytes | -| ------------ | ------------------------------- | -| 描述 | 单个 TsBlock 的最大容量(byte) | -| 类型 | int32 | -| 默认值 | 131072 | -| 改后生效方式 | 重启服务生效 | - -- max_tsblock_line_numbers - -| 名字 | max_tsblock_line_numbers | -| ------------ | ------------------------ | -| 描述 | 单个 TsBlock 的最大行数 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- slow_query_threshold - -| 名字 | slow_query_threshold | -| ------------ | ------------------------------ | -| 描述 | 慢查询的时间阈值。单位:毫秒。 | -| 类型 | long | -| 默认值 | 10000 | -| 改后生效方式 | 热加载 | - -- query_cost_stat_window - -| 名字 | query_cost_stat_window | -| ------------ |--------------------| -| 描述 | 查询耗时统计的窗口,单位为分钟。 | -| 类型 | Int32 | -| 默认值 | 0 | -| 改后生效方式 | 热加载 | - -- query_timeout_threshold - -| 名字 | query_timeout_threshold | -| ------------ | -------------------------------- | -| 描述 | 查询的最大执行时间。单位:毫秒。 | -| 类型 | Int32 | -| 默认值 | 60000 | -| 改后生效方式 | 重启服务生效 | - -- max_allowed_concurrent_queries - -| 名字 | max_allowed_concurrent_queries | -| ------------ | ------------------------------ | -| 描述 | 允许的最大并发查询数量。 | -| 类型 | Int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- query_thread_count - -| 名字 | query_thread_count | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当 IoTDB 对内存中的数据进行查询时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。 | -| 类型 | Int32 | -| 默认值 | 0 | -| 改后生效方式 | 重启服务生效 | - -- degree_of_query_parallelism - -| 名字 | degree_of_query_parallelism | -| ------------ | ------------------------------------------------------------ | -| 描述 | 设置单个查询片段实例将创建的 pipeline 驱动程序数量,也就是查询操作的并行度。 | -| 类型 | Int32 | -| 默认值 | 0 | -| 改后生效方式 | 重启服务生效 | - -- mode_map_size_threshold - -| 名字 | mode_map_size_threshold | -| ------------ | ---------------------------------------------- | -| 描述 | 计算 MODE 聚合函数时,计数映射可以增长到的阈值 | -| 类型 | Int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- batch_size - -| 名字 | batch_size | -| ------------ | ---------------------------------------------------------- | -| 描述 | 服务器中每次迭代的数据量(数据条目,即不同时间戳的数量。) | -| 类型 | Int32 | -| 默认值 | 100000 | -| 改后生效方式 | 重启服务生效 | - -- sort_buffer_size_in_bytes - -| 名字 | 
sort_buffer_size_in_bytes | -| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| 描述 | 设置外部排序操作中使用的内存缓冲区大小 | -| 类型 | long | -| 默认值 | 1048576(V2.0.6 之前版本)
0(V2.0.6 及之后版本),当值小于等于 0 时,由系统自动进行计算,计算公式为:`sort_buffer_size_in_bytes = Math.min(32 * 1024 * 1024, 堆内内存 * 查询引擎内存比例 * 查询执行内存比例 / 查询线程数 / 2)` | -| 改后生效方式 | 热加载 | - -- merge_threshold_of_explain_analyze - -| 名字 | merge_threshold_of_explain_analyze | -| ------------ | ------------------------------------------------------------ | -| 描述 | 用于设置在 `EXPLAIN ANALYZE` 语句的结果集中操作符(operator)数量的合并阈值。 | -| 类型 | int | -| 默认值 | 10 | -| 改后生效方式 | 热加载 | - -### 3.18 TTL配置 - -- ttl_check_interval - -| 名字 | ttl_check_interval | -| ------------ | -------------------------------------- | -| 描述 | ttl 检查任务的间隔,单位 ms,默认为 2h | -| 类型 | int | -| 默认值 | 7200000 | -| 改后生效方式 | 重启服务生效 | - -- max_expired_time - -| 名字 | max_expired_time | -| ------------ | ------------------------------------------------------------ | -| 描述 | 如果一个文件中存在设备已经过期超过此时间,那么这个文件将被立即整理。单位 ms,默认为一个月 | -| 类型 | int | -| 默认值 | 2592000000 | -| 改后生效方式 | 重启服务生效 | - -- expired_data_ratio - -| 名字 | expired_data_ratio | -| ------------ | ------------------------------------------------------------ | -| 描述 | 过期设备比例。如果一个文件中过期设备的比率超过这个值,那么这个文件中的过期数据将通过 compaction 清理。 | -| 类型 | float | -| 默认值 | 0.3 | -| 改后生效方式 | 重启服务生效 | - -### 3.19 存储引擎配置 - -- timestamp_precision - -| 名字 | timestamp_precision | -| ------------ | ---------------------------- | -| 描述 | 时间戳精度,支持 ms、us、ns | -| 类型 | String | -| 默认值 | ms | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- timestamp_precision_check_enabled - -| 名字 | timestamp_precision_check_enabled | -| ------------ | --------------------------------- | -| 描述 | 用于控制是否启用时间戳精度检查 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- max_waiting_time_when_insert_blocked - -| 名字 | max_waiting_time_when_insert_blocked | -| ------------ | ----------------------------------------------- | -| 描述 | 当插入请求等待超过这个时间,则抛出异常,单位 ms | -| 类型 | Int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- handle_system_error - -| 名字 | handle_system_error | -| ------------ | ------------------------------------ | -| 描述 | 当系统遇到不可恢复的错误时的处理方法 | -| 类型 | String | -| 默认值 | CHANGE_TO_READ_ONLY | -| 改后生效方式 | 重启服务生效 | - -- enable_timed_flush_seq_memtable - -| 名字 | enable_timed_flush_seq_memtable | -| ------------ | ------------------------------- | -| 描述 | 是否开启定时刷盘顺序 memtable | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- seq_memtable_flush_interval_in_ms - -| 名字 | seq_memtable_flush_interval_in_ms | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | -| 类型 | long | -| 默认值 | 600000 | -| 改后生效方式 | 热加载 | - -- seq_memtable_flush_check_interval_in_ms - -| 名字 | seq_memtable_flush_check_interval_in_ms | -| ------------ | ---------------------------------------- | -| 描述 | 检查顺序 memtable 是否需要刷盘的时间间隔 | -| 类型 | long | -| 默认值 | 30000 | -| 改后生效方式 | 热加载 | - -- enable_timed_flush_unseq_memtable - -| 名字 | enable_timed_flush_unseq_memtable | -| ------------ | --------------------------------- | -| 描述 | 是否开启定时刷新乱序 memtable | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- unseq_memtable_flush_interval_in_ms - -| 名字 | unseq_memtable_flush_interval_in_ms | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | -| 类型 | long | -| 默认值 | 600000 | -| 改后生效方式 | 热加载 | - -- unseq_memtable_flush_check_interval_in_ms - -| 名字 | unseq_memtable_flush_check_interval_in_ms | -| ------------ | ----------------------------------------- | -| 描述 | 检查乱序 memtable 是否需要刷盘的时间间隔 | -| 类型 | long | -| 默认值 | 30000 | -| 改后生效方式 | 热加载 | - -- 
tvlist_sort_algorithm - -| 名字 | tvlist_sort_algorithm | -| ------------ | ------------------------ | -| 描述 | memtable中数据的排序方法 | -| 类型 | String | -| 默认值 | TIM | -| 改后生效方式 | 重启服务生效 | - -- avg_series_point_number_threshold - -| 名字 | avg_series_point_number_threshold | -| ------------ | ------------------------------------------------ | -| 描述 | 内存中平均每个时间序列点数最大值,达到触发 flush | -| 类型 | int32 | -| 默认值 | 100000 | -| 改后生效方式 | 重启服务生效 | - -- flush_thread_count - -| 名字 | flush_thread_count | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当 IoTDB 将内存中的数据写入磁盘时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。默认值为 0。 | -| 类型 | int32 | -| 默认值 | 0 | -| 改后生效方式 | 重启服务生效 | - -- enable_partial_insert - -| 名字 | enable_partial_insert | -| ------------ | ------------------------------------------------------------ | -| 描述 | 在一次 insert 请求中,如果部分测点写入失败,是否继续写入其他测点。 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -- recovery_log_interval_in_ms - -| 名字 | recovery_log_interval_in_ms | -| ------------ | ----------------------------------------- | -| 描述 | data region的恢复过程中打印日志信息的间隔 | -| 类型 | Int32 | -| 默认值 | 5000 | -| 改后生效方式 | 重启服务生效 | - -- 0.13_data_insert_adapt - -| 名字 | 0.13_data_insert_adapt | -| ------------ | ------------------------------------------------------- | -| 描述 | 如果 0.13 版本客户端进行写入,需要将此配置项设置为 true | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- enable_tsfile_validation - -| 名字 | enable_tsfile_validation | -| ------------ | -------------------------------------- | -| 描述 | Flush, Load 或合并后验证 tsfile 正确性 | -| 类型 | boolean | -| 默认值 | false | -| 改后生效方式 | 热加载 | - -- tier_ttl_in_ms - -| 名字 | tier_ttl_in_ms | -| ------------ | ----------------------------------------- | -| 描述 | 定义每个层级负责的数据范围,通过 TTL 表示 | -| 类型 | long | -| 默认值 | -1 | -| 改后生效方式 | 重启服务生效 | - -* max_object_file_size_in_byte - -| 名字 | max\_object\_file\_size\_in\_byte | -| -------------- |-----------------------------------| -| 描述 | 单对象文件的最大尺寸限制 (V2.0.8-beta 版本起支持) | -| 类型 | long | -| 默认值 | 4294967296 | -| 改后生效方式 | 热加载 | - -* restrict_object_limit - -| 名字 | restrict\_object\_limit | -|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| 描述 | 对 OBJECT 类型的表名、列名和设备名称没有特殊限制。(V2.0.8-beta 版本起支持)当设置为 true 且表中包含 OBJECT 列时,需遵循以下限制:
1. 命名规范:TAG 列的值、表名和字段名禁止使用 “.” 或 “..”,且不得包含 “./” 或 “.\” 字符,否则元数据创建将失败。若名称包含文件系统不支持的字符,则会在数据写入时报错。
2. 大小写敏感:如果底层文件系统不区分大小写,则设备标识符(如 'd1' 与 'D1')将被视为相同。在此情况下,若创建此类名称相似的设备,其 OBJECT 数据文件可能互相覆盖,导致数据错误。
3. 存储路径:OBJECT 类型数据的实际存储路径格式为:`${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin`。 | -| 类型 | boolean | -| 默认值 | false | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - - -### 3.20 合并配置 - -- enable_seq_space_compaction - -| 名字 | enable_seq_space_compaction | -| ------------ | -------------------------------------- | -| 描述 | 顺序空间内合并,开启顺序文件之间的合并 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- enable_unseq_space_compaction - -| 名字 | enable_unseq_space_compaction | -| ------------ | -------------------------------------- | -| 描述 | 乱序空间内合并,开启乱序文件之间的合并 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- enable_cross_space_compaction - -| 名字 | enable_cross_space_compaction | -| ------------ | ------------------------------------------ | -| 描述 | 跨空间合并,开启将乱序文件合并到顺序文件中 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- enable_auto_repair_compaction - -| 名字 | enable_auto_repair_compaction | -| ------------ | ----------------------------- | -| 描述 | 启用通过合并操作自动修复未排序文件的功能 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- cross_selector - -| 名字 | cross_selector | -| ------------ |----------------| -| 描述 | 跨空间合并任务的选择器 | -| 类型 | String | -| 默认值 | rewrite | -| 改后生效方式 | 重启服务生效 | - -- cross_performer - -| 名字 | cross_performer | -| ------------ |-----------------------------------| -| 描述 | 跨空间合并任务的执行器,可选项:read_point 和 fast | -| 类型 | String | -| 默认值 | fast | -| 改后生效方式 | 热加载 | - -- inner_seq_selector - -| 名字 | inner_seq_selector | -| ------------ |------------------------------------------------------------------------| -| 描述 | 顺序空间内合并任务的选择器,可选 size_tiered_single_\target,size_tiered_multi_target | -| 类型 | String | -| 默认值 | size_tiered_multi_target | -| 改后生效方式 | 热加载 | - -- inner_seq_performer - -| 名字 | inner_seq_performer | -| ------------ |--------------------------------------| -| 描述 | 顺序空间内合并任务的执行器,可选项是 read_chunk 和 fast | -| 类型 | String | -| 默认值 | read_chunk | -| 改后生效方式 | 热加载 | - -- inner_unseq_selector - -| 名字 | inner_unseq_selector | -| ------------ |-------------------------------------------------------------------------| -| 描述 | 乱序空间内合并任务的选择器,可选 size_tiered_single_\target,size_tiered_multi_target | -| 类型 | String | -| 默认值 | size_tiered_multi_target | -| 改后生效方式 | 热加载 | - -- inner_unseq_performer - -| 名字 | inner_unseq_performer | -| ------------ |--------------------------------------| -| 描述 | 乱序空间内合并任务的执行器,可选项是 read_point 和 fast | -| 类型 | String | -| 默认值 | fast | -| 改后生效方式 | 热加载 | - -- compaction_priority - -| 名字 | compaction_priority | -| ------------ |-------------------------------------------------------------------------------------------| -| 描述 | 合并时的优先级。INNER_CROSS:优先执行空间内合并,优先减少文件数量;CROSS_INNER:优先执行跨空间合并,优先清理乱序文件;BALANCE:交替执行两种合并类型。 | -| 类型 | String | -| 默认值 | INNER_CROSS | -| 改后生效方式 | 重启服务生效 | - -- candidate_compaction_task_queue_size - -| 名字 | candidate_compaction_task_queue_size | -| ------------ | ------------------------------------ | -| 描述 | 待选合并任务队列容量 | -| 类型 | int32 | -| 默认值 | 50 | -| 改后生效方式 | 重启服务生效 | - -- target_compaction_file_size - -| 名字 | target_compaction_file_size | -| ------------ |-----------------------------------------------------------------------------------------------------------------------------------------------| -| 描述 | 该参数作用于两个场景:1. 空间内合并的目标文件大小 2. 
跨空间合并中待选序列文件的大小需小于 target_compaction_file_size * 1.5 多数情况下,跨空间合并的目标文件大小不会超过此阈值,即便超出,幅度也不会过大 。 默认值:2GB ,单位:byte | -| 类型 | Long | -| 默认值 | 2147483648 | -| 改后生效方式 | 热加载 | - -- inner_compaction_total_file_size_threshold - -| 名字 | inner_compaction_total_file_size_threshold | -| ------------ |--------------------------------------------| -| 描述 | 空间内合并的文件总大小阈值,单位:byte | -| 类型 | Long | -| 默认值 | 10737418240 | -| 改后生效方式 | 热加载 | - -- inner_compaction_total_file_num_threshold - -| 名字 | inner_compaction_total_file_num_threshold | -| ------------ | ----------------------------------------- | -| 描述 | 空间内合并的文件总数阈值 | -| 类型 | int32 | -| 默认值 | 100 | -| 改后生效方式 | 热加载 | - -- max_level_gap_in_inner_compaction - -| 名字 | max_level_gap_in_inner_compaction | -| ------------ | -------------------------------------- | -| 描述 | 空间内合并筛选的最大层级差 | -| 类型 | int32 | -| 默认值 | 2 | -| 改后生效方式 | 热加载 | - -- target_chunk_size - -| 名字 | target_chunk_size | -| ------------ |--------------------------------------------------| -| 描述 | 刷盘与合并操作的目标数据块大小, 若内存表中某条时序数据的大小超过该值,数据会被刷盘至多个数据块 | -| 类型 | Long | -| 默认值 | 1600000 | -| 改后生效方式 | 重启服务生效 | - -- target_chunk_point_num - -| 名字 | target_chunk_point_num | -| ------------ |------------------------------------------------------| -| 描述 | 刷盘与合并操作中单个数据块的目标点数, 若内存表中某条时序数据的点数超过该值,数据会被刷盘至多个数据块中 | -| 类型 | Long | -| 默认值 | 100000 | -| 改后生效方式 | 重启服务生效 | - -- chunk_size_lower_bound_in_compaction - -| 名字 | chunk_size_lower_bound_in_compaction | -| ------------ |--------------------------------------| -| 描述 | 若数据块大小低于此阈值,则会被反序列化为数据点,默认值为128字节 | -| 类型 | Long | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- chunk_point_num_lower_bound_in_compaction - -| 名字 | chunk_point_num_lower_bound_in_compaction | -| ------------ |------------------------------------------| -| 描述 | 若数据块内的数据点数低于此阈值,则会被反序列化为数据点 | -| 类型 | Long | -| 默认值 | 100 | -| 改后生效方式 | 重启服务生效 | - -- inner_compaction_candidate_file_num - -| 名字 | inner_compaction_candidate_file_num | -| ------------ | ---------------------------------------- | -| 描述 | 空间内合并待选文件筛选的文件数量要求 | -| 类型 | int32 | -| 默认值 | 30 | -| 改后生效方式 | 热加载 | - -- max_cross_compaction_candidate_file_num - -| 名字 | max_cross_compaction_candidate_file_num | -| ------------ | --------------------------------------- | -| 描述 | 跨空间合并待选文件筛选的文件数量上限 | -| 类型 | int32 | -| 默认值 | 500 | -| 改后生效方式 | 热加载 | - -- max_cross_compaction_candidate_file_size - -| 名字 | max_cross_compaction_candidate_file_size | -| ------------ |------------------------------------------| -| 描述 | 跨空间合并待选文件筛选的总大小上限 | -| 类型 | Long | -| 默认值 | 5368709120 | -| 改后生效方式 | 热加载 | - -- min_cross_compaction_unseq_file_level - -| 名字 | min_cross_compaction_unseq_file_level | -| ------------ |---------------------------------------| -| 描述 | 可被选为待选文件的乱序文件的最小空间内合并层级 | -| 类型 | int32 | -| 默认值 | 1 | -| 改后生效方式 | 热加载 | - -- compaction_thread_count - -| 名字 | compaction_thread_count | -| ------------ | ----------------------- | -| 描述 | 执行合并任务的线程数目 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 热加载 | - -- compaction_max_aligned_series_num_in_one_batch - -| 名字 | compaction_max_aligned_series_num_in_one_batch | -| ------------ | ---------------------------------------------- | -| 描述 | 对齐序列合并一次执行时处理的值列数量 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 热加载 | - -- compaction_schedule_interval_in_ms - -| 名字 | compaction_schedule_interval_in_ms | -| ------------ |------------------------------------| -| 描述 | 合并调度的时间间隔,单位 ms | -| 类型 | Long | -| 默认值 | 60000 | -| 改后生效方式 | 重启服务生效 | - -- compaction_write_throughput_mb_per_sec - -| 名字 | compaction_write_throughput_mb_per_sec | -| 
------------ |----------------------------------------| -| 描述 | 合并操作每秒可达到的写入吞吐量上限, 小于或等于 0 的取值表示无限制 | -| 类型 | int32 | -| 默认值 | 16 | -| 改后生效方式 | 重启服务生效 | - -- compaction_read_throughput_mb_per_sec - -| 名字 | compaction_read_throughput_mb_per_sec | -| --------- | ---------------------------------------------------- | -| 描述 | 合并每秒读吞吐限制,单位为 megabyte,小于或等于 0 的取值表示无限制 | -| 类型 | int32 | -| 默认值 | 0 | -| Effective | 热加载 | - -- compaction_read_operation_per_sec - -| 名字 | compaction_read_operation_per_sec | -| --------- | ------------------------------------------- | -| 描述 | 合并每秒读操作数量限制,小于或等于 0 的取值表示无限制 | -| 类型 | int32 | -| 默认值 | 0 | -| Effective | 热加载 | - -- sub_compaction_thread_count - -| 名字 | sub_compaction_thread_count | -| ------------ | ------------------------------------------------------------ | -| 描述 | 每个合并任务的子任务线程数,只对跨空间合并和乱序空间内合并生效 | -| 类型 | int32 | -| 默认值 | 4 | -| 改后生效方式 | 热加载 | - -- inner_compaction_task_selection_disk_redundancy - -| 名字 | inner_compaction_task_selection_disk_redundancy | -| ------------ | ----------------------------------------------- | -| 描述 | 定义了磁盘可用空间的冗余值,仅用于内部压缩 | -| 类型 | double | -| 默认值 | 0.05 | -| 改后生效方式 | 热加载 | - -- inner_compaction_task_selection_mods_file_threshold - -| 名字 | inner_compaction_task_selection_mods_file_threshold | -| ------------ | --------------------------------------------------- | -| 描述 | 定义了mods文件大小的阈值,仅用于内部压缩。 | -| 类型 | long | -| 默认值 | 131072 | -| 改后生效方式 | 热加载 | - -- compaction_schedule_thread_num - -| 名字 | compaction_schedule_thread_num | -| ------------ | ------------------------------ | -| 描述 | 选择合并任务的线程数量 | -| 类型 | int32 | -| 默认值 | 4 | -| 改后生效方式 | 热加载 | - -### 3.21 写前日志配置 - -- wal_mode - -| 名字 | wal_mode | -| ------------ | ------------------------------------------------------------ | -| 描述 | 写前日志的写入模式. 
DISABLE 模式下会关闭写前日志;SYNC 模式下写入请求会在成功写入磁盘后返回; ASYNC 模式下写入请求返回时可能尚未成功写入磁盘后。 | -| 类型 | String | -| 默认值 | ASYNC | -| 改后生效方式 | 重启服务生效 | - -- max_wal_nodes_num - -| 名字 | max_wal_nodes_num | -| ------------ | ----------------------------------------------------- | -| 描述 | 写前日志节点的最大数量,默认值 0 表示数量由系统控制。 | -| 类型 | int32 | -| 默认值 | 0 | -| 改后生效方式 | 重启服务生效 | - -- wal_async_mode_fsync_delay_in_ms - -| 名字 | wal_async_mode_fsync_delay_in_ms | -| ------------ | ------------------------------------------- | -| 描述 | async 模式下写前日志调用 fsync 前的等待时间 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 热加载 | - -- wal_sync_mode_fsync_delay_in_ms - -| 名字 | wal_sync_mode_fsync_delay_in_ms | -| ------------ | ------------------------------------------ | -| 描述 | sync 模式下写前日志调用 fsync 前的等待时间 | -| 类型 | int32 | -| 默认值 | 3 | -| 改后生效方式 | 热加载 | - -- wal_buffer_size_in_byte - -| 名字 | wal_buffer_size_in_byte | -| ------------ | ----------------------- | -| 描述 | 写前日志的 buffer 大小 | -| 类型 | int32 | -| 默认值 | 33554432 | -| 改后生效方式 | 重启服务生效 | - -- wal_buffer_queue_capacity - -| 名字 | wal_buffer_queue_capacity | -| ------------ | ------------------------- | -| 描述 | 写前日志阻塞队列大小上限 | -| 类型 | int32 | -| 默认值 | 500 | -| 改后生效方式 | 重启服务生效 | - -- wal_file_size_threshold_in_byte - -| 名字 | wal_file_size_threshold_in_byte | -| ------------ | ------------------------------- | -| 描述 | 写前日志文件封口阈值 | -| 类型 | int32 | -| 默认值 | 31457280 | -| 改后生效方式 | 热加载 | - -- wal_min_effective_info_ratio - -| 名字 | wal_min_effective_info_ratio | -| ------------ | ---------------------------- | -| 描述 | 写前日志最小有效信息比 | -| 类型 | double | -| 默认值 | 0.1 | -| 改后生效方式 | 热加载 | - -- wal_memtable_snapshot_threshold_in_byte - -| 名字 | wal_memtable_snapshot_threshold_in_byte | -| ------------ | ---------------------------------------- | -| 描述 | 触发写前日志中内存表快照的内存表大小阈值 | -| 类型 | int64 | -| 默认值 | 8388608 | -| 改后生效方式 | 热加载 | - -- max_wal_memtable_snapshot_num - -| 名字 | max_wal_memtable_snapshot_num | -| ------------ | ------------------------------ | -| 描述 | 写前日志中内存表的最大数量上限 | -| 类型 | int32 | -| 默认值 | 1 | -| 改后生效方式 | 热加载 | - -- delete_wal_files_period_in_ms - -| 名字 | delete_wal_files_period_in_ms | -| ------------ | ----------------------------- | -| 描述 | 删除写前日志的检查间隔 | -| 类型 | int64 | -| 默认值 | 20000 | -| 改后生效方式 | 热加载 | - -- wal_throttle_threshold_in_byte - -| 名字 | wal_throttle_threshold_in_byte | -| ------------ | ------------------------------------------------------------ | -| 描述 | 在IoTConsensus中,当WAL文件的大小达到一定阈值时,会开始对写入操作进行节流,以控制写入速度。 | -| 类型 | long | -| 默认值 | 53687091200 | -| 改后生效方式 | 热加载 | - -- iot_consensus_cache_window_time_in_ms - -| 名字 | iot_consensus_cache_window_time_in_ms | -| ------------ | ---------------------------------------- | -| 描述 | 在IoTConsensus中,写缓存的最大等待时间。 | -| 类型 | long | -| 默认值 | -1 | -| 改后生效方式 | 热加载 | - -- enable_wal_compression - -| 名字 | iot_consensus_cache_window_time_in_ms | -| ------------ | ------------------------------------- | -| 描述 | 用于控制是否启用WAL的压缩。 | -| 类型 | boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -### 3.22 IoT 共识协议配置 - -当Region配置了IoTConsensus共识协议之后,下述的配置项才会生效 - -- data_region_iot_max_log_entries_num_per_batch - -| 名字 | data_region_iot_max_log_entries_num_per_batch | -| ------------ | --------------------------------------------- | -| 描述 | IoTConsensus batch 的最大日志条数 | -| 类型 | int32 | -| 默认值 | 1024 | -| 改后生效方式 | 重启服务生效 | - -- data_region_iot_max_size_per_batch - -| 名字 | data_region_iot_max_size_per_batch | -| ------------ | ---------------------------------- | -| 描述 | IoTConsensus batch 的最大大小 | -| 类型 | int32 | -| 默认值 | 16777216 | -| 改后生效方式 | 重启服务生效 | - -- 
data_region_iot_max_pending_batches_num - -| 名字 | data_region_iot_max_pending_batches_num | -| ------------ | --------------------------------------- | -| 描述 | IoTConsensus batch 的流水线并发阈值 | -| 类型 | int32 | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -- data_region_iot_max_memory_ratio_for_queue - -| 名字 | data_region_iot_max_memory_ratio_for_queue | -| ------------ | ------------------------------------------ | -| 描述 | IoTConsensus 队列内存分配比例 | -| 类型 | double | -| 默认值 | 0.6 | -| 改后生效方式 | 重启服务生效 | - -- region_migration_speed_limit_bytes_per_second - -| 名字 | region_migration_speed_limit_bytes_per_second | -| ------------ | --------------------------------------------- | -| 描述 | 定义了在region迁移过程中,数据传输的最大速率 | -| 类型 | long | -| 默认值 | 33554432 | -| 改后生效方式 | 重启服务生效 | - -### 3.23 TsFile配置 - -- group_size_in_byte - -| 名字 | group_size_in_byte | -| ------------ | ---------------------------------------------- | -| 描述 | 每次将内存中的数据写入到磁盘时的最大写入字节数 | -| 类型 | int32 | -| 默认值 | 134217728 | -| 改后生效方式 | 热加载 | - -- page_size_in_byte - -| 名字 | page_size_in_byte | -| ------------ | ---------------------------------------------------- | -| 描述 | 内存中每个列写出时,写成的单页最大的大小,单位为字节 | -| 类型 | int32 | -| 默认值 | 65536 | -| 改后生效方式 | 热加载 | - -- max_number_of_points_in_page - -| 名字 | max_number_of_points_in_page | -| ------------ | ------------------------------------------------- | -| 描述 | 一个页中最多包含的数据点(时间戳-值的二元组)数量 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 热加载 | - -- pattern_matching_threshold - -| 名字 | pattern_matching_threshold | -| ------------ | ------------------------------ | -| 描述 | 正则表达式匹配时最大的匹配次数 | -| 类型 | int32 | -| 默认值 | 1000000 | -| 改后生效方式 | 热加载 | - -- float_precision - -| 名字 | float_precision | -| ------------ | ------------------------------------------------------------ | -| 描述 | 浮点数精度,为小数点后数字的位数 | -| 类型 | int32 | -| 默认值 | 默认为 2 位。注意:32 位浮点数的十进制精度为 7 位,64 位浮点数的十进制精度为 15 位。如果设置超过机器精度将没有实际意义。 | -| 改后生效方式 | 热加载 | - -- value_encoder - -| 名字 | value_encoder | -| ------------ | ------------------------------------- | -| 描述 | value 列编码方式 | -| 类型 | 枚举 String: “TS_2DIFF”,“PLAIN”,“RLE” | -| 默认值 | PLAIN | -| 改后生效方式 | 热加载 | - -- compressor - -| 名字 | compressor | -| ------------ | ------------------------------------------------------------ | -| 描述 | 数据压缩方法; 对齐序列中时间列的压缩方法 | -| 类型 | 枚举 String : "UNCOMPRESSED", "SNAPPY", "LZ4", "ZSTD", "LZMA2" | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -- encrypt_flag - -| 名字 | encrypt_flag | -| ------------ | ---------------------------- | -| 描述 | 用于开启或关闭数据加密功能。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- encrypt_type - -| 名字 | encrypt_type | -| ------------ | ------------------------------------- | -| 描述 | 数据加密的方法。 | -| 类型 | String | -| 默认值 | org.apache.tsfile.encrypt.UNENCRYPTED | -| 改后生效方式 | 重启服务生效 | - -- encrypt_key_path - -| 名字 | encrypt_key_path | -| ------------ | ---------------------------- | -| 描述 | 数据加密使用的密钥来源路径。 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -### 3.24 授权配置 - -- authorizer_provider_class - -| 名字 | authorizer_provider_class | -| ------------ | ------------------------------------------------------------ | -| 描述 | 权限服务的类名 | -| 类型 | String | -| 默认值 | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer | -| 改后生效方式 | 重启服务生效 | -| 其他可选值 | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer | - -- openID_url - -| 名字 | openID_url | -| ------------ | ---------------------------------------------------------- | -| 描述 | openID 服务器地址 (当 OpenIdAuthorizer 被启用时必须设定) | -| 类型 | String(一个 http 地址) | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- 
iotdb_server_encrypt_decrypt_provider - -| 名字 | iotdb_server_encrypt_decrypt_provider | -| ------------ | ------------------------------------------------------------ | -| 描述 | 用于用户密码加密的类 | -| 类型 | String | -| 默认值 | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- iotdb_server_encrypt_decrypt_provider_parameter - -| 名字 | iotdb_server_encrypt_decrypt_provider_parameter | -| ------------ | ----------------------------------------------- | -| 描述 | 用于初始化用户密码加密类的参数 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- author_cache_size - -| 名字 | author_cache_size | -| ------------ | ------------------------ | -| 描述 | 用户缓存与角色缓存的大小 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- author_cache_expire_time - -| 名字 | author_cache_expire_time | -| ------------ | -------------------------------------- | -| 描述 | 用户缓存与角色缓存的有效期,单位为分钟 | -| 类型 | int32 | -| 默认值 | 30 | -| 改后生效方式 | 重启服务生效 | - -### 3.25 UDF配置 - -- udf_initial_byte_array_length_for_memory_control - -| 名字 | udf_initial_byte_array_length_for_memory_control | -| ------------ | ------------------------------------------------------------ | -| 描述 | 用于评估UDF查询中文本字段的内存使用情况。建议将此值设置为略大于所有文本的平均长度记录。 | -| 类型 | int32 | -| 默认值 | 48 | -| 改后生效方式 | 重启服务生效 | - -- udf_memory_budget_in_mb - -| 名字 | udf_memory_budget_in_mb | -| ------------ | ------------------------------------------------------------ | -| 描述 | 在一个UDF查询中使用多少内存(以 MB 为单位)。上限为已分配内存的 20% 用于读取。 | -| 类型 | Float | -| 默认值 | 30.0 | -| 改后生效方式 | 重启服务生效 | - -- udf_reader_transformer_collector_memory_proportion - -| 名字 | udf_reader_transformer_collector_memory_proportion | -| ------------ | --------------------------------------------------------- | -| 描述 | UDF内存分配比例。参数形式为a : b : c,其中a、b、c为整数。 | -| 类型 | String | -| 默认值 | 1:1:1 | -| 改后生效方式 | 重启服务生效 | - -- udf_lib_dir - -| 名字 | udf_lib_dir | -| ------------ | ---------------------------- | -| 描述 | UDF 日志及jar文件存储路径 | -| 类型 | String | -| 默认值 | ext/udf(Windows:ext\\udf) | -| 改后生效方式 | 重启服务生效 | - -### 3.26 触发器配置 - -- trigger_lib_dir - -| 名字 | trigger_lib_dir | -| ------------ | ----------------------- | -| 描述 | 触发器 JAR 包存放的目录 | -| 类型 | String | -| 默认值 | ext/trigger | -| 改后生效方式 | 重启服务生效 | - -- stateful_trigger_retry_num_when_not_found - -| 名字 | stateful_trigger_retry_num_when_not_found | -| ------------ | ---------------------------------------------- | -| 描述 | 有状态触发器触发无法找到触发器实例时的重试次数 | -| 类型 | Int32 | -| 默认值 | 3 | -| 改后生效方式 | 重启服务生效 | - -### 3.27 SELECT-INTO配置 - -- into_operation_buffer_size_in_byte - -| 名字 | into_operation_buffer_size_in_byte | -| ------------ | ------------------------------------------------------------ | -| 描述 | 执行 select-into 语句时,待写入数据占用的最大内存(单位:Byte) | -| 类型 | long | -| 默认值 | 104857600 | -| 改后生效方式 | 热加载 | - -- select_into_insert_tablet_plan_row_limit - -| 名字 | select_into_insert_tablet_plan_row_limit | -| ------------ | ------------------------------------------------------------ | -| 描述 | 执行 select-into 语句时,一个 insert-tablet-plan 中可以处理的最大行数 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 热加载 | - -- into_operation_execution_thread_count - -| 名字 | into_operation_execution_thread_count | -| ------------ | ------------------------------------------ | -| 描述 | SELECT INTO 中执行写入任务的线程池的线程数 | -| 类型 | int32 | -| 默认值 | 2 | -| 改后生效方式 | 重启服务生效 | - -### 3.28 连续查询配置 -- continuous_query_submit_thread_count - -| 名字 | continuous_query_execution_thread | -| ------------ | --------------------------------- | -| 描述 | 执行连续查询任务的线程池的线程数 | -| 类型 | int32 | -| 默认值 | 2 | -| 改后生效方式 | 重启服务生效 | - -- 
continuous_query_min_every_interval_in_ms - -| 名字 | continuous_query_min_every_interval_in_ms | -| ------------ | ----------------------------------------- | -| 描述 | 连续查询执行时间间隔的最小值 | -| 类型 | long (duration) | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -### 3.29 PIPE配置 - -- pipe_lib_dir - -| 名字 | pipe_lib_dir | -| ------------ | -------------------------- | -| 描述 | 自定义 Pipe 插件的存放目录 | -| 类型 | string | -| 默认值 | ext/pipe | -| 改后生效方式 | 暂不支持修改 | - -- pipe_subtask_executor_max_thread_num - -| 名字 | pipe_subtask_executor_max_thread_num | -| ------------ | ------------------------------------------------------------ | -| 描述 | pipe 子任务 processor、sink 中各自可以使用的最大线程数。实际值将是 min(pipe_subtask_executor_max_thread_num, max(1, CPU核心数 / 2))。 | -| 类型 | int | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -- pipe_sink_timeout_ms - -| 名字 | pipe_sink_timeout_ms | -| ------------ | --------------------------------------------- | -| 描述 | thrift 客户端的连接超时时间(以毫秒为单位)。 | -| 类型 | int | -| 默认值 | 900000 | -| 改后生效方式 | 重启服务生效 | - -- pipe_sink_selector_number - -| 名字 | pipe_sink_selector_number | -| ------------ | ------------------------------------------------------------ | -| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大执行结果处理线程数量。 建议将此值设置为小于或等于 pipe_sink_max_client_number。 | -| 类型 | int | -| 默认值 | 4 | -| 改后生效方式 | 重启服务生效 | - -- pipe_sink_max_client_number - -| 名字 | pipe_sink_max_client_number | -| ------------ | ----------------------------------------------------------- | -| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大客户端数量。 | -| 类型 | int | -| 默认值 | 16 | -| 改后生效方式 | 重启服务生效 | - -- pipe_air_gap_receiver_enabled - -| 名字 | pipe_air_gap_receiver_enabled | -| ------------ | ------------------------------------------------------------ | -| 描述 | 是否启用通过网闸接收 pipe 数据。接收器只能在 tcp 模式下返回 0 或 1,以指示数据是否成功接收。 \| | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- pipe_air_gap_receiver_port - -| 名字 | pipe_air_gap_receiver_port | -| ------------ | ------------------------------------ | -| 描述 | 服务器通过网闸接收 pipe 数据的端口。 | -| 类型 | int | -| 默认值 | 9780 | -| 改后生效方式 | 重启服务生效 | - -- pipe_all_sinks_rate_limit_bytes_per_second - -| 名字 | pipe_all_sinks_rate_limit_bytes_per_second | -| ------------ | ------------------------------------------------------------ | -| 描述 | 所有 pipe sink 每秒可以传输的总字节数。当给定的值小于或等于 0 时,表示没有限制。默认值是 -1,表示没有限制。 | -| 类型 | double | -| 默认值 | -1 | -| 改后生效方式 | 热加载 | - -### 3.30 Ratis共识协议配置 - -当Region配置了RatisConsensus共识协议之后,下述的配置项才会生效 - -- config_node_ratis_log_appender_buffer_size_max - -| 名字 | config_node_ratis_log_appender_buffer_size_max | -| ------------ | ---------------------------------------------- | -| 描述 | confignode 一次同步日志RPC最大的传输字节限制 | -| 类型 | int32 | -| 默认值 | 16777216 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_log_appender_buffer_size_max - -| 名字 | schema_region_ratis_log_appender_buffer_size_max | -| ------------ | ------------------------------------------------ | -| 描述 | schema region 一次同步日志RPC最大的传输字节限制 | -| 类型 | int32 | -| 默认值 | 16777216 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_log_appender_buffer_size_max - -| 名字 | data_region_ratis_log_appender_buffer_size_max | -| ------------ | ---------------------------------------------- | -| 描述 | data region 一次同步日志RPC最大的传输字节限制 | -| 类型 | int32 | -| 默认值 | 16777216 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_snapshot_trigger_threshold - -| 名字 | config_node_ratis_snapshot_trigger_threshold | -| ------------ | -------------------------------------------- | -| 描述 | confignode 触发snapshot需要的日志条数 | -| 类型 | int32 | -| 默认值 | 400,000 | -| 改后生效方式 | 重启服务生效 | - -- 
schema_region_ratis_snapshot_trigger_threshold - -| 名字 | schema_region_ratis_snapshot_trigger_threshold | -| ------------ | ---------------------------------------------- | -| 描述 | schema region 触发snapshot需要的日志条数 | -| 类型 | int32 | -| 默认值 | 400,000 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_snapshot_trigger_threshold - -| 名字 | data_region_ratis_snapshot_trigger_threshold | -| ------------ | -------------------------------------------- | -| 描述 | data region 触发snapshot需要的日志条数 | -| 类型 | int32 | -| 默认值 | 400,000 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_log_unsafe_flush_enable - -| 名字 | config_node_ratis_log_unsafe_flush_enable | -| ------------ | ----------------------------------------- | -| 描述 | confignode 是否允许Raft日志异步刷盘 | -| 类型 | boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_log_unsafe_flush_enable - -| 名字 | schema_region_ratis_log_unsafe_flush_enable | -| ------------ | ------------------------------------------- | -| 描述 | schema region 是否允许Raft日志异步刷盘 | -| 类型 | boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_log_unsafe_flush_enable - -| 名字 | data_region_ratis_log_unsafe_flush_enable | -| ------------ | ----------------------------------------- | -| 描述 | data region 是否允许Raft日志异步刷盘 | -| 类型 | boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_log_segment_size_max_in_byte - -| 名字 | config_node_ratis_log_segment_size_max_in_byte | -| ------------ | ---------------------------------------------- | -| 描述 | confignode 一个RaftLog日志段文件的大小 | -| 类型 | int32 | -| 默认值 | 25165824 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_log_segment_size_max_in_byte - -| 名字 | schema_region_ratis_log_segment_size_max_in_byte | -| ------------ | ------------------------------------------------ | -| 描述 | schema region 一个RaftLog日志段文件的大小 | -| 类型 | int32 | -| 默认值 | 25165824 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_log_segment_size_max_in_byte - -| 名字 | data_region_ratis_log_segment_size_max_in_byte | -| ------------ | ---------------------------------------------- | -| 描述 | data region 一个RaftLog日志段文件的大小 | -| 类型 | int32 | -| 默认值 | 25165824 | -| 改后生效方式 | 重启服务生效 | - -- config_node_simple_consensus_log_segment_size_max_in_byte - -| 名字 | data_region_ratis_log_segment_size_max_in_byte | -| ------------ | ---------------------------------------------- | -| 描述 | Confignode 简单共识协议一个Log日志段文件的大小 | -| 类型 | int32 | -| 默认值 | 25165824 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_grpc_flow_control_window - -| 名字 | config_node_ratis_grpc_flow_control_window | -| ------------ | ------------------------------------------ | -| 描述 | confignode grpc 流式拥塞窗口大小 | -| 类型 | int32 | -| 默认值 | 4194304 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_grpc_flow_control_window - -| 名字 | schema_region_ratis_grpc_flow_control_window | -| ------------ | -------------------------------------------- | -| 描述 | schema region grpc 流式拥塞窗口大小 | -| 类型 | int32 | -| 默认值 | 4194304 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_grpc_flow_control_window - -| 名字 | data_region_ratis_grpc_flow_control_window | -| ------------ | ------------------------------------------ | -| 描述 | data region grpc 流式拥塞窗口大小 | -| 类型 | int32 | -| 默认值 | 4194304 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_grpc_leader_outstanding_appends_max - -| 名字 | config_node_ratis_grpc_leader_outstanding_appends_max | -| ------------ | ----------------------------------------------------- | -| 描述 | config node grpc 流水线并发阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- 
schema_region_ratis_grpc_leader_outstanding_appends_max - -| 名字 | schema_region_ratis_grpc_leader_outstanding_appends_max | -| ------------ | ------------------------------------------------------- | -| 描述 | schema region grpc 流水线并发阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_grpc_leader_outstanding_appends_max - -| 名字 | data_region_ratis_grpc_leader_outstanding_appends_max | -| ------------ | ----------------------------------------------------- | -| 描述 | data region grpc 流水线并发阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_log_force_sync_num - -| 名字 | config_node_ratis_log_force_sync_num | -| ------------ | ------------------------------------ | -| 描述 | config node fsync 阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_log_force_sync_num - -| 名字 | schema_region_ratis_log_force_sync_num | -| ------------ | -------------------------------------- | -| 描述 | schema region fsync 阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_log_force_sync_num - -| 名字 | data_region_ratis_log_force_sync_num | -| ------------ | ------------------------------------ | -| 描述 | data region fsync 阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_rpc_leader_election_timeout_min_ms - -| 名字 | config_node_ratis_rpc_leader_election_timeout_min_ms | -| ------------ | ---------------------------------------------------- | -| 描述 | confignode leader 选举超时最小值 | -| 类型 | int32 | -| 默认值 | 2000ms | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_rpc_leader_election_timeout_min_ms - -| 名字 | schema_region_ratis_rpc_leader_election_timeout_min_ms | -| ------------ | ------------------------------------------------------ | -| 描述 | schema region leader 选举超时最小值 | -| 类型 | int32 | -| 默认值 | 2000ms | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_rpc_leader_election_timeout_min_ms - -| 名字 | data_region_ratis_rpc_leader_election_timeout_min_ms | -| ------------ | ---------------------------------------------------- | -| 描述 | data region leader 选举超时最小值 | -| 类型 | int32 | -| 默认值 | 2000ms | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_rpc_leader_election_timeout_max_ms - -| 名字 | config_node_ratis_rpc_leader_election_timeout_max_ms | -| ------------ | ---------------------------------------------------- | -| 描述 | confignode leader 选举超时最大值 | -| 类型 | int32 | -| 默认值 | 4000ms | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_rpc_leader_election_timeout_max_ms - -| 名字 | schema_region_ratis_rpc_leader_election_timeout_max_ms | -| ------------ | ------------------------------------------------------ | -| 描述 | schema region leader 选举超时最大值 | -| 类型 | int32 | -| 默认值 | 4000ms | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_rpc_leader_election_timeout_max_ms - -| 名字 | data_region_ratis_rpc_leader_election_timeout_max_ms | -| ------------ | ---------------------------------------------------- | -| 描述 | data region leader 选举超时最大值 | -| 类型 | int32 | -| 默认值 | 4000ms | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_request_timeout_ms - -| 名字 | config_node_ratis_request_timeout_ms | -| ------------ | ------------------------------------ | -| 描述 | confignode Raft 客户端重试超时 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_request_timeout_ms - -| 名字 | schema_region_ratis_request_timeout_ms | -| ------------ | -------------------------------------- | -| 描述 | schema region Raft 客户端重试超时 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_request_timeout_ms - -| 名字 | 
data_region_ratis_request_timeout_ms | -| ------------ | ------------------------------------ | -| 描述 | data region Raft 客户端重试超时 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_max_retry_attempts - -| 名字 | config_node_ratis_max_retry_attempts | -| ------------ | ------------------------------------ | -| 描述 | confignode Raft客户端最大重试次数 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_initial_sleep_time_ms - -| 名字 | config_node_ratis_initial_sleep_time_ms | -| ------------ | --------------------------------------- | -| 描述 | confignode Raft客户端初始重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 100ms | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_max_sleep_time_ms - -| 名字 | config_node_ratis_max_sleep_time_ms | -| ------------ | ------------------------------------- | -| 描述 | confignode Raft客户端最大重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_max_retry_attempts - -| 名字 | schema_region_ratis_max_retry_attempts | -| ------------ | -------------------------------------- | -| 描述 | schema region Raft客户端最大重试次数 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_initial_sleep_time_ms - -| 名字 | schema_region_ratis_initial_sleep_time_ms | -| ------------ | ----------------------------------------- | -| 描述 | schema region Raft客户端初始重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 100ms | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_max_sleep_time_ms - -| 名字 | schema_region_ratis_max_sleep_time_ms | -| ------------ | ---------------------------------------- | -| 描述 | schema region Raft客户端最大重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_max_retry_attempts - -| 名字 | data_region_ratis_max_retry_attempts | -| ------------ | ------------------------------------ | -| 描述 | data region Raft客户端最大重试次数 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_initial_sleep_time_ms - -| 名字 | data_region_ratis_initial_sleep_time_ms | -| ------------ | --------------------------------------- | -| 描述 | data region Raft客户端初始重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 100ms | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_max_sleep_time_ms - -| 名字 | data_region_ratis_max_sleep_time_ms | -| ------------ | -------------------------------------- | -| 描述 | data region Raft客户端最大重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- ratis_first_election_timeout_min_ms - -| 名字 | ratis_first_election_timeout_min_ms | -| ------------ | ----------------------------------- | -| 描述 | Ratis协议首次选举最小超时时间 | -| 类型 | int64 | -| 默认值 | 50 (ms) | -| 改后生效方式 | 重启服务生效 | - -- ratis_first_election_timeout_max_ms - -| 名字 | ratis_first_election_timeout_max_ms | -| ------------ | ----------------------------------- | -| 描述 | Ratis协议首次选举最大超时时间 | -| 类型 | int64 | -| 默认值 | 150 (ms) | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_preserve_logs_num_when_purge - -| 名字 | config_node_ratis_preserve_logs_num_when_purge | -| ------------ | ---------------------------------------------- | -| 描述 | confignode snapshot后保持一定数量日志不删除 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_preserve_logs_num_when_purge - -| 名字 | schema_region_ratis_preserve_logs_num_when_purge | -| ------------ | ------------------------------------------------ | -| 描述 | schema region snapshot后保持一定数量日志不删除 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_preserve_logs_num_when_purge - -| 名字 | data_region_ratis_preserve_logs_num_when_purge | -| ------------ | 
---------------------------------------------- | -| 描述 | data region snapshot后保持一定数量日志不删除 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_log_max_size - -| 名字 | config_node_ratis_log_max_size | -| ------------ | ----------------------------------- | -| 描述 | config node磁盘Raft Log最大占用空间 | -| 类型 | int64 | -| 默认值 | 2147483648 (2GB) | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_log_max_size - -| 名字 | schema_region_ratis_log_max_size | -| ------------ | -------------------------------------- | -| 描述 | schema region 磁盘Raft Log最大占用空间 | -| 类型 | int64 | -| 默认值 | 2147483648 (2GB) | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_log_max_size - -| 名字 | data_region_ratis_log_max_size | -| ------------ | ------------------------------------ | -| 描述 | data region 磁盘Raft Log最大占用空间 | -| 类型 | int64 | -| 默认值 | 21474836480 (20GB) | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_periodic_snapshot_interval - -| 名字 | config_node_ratis_periodic_snapshot_interval | -| ------------ | -------------------------------------------- | -| 描述 | config node定期snapshot的间隔时间 | -| 类型 | int64 | -| 默认值 | 86400 (秒) | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_periodic_snapshot_interval - -| 名字 | schema_region_ratis_preserve_logs_num_when_purge | -| ------------ | ------------------------------------------------ | -| 描述 | schema region定期snapshot的间隔时间 | -| 类型 | int64 | -| 默认值 | 86400 (秒) | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_periodic_snapshot_interval - -| 名字 | data_region_ratis_preserve_logs_num_when_purge | -| ------------ | ---------------------------------------------- | -| 描述 | data region定期snapshot的间隔时间 | -| 类型 | int64 | -| 默认值 | 86400 (秒) | -| 改后生效方式 | 重启服务生效 | - -### 3.31 IoTConsensusV2配置 - -- iot_consensus_v2_pipeline_size - -| 名字 | iot_consensus_v2_pipeline_size | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTConsensus V2中连接器(connector)和接收器(receiver)的默认事件缓冲区大小。 | -| 类型 | int | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -- iot_consensus_v2_mode - -| 名字 | iot_consensus_v2_pipeline_size | -| ------------ | ----------------------------------- | -| 描述 | IoTConsensus V2使用的共识协议模式。 | -| 类型 | String | -| 默认值 | batch | -| 改后生效方式 | 重启服务生效 | - -### 3.32 Procedure 配置 - -- procedure_core_worker_thread_count - -| 名字 | procedure_core_worker_thread_count | -| ------------ | ---------------------------------- | -| 描述 | 工作线程数量 | -| 类型 | int32 | -| 默认值 | 4 | -| 改后生效方式 | 重启服务生效 | - -- procedure_completed_clean_interval - -| 名字 | procedure_completed_clean_interval | -| ------------ | ---------------------------------- | -| 描述 | 清理已完成的 procedure 时间间隔 | -| 类型 | int32 | -| 默认值 | 30(s) | -| 改后生效方式 | 重启服务生效 | - -- procedure_completed_evict_ttl - -| 名字 | procedure_completed_evict_ttl | -| ------------ | --------------------------------- | -| 描述 | 已完成的 procedure 的数据保留时间 | -| 类型 | int32 | -| 默认值 | 60(s) | -| 改后生效方式 | 重启服务生效 | - -### 3.33 MQTT代理配置 - -- enable_mqtt_service - -| 名字 | enable_mqtt_service。 | -| ------------ | --------------------- | -| 描述 | 是否开启MQTT服务 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 热加载 | - -- mqtt_host - -| 名字 | mqtt_host | -| ------------ | -------------------- | -| 描述 | MQTT服务绑定的host。 | -| 类型 | String | -| 默认值 | 127.0.0.1 | -| 改后生效方式 | 热加载 | - -- mqtt_port - -| 名字 | mqtt_port | -| ------------ | -------------------- | -| 描述 | MQTT服务绑定的port。 | -| 类型 | int32 | -| 默认值 | 1883 | -| 改后生效方式 | 热加载 | - -- mqtt_handler_pool_size - -| 名字 | mqtt_handler_pool_size | -| ------------ | ---------------------------------- | -| 描述 | 用于处理MQTT消息的处理程序池大小。 | -| 类型 | 
int32 | -| 默认值 | 1 | -| 改后生效方式 | 热加载 | - -- mqtt_payload_formatter - -| 名字 | mqtt_payload_formatter | -| ------------ | ---------------------------- | -| 描述 | MQTT消息有效负载格式化程序。 | -| 类型 | String | -| 默认值 | json | -| 改后生效方式 | 热加载 | - -- mqtt_max_message_size - -| 名字 | mqtt_max_message_size | -| ------------ | ------------------------------------ | -| 描述 | MQTT消息的最大长度(以字节为单位)。 | -| 类型 | int32 | -| 默认值 | 1048576 | -| 改后生效方式 | 热加载 | - -### 3.34 审计日志配置 - -- enable_audit_log - -| 名字 | enable_audit_log | -| ------------ | ------------------------------ | -| 描述 | 用于控制是否启用审计日志功能。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- audit_log_storage - -| 名字 | audit_log_storage | -| ------------ | -------------------------- | -| 描述 | 定义了审计日志的输出位置。 | -| 类型 | String | -| 默认值 | IOTDB,LOGGER | -| 改后生效方式 | 重启服务生效 | - -- audit_log_operation - -| 名字 | audit_log_operation | -| ------------ | -------------------------------------- | -| 描述 | 定义了哪些类型的操作需要记录审计日志。 | -| 类型 | String | -| 默认值 | DML,DDL,QUERY | -| 改后生效方式 | 重启服务生效 | - -- enable_audit_log_for_native_insert_api - -| 名字 | enable_audit_log_for_native_insert_api | -| ------------ | -------------------------------------- | -| 描述 | 用于控制本地写入API是否记录审计日志。 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -### 3.35 白名单配置 -- enable_white_list - -| 名字 | enable_white_list | -| ------------ | ----------------- | -| 描述 | 是否启用白名单。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 热加载 | - -### 3.36 IoTDB-AI 配置 - -- model_inference_execution_thread_count - -| 名字 | model_inference_execution_thread_count | -| ------------ | -------------------------------------- | -| 描述 | 用于模型推理操作的线程数。 | -| 类型 | int | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -### 3.37 TsFile 主动监听&加载功能配置 - -- load_clean_up_task_execution_delay_time_seconds - -| 名字 | load_clean_up_task_execution_delay_time_seconds | -| ------------ | ------------------------------------------------------------ | -| 描述 | 在加载TsFile失败后,系统将等待多长时间才会执行清理任务来清除这些未成功加载的TsFile。 | -| 类型 | int | -| 默认值 | 1800 | -| 改后生效方式 | 热加载 | - -- load_write_throughput_bytes_per_second - -| 名字 | load_write_throughput_bytes_per_second | -| ------------ | -------------------------------------- | -| 描述 | 加载TsFile时磁盘写入的最大字节数每秒。 | -| 类型 | int | -| 默认值 | -1 | -| 改后生效方式 | 热加载 | - -- load_active_listening_enable - -| 名字 | load_active_listening_enable | -| ------------ | ------------------------------------------------------------ | -| 描述 | 是否开启 DataNode 主动监听并且加载 tsfile 的功能(默认开启)。 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- load_active_listening_dirs - -| 名字 | load_active_listening_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | 需要监听的目录(自动包括目录中的子目录),如有多个使用 “,“ 隔开默认的目录为 ext/load/pending(支持热装载)。 | -| 类型 | String | -| 默认值 | ext/load/pending | -| 改后生效方式 | 热加载 | - -- load_active_listening_fail_dir - -| 名字 | load_active_listening_fail_dir | -| ------------ | ---------------------------------------------------------- | -| 描述 | 执行加载 tsfile 文件失败后将文件转存的目录,只能配置一个。 | -| 类型 | String | -| 默认值 | ext/load/failed | -| 改后生效方式 | 热加载 | - -- load_active_listening_max_thread_num - -| 名字 | load_active_listening_max_thread_num | -| ------------ | ------------------------------------------------------------ | -| 描述 | 同时执行加载 tsfile 任务的最大线程数,参数被注释掉时的默值为 max(1, CPU 核心数 / 2),当用户设置的值不在这个区间[1, CPU核心数 /2]内时,会设置为默认值 (1, CPU 核心数 / 2)。 | -| 类型 | Long | -| 默认值 | 0 | -| 改后生效方式 | 重启服务生效 | - -- load_active_listening_check_interval_seconds - -| 名字 | load_active_listening_check_interval_seconds | -| ------------ | 
------------------------------------------------------------ | -| 描述 | 主动监听轮询间隔,单位秒。主动监听 tsfile 的功能是通过轮询检查文件夹实现的。该配置指定了两次检查 load_active_listening_dirs 的时间间隔,每次检查完成 load_active_listening_check_interval_seconds 秒后,会执行下一次检查。当用户设置的轮询间隔小于 1 时,会被设置为默认值 5 秒。 | -| 类型 | Long | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - - -* last_cache_operation_on_load - -|名字| last_cache_operation_on_load | -|:---:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -|描述| 当成功加载一个 TsFile 时,对 LastCache 执行的操作。`UPDATE`:使用 TsFile 中的数据更新 LastCache;`UPDATE_NO_BLOB`:与 UPDATE 类似,但会使 blob 序列的 LastCache 失效;`CLEAN_DEVICE`:使 TsFile 中包含的设备的 LastCache 失效;`CLEAN_ALL`:清空整个 LastCache。 | -|类型| String | -|默认值| UPDATE_NO_BLOB | -|改后生效方式| 重启后生效 | - -* cache_last_values_for_load - -|名字| cache_last_values_for_load | -|:---:|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -|描述| 在加载 TsFile 之前是否缓存最新值(last values)。仅在 `last_cache_operation_on_load=UPDATE_NO_BLOB` 或 `last_cache_operation_on_load=UPDATE` 时生效。当设置为 true 时,即使 `last_cache_operation_on_load=UPDATE`,也会忽略 blob 序列。启用此选项会在加载 TsFile 期间增加内存占用。 | -|类型| Boolean | -|默认值| true | -|改后生效方式| 重启后生效 | - -* cache_last_values_memory_budget_in_byte - -|名字| cache_last_values_memory_budget_in_byte | -|:---:|:----------------------------------------------------------------------------------------------------| -|描述| 当 `cache_last_values_for_load=true` 时,用于缓存最新值的最大内存大小(以字节为单位)。如果超过该值,缓存的值将被丢弃,并以流式方式直接从 TsFile 中读取最新值。 | -|类型| int32 | -|默认值| 4194304 | -|改后生效方式| 重启后生效 | - - -### 3.38 分发重试配置 - -- enable_retry_for_unknown_error - -| 名字 | enable_retry_for_unknown_error | -| ------------ | ------------------------------------------------------------ | -| 描述 | 在遇到未知错误时,写请求远程分发的最大重试时间,单位是毫秒。 | -| 类型 | Long | -| 默认值 | 60000 | -| 改后生效方式 | 热加载 | - -- enable_retry_for_unknown_error - -| 名字 | enable_retry_for_unknown_error | -| ------------ | -------------------------------- | -| 描述 | 用于控制是否对未知错误进行重试。 | -| 类型 | boolean | -| 默认值 | false | -| 改后生效方式 | 热加载 | \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual_apache.md b/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual_apache.md new file mode 100644 index 000000000..672829976 --- /dev/null +++ b/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual_apache.md @@ -0,0 +1,3364 @@ + + +# 配置参数 + +IoTDB 配置文件位于 IoTDB 安装目录:`conf`文件夹下。 + +- `confignode-env.sh/bat`:环境配置项的配置文件,可以配置 ConfigNode 的内存大小。 +- `datanode-env.sh/bat`:环境配置项的配置文件,可以配置 DataNode 的内存大小。 +- `iotdb-system.properties`:IoTDB 的配置文件。 +- `iotdb-system.properties.template`:IoTDB 的配置文件模版。 + +## 1. 修改配置: + +在 `iotdb-system.properties` 文件中已存在的参数可以直接进行修改。对于那些在 `iotdb-system.properties` 中未列出的参数,可以从 `iotdb-system.properties.template` 配置文件模板中找到相应的参数,然后将其复制到 `iotdb-system.properties` 文件中进行修改。 + +### 1.1 改后生效方式 + +不同的配置参数有不同的生效方式,分为以下三种: + +- 仅允许在第一次启动服务前修改: 在第一次启动 ConfigNode/DataNode 后即禁止修改,修改会导致 ConfigNode/DataNode 无法启动。 +- 重启服务生效: ConfigNode/DataNode 启动后仍可修改,但需要重启 ConfigNode/DataNode 后才生效。 +- 热加载: 可在 ConfigNode/DataNode 运行时修改,修改后通过 Session 或 Cli 发送 `load configuration` 或 `set configuration key1 = 'value1'` 命令(SQL)至 IoTDB 使配置生效。 + +## 2. 
环境配置项 + +### 2.1 confignode-env.sh/bat + +环境配置项主要用于对 ConfigNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。ConfigNode 启动时,此部分配置会被传给 JVM,详细配置项说明如下: + +- MEMORY_SIZE + +| 名字 | MEMORY_SIZE | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 启动时分配的内存大小 | +| 类型 | String | +| 默认值 | 取决于操作系统和机器配置。默认为机器内存的十分之三,最多会被设置为 16G。 | +| 改后生效方式 | 重启服务生效 | + +- ON_HEAP_MEMORY + +| 名字 | ON_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置。 | +| 改后生效方式 | 重启服务生效 | + +- OFF_HEAP_MEMORY + +| 名字 | OFF_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置。 | +| 改后生效方式 | 重启服务生效 | + +### 2.2 datanode-env.sh/bat + +环境配置项主要用于对 DataNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。DataNode/Standalone 启动时,此部分配置会被传给 JVM,详细配置项说明如下: + +- MEMORY_SIZE + +| 名字 | MEMORY_SIZE | +| ------------ | ---------------------------------------------------- | +| 描述 | IoTDB DataNode 启动时分配的内存大小 | +| 类型 | String | +| 默认值 | 取决于操作系统和机器配置。默认为机器内存的二分之一。 | +| 改后生效方式 | 重启服务生效 | + +- ON_HEAP_MEMORY + +| 名字 | ON_HEAP_MEMORY | +| ------------ | ---------------------------------------------------------- | +| 描述 | IoTDB DataNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置。 | +| 改后生效方式 | 重启服务生效 | + +- OFF_HEAP_MEMORY + +| 名字 | OFF_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB DataNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置 | +| 改后生效方式 | 重启服务生效 | + + +## 3. 
系统配置项(iotdb-system.properties.template) + +### 3.1 集群管理 + +- cluster_name + +| 名字 | cluster_name | +| -------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 集群名称 | +| 类型 | String | +| 默认值 | default_cluster | +| 修改方式 | CLI 中执行语句 `set configuration cluster_name = 'xxx'` (xxx为希望修改成的集群名称) | +| 注意 | 此修改通过网络分发至每个节点。在网络波动或者有节点宕机的情况下,不保证能够在全部节点修改成功。未修改成功的节点重启时无法加入集群,此时需要手动修改该节点的配置文件中的cluster_name项,再重启。正常情况下,不建议通过手动修改配置文件的方式修改集群名称,不建议通过`load configuration`的方式热加载。 | + +### 3.2 SeedConfigNode 配置 + +- cn_seed_config_node + +| 名字 | cn_seed_config_node | +| ------------ | ------------------------------------------------------------ | +| 描述 | 目标 ConfigNode 地址,ConfigNode 通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 cn_target_config_node_list | +| 类型 | String | +| 默认值 | 127.0.0.1:10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_seed_config_node + +| 名字 | dn_seed_config_node | +| ------------ | ------------------------------------------------------------ | +| 描述 | ConfigNode 地址,DataNode 启动时通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 dn_target_config_node_list | +| 类型 | String | +| 默认值 | 127.0.0.1:10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +### 3.3 Node RPC 配置 + +- cn_internal_address + +| 名字 | cn_internal_address | +| ------------ | ---------------------------- | +| 描述 | ConfigNode 集群内部地址 | +| 类型 | String | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- cn_internal_port + +| 名字 | cn_internal_port | +| ------------ | ---------------------------- | +| 描述 | ConfigNode 集群服务监听端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- cn_consensus_port + +| 名字 | cn_consensus_port | +| ------------ | ----------------------------- | +| 描述 | ConfigNode 的共识协议通信端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 10720 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_rpc_address + +| 名字 | dn_rpc_address | +| ------------ |----------------| +| 描述 | 客户端 RPC 服务监听地址 | +| 类型 | String | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_port + +| 名字 | dn_rpc_port | +| ------------ | ----------------------- | +| 描述 | Client RPC 服务监听端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 6667 | +| 改后生效方式 | 重启服务生效 | + +- dn_internal_address + +| 名字 | dn_internal_address | +| ------------ | ---------------------------- | +| 描述 | DataNode 内网通信地址 | +| 类型 | string | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_internal_port + +| 名字 | dn_internal_port | +| ------------ | ---------------------------- | +| 描述 | DataNode 内网通信端口 | +| 类型 | int | +| 默认值 | 10730 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_mpp_data_exchange_port + +| 名字 | dn_mpp_data_exchange_port | +| ------------ | ---------------------------- | +| 描述 | MPP 数据交换端口 | +| 类型 | int | +| 默认值 | 10740 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_schema_region_consensus_port + +| 名字 | dn_schema_region_consensus_port | +| ------------ | ------------------------------------- | +| 描述 | DataNode 元数据副本的共识协议通信端口 | +| 类型 | int | +| 默认值 | 10750 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_data_region_consensus_port + +| 名字 | dn_data_region_consensus_port | +| ------------ | ----------------------------------- | +| 描述 | DataNode 数据副本的共识协议通信端口 | +| 类型 | int | +| 默认值 | 10760 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_join_cluster_retry_interval_ms + +| 名字 | dn_join_cluster_retry_interval_ms | +| ------------ | --------------------------------- | +| 描述 | DataNode 再次重试加入集群等待时间 | +| 类型 | long | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +### 3.4 副本配置 + +- 
config_node_consensus_protocol_class + +| 名字 | config_node_consensus_protocol_class | +| ------------ | ------------------------------------------------ | +| 描述 | ConfigNode 副本的共识协议,仅支持 RatisConsensus | +| 类型 | String | +| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- schema_replication_factor + +| 名字 | schema_replication_factor | +| ------------ | ---------------------------------- | +| 描述 | Database 的默认元数据副本数 | +| 类型 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 重启服务后对**新的 Database** 生效 | + +- schema_region_consensus_protocol_class + +| 名字 | schema_region_consensus_protocol_class | +| ------------ | ----------------------------------------------------- | +| 描述 | 元数据副本的共识协议,多副本时只能使用 RatisConsensus | +| 类型 | String | +| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- data_replication_factor + +| 名字 | data_replication_factor | +| ------------ | ---------------------------------- | +| 描述 | Database 的默认数据副本数 | +| 类型 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 重启服务后对**新的 Database** 生效 | + +- data_region_consensus_protocol_class + +| 名字 | data_region_consensus_protocol_class | +| ------------ | ------------------------------------------------------------ | +| 描述 | 数据副本的共识协议,多副本时可以使用 IoTConsensus 或 RatisConsensus | +| 类型 | String | +| 默认值 | org.apache.iotdb.consensus.iot.IoTConsensus | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +### 3.5 目录配置 + +- cn_system_dir + +| 名字 | cn_system_dir | +| ------------ | ----------------------------------------------------------- | +| 描述 | ConfigNode 系统数据存储路径 | +| 类型 | String | +| 默认值 | data/confignode/system(Windows:data\\configndoe\\system) | +| 改后生效方式 | 重启服务生效 | + +- cn_consensus_dir + +| 名字 | cn_consensus_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | ConfigNode 共识协议数据存储路径 | +| 类型 | String | +| 默认值 | data/confignode/consensus(Windows:data\\configndoe\\consensus) | +| 改后生效方式 | 重启服务生效 | + +- cn_pipe_receiver_file_dir + +| 名字 | cn_pipe_receiver_file_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | ConfigNode中pipe接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- dn_system_dir + +| 名字 | dn_system_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 元数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/system(Windows:data\\datanode\\system) | +| 改后生效方式 | 重启服务生效 | + +- dn_data_dirs + +| 名字 | dn_data_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/data(Windows:data\\datanode\\data) | +| 改后生效方式 | 重启服务生效 | + +- dn_multi_dir_strategy + +| 名字 | dn_multi_dir_strategy | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 在 data_dirs 中为 TsFile 选择目录时采用的策略。可使用简单类名或类名全称。系统提供以下三种策略:
1. SequenceStrategy:IoTDB 按顺序选择目录,依次遍历 data_dirs 中的所有目录,并不断轮循;
2. MaxDiskUsableSpaceFirstStrategy:IoTDB 优先选择 data_dirs 中对应磁盘空余空间最大的目录;
您可以通过以下方法完成用户自定义策略:
1. 继承 org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy 类并实现自身的 Strategy 方法;
2. 将实现的类的完整类名(包名加类名,UserDefineStrategyPackage)填写到该配置项;
3. 将该类 jar 包添加到工程中。 | +| 类型 | String | +| 默认值 | SequenceStrategy | +| 改后生效方式 | 热加载 | + +- dn_consensus_dir + +| 名字 | dn_consensus_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 共识层日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/consensus(Windows:data\\datanode\\consensus) | +| 改后生效方式 | 重启服务生效 | + +- dn_wal_dirs + +| 名字 | dn_wal_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 写前日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/wal(Windows:data\\datanode\\wal) | +| 改后生效方式 | 重启服务生效 | + +- dn_tracing_dir + +| 名字 | dn_tracing_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 追踪根目录路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | datanode/tracing(Windows:datanode\\tracing) | +| 改后生效方式 | 重启服务生效 | + +- dn_sync_dir + +| 名字 | dn_sync_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB sync 存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/sync(Windows:data\\datanode\\sync) | +| 改后生效方式 | 重启服务生效 | + +- sort_tmp_dir + +| 名字 | sort_tmp_dir | +| ------------ | ------------------------------------------------- | +| 描述 | 用于配置排序操作的临时目录。 | +| 类型 | String | +| 默认值 | data/datanode/tmp(Windows:data\\datanode\\tmp) | +| 改后生效方式 | 重启服务生效 | + +- dn_pipe_receiver_file_dirs + +| 名字 | dn_pipe_receiver_file_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | DataNode中pipe接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- iot_consensus_v2_receiver_file_dirs + +| 名字 | iot_consensus_v2_receiver_file_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTConsensus V2中接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- iot_consensus_v2_deletion_file_dir + +| 名字 | iot_consensus_v2_deletion_file_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTConsensus V2中删除操作用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | +| 改后生效方式 | 重启服务生效 | + +### 3.6 监控配置 + +- cn_metric_reporter_list + +| 名字 | cn_metric_reporter_list | +| ------------ | -------------------------------------------------- | +| 描述 | confignode中用于配置监控模块的数据需要报告的系统。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_level + +| 名字 | cn_metric_level | +| ------------ | ------------------------------------------ | +| 描述 | confignode中控制监控模块收集数据的详细程度 | +| 类型 | String | +| 默认值 | IMPORTANT | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_async_collect_period + +| 名字 | cn_metric_async_collect_period | +| ------------ | -------------------------------------------------- | +| 描述 | confignode中某些监控数据异步收集的周期,单位是秒。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_prometheus_reporter_port + +| 名字 | cn_metric_prometheus_reporter_port | +| ------------ | ------------------------------------------------------ | +| 描述 | confignode中Prometheus报告者用于监控数据报告的端口号。 | +| 类型 | int | +| 默认值 | 9091 | +| 改后生效方式 | 重启服务生效 | + +- 
dn_metric_reporter_list + +| 名字 | dn_metric_reporter_list | +| ------------ | ------------------------------------------------ | +| 描述 | DataNode中用于配置监控模块的数据需要报告的系统。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_level + +| 名字 | dn_metric_level | +| ------------ | ---------------------------------------- | +| 描述 | DataNode中控制监控模块收集数据的详细程度 | +| 类型 | String | +| 默认值 | IMPORTANT | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_async_collect_period + +| 名字 | dn_metric_async_collect_period | +| ------------ | ------------------------------------------------ | +| 描述 | DataNode中某些监控数据异步收集的周期,单位是秒。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_prometheus_reporter_port + +| 名字 | dn_metric_prometheus_reporter_port | +| ------------ | ---------------------------------------------------- | +| 描述 | DataNode中Prometheus报告者用于监控数据报告的端口号。 | +| 类型 | int | +| 默认值 | 9092 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_internal_reporter_type + +| 名字 | dn_metric_internal_reporter_type | +| ------------ | ------------------------------------------------------------ | +| 描述 | DataNode中监控模块内部报告者的种类,用于内部监控和检查数据是否已经成功写入和刷新。 | +| 类型 | String | +| 默认值 | IOTDB | +| 改后生效方式 | 重启服务生效 | + +### 3.7 SSL 配置 + +- enable_thrift_ssl + +| 名字 | enable_thrift_ssl | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当enable_thrift_ssl配置为true时,将通过dn_rpc_port使用 SSL 加密进行通信 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- enable_https + +| 名字 | enable_https | +| ------------ | ------------------------------ | +| 描述 | REST Service 是否开启 SSL 配置 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- key_store_path + +| 名字 | key_store_path | +| ------------ | -------------- | +| 描述 | ssl证书路径 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- key_store_pwd + +| 名字 | key_store_pwd | +| ------------ | ------------- | +| 描述 | ssl证书密码 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +### 3.8 连接配置 + +- cn_rpc_thrift_compression_enable + +| 名字 | cn_rpc_thrift_compression_enable | +| ------------ | -------------------------------- | +| 描述 | 是否启用 thrift 的压缩机制。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- cn_rpc_max_concurrent_client_num + +| 名字 | cn_rpc_max_concurrent_client_num | +| ------------ |---------------------------------| +| 描述 | 最大连接数。 | +| 类型 | int | +| 默认值 | 3000 | +| 改后生效方式 | 重启服务生效 | + +- cn_connection_timeout_ms + +| 名字 | cn_connection_timeout_ms | +| ------------ | ------------------------ | +| 描述 | 节点连接超时时间 | +| 类型 | int | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- cn_selector_thread_nums_of_client_manager + +| 名字 | cn_selector_thread_nums_of_client_manager | +| ------------ | ----------------------------------------- | +| 描述 | 客户端异步线程管理的选择器线程数量 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- cn_max_client_count_for_each_node_in_client_manager + +| 名字 | cn_max_client_count_for_each_node_in_client_manager | +| ------------ | --------------------------------------------------- | +| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | +| 类型 | int | +| 默认值 | 300 | +| 改后生效方式 | 重启服务生效 | + +- dn_session_timeout_threshold + +| 名字 | dn_session_timeout_threshold | +| ------------ | ---------------------------- | +| 描述 | 最大的会话空闲时间 | +| 类型 | int | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_thrift_compression_enable + +| 名字 | dn_rpc_thrift_compression_enable | +| ------------ | -------------------------------- | +| 描述 | 是否启用 thrift 的压缩机制 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_advanced_compression_enable + 
+| 名字 | dn_rpc_advanced_compression_enable | +| ------------ | ---------------------------------- | +| 描述 | 是否启用 thrift 的自定制压缩机制 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_selector_thread_count + +| 名字 | rpc_selector_thread_count | +| ------------ | ------------------------- | +| 描述 | rpc 选择器线程数量 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_min_concurrent_client_num + +| 名字 | rpc_min_concurrent_client_num | +| ------------ | ----------------------------- | +| 描述 | 最小连接数 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_max_concurrent_client_num + +| 名字 | dn_rpc_max_concurrent_client_num | +| ------------ |----------------------------------| +| 描述 | 最大连接数 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- dn_thrift_max_frame_size + +| 名字 | dn_thrift_max_frame_size | +| ------------ | ------------------------------------------------------ | +| 描述 | RPC 请求/响应的最大字节数 | +| 类型 | long | +| 默认值 | 536870912 (默认值512MB) | +| 改后生效方式 | 重启服务生效 | + +- dn_thrift_init_buffer_size + +| 名字 | dn_thrift_init_buffer_size | +| ------------ | -------------------------- | +| 描述 | 字节数 | +| 类型 | long | +| 默认值 | 1024 | +| 改后生效方式 | 重启服务生效 | + +- dn_connection_timeout_ms + +| 名字 | dn_connection_timeout_ms | +| ------------ | ------------------------ | +| 描述 | 节点连接超时时间 | +| 类型 | int | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- dn_selector_thread_count_of_client_manager + +| 名字 | dn_selector_thread_count_of_client_manager | +| ------------ | ------------------------------------------------------------ | +| 描述 | selector thread (TAsyncClientManager) nums for async thread in a clientManagerclientManager中异步线程的选择器线程(TAsyncClientManager)编号 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- dn_max_client_count_for_each_node_in_client_manager + +| 名字 | dn_max_client_count_for_each_node_in_client_manager | +| ------------ | --------------------------------------------------- | +| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | +| 类型 | int | +| 默认值 | 300 | +| 改后生效方式 | 重启服务生效 | + +### 3.9 对象存储管理 + +- remote_tsfile_cache_dirs + +| 名字 | remote_tsfile_cache_dirs | +| ------------ | ------------------------ | +| 描述 | 云端存储在本地的缓存目录 | +| 类型 | String | +| 默认值 | data/datanode/data/cache | +| 改后生效方式 | 重启服务生效 | + +- remote_tsfile_cache_page_size_in_kb + +| 名字 | remote_tsfile_cache_page_size_in_kb | +| ------------ | ----------------------------------- | +| 描述 | 云端存储在本地缓存文件的块大小 | +| 类型 | int | +| 默认值 | 20480 | +| 改后生效方式 | 重启服务生效 | + +- remote_tsfile_cache_max_disk_usage_in_mb + +| 名字 | remote_tsfile_cache_max_disk_usage_in_mb | +| ------------ | ---------------------------------------- | +| 描述 | 云端存储本地缓存的最大磁盘占用大小 | +| 类型 | long | +| 默认值 | 51200 | +| 改后生效方式 | 重启服务生效 | + +- object_storage_type + +| 名字 | object_storage_type | +| ------------ | ------------------- | +| 描述 | 云端存储类型 | +| 类型 | String | +| 默认值 | AWS_S3 | +| 改后生效方式 | 重启服务生效 | + +- object_storage_endpoint + +| 名字 | object_storage_endpoint | +| ------------ | ----------------------- | +| 描述 | 云端存储的 endpoint | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- object_storage_bucket + +| 名字 | object_storage_bucket | +| ------------ | ---------------------- | +| 描述 | 云端存储 bucket 的名称 | +| 类型 | String | +| 默认值 | iotdb_data | +| 改后生效方式 | 重启服务生效 | + +- object_storage_access_key + +| 名字 | object_storage_access_key | +| ------------ | ------------------------- | +| 描述 | 云端存储的验证信息 key | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- object_storage_access_secret + +| 名字 | 
object_storage_access_secret | +| ------------ | ---------------------------- | +| 描述 | 云端存储的验证信息 secret | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +### 3.10 多级管理 + +- dn_default_space_usage_thresholds + +| 名字 | dn_default_space_usage_thresholds | +| ------------ | ------------------------------------------------------------ | +| 描述 | 定义每个层级数据目录的最小剩余空间比例;当剩余空间少于该比例时,数据会被自动迁移至下一个层级;当最后一个层级的剩余存储空间到低于此阈值时,会将系统置为 READ_ONLY | +| 类型 | double | +| 默认值 | 0.85 | +| 改后生效方式 | 热加载 | + +- dn_tier_full_policy + +| 名字 | dn_tier_full_policy | +| ------------ | ------------------------------------------------------------ | +| 描述 | 如何处理最后一层数据,当其已用空间高于其dn_default_space_usage_threshold时。| +| 类型 | String | +| 默认值 | NULL | +| 改后生效方式 | 热加载 | + +- migrate_thread_count + +| 名字 | migrate_thread_count | +| ------------ | ---------------------------------------- | +| 描述 | DataNode数据目录中迁移操作的线程池大小。 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 热加载 | + +- tiered_storage_migrate_speed_limit_bytes_per_sec + +| 名字 | tiered_storage_migrate_speed_limit_bytes_per_sec | +| ------------ | ------------------------------------------------ | +| 描述 | 限制不同存储层级之间的数据迁移速度。 | +| 类型 | int | +| 默认值 | 10485760 | +| 改后生效方式 | 热加载 | + +### 3.11 REST服务配置 + +- enable_rest_service + +| 名字 | enable_rest_service | +| ------------ | ------------------- | +| 描述 | 是否开启Rest服务。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- rest_service_port + +| 名字 | rest_service_port | +| ------------ | ------------------ | +| 描述 | Rest服务监听端口号 | +| 类型 | int32 | +| 默认值 | 18080 | +| 改后生效方式 | 重启服务生效 | + +- enable_swagger + +| 名字 | enable_swagger | +| ------------ | --------------------------------- | +| 描述 | 是否启用swagger来展示rest接口信息 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- rest_query_default_row_size_limit + +| 名字 | rest_query_default_row_size_limit | +| ------------ | --------------------------------- | +| 描述 | 一次查询能返回的结果集最大行数 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- cache_expire_in_seconds + +| 名字 | cache_expire_in_seconds | +| ------------ | -------------------------------- | +| 描述 | 用户登录信息缓存的过期时间(秒) | +| 类型 | int32 | +| 默认值 | 28800 | +| 改后生效方式 | 重启服务生效 | + +- cache_max_num + +| 名字 | cache_max_num | +| ------------ | ------------------------ | +| 描述 | 缓存中存储的最大用户数量 | +| 类型 | int32 | +| 默认值 | 100 | +| 改后生效方式 | 重启服务生效 | + +- cache_init_num + +| 名字 | cache_init_num | +| ------------ | -------------- | +| 描述 | 缓存初始容量 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- client_auth + +| 名字 | client_auth | +| ------------ | ---------------------- | +| 描述 | 是否需要客户端身份验证 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- trust_store_path + +| 名字 | trust_store_path | +| ------------ | ----------------------- | +| 描述 | keyStore 密码(非必填) | +| 类型 | String | +| 默认值 | "" | +| 改后生效方式 | 重启服务生效 | + +- trust_store_pwd + +| 名字 | trust_store_pwd | +| ------------ | ------------------------- | +| 描述 | trustStore 密码(非必填) | +| 类型 | String | +| 默认值 | "" | +| 改后生效方式 | 重启服务生效 | + +- idle_timeout_in_seconds + +| 名字 | idle_timeout_in_seconds | +| ------------ | ----------------------- | +| 描述 | SSL 超时时间,单位为秒 | +| 类型 | int32 | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +### 3.12 负载均衡配置 + +- series_slot_num + +| 名字 | series_slot_num | +| ------------ | ---------------------------- | +| 描述 | 序列分区槽数 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- series_partition_executor_class + +| 名字 | series_partition_executor_class | +| ------------ | ------------------------------------------------------------ | +| 描述 
| 序列分区哈希函数 | +| 类型 | String | +| 默认值 | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- schema_region_group_extension_policy + +| 名字 | schema_region_group_extension_policy | +| ------------ | ------------------------------------ | +| 描述 | SchemaRegionGroup 的扩容策略 | +| 类型 | string | +| 默认值 | AUTO | +| 改后生效方式 | 重启服务生效 | + +- default_schema_region_group_num_per_database + +| 名字 | default_schema_region_group_num_per_database | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当选用 CUSTOM-SchemaRegionGroup 扩容策略时,此参数为每个 Database 拥有的 SchemaRegionGroup 数量;当选用 AUTO-SchemaRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 SchemaRegionGroup 数量 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_per_data_node + +| 名字 | schema_region_per_data_node | +| ------------ | -------------------------------------------------- | +| 描述 | 期望每个 DataNode 可管理的 SchemaRegion 的最大数量 | +| 类型 | double | +| 默认值 | 1.0 | +| 改后生效方式 | 重启服务生效 | + +- data_region_group_extension_policy + +| 名字 | data_region_group_extension_policy | +| ------------ | ---------------------------------- | +| 描述 | DataRegionGroup 的扩容策略 | +| 类型 | string | +| 默认值 | AUTO | +| 改后生效方式 | 重启服务生效 | + +- default_data_region_group_num_per_database + +| 名字 | default_data_region_group_per_database | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当选用 CUSTOM-DataRegionGroup 扩容策略时,此参数为每个 Database 拥有的 DataRegionGroup 数量;当选用 AUTO-DataRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 DataRegionGroup 数量 | +| 类型 | int | +| 默认值 | 2 | +| 改后生效方式 | 重启服务生效 | + +- data_region_per_data_node + +| 名字 | data_region_per_data_node | +| ------------ | ------------------------------------------------ | +| 描述 | 期望每个 DataNode 可管理的 DataRegion 的最大数量 | +| 类型 | double | +| 默认值 | CPU 核心数的一半 | +| 改后生效方式 | 重启服务生效 | + +- enable_auto_leader_balance_for_ratis_consensus + +| 名字 | enable_auto_leader_balance_for_ratis_consensus | +| ------------ | ---------------------------------------------- | +| 描述 | 是否为 Ratis 共识协议开启自动均衡 leader 策略 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- enable_auto_leader_balance_for_iot_consensus + +| 名字 | enable_auto_leader_balance_for_iot_consensus | +| ------------ | -------------------------------------------- | +| 描述 | 是否为 IoT 共识协议开启自动均衡 leader 策略 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +### 3.13 集群管理 + +- time_partition_origin + +| 名字 | time_partition_origin | +| ------------ | ------------------------------------------------------------ | +| 描述 | Database 数据时间分区的起始点,即从哪个时间点开始计算时间分区。 | +| 类型 | Long | +| 单位 | 毫秒 | +| 默认值 | 0 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- time_partition_interval + +| 名字 | time_partition_interval | +| ------------ | ------------------------------- | +| 描述 | Database 默认的数据时间分区间隔 | +| 类型 | Long | +| 单位 | 毫秒 | +| 默认值 | 604800000 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- heartbeat_interval_in_ms + +| 名字 | heartbeat_interval_in_ms | +| ------------ | ------------------------ | +| 描述 | 集群节点间的心跳间隔 | +| 类型 | Long | +| 单位 | ms | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- disk_space_warning_threshold + +| 名字 | disk_space_warning_threshold | +| ------------ | ---------------------------- | +| 描述 | DataNode 磁盘剩余阈值 | +| 类型 | double(percentage) | +| 默认值 | 0.05 | +| 改后生效方式 | 重启服务生效 | + +### 3.14 内存控制配置 + +- datanode_memory_proportion + +| 名字 | datanode_memory_proportion | +| ------------ | ---------------------------------------------------- | +| 描述 | 存储引擎、查询引擎、元数据、共识、流处理引擎和空闲内存比例 | +| 类型 | Ratio | +| 默认值 | 
3:3:1:1:1:1 | +| 改后生效方式 | 重启服务生效 | + +- schema_memory_proportion + +| 名字 | schema_memory_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | Schema 相关的内存如何在 SchemaRegion、SchemaCache 和 PartitionCache 之间分配 | +| 类型 | Ratio | +| 默认值 | 5:4:1 | +| 改后生效方式 | 重启服务生效 | + +- storage_engine_memory_proportion + +| 名字 | storage_engine_memory_proportion | +| ------------ | -------------------------------- | +| 描述 | 写入和合并占存储内存比例 | +| 类型 | Ratio | +| 默认值 | 8:2 | +| 改后生效方式 | 重启服务生效 | + +- write_memory_proportion + +| 名字 | write_memory_proportion | +| ------------ | -------------------------------------------- | +| 描述 | Memtable 和 TimePartitionInfo 占写入内存比例 | +| 类型 | Ratio | +| 默认值 | 19:1 | +| 改后生效方式 | 重启服务生效 | + +- primitive_array_size + +| 名字 | primitive_array_size | +| ------------ | ---------------------------------------- | +| 描述 | 数组池中的原始数组大小(每个数组的长度) | +| 类型 | int32 | +| 默认值 | 64 | +| 改后生效方式 | 重启服务生效 | + +- chunk_metadata_size_proportion + +| 名字 | chunk_metadata_size_proportion | +| ------------ | -------------------------------------------- | +| 描述 | 在数据压缩过程中,用于存储块元数据的内存比例 | +| 类型 | Double | +| 默认值 | 0.1 | +| 改后生效方式 | 重启服务生效 | + +- flush_proportion + +| 名字 | flush_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | 调用flush disk的写入内存比例,默认0.4,若有极高的写入负载力(比如batch=1000),可以设置为低于默认值,比如0.2 | +| 类型 | Double | +| 默认值 | 0.4 | +| 改后生效方式 | 重启服务生效 | + +- buffered_arrays_memory_proportion + +| 名字 | buffered_arrays_memory_proportion | +| ------------ | --------------------------------------- | +| 描述 | 为缓冲数组分配的写入内存比例,默认为0.6 | +| 类型 | Double | +| 默认值 | 0.6 | +| 改后生效方式 | 重启服务生效 | + +- reject_proportion + +| 名字 | reject_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | 拒绝插入的写入内存比例,默认0.8,若有极高的写入负载力(比如batch=1000)并且物理内存足够大,它可以设置为高于默认值,如0.9 | +| 类型 | Double | +| 默认值 | 0.8 | +| 改后生效方式 | 重启服务生效 | + +- device_path_cache_proportion + +| 名字 | device_path_cache_proportion | +| ------------ | --------------------------------------------------- | +| 描述 | 在内存中分配给设备路径缓存(DevicePathCache)的比例 | +| 类型 | Double | +| 默认值 | 0.05 | +| 改后生效方式 | 重启服务生效 | + +- write_memory_variation_report_proportion + +| 名字 | write_memory_variation_report_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | 如果 DataRegion 的内存增加超过写入可用内存的一定比例,则向系统报告。默认值为0.001 | +| 类型 | Double | +| 默认值 | 0.001 | +| 改后生效方式 | 重启服务生效 | + +- check_period_when_insert_blocked + +| 名字 | check_period_when_insert_blocked | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当插入被拒绝时,等待时间(以毫秒为单位)去再次检查系统,默认为50。若插入被拒绝,读取负载低,可以设置大一些。 | +| 类型 | int32 | +| 默认值 | 50 | +| 改后生效方式 | 重启服务生效 | + +- io_task_queue_size_for_flushing + +| 名字 | io_task_queue_size_for_flushing | +| ------------ | -------------------------------- | +| 描述 | ioTaskQueue 的大小。默认值为10。 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- enable_query_memory_estimation + +| 名字 | enable_query_memory_estimation | +| ------------ | ------------------------------------------------------------ | +| 描述 | 开启后会预估每次查询的内存使用量,如果超过可用内存,会拒绝本次查询 | +| 类型 | bool | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +### 3.15 元数据引擎配置 + +- schema_engine_mode + +| 名字 | schema_engine_mode | +| ------------ | ------------------------------------------------------------ | +| 描述 | 元数据引擎的运行模式,支持 Memory 和 PBTree;PBTree 模式下支持将内存中暂时不用的序列元数据实时置换到磁盘上,需要使用时再加载进内存;此参数在集群中所有的 DataNode 上务必保持相同。 | +| 类型 | string | +| 默认值 | Memory | +| 改后生效方式 | 
仅允许在第一次启动服务前修改 | + +- partition_cache_size + +| 名字 | partition_cache_size | +| ------------ | ------------------------------ | +| 描述 | 分区信息缓存的最大缓存条目数。 | +| 类型 | Int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- sync_mlog_period_in_ms + +| 名字 | sync_mlog_period_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | mlog定期刷新到磁盘的周期,单位毫秒。如果该参数为0,则表示每次对元数据的更新操作都会被立即写到磁盘上。 | +| 类型 | Int64 | +| 默认值 | 100 | +| 改后生效方式 | 重启服务生效 | + +- tag_attribute_flush_interval + +| 名字 | tag_attribute_flush_interval | +| ------------ | -------------------------------------------------- | +| 描述 | 标签和属性记录的间隔数,达到此记录数量时将强制刷盘 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- tag_attribute_total_size + +| 名字 | tag_attribute_total_size | +| ------------ | ---------------------------------------- | +| 描述 | 每个时间序列标签和属性的最大持久化字节数 | +| 类型 | int32 | +| 默认值 | 700 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- max_measurement_num_of_internal_request + +| 名字 | max_measurement_num_of_internal_request | +| ------------ | ------------------------------------------------------------ | +| 描述 | 一次注册序列请求中若物理量过多,在系统内部执行时将被拆分为若干个轻量级的子请求,每个子请求中的物理量数目不超过此参数设置的最大值。 | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- datanode_schema_cache_eviction_policy + +| 名字 | datanode_schema_cache_eviction_policy | +| ------------ | ----------------------------------------------------- | +| 描述 | 当 Schema 缓存达到其最大容量时,Schema 缓存的淘汰策略 | +| 类型 | String | +| 默认值 | FIFO | +| 改后生效方式 | 重启服务生效 | + +- cluster_timeseries_limit_threshold + +| 名字 | cluster_timeseries_limit_threshold | +| ------------ | ---------------------------------- | +| 描述 | 集群中可以创建的时间序列的最大数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +- cluster_device_limit_threshold + +| 名字 | cluster_device_limit_threshold | +| ------------ | ------------------------------ | +| 描述 | 集群中可以创建的最大设备数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +- database_limit_threshold + +| 名字 | database_limit_threshold | +| ------------ | ------------------------------ | +| 描述 | 集群中可以创建的最大数据库数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +### 3.16 自动推断数据类型 + +- enable_auto_create_schema + +| 名字 | enable_auto_create_schema | +| ------------ | -------------------------------------- | +| 描述 | 当写入的序列不存在时,是否自动创建序列 | +| 取值 | true or false | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- default_storage_group_level + +| 名字 | default_storage_group_level | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当写入的数据不存在且自动创建序列时,若需要创建相应的 database,将序列路径的哪一层当做 database。例如,如果我们接到一个新序列 root.sg0.d1.s2, 并且 level=1, 那么 root.sg0 被视为database(因为 root 是 level 0 层) | +| 取值 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- boolean_string_infer_type + +| 名字 | boolean_string_infer_type | +| ------------ | ------------------------------------------ | +| 描述 | "true" 或者 "false" 字符串被推断的数据类型 | +| 取值 | BOOLEAN 或者 TEXT | +| 默认值 | BOOLEAN | +| 改后生效方式 | 热加载 | + +- integer_string_infer_type + +| 名字 | integer_string_infer_type | +| ------------ | --------------------------------- | +| 描述 | 整型字符串推断的数据类型 | +| 取值 | INT32, INT64, FLOAT, DOUBLE, TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- floating_string_infer_type + +| 名字 | floating_string_infer_type | +| ------------ | ----------------------------- | +| 描述 | "6.7"等字符串被推断的数据类型 | +| 取值 | DOUBLE, FLOAT or TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- nan_string_infer_type + +| 名字 | nan_string_infer_type | +| ------------ | ---------------------------- | +| 描述 | "NaN" 字符串被推断的数据类型 | +| 取值 | DOUBLE, 
FLOAT or TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- default_boolean_encoding + +| 名字 | default_boolean_encoding | +| ------------ | ------------------------ | +| 描述 | BOOLEAN 类型编码格式 | +| 取值 | PLAIN, RLE | +| 默认值 | RLE | +| 改后生效方式 | 热加载 | + +- default_int32_encoding + +| 名字 | default_int32_encoding | +| ------------ | -------------------------------------- | +| 描述 | int32 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| 默认值 | TS_2DIFF | +| 改后生效方式 | 热加载 | + +- default_int64_encoding + +| 名字 | default_int64_encoding | +| ------------ | -------------------------------------- | +| 描述 | int64 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| 默认值 | TS_2DIFF | +| 改后生效方式 | 热加载 | + +- default_float_encoding + +| 名字 | default_float_encoding | +| ------------ | ----------------------------- | +| 描述 | float 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | +| 默认值 | GORILLA | +| 改后生效方式 | 热加载 | + +- default_double_encoding + +| 名字 | default_double_encoding | +| ------------ | ----------------------------- | +| 描述 | double 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | +| 默认值 | GORILLA | +| 改后生效方式 | 热加载 | + +- default_text_encoding + +| 名字 | default_text_encoding | +| ------------ | --------------------- | +| 描述 | text 类型编码格式 | +| 取值 | PLAIN | +| 默认值 | PLAIN | +| 改后生效方式 | 热加载 | + +* boolean_compressor + +| 名字 | boolean_compressor | +| -------------- | ----------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,BOOLEAN 数据类型的压缩方式 (V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* int32_compressor + +| 名字 | int32_compressor | +| -------------- | ------------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,INT32/DATE 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* int64_compressor + +| 名字 | int64_compressor | +| -------------- | ------------------------------------------------------------------------------ | +| 描述 | 启用自动创建模式时,INT64/TIMESTAMP 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* float_compressor + +| 名字 | float_compressor | +| -------------- | -------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,FLOAT 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* double_compressor + +| 名字 | double_compressor | +| -------------- | --------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,DOUBLE 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* text_compressor + +| 名字 | text_compressor | +| -------------- | -------------------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,TEXT/BINARY/BLOB 数据类型的压缩方式(V2.0.6 版本开始支持 ) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + + + +### 3.17 查询配置 + +- read_consistency_level + +| 名字 | read_consistency_level | +| ------------ | ------------------------------------------------------------ | +| 描述 | 查询一致性等级,取值 “strong” 时从 Leader 副本查询,取值 “weak” 时随机查询一个副本。 | +| 类型 | String | +| 默认值 | strong | +| 改后生效方式 | 重启服务生效 | + +- meta_data_cache_enable + +| 名字 | meta_data_cache_enable | +| ------------ | ------------------------------------------------------------ | +| 描述 | 是否缓存元数据(包括 BloomFilter、Chunk Metadata 和 TimeSeries Metadata。) | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- chunk_timeseriesmeta_free_memory_proportion + +| 名字 | chunk_timeseriesmeta_free_memory_proportion | +| ------------ | 
------------------------------------------------------------ | +| 描述 | 读取内存分配比例,BloomFilterCache、ChunkCache、TimeseriesMetadataCache、数据集查询的内存和可用内存的查询。参数形式为a : b : c : d : e,其中a、b、c、d、e为整数。 例如“1 : 1 : 1 : 1 : 1” ,“1 : 100 : 200 : 300 : 400” 。 | +| 类型 | String | +| 默认值 | 1 : 100 : 200 : 300 : 400 | +| 改后生效方式 | 重启服务生效 | + +- enable_last_cache + +| 名字 | enable_last_cache | +| ------------ | ------------------ | +| 描述 | 是否开启最新点缓存 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_core_pool_size + +| 名字 | mpp_data_exchange_core_pool_size | +| ------------ | -------------------------------- | +| 描述 | MPP 数据交换线程池核心线程数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_max_pool_size + +| 名字 | mpp_data_exchange_max_pool_size | +| ------------ | ------------------------------- | +| 描述 | MPP 数据交换线程池最大线程数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_keep_alive_time_in_ms + +| 名字 | mpp_data_exchange_keep_alive_time_in_ms | +| ------------ | --------------------------------------- | +| 描述 | MPP 数据交换最大等待时间 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- driver_task_execution_time_slice_in_ms + +| 名字 | driver_task_execution_time_slice_in_ms | +| ------------ | -------------------------------------- | +| 描述 | 单个 DriverTask 最长执行时间(ms) | +| 类型 | int32 | +| 默认值 | 200 | +| 改后生效方式 | 重启服务生效 | + +- max_tsblock_size_in_bytes + +| 名字 | max_tsblock_size_in_bytes | +| ------------ | ------------------------------- | +| 描述 | 单个 TsBlock 的最大容量(byte) | +| 类型 | int32 | +| 默认值 | 131072 | +| 改后生效方式 | 重启服务生效 | + +- max_tsblock_line_numbers + +| 名字 | max_tsblock_line_numbers | +| ------------ | ------------------------ | +| 描述 | 单个 TsBlock 的最大行数 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- slow_query_threshold + +| 名字 | slow_query_threshold | +| ------------ | ------------------------------ | +| 描述 | 慢查询的时间阈值。单位:毫秒。 | +| 类型 | long | +| 默认值 | 10000 | +| 改后生效方式 | 热加载 | + +- query_cost_stat_window + +| 名字 | query_cost_stat_window | +| ------------ |--------------------| +| 描述 | 查询耗时统计的窗口,单位为分钟。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 热加载 | + +- query_timeout_threshold + +| 名字 | query_timeout_threshold | +| ------------ | -------------------------------- | +| 描述 | 查询的最大执行时间。单位:毫秒。 | +| 类型 | Int32 | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- max_allowed_concurrent_queries + +| 名字 | max_allowed_concurrent_queries | +| ------------ | ------------------------------ | +| 描述 | 允许的最大并发查询数量。 | +| 类型 | Int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- query_thread_count + +| 名字 | query_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 IoTDB 对内存中的数据进行查询时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- degree_of_query_parallelism + +| 名字 | degree_of_query_parallelism | +| ------------ | ------------------------------------------------------------ | +| 描述 | 设置单个查询片段实例将创建的 pipeline 驱动程序数量,也就是查询操作的并行度。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- mode_map_size_threshold + +| 名字 | mode_map_size_threshold | +| ------------ | ---------------------------------------------- | +| 描述 | 计算 MODE 聚合函数时,计数映射可以增长到的阈值 | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- batch_size + +| 名字 | batch_size | +| ------------ | ---------------------------------------------------------- | +| 描述 | 服务器中每次迭代的数据量(数据条目,即不同时间戳的数量。) | +| 类型 | Int32 | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- sort_buffer_size_in_bytes + +| 名字 | 
sort_buffer_size_in_bytes | +| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 设置外部排序操作中使用的内存缓冲区大小 | +| 类型 | long | +| 默认值 | 1048576(V2.0.6 之前版本)
0(V2.0.6 及之后版本),当值小于等于 0 时,由系统自动进行计算,计算公式为:`sort_buffer_size_in_bytes = Math.min(32 * 1024 * 1024, 堆内内存 * 查询引擎内存比例 * 查询执行内存比例 / 查询线程数 / 2)` | +| 改后生效方式 | 热加载 | + +- merge_threshold_of_explain_analyze + +| 名字 | merge_threshold_of_explain_analyze | +| ------------ | ------------------------------------------------------------ | +| 描述 | 用于设置在 `EXPLAIN ANALYZE` 语句的结果集中操作符(operator)数量的合并阈值。 | +| 类型 | int | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +### 3.18 TTL配置 + +- ttl_check_interval + +| 名字 | ttl_check_interval | +| ------------ | -------------------------------------- | +| 描述 | ttl 检查任务的间隔,单位 ms,默认为 2h | +| 类型 | int | +| 默认值 | 7200000 | +| 改后生效方式 | 重启服务生效 | + +- max_expired_time + +| 名字 | max_expired_time | +| ------------ | ------------------------------------------------------------ | +| 描述 | 如果一个文件中存在设备已经过期超过此时间,那么这个文件将被立即整理。单位 ms,默认为一个月 | +| 类型 | int | +| 默认值 | 2592000000 | +| 改后生效方式 | 重启服务生效 | + +- expired_data_ratio + +| 名字 | expired_data_ratio | +| ------------ | ------------------------------------------------------------ | +| 描述 | 过期设备比例。如果一个文件中过期设备的比率超过这个值,那么这个文件中的过期数据将通过 compaction 清理。 | +| 类型 | float | +| 默认值 | 0.3 | +| 改后生效方式 | 重启服务生效 | + +### 3.19 存储引擎配置 + +- timestamp_precision + +| 名字 | timestamp_precision | +| ------------ | ---------------------------- | +| 描述 | 时间戳精度,支持 ms、us、ns | +| 类型 | String | +| 默认值 | ms | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- timestamp_precision_check_enabled + +| 名字 | timestamp_precision_check_enabled | +| ------------ | --------------------------------- | +| 描述 | 用于控制是否启用时间戳精度检查 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- max_waiting_time_when_insert_blocked + +| 名字 | max_waiting_time_when_insert_blocked | +| ------------ | ----------------------------------------------- | +| 描述 | 当插入请求等待超过这个时间,则抛出异常,单位 ms | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- handle_system_error + +| 名字 | handle_system_error | +| ------------ | ------------------------------------ | +| 描述 | 当系统遇到不可恢复的错误时的处理方法 | +| 类型 | String | +| 默认值 | CHANGE_TO_READ_ONLY | +| 改后生效方式 | 重启服务生效 | + +- enable_timed_flush_seq_memtable + +| 名字 | enable_timed_flush_seq_memtable | +| ------------ | ------------------------------- | +| 描述 | 是否开启定时刷盘顺序 memtable | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- seq_memtable_flush_interval_in_ms + +| 名字 | seq_memtable_flush_interval_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | +| 类型 | long | +| 默认值 | 600000 | +| 改后生效方式 | 热加载 | + +- seq_memtable_flush_check_interval_in_ms + +| 名字 | seq_memtable_flush_check_interval_in_ms | +| ------------ | ---------------------------------------- | +| 描述 | 检查顺序 memtable 是否需要刷盘的时间间隔 | +| 类型 | long | +| 默认值 | 30000 | +| 改后生效方式 | 热加载 | + +- enable_timed_flush_unseq_memtable + +| 名字 | enable_timed_flush_unseq_memtable | +| ------------ | --------------------------------- | +| 描述 | 是否开启定时刷新乱序 memtable | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- unseq_memtable_flush_interval_in_ms + +| 名字 | unseq_memtable_flush_interval_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | +| 类型 | long | +| 默认值 | 600000 | +| 改后生效方式 | 热加载 | + +- unseq_memtable_flush_check_interval_in_ms + +| 名字 | unseq_memtable_flush_check_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 检查乱序 memtable 是否需要刷盘的时间间隔 | +| 类型 | long | +| 默认值 | 30000 | +| 改后生效方式 | 热加载 | + +- 
tvlist_sort_algorithm + +| 名字 | tvlist_sort_algorithm | +| ------------ | ------------------------ | +| 描述 | memtable中数据的排序方法 | +| 类型 | String | +| 默认值 | TIM | +| 改后生效方式 | 重启服务生效 | + +- avg_series_point_number_threshold + +| 名字 | avg_series_point_number_threshold | +| ------------ | ------------------------------------------------ | +| 描述 | 内存中平均每个时间序列点数最大值,达到触发 flush | +| 类型 | int32 | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- flush_thread_count + +| 名字 | flush_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 IoTDB 将内存中的数据写入磁盘时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。默认值为 0。 | +| 类型 | int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- enable_partial_insert + +| 名字 | enable_partial_insert | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在一次 insert 请求中,如果部分测点写入失败,是否继续写入其他测点。 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- recovery_log_interval_in_ms + +| 名字 | recovery_log_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | data region的恢复过程中打印日志信息的间隔 | +| 类型 | Int32 | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +- 0.13_data_insert_adapt + +| 名字 | 0.13_data_insert_adapt | +| ------------ | ------------------------------------------------------- | +| 描述 | 如果 0.13 版本客户端进行写入,需要将此配置项设置为 true | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- enable_tsfile_validation + +| 名字 | enable_tsfile_validation | +| ------------ | -------------------------------------- | +| 描述 | Flush, Load 或合并后验证 tsfile 正确性 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 热加载 | + +- tier_ttl_in_ms + +| 名字 | tier_ttl_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 定义每个层级负责的数据范围,通过 TTL 表示 | +| 类型 | long | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +* max_object_file_size_in_byte + +| 名字 | max\_object\_file\_size\_in\_byte | +| -------------- |-----------------------------------| +| 描述 | 单对象文件的最大尺寸限制 (V2.0.8-beta 版本起支持) | +| 类型 | long | +| 默认值 | 4294967296 | +| 改后生效方式 | 热加载 | + +* restrict_object_limit + +| 名字 | restrict\_object\_limit | +|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 对 OBJECT 类型的表名、列名和设备名称没有特殊限制。(V2.0.8-beta 版本起支持)当设置为 true 且表中包含 OBJECT 列时,需遵循以下限制:
1. 命名规范:TAG 列的值、表名和字段名禁止使用 “.” 或 “..”,且不得包含 “./” 或 “.\” 字符,否则元数据创建将失败。若名称包含文件系统不支持的字符,则会在数据写入时报错。
2. 大小写敏感:如果底层文件系统不区分大小写,则设备标识符(如 'd1' 与 'D1')将被视为相同。在此情况下,若创建此类名称相似的设备,其 OBJECT 数据文件可能互相覆盖,导致数据错误。
3. 存储路径:OBJECT 类型数据的实际存储路径格式为:`${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin`。 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + + +### 3.20 合并配置 + +- enable_seq_space_compaction + +| 名字 | enable_seq_space_compaction | +| ------------ | -------------------------------------- | +| 描述 | 顺序空间内合并,开启顺序文件之间的合并 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_unseq_space_compaction + +| 名字 | enable_unseq_space_compaction | +| ------------ | -------------------------------------- | +| 描述 | 乱序空间内合并,开启乱序文件之间的合并 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_cross_space_compaction + +| 名字 | enable_cross_space_compaction | +| ------------ | ------------------------------------------ | +| 描述 | 跨空间合并,开启将乱序文件合并到顺序文件中 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_auto_repair_compaction + +| 名字 | enable_auto_repair_compaction | +| ------------ | ----------------------------- | +| 描述 | 启用通过合并操作自动修复未排序文件的功能 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- cross_selector + +| 名字 | cross_selector | +| ------------ |----------------| +| 描述 | 跨空间合并任务的选择器 | +| 类型 | String | +| 默认值 | rewrite | +| 改后生效方式 | 重启服务生效 | + +- cross_performer + +| 名字 | cross_performer | +| ------------ |-----------------------------------| +| 描述 | 跨空间合并任务的执行器,可选项:read_point 和 fast | +| 类型 | String | +| 默认值 | fast | +| 改后生效方式 | 热加载 | + +- inner_seq_selector + +| 名字 | inner_seq_selector | +| ------------ |------------------------------------------------------------------------| +| 描述 | 顺序空间内合并任务的选择器,可选 size_tiered_single_\target,size_tiered_multi_target | +| 类型 | String | +| 默认值 | size_tiered_multi_target | +| 改后生效方式 | 热加载 | + +- inner_seq_performer + +| 名字 | inner_seq_performer | +| ------------ |--------------------------------------| +| 描述 | 顺序空间内合并任务的执行器,可选项是 read_chunk 和 fast | +| 类型 | String | +| 默认值 | read_chunk | +| 改后生效方式 | 热加载 | + +- inner_unseq_selector + +| 名字 | inner_unseq_selector | +| ------------ |-------------------------------------------------------------------------| +| 描述 | 乱序空间内合并任务的选择器,可选 size_tiered_single_\target,size_tiered_multi_target | +| 类型 | String | +| 默认值 | size_tiered_multi_target | +| 改后生效方式 | 热加载 | + +- inner_unseq_performer + +| 名字 | inner_unseq_performer | +| ------------ |--------------------------------------| +| 描述 | 乱序空间内合并任务的执行器,可选项是 read_point 和 fast | +| 类型 | String | +| 默认值 | fast | +| 改后生效方式 | 热加载 | + +- compaction_priority + +| 名字 | compaction_priority | +| ------------ |-------------------------------------------------------------------------------------------| +| 描述 | 合并时的优先级。INNER_CROSS:优先执行空间内合并,优先减少文件数量;CROSS_INNER:优先执行跨空间合并,优先清理乱序文件;BALANCE:交替执行两种合并类型。 | +| 类型 | String | +| 默认值 | INNER_CROSS | +| 改后生效方式 | 重启服务生效 | + +- candidate_compaction_task_queue_size + +| 名字 | candidate_compaction_task_queue_size | +| ------------ | ------------------------------------ | +| 描述 | 待选合并任务队列容量 | +| 类型 | int32 | +| 默认值 | 50 | +| 改后生效方式 | 重启服务生效 | + +- target_compaction_file_size + +| 名字 | target_compaction_file_size | +| ------------ |-----------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 该参数作用于两个场景:1. 空间内合并的目标文件大小 2. 
跨空间合并中待选序列文件的大小需小于 target_compaction_file_size * 1.5 多数情况下,跨空间合并的目标文件大小不会超过此阈值,即便超出,幅度也不会过大 。 默认值:2GB ,单位:byte | +| 类型 | Long | +| 默认值 | 2147483648 | +| 改后生效方式 | 热加载 | + +- inner_compaction_total_file_size_threshold + +| 名字 | inner_compaction_total_file_size_threshold | +| ------------ |--------------------------------------------| +| 描述 | 空间内合并的文件总大小阈值,单位:byte | +| 类型 | Long | +| 默认值 | 10737418240 | +| 改后生效方式 | 热加载 | + +- inner_compaction_total_file_num_threshold + +| 名字 | inner_compaction_total_file_num_threshold | +| ------------ | ----------------------------------------- | +| 描述 | 空间内合并的文件总数阈值 | +| 类型 | int32 | +| 默认值 | 100 | +| 改后生效方式 | 热加载 | + +- max_level_gap_in_inner_compaction + +| 名字 | max_level_gap_in_inner_compaction | +| ------------ | -------------------------------------- | +| 描述 | 空间内合并筛选的最大层级差 | +| 类型 | int32 | +| 默认值 | 2 | +| 改后生效方式 | 热加载 | + +- target_chunk_size + +| 名字 | target_chunk_size | +| ------------ |--------------------------------------------------| +| 描述 | 刷盘与合并操作的目标数据块大小, 若内存表中某条时序数据的大小超过该值,数据会被刷盘至多个数据块 | +| 类型 | Long | +| 默认值 | 1600000 | +| 改后生效方式 | 重启服务生效 | + +- target_chunk_point_num + +| 名字 | target_chunk_point_num | +| ------------ |------------------------------------------------------| +| 描述 | 刷盘与合并操作中单个数据块的目标点数, 若内存表中某条时序数据的点数超过该值,数据会被刷盘至多个数据块中 | +| 类型 | Long | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- chunk_size_lower_bound_in_compaction + +| 名字 | chunk_size_lower_bound_in_compaction | +| ------------ |--------------------------------------| +| 描述 | 若数据块大小低于此阈值,则会被反序列化为数据点,默认值为128字节 | +| 类型 | Long | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- chunk_point_num_lower_bound_in_compaction + +| 名字 | chunk_point_num_lower_bound_in_compaction | +| ------------ |------------------------------------------| +| 描述 | 若数据块内的数据点数低于此阈值,则会被反序列化为数据点 | +| 类型 | Long | +| 默认值 | 100 | +| 改后生效方式 | 重启服务生效 | + +- inner_compaction_candidate_file_num + +| 名字 | inner_compaction_candidate_file_num | +| ------------ | ---------------------------------------- | +| 描述 | 空间内合并待选文件筛选的文件数量要求 | +| 类型 | int32 | +| 默认值 | 30 | +| 改后生效方式 | 热加载 | + +- max_cross_compaction_candidate_file_num + +| 名字 | max_cross_compaction_candidate_file_num | +| ------------ | --------------------------------------- | +| 描述 | 跨空间合并待选文件筛选的文件数量上限 | +| 类型 | int32 | +| 默认值 | 500 | +| 改后生效方式 | 热加载 | + +- max_cross_compaction_candidate_file_size + +| 名字 | max_cross_compaction_candidate_file_size | +| ------------ |------------------------------------------| +| 描述 | 跨空间合并待选文件筛选的总大小上限 | +| 类型 | Long | +| 默认值 | 5368709120 | +| 改后生效方式 | 热加载 | + +- min_cross_compaction_unseq_file_level + +| 名字 | min_cross_compaction_unseq_file_level | +| ------------ |---------------------------------------| +| 描述 | 可被选为待选文件的乱序文件的最小空间内合并层级 | +| 类型 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 热加载 | + +- compaction_thread_count + +| 名字 | compaction_thread_count | +| ------------ | ----------------------- | +| 描述 | 执行合并任务的线程数目 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +- compaction_max_aligned_series_num_in_one_batch + +| 名字 | compaction_max_aligned_series_num_in_one_batch | +| ------------ | ---------------------------------------------- | +| 描述 | 对齐序列合并一次执行时处理的值列数量 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +- compaction_schedule_interval_in_ms + +| 名字 | compaction_schedule_interval_in_ms | +| ------------ |------------------------------------| +| 描述 | 合并调度的时间间隔,单位 ms | +| 类型 | Long | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- compaction_write_throughput_mb_per_sec + +| 名字 | compaction_write_throughput_mb_per_sec | +| 
------------ |----------------------------------------| +| 描述 | 合并操作每秒可达到的写入吞吐量上限, 小于或等于 0 的取值表示无限制 | +| 类型 | int32 | +| 默认值 | 16 | +| 改后生效方式 | 重启服务生效 | + +- compaction_read_throughput_mb_per_sec + +| 名字 | compaction_read_throughput_mb_per_sec | +| --------- | ---------------------------------------------------- | +| 描述 | 合并每秒读吞吐限制,单位为 megabyte,小于或等于 0 的取值表示无限制 | +| 类型 | int32 | +| 默认值 | 0 | +| Effective | 热加载 | + +- compaction_read_operation_per_sec + +| 名字 | compaction_read_operation_per_sec | +| --------- | ------------------------------------------- | +| 描述 | 合并每秒读操作数量限制,小于或等于 0 的取值表示无限制 | +| 类型 | int32 | +| 默认值 | 0 | +| Effective | 热加载 | + +- sub_compaction_thread_count + +| 名字 | sub_compaction_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 每个合并任务的子任务线程数,只对跨空间合并和乱序空间内合并生效 | +| 类型 | int32 | +| 默认值 | 4 | +| 改后生效方式 | 热加载 | + +- inner_compaction_task_selection_disk_redundancy + +| 名字 | inner_compaction_task_selection_disk_redundancy | +| ------------ | ----------------------------------------------- | +| 描述 | 定义了磁盘可用空间的冗余值,仅用于内部压缩 | +| 类型 | double | +| 默认值 | 0.05 | +| 改后生效方式 | 热加载 | + +- inner_compaction_task_selection_mods_file_threshold + +| 名字 | inner_compaction_task_selection_mods_file_threshold | +| ------------ | --------------------------------------------------- | +| 描述 | 定义了mods文件大小的阈值,仅用于内部压缩。 | +| 类型 | long | +| 默认值 | 131072 | +| 改后生效方式 | 热加载 | + +- compaction_schedule_thread_num + +| 名字 | compaction_schedule_thread_num | +| ------------ | ------------------------------ | +| 描述 | 选择合并任务的线程数量 | +| 类型 | int32 | +| 默认值 | 4 | +| 改后生效方式 | 热加载 | + +### 3.21 写前日志配置 + +- wal_mode + +| 名字 | wal_mode | +| ------------ | ------------------------------------------------------------ | +| 描述 | 写前日志的写入模式. 
DISABLE 模式下会关闭写前日志;SYNC 模式下写入请求会在成功写入磁盘后返回; ASYNC 模式下写入请求返回时可能尚未成功写入磁盘后。 | +| 类型 | String | +| 默认值 | ASYNC | +| 改后生效方式 | 重启服务生效 | + +- max_wal_nodes_num + +| 名字 | max_wal_nodes_num | +| ------------ | ----------------------------------------------------- | +| 描述 | 写前日志节点的最大数量,默认值 0 表示数量由系统控制。 | +| 类型 | int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- wal_async_mode_fsync_delay_in_ms + +| 名字 | wal_async_mode_fsync_delay_in_ms | +| ------------ | ------------------------------------------- | +| 描述 | async 模式下写前日志调用 fsync 前的等待时间 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 热加载 | + +- wal_sync_mode_fsync_delay_in_ms + +| 名字 | wal_sync_mode_fsync_delay_in_ms | +| ------------ | ------------------------------------------ | +| 描述 | sync 模式下写前日志调用 fsync 前的等待时间 | +| 类型 | int32 | +| 默认值 | 3 | +| 改后生效方式 | 热加载 | + +- wal_buffer_size_in_byte + +| 名字 | wal_buffer_size_in_byte | +| ------------ | ----------------------- | +| 描述 | 写前日志的 buffer 大小 | +| 类型 | int32 | +| 默认值 | 33554432 | +| 改后生效方式 | 重启服务生效 | + +- wal_buffer_queue_capacity + +| 名字 | wal_buffer_queue_capacity | +| ------------ | ------------------------- | +| 描述 | 写前日志阻塞队列大小上限 | +| 类型 | int32 | +| 默认值 | 500 | +| 改后生效方式 | 重启服务生效 | + +- wal_file_size_threshold_in_byte + +| 名字 | wal_file_size_threshold_in_byte | +| ------------ | ------------------------------- | +| 描述 | 写前日志文件封口阈值 | +| 类型 | int32 | +| 默认值 | 31457280 | +| 改后生效方式 | 热加载 | + +- wal_min_effective_info_ratio + +| 名字 | wal_min_effective_info_ratio | +| ------------ | ---------------------------- | +| 描述 | 写前日志最小有效信息比 | +| 类型 | double | +| 默认值 | 0.1 | +| 改后生效方式 | 热加载 | + +- wal_memtable_snapshot_threshold_in_byte + +| 名字 | wal_memtable_snapshot_threshold_in_byte | +| ------------ | ---------------------------------------- | +| 描述 | 触发写前日志中内存表快照的内存表大小阈值 | +| 类型 | int64 | +| 默认值 | 8388608 | +| 改后生效方式 | 热加载 | + +- max_wal_memtable_snapshot_num + +| 名字 | max_wal_memtable_snapshot_num | +| ------------ | ------------------------------ | +| 描述 | 写前日志中内存表的最大数量上限 | +| 类型 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 热加载 | + +- delete_wal_files_period_in_ms + +| 名字 | delete_wal_files_period_in_ms | +| ------------ | ----------------------------- | +| 描述 | 删除写前日志的检查间隔 | +| 类型 | int64 | +| 默认值 | 20000 | +| 改后生效方式 | 热加载 | + +- wal_throttle_threshold_in_byte + +| 名字 | wal_throttle_threshold_in_byte | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在IoTConsensus中,当WAL文件的大小达到一定阈值时,会开始对写入操作进行节流,以控制写入速度。 | +| 类型 | long | +| 默认值 | 53687091200 | +| 改后生效方式 | 热加载 | + +- iot_consensus_cache_window_time_in_ms + +| 名字 | iot_consensus_cache_window_time_in_ms | +| ------------ | ---------------------------------------- | +| 描述 | 在IoTConsensus中,写缓存的最大等待时间。 | +| 类型 | long | +| 默认值 | -1 | +| 改后生效方式 | 热加载 | + +- enable_wal_compression + +| 名字 | iot_consensus_cache_window_time_in_ms | +| ------------ | ------------------------------------- | +| 描述 | 用于控制是否启用WAL的压缩。 | +| 类型 | boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +### 3.22 IoT 共识协议配置 + +当Region配置了IoTConsensus共识协议之后,下述的配置项才会生效 + +- data_region_iot_max_log_entries_num_per_batch + +| 名字 | data_region_iot_max_log_entries_num_per_batch | +| ------------ | --------------------------------------------- | +| 描述 | IoTConsensus batch 的最大日志条数 | +| 类型 | int32 | +| 默认值 | 1024 | +| 改后生效方式 | 重启服务生效 | + +- data_region_iot_max_size_per_batch + +| 名字 | data_region_iot_max_size_per_batch | +| ------------ | ---------------------------------- | +| 描述 | IoTConsensus batch 的最大大小 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- 
data_region_iot_max_pending_batches_num + +| 名字 | data_region_iot_max_pending_batches_num | +| ------------ | --------------------------------------- | +| 描述 | IoTConsensus batch 的流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- data_region_iot_max_memory_ratio_for_queue + +| 名字 | data_region_iot_max_memory_ratio_for_queue | +| ------------ | ------------------------------------------ | +| 描述 | IoTConsensus 队列内存分配比例 | +| 类型 | double | +| 默认值 | 0.6 | +| 改后生效方式 | 重启服务生效 | + +- region_migration_speed_limit_bytes_per_second + +| 名字 | region_migration_speed_limit_bytes_per_second | +| ------------ | --------------------------------------------- | +| 描述 | 定义了在region迁移过程中,数据传输的最大速率 | +| 类型 | long | +| 默认值 | 33554432 | +| 改后生效方式 | 重启服务生效 | + +### 3.23 TsFile配置 + +- group_size_in_byte + +| 名字 | group_size_in_byte | +| ------------ | ---------------------------------------------- | +| 描述 | 每次将内存中的数据写入到磁盘时的最大写入字节数 | +| 类型 | int32 | +| 默认值 | 134217728 | +| 改后生效方式 | 热加载 | + +- page_size_in_byte + +| 名字 | page_size_in_byte | +| ------------ | ---------------------------------------------------- | +| 描述 | 内存中每个列写出时,写成的单页最大的大小,单位为字节 | +| 类型 | int32 | +| 默认值 | 65536 | +| 改后生效方式 | 热加载 | + +- max_number_of_points_in_page + +| 名字 | max_number_of_points_in_page | +| ------------ | ------------------------------------------------- | +| 描述 | 一个页中最多包含的数据点(时间戳-值的二元组)数量 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 热加载 | + +- pattern_matching_threshold + +| 名字 | pattern_matching_threshold | +| ------------ | ------------------------------ | +| 描述 | 正则表达式匹配时最大的匹配次数 | +| 类型 | int32 | +| 默认值 | 1000000 | +| 改后生效方式 | 热加载 | + +- float_precision + +| 名字 | float_precision | +| ------------ | ------------------------------------------------------------ | +| 描述 | 浮点数精度,为小数点后数字的位数 | +| 类型 | int32 | +| 默认值 | 默认为 2 位。注意:32 位浮点数的十进制精度为 7 位,64 位浮点数的十进制精度为 15 位。如果设置超过机器精度将没有实际意义。 | +| 改后生效方式 | 热加载 | + +- value_encoder + +| 名字 | value_encoder | +| ------------ | ------------------------------------- | +| 描述 | value 列编码方式 | +| 类型 | 枚举 String: “TS_2DIFF”,“PLAIN”,“RLE” | +| 默认值 | PLAIN | +| 改后生效方式 | 热加载 | + +- compressor + +| 名字 | compressor | +| ------------ | ------------------------------------------------------------ | +| 描述 | 数据压缩方法; 对齐序列中时间列的压缩方法 | +| 类型 | 枚举 String : "UNCOMPRESSED", "SNAPPY", "LZ4", "ZSTD", "LZMA2" | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +- encrypt_flag + +| 名字 | encrypt_flag | +| ------------ | ---------------------------- | +| 描述 | 用于开启或关闭数据加密功能。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- encrypt_type + +| 名字 | encrypt_type | +| ------------ | ------------------------------------- | +| 描述 | 数据加密的方法。 | +| 类型 | String | +| 默认值 | org.apache.tsfile.encrypt.UNENCRYPTED | +| 改后生效方式 | 重启服务生效 | + +- encrypt_key_path + +| 名字 | encrypt_key_path | +| ------------ | ---------------------------- | +| 描述 | 数据加密使用的密钥来源路径。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +### 3.24 授权配置 + +- authorizer_provider_class + +| 名字 | authorizer_provider_class | +| ------------ | ------------------------------------------------------------ | +| 描述 | 权限服务的类名 | +| 类型 | String | +| 默认值 | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer | +| 改后生效方式 | 重启服务生效 | +| 其他可选值 | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer | + +- openID_url + +| 名字 | openID_url | +| ------------ | ---------------------------------------------------------- | +| 描述 | openID 服务器地址 (当 OpenIdAuthorizer 被启用时必须设定) | +| 类型 | String(一个 http 地址) | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- 
iotdb_server_encrypt_decrypt_provider + +| 名字 | iotdb_server_encrypt_decrypt_provider | +| ------------ | ------------------------------------------------------------ | +| 描述 | 用于用户密码加密的类 | +| 类型 | String | +| 默认值 | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- iotdb_server_encrypt_decrypt_provider_parameter + +| 名字 | iotdb_server_encrypt_decrypt_provider_parameter | +| ------------ | ----------------------------------------------- | +| 描述 | 用于初始化用户密码加密类的参数 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- author_cache_size + +| 名字 | author_cache_size | +| ------------ | ------------------------ | +| 描述 | 用户缓存与角色缓存的大小 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- author_cache_expire_time + +| 名字 | author_cache_expire_time | +| ------------ | -------------------------------------- | +| 描述 | 用户缓存与角色缓存的有效期,单位为分钟 | +| 类型 | int32 | +| 默认值 | 30 | +| 改后生效方式 | 重启服务生效 | + +### 3.25 UDF配置 + +- udf_initial_byte_array_length_for_memory_control + +| 名字 | udf_initial_byte_array_length_for_memory_control | +| ------------ | ------------------------------------------------------------ | +| 描述 | 用于评估UDF查询中文本字段的内存使用情况。建议将此值设置为略大于所有文本的平均长度记录。 | +| 类型 | int32 | +| 默认值 | 48 | +| 改后生效方式 | 重启服务生效 | + +- udf_memory_budget_in_mb + +| 名字 | udf_memory_budget_in_mb | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在一个UDF查询中使用多少内存(以 MB 为单位)。上限为已分配内存的 20% 用于读取。 | +| 类型 | Float | +| 默认值 | 30.0 | +| 改后生效方式 | 重启服务生效 | + +- udf_reader_transformer_collector_memory_proportion + +| 名字 | udf_reader_transformer_collector_memory_proportion | +| ------------ | --------------------------------------------------------- | +| 描述 | UDF内存分配比例。参数形式为a : b : c,其中a、b、c为整数。 | +| 类型 | String | +| 默认值 | 1:1:1 | +| 改后生效方式 | 重启服务生效 | + +- udf_lib_dir + +| 名字 | udf_lib_dir | +| ------------ | ---------------------------- | +| 描述 | UDF 日志及jar文件存储路径 | +| 类型 | String | +| 默认值 | ext/udf(Windows:ext\\udf) | +| 改后生效方式 | 重启服务生效 | + +### 3.26 触发器配置 + +- trigger_lib_dir + +| 名字 | trigger_lib_dir | +| ------------ | ----------------------- | +| 描述 | 触发器 JAR 包存放的目录 | +| 类型 | String | +| 默认值 | ext/trigger | +| 改后生效方式 | 重启服务生效 | + +- stateful_trigger_retry_num_when_not_found + +| 名字 | stateful_trigger_retry_num_when_not_found | +| ------------ | ---------------------------------------------- | +| 描述 | 有状态触发器触发无法找到触发器实例时的重试次数 | +| 类型 | Int32 | +| 默认值 | 3 | +| 改后生效方式 | 重启服务生效 | + +### 3.27 SELECT-INTO配置 + +- into_operation_buffer_size_in_byte + +| 名字 | into_operation_buffer_size_in_byte | +| ------------ | ------------------------------------------------------------ | +| 描述 | 执行 select-into 语句时,待写入数据占用的最大内存(单位:Byte) | +| 类型 | long | +| 默认值 | 104857600 | +| 改后生效方式 | 热加载 | + +- select_into_insert_tablet_plan_row_limit + +| 名字 | select_into_insert_tablet_plan_row_limit | +| ------------ | ------------------------------------------------------------ | +| 描述 | 执行 select-into 语句时,一个 insert-tablet-plan 中可以处理的最大行数 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 热加载 | + +- into_operation_execution_thread_count + +| 名字 | into_operation_execution_thread_count | +| ------------ | ------------------------------------------ | +| 描述 | SELECT INTO 中执行写入任务的线程池的线程数 | +| 类型 | int32 | +| 默认值 | 2 | +| 改后生效方式 | 重启服务生效 | + +### 3.28 连续查询配置 +- continuous_query_submit_thread_count + +| 名字 | continuous_query_execution_thread | +| ------------ | --------------------------------- | +| 描述 | 执行连续查询任务的线程池的线程数 | +| 类型 | int32 | +| 默认值 | 2 | +| 改后生效方式 | 重启服务生效 | + +- 
continuous_query_min_every_interval_in_ms + +| 名字 | continuous_query_min_every_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 连续查询执行时间间隔的最小值 | +| 类型 | long (duration) | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +### 3.29 PIPE配置 + +- pipe_lib_dir + +| 名字 | pipe_lib_dir | +| ------------ | -------------------------- | +| 描述 | 自定义 Pipe 插件的存放目录 | +| 类型 | string | +| 默认值 | ext/pipe | +| 改后生效方式 | 暂不支持修改 | + +- pipe_subtask_executor_max_thread_num + +| 名字 | pipe_subtask_executor_max_thread_num | +| ------------ | ------------------------------------------------------------ | +| 描述 | pipe 子任务 processor、sink 中各自可以使用的最大线程数。实际值将是 min(pipe_subtask_executor_max_thread_num, max(1, CPU核心数 / 2))。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- pipe_sink_timeout_ms + +| 名字 | pipe_sink_timeout_ms | +| ------------ | --------------------------------------------- | +| 描述 | thrift 客户端的连接超时时间(以毫秒为单位)。 | +| 类型 | int | +| 默认值 | 900000 | +| 改后生效方式 | 重启服务生效 | + +- pipe_sink_selector_number + +| 名字 | pipe_sink_selector_number | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大执行结果处理线程数量。 建议将此值设置为小于或等于 pipe_sink_max_client_number。 | +| 类型 | int | +| 默认值 | 4 | +| 改后生效方式 | 重启服务生效 | + +- pipe_sink_max_client_number + +| 名字 | pipe_sink_max_client_number | +| ------------ | ----------------------------------------------------------- | +| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大客户端数量。 | +| 类型 | int | +| 默认值 | 16 | +| 改后生效方式 | 重启服务生效 | + +- pipe_air_gap_receiver_enabled + +| 名字 | pipe_air_gap_receiver_enabled | +| ------------ | ------------------------------------------------------------ | +| 描述 | 是否启用通过网闸接收 pipe 数据。接收器只能在 tcp 模式下返回 0 或 1,以指示数据是否成功接收。 \| | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- pipe_air_gap_receiver_port + +| 名字 | pipe_air_gap_receiver_port | +| ------------ | ------------------------------------ | +| 描述 | 服务器通过网闸接收 pipe 数据的端口。 | +| 类型 | int | +| 默认值 | 9780 | +| 改后生效方式 | 重启服务生效 | + +- pipe_all_sinks_rate_limit_bytes_per_second + +| 名字 | pipe_all_sinks_rate_limit_bytes_per_second | +| ------------ | ------------------------------------------------------------ | +| 描述 | 所有 pipe sink 每秒可以传输的总字节数。当给定的值小于或等于 0 时,表示没有限制。默认值是 -1,表示没有限制。 | +| 类型 | double | +| 默认值 | -1 | +| 改后生效方式 | 热加载 | + +### 3.30 Ratis共识协议配置 + +当Region配置了RatisConsensus共识协议之后,下述的配置项才会生效 + +- config_node_ratis_log_appender_buffer_size_max + +| 名字 | config_node_ratis_log_appender_buffer_size_max | +| ------------ | ---------------------------------------------- | +| 描述 | confignode 一次同步日志RPC最大的传输字节限制 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_appender_buffer_size_max + +| 名字 | schema_region_ratis_log_appender_buffer_size_max | +| ------------ | ------------------------------------------------ | +| 描述 | schema region 一次同步日志RPC最大的传输字节限制 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_appender_buffer_size_max + +| 名字 | data_region_ratis_log_appender_buffer_size_max | +| ------------ | ---------------------------------------------- | +| 描述 | data region 一次同步日志RPC最大的传输字节限制 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_snapshot_trigger_threshold + +| 名字 | config_node_ratis_snapshot_trigger_threshold | +| ------------ | -------------------------------------------- | +| 描述 | confignode 触发snapshot需要的日志条数 | +| 类型 | int32 | +| 默认值 | 400,000 | +| 改后生效方式 | 重启服务生效 | + +- 
+### 3.30 Ratis共识协议配置
+
+当Region配置了RatisConsensus共识协议之后,下述的配置项才会生效
+
+- config_node_ratis_log_appender_buffer_size_max
+
+| 名字 | config_node_ratis_log_appender_buffer_size_max |
+| ------------ | ---------------------------------------------- |
+| 描述 | confignode 一次同步日志RPC最大的传输字节限制 |
+| 类型 | int32 |
+| 默认值 | 16777216 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_appender_buffer_size_max
+
+| 名字 | schema_region_ratis_log_appender_buffer_size_max |
+| ------------ | ------------------------------------------------ |
+| 描述 | schema region 一次同步日志RPC最大的传输字节限制 |
+| 类型 | int32 |
+| 默认值 | 16777216 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_appender_buffer_size_max
+
+| 名字 | data_region_ratis_log_appender_buffer_size_max |
+| ------------ | ---------------------------------------------- |
+| 描述 | data region 一次同步日志RPC最大的传输字节限制 |
+| 类型 | int32 |
+| 默认值 | 16777216 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_snapshot_trigger_threshold
+
+| 名字 | config_node_ratis_snapshot_trigger_threshold |
+| ------------ | -------------------------------------------- |
+| 描述 | confignode 触发snapshot需要的日志条数 |
+| 类型 | int32 |
+| 默认值 | 400,000 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_snapshot_trigger_threshold
+
+| 名字 | schema_region_ratis_snapshot_trigger_threshold |
+| ------------ | ---------------------------------------------- |
+| 描述 | schema region 触发snapshot需要的日志条数 |
+| 类型 | int32 |
+| 默认值 | 400,000 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_snapshot_trigger_threshold
+
+| 名字 | data_region_ratis_snapshot_trigger_threshold |
+| ------------ | -------------------------------------------- |
+| 描述 | data region 触发snapshot需要的日志条数 |
+| 类型 | int32 |
+| 默认值 | 400,000 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_log_unsafe_flush_enable
+
+| 名字 | config_node_ratis_log_unsafe_flush_enable |
+| ------------ | ----------------------------------------- |
+| 描述 | confignode 是否允许Raft日志异步刷盘 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_unsafe_flush_enable
+
+| 名字 | schema_region_ratis_log_unsafe_flush_enable |
+| ------------ | ------------------------------------------- |
+| 描述 | schema region 是否允许Raft日志异步刷盘 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_unsafe_flush_enable
+
+| 名字 | data_region_ratis_log_unsafe_flush_enable |
+| ------------ | ----------------------------------------- |
+| 描述 | data region 是否允许Raft日志异步刷盘 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_log_segment_size_max_in_byte
+
+| 名字 | config_node_ratis_log_segment_size_max_in_byte |
+| ------------ | ---------------------------------------------- |
+| 描述 | confignode 一个RaftLog日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_segment_size_max_in_byte
+
+| 名字 | schema_region_ratis_log_segment_size_max_in_byte |
+| ------------ | ------------------------------------------------ |
+| 描述 | schema region 一个RaftLog日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_segment_size_max_in_byte
+
+| 名字 | data_region_ratis_log_segment_size_max_in_byte |
+| ------------ | ---------------------------------------------- |
+| 描述 | data region 一个RaftLog日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_simple_consensus_log_segment_size_max_in_byte
+
+| 名字 | config_node_simple_consensus_log_segment_size_max_in_byte |
+| ------------ | ---------------------------------------------------------- |
+| 描述 | Confignode 简单共识协议一个Log日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_grpc_flow_control_window
+
+| 名字 | config_node_ratis_grpc_flow_control_window |
+| ------------ | ------------------------------------------ |
+| 描述 | confignode grpc 流式拥塞窗口大小 |
+| 类型 | int32 |
+| 默认值 | 4194304 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_grpc_flow_control_window
+
+| 名字 | schema_region_ratis_grpc_flow_control_window |
+| ------------ | -------------------------------------------- |
+| 描述 | schema region grpc 流式拥塞窗口大小 |
+| 类型 | int32 |
+| 默认值 | 4194304 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_grpc_flow_control_window
+
+| 名字 | data_region_ratis_grpc_flow_control_window |
+| ------------ | ------------------------------------------ |
+| 描述 | data region grpc 流式拥塞窗口大小 |
+| 类型 | int32 |
+| 默认值 | 4194304 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_grpc_leader_outstanding_appends_max
+
+| 名字 | config_node_ratis_grpc_leader_outstanding_appends_max |
+| ------------ | ----------------------------------------------------- |
+| 描述 | config node grpc 流水线并发阈值 |
+| 类型 | int32 |
+| 默认值 | 128 |
+| 改后生效方式 | 重启服务生效 |
+
+- 
schema_region_ratis_grpc_leader_outstanding_appends_max + +| 名字 | schema_region_ratis_grpc_leader_outstanding_appends_max | +| ------------ | ------------------------------------------------------- | +| 描述 | schema region grpc 流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_grpc_leader_outstanding_appends_max + +| 名字 | data_region_ratis_grpc_leader_outstanding_appends_max | +| ------------ | ----------------------------------------------------- | +| 描述 | data region grpc 流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_log_force_sync_num + +| 名字 | config_node_ratis_log_force_sync_num | +| ------------ | ------------------------------------ | +| 描述 | config node fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_force_sync_num + +| 名字 | schema_region_ratis_log_force_sync_num | +| ------------ | -------------------------------------- | +| 描述 | schema region fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_force_sync_num + +| 名字 | data_region_ratis_log_force_sync_num | +| ------------ | ------------------------------------ | +| 描述 | data region fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | config_node_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | confignode leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | schema_region_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ------------------------------------------------------ | +| 描述 | schema region leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | data_region_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | data region leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | config_node_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | confignode leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | schema_region_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ------------------------------------------------------ | +| 描述 | schema region leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | data_region_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | data region leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_request_timeout_ms + +| 名字 | config_node_ratis_request_timeout_ms | +| ------------ | ------------------------------------ | +| 描述 | confignode Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_request_timeout_ms + +| 名字 | schema_region_ratis_request_timeout_ms | +| ------------ | -------------------------------------- | +| 描述 | schema region Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_request_timeout_ms + +| 名字 | 
data_region_ratis_request_timeout_ms | +| ------------ | ------------------------------------ | +| 描述 | data region Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_max_retry_attempts + +| 名字 | config_node_ratis_max_retry_attempts | +| ------------ | ------------------------------------ | +| 描述 | confignode Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_initial_sleep_time_ms + +| 名字 | config_node_ratis_initial_sleep_time_ms | +| ------------ | --------------------------------------- | +| 描述 | confignode Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_max_sleep_time_ms + +| 名字 | config_node_ratis_max_sleep_time_ms | +| ------------ | ------------------------------------- | +| 描述 | confignode Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_max_retry_attempts + +| 名字 | schema_region_ratis_max_retry_attempts | +| ------------ | -------------------------------------- | +| 描述 | schema region Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_initial_sleep_time_ms + +| 名字 | schema_region_ratis_initial_sleep_time_ms | +| ------------ | ----------------------------------------- | +| 描述 | schema region Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_max_sleep_time_ms + +| 名字 | schema_region_ratis_max_sleep_time_ms | +| ------------ | ---------------------------------------- | +| 描述 | schema region Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_max_retry_attempts + +| 名字 | data_region_ratis_max_retry_attempts | +| ------------ | ------------------------------------ | +| 描述 | data region Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_initial_sleep_time_ms + +| 名字 | data_region_ratis_initial_sleep_time_ms | +| ------------ | --------------------------------------- | +| 描述 | data region Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_max_sleep_time_ms + +| 名字 | data_region_ratis_max_sleep_time_ms | +| ------------ | -------------------------------------- | +| 描述 | data region Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- ratis_first_election_timeout_min_ms + +| 名字 | ratis_first_election_timeout_min_ms | +| ------------ | ----------------------------------- | +| 描述 | Ratis协议首次选举最小超时时间 | +| 类型 | int64 | +| 默认值 | 50 (ms) | +| 改后生效方式 | 重启服务生效 | + +- ratis_first_election_timeout_max_ms + +| 名字 | ratis_first_election_timeout_max_ms | +| ------------ | ----------------------------------- | +| 描述 | Ratis协议首次选举最大超时时间 | +| 类型 | int64 | +| 默认值 | 150 (ms) | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_preserve_logs_num_when_purge + +| 名字 | config_node_ratis_preserve_logs_num_when_purge | +| ------------ | ---------------------------------------------- | +| 描述 | confignode snapshot后保持一定数量日志不删除 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_preserve_logs_num_when_purge + +| 名字 | schema_region_ratis_preserve_logs_num_when_purge | +| ------------ | ------------------------------------------------ | +| 描述 | schema region snapshot后保持一定数量日志不删除 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_preserve_logs_num_when_purge + +| 名字 | data_region_ratis_preserve_logs_num_when_purge | +| ------------ | 
---------------------------------------------- |
+| 描述 | data region snapshot后保持一定数量日志不删除 |
+| 类型 | int32 |
+| 默认值 | 1000 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_log_max_size
+
+| 名字 | config_node_ratis_log_max_size |
+| ------------ | ----------------------------------- |
+| 描述 | config node磁盘Raft Log最大占用空间 |
+| 类型 | int64 |
+| 默认值 | 2147483648 (2GB) |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_max_size
+
+| 名字 | schema_region_ratis_log_max_size |
+| ------------ | -------------------------------------- |
+| 描述 | schema region 磁盘Raft Log最大占用空间 |
+| 类型 | int64 |
+| 默认值 | 2147483648 (2GB) |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_max_size
+
+| 名字 | data_region_ratis_log_max_size |
+| ------------ | ------------------------------------ |
+| 描述 | data region 磁盘Raft Log最大占用空间 |
+| 类型 | int64 |
+| 默认值 | 21474836480 (20GB) |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_periodic_snapshot_interval
+
+| 名字 | config_node_ratis_periodic_snapshot_interval |
+| ------------ | -------------------------------------------- |
+| 描述 | config node定期snapshot的间隔时间 |
+| 类型 | int64 |
+| 默认值 | 86400 (秒) |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_periodic_snapshot_interval
+
+| 名字 | schema_region_ratis_periodic_snapshot_interval |
+| ------------ | ---------------------------------------------- |
+| 描述 | schema region定期snapshot的间隔时间 |
+| 类型 | int64 |
+| 默认值 | 86400 (秒) |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_periodic_snapshot_interval
+
+| 名字 | data_region_ratis_periodic_snapshot_interval |
+| ------------ | --------------------------------------------- |
+| 描述 | data region定期snapshot的间隔时间 |
+| 类型 | int64 |
+| 默认值 | 86400 (秒) |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.31 IoTConsensusV2配置
+
+- iot_consensus_v2_pipeline_size
+
+| 名字 | iot_consensus_v2_pipeline_size |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | IoTConsensus V2中连接器(connector)和接收器(receiver)的默认事件缓冲区大小。 |
+| 类型 | int |
+| 默认值 | 5 |
+| 改后生效方式 | 重启服务生效 |
+
+- iot_consensus_v2_mode
+
+| 名字 | iot_consensus_v2_mode |
+| ------------ | ----------------------------------- |
+| 描述 | IoTConsensus V2使用的共识协议模式。 |
+| 类型 | String |
+| 默认值 | batch |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.32 Procedure 配置
+
+- procedure_core_worker_thread_count
+
+| 名字 | procedure_core_worker_thread_count |
+| ------------ | ---------------------------------- |
+| 描述 | 工作线程数量 |
+| 类型 | int32 |
+| 默认值 | 4 |
+| 改后生效方式 | 重启服务生效 |
+
+- procedure_completed_clean_interval
+
+| 名字 | procedure_completed_clean_interval |
+| ------------ | ---------------------------------- |
+| 描述 | 清理已完成的 procedure 的时间间隔 |
+| 类型 | int32 |
+| 默认值 | 30(s) |
+| 改后生效方式 | 重启服务生效 |
+
+- procedure_completed_evict_ttl
+
+| 名字 | procedure_completed_evict_ttl |
+| ------------ | --------------------------------- |
+| 描述 | 已完成的 procedure 的数据保留时间 |
+| 类型 | int32 |
+| 默认值 | 60(s) |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.33 MQTT代理配置
+
+- enable_mqtt_service
+
+| 名字 | enable_mqtt_service |
+| ------------ | ------------------- |
+| 描述 | 是否开启MQTT服务 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 热加载 |
+
+- mqtt_host
+
+| 名字 | mqtt_host |
+| ------------ | -------------------- |
+| 描述 | MQTT服务绑定的host。 |
+| 类型 | String |
+| 默认值 | 127.0.0.1 |
+| 改后生效方式 | 热加载 |
+
+- mqtt_port
+
+| 名字 | mqtt_port |
+| ------------ | -------------------- |
+| 描述 | MQTT服务绑定的port。 |
+| 类型 | int32 |
+| 默认值 | 1883 |
+| 改后生效方式 | 热加载 |
+
+- mqtt_handler_pool_size
+
+| 名字 | mqtt_handler_pool_size |
+| ------------ | ---------------------------------- |
+| 描述 | 用于处理MQTT消息的处理程序池大小。 |
+| 类型 | int32 |
+| 默认值 | 1 |
+| 改后生效方式 | 热加载 |
+
+- mqtt_payload_formatter
+
+| 名字 | mqtt_payload_formatter |
+| ------------ | ---------------------------- |
+| 描述 | MQTT消息有效负载格式化程序。 |
+| 类型 | String |
+| 默认值 | json |
+| 改后生效方式 | 热加载 |
+
+- mqtt_max_message_size
+
+| 名字 | mqtt_max_message_size |
+| ------------ | ------------------------------------ |
+| 描述 | MQTT消息的最大长度(以字节为单位)。 |
+| 类型 | int32 |
+| 默认值 | 1048576 |
+| 改后生效方式 | 热加载 |
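+以下是开启内置 MQTT 服务的一个示例片段(仅为示意,0.0.0.0 为假设的监听地址,表示监听所有网卡;以上参数均支持热加载):
+
+```properties
+# 开启 MQTT 服务并监听所有网卡的 1883 端口
+enable_mqtt_service=true
+mqtt_host=0.0.0.0
+mqtt_port=1883
+```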
+### 3.34 审计日志配置
+
+- enable_audit_log
+
+| 名字 | enable_audit_log |
+| ------------ | ------------------------------ |
+| 描述 | 用于控制是否启用审计日志功能。 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- audit_log_storage
+
+| 名字 | audit_log_storage |
+| ------------ | -------------------------- |
+| 描述 | 定义了审计日志的输出位置。 |
+| 类型 | String |
+| 默认值 | IOTDB,LOGGER |
+| 改后生效方式 | 重启服务生效 |
+
+- audit_log_operation
+
+| 名字 | audit_log_operation |
+| ------------ | -------------------------------------- |
+| 描述 | 定义了哪些类型的操作需要记录审计日志。 |
+| 类型 | String |
+| 默认值 | DML,DDL,QUERY |
+| 改后生效方式 | 重启服务生效 |
+
+- enable_audit_log_for_native_insert_api
+
+| 名字 | enable_audit_log_for_native_insert_api |
+| ------------ | -------------------------------------- |
+| 描述 | 用于控制本地写入API是否记录审计日志。 |
+| 类型 | Boolean |
+| 默认值 | true |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.35 白名单配置
+
+- enable_white_list
+
+| 名字 | enable_white_list |
+| ------------ | ----------------- |
+| 描述 | 是否启用白名单。 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 热加载 |
+
+### 3.36 IoTDB-AI 配置
+
+- model_inference_execution_thread_count
+
+| 名字 | model_inference_execution_thread_count |
+| ------------ | -------------------------------------- |
+| 描述 | 用于模型推理操作的线程数。 |
+| 类型 | int |
+| 默认值 | 5 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.37 TsFile 主动监听&加载功能配置
+
+- load_clean_up_task_execution_delay_time_seconds
+
+| 名字 | load_clean_up_task_execution_delay_time_seconds |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 在加载TsFile失败后,系统将等待多长时间才会执行清理任务来清除这些未成功加载的TsFile。 |
+| 类型 | int |
+| 默认值 | 1800 |
+| 改后生效方式 | 热加载 |
+
+- load_write_throughput_bytes_per_second
+
+| 名字 | load_write_throughput_bytes_per_second |
+| ------------ | -------------------------------------- |
+| 描述 | 加载TsFile时每秒磁盘写入的最大字节数。 |
+| 类型 | int |
+| 默认值 | -1 |
+| 改后生效方式 | 热加载 |
+
+- load_active_listening_enable
+
+| 名字 | load_active_listening_enable |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 是否开启 DataNode 主动监听并且加载 tsfile 的功能(默认开启)。 |
+| 类型 | Boolean |
+| 默认值 | true |
+| 改后生效方式 | 热加载 |
+
+- load_active_listening_dirs
+
+| 名字 | load_active_listening_dirs |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 需要监听的目录(自动包括目录中的子目录),如有多个,使用 "," 隔开。默认的目录为 ext/load/pending(支持热加载)。 |
+| 类型 | String |
+| 默认值 | ext/load/pending |
+| 改后生效方式 | 热加载 |
+
+- load_active_listening_fail_dir
+
+| 名字 | load_active_listening_fail_dir |
+| ------------ | ---------------------------------------------------------- |
+| 描述 | 执行加载 tsfile 文件失败后将文件转存的目录,只能配置一个。 |
+| 类型 | String |
+| 默认值 | ext/load/failed |
+| 改后生效方式 | 热加载 |
+
+- load_active_listening_max_thread_num
+
+| 名字 | load_active_listening_max_thread_num |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 同时执行加载 tsfile 任务的最大线程数。参数被注释掉时的默认值为 max(1, CPU 核心数 / 2);当用户设置的值不在 [1, CPU 核心数 / 2] 区间内时,也会采用默认值 max(1, CPU 核心数 / 2)。 |
+| 类型 | Long |
+| 默认值 | 0 |
+| 改后生效方式 | 重启服务生效 |
+
+- load_active_listening_check_interval_seconds
+
+| 名字 | load_active_listening_check_interval_seconds |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 主动监听轮询间隔,单位秒。主动监听 tsfile 的功能是通过轮询检查文件夹实现的。该配置指定了两次检查 load_active_listening_dirs 的时间间隔,每次检查完成 load_active_listening_check_interval_seconds 秒后,会执行下一次检查。当用户设置的轮询间隔小于 1 时,会被设置为默认值 5 秒。 |
+| 类型 | Long |
+| 默认值 | 5 |
+| 改后生效方式 | 重启服务生效 |
+
+* last_cache_operation_on_load
+
+|名字| last_cache_operation_on_load |
+|:---:|:---|
+|描述| 当成功加载一个 TsFile 时,对 LastCache 执行的操作。`UPDATE`:使用 TsFile 中的数据更新 LastCache;`UPDATE_NO_BLOB`:与 UPDATE 类似,但会使 blob 序列的 LastCache 失效;`CLEAN_DEVICE`:使 TsFile 中包含的设备的 LastCache 失效;`CLEAN_ALL`:清空整个 LastCache。 |
+|类型| String |
+|默认值| UPDATE_NO_BLOB |
+|改后生效方式| 重启后生效 |
+
+* cache_last_values_for_load
+
+|名字| cache_last_values_for_load |
+|:---:|:---|
+|描述| 在加载 TsFile 之前是否缓存最新值(last values)。仅在 `last_cache_operation_on_load=UPDATE_NO_BLOB` 或 `last_cache_operation_on_load=UPDATE` 时生效。当设置为 true 时,即使 `last_cache_operation_on_load=UPDATE`,也会忽略 blob 序列。启用此选项会在加载 TsFile 期间增加内存占用。 |
+|类型| Boolean |
+|默认值| true |
+|改后生效方式| 重启后生效 |
+
+* cache_last_values_memory_budget_in_byte
+
+|名字| cache_last_values_memory_budget_in_byte |
+|:---:|:---|
+|描述| 当 `cache_last_values_for_load=true` 时,用于缓存最新值的最大内存大小(以字节为单位)。如果超过该值,缓存的值将被丢弃,并以流式方式直接从 TsFile 中读取最新值。 |
+|类型| int32 |
+|默认值| 4194304 |
+|改后生效方式| 重启后生效 |
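+结合上述参数,下面给出一个开启 TsFile 主动监听加载的示例片段(仅为示意,/data/iotdb/load 为假设的自定义监听目录):
+
+```properties
+# 开启主动监听,并同时监听默认目录与一个自定义目录
+load_active_listening_enable=true
+load_active_listening_dirs=ext/load/pending,/data/iotdb/load
+# 加载失败的 tsfile 将被转存到该目录
+load_active_listening_fail_dir=ext/load/failed
+```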
+### 3.38 分发重试配置
+
+- write_request_remote_dispatch_max_retry_duration_in_ms
+
+| 名字 | write_request_remote_dispatch_max_retry_duration_in_ms |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 在遇到未知错误时,写请求远程分发的最大重试时间,单位是毫秒。 |
+| 类型 | Long |
+| 默认值 | 60000 |
+| 改后生效方式 | 热加载 |
+
+- enable_retry_for_unknown_error
+
+| 名字 | enable_retry_for_unknown_error |
+| ------------ | -------------------------------- |
+| 描述 | 用于控制是否对未知错误进行重试。 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 热加载 |
\ No newline at end of file
diff --git a/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual_timecho.md b/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual_timecho.md
new file mode 100644
index 000000000..b6ce9a099
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/Reference/System-Config-Manual_timecho.md
@@ -0,0 +1,3364 @@
+
+
+# 配置参数
+
+IoTDB 配置文件位于 IoTDB 安装目录:`conf`文件夹下。
+
+- `confignode-env.sh/bat`:环境配置项的配置文件,可以配置 ConfigNode 的内存大小。
+- `datanode-env.sh/bat`:环境配置项的配置文件,可以配置 DataNode 的内存大小。
+- `iotdb-system.properties`:IoTDB 的配置文件。
+- `iotdb-system.properties.template`:IoTDB 的配置文件模版。
+
+## 1. 修改配置
+
+在 `iotdb-system.properties` 文件中已存在的参数可以直接进行修改。对于那些在 `iotdb-system.properties` 中未列出的参数,可以从 `iotdb-system.properties.template` 配置文件模板中找到相应的参数,然后将其复制到 `iotdb-system.properties` 文件中进行修改。
+
+### 1.1 改后生效方式
+
+不同的配置参数有不同的生效方式,分为以下三种:
+
+- 仅允许在第一次启动服务前修改: 在第一次启动 ConfigNode/DataNode 后即禁止修改,修改会导致 ConfigNode/DataNode 无法启动。
+- 重启服务生效: ConfigNode/DataNode 启动后仍可修改,但需要重启 ConfigNode/DataNode 后才生效。
+- 热加载: 可在 ConfigNode/DataNode 运行时修改,修改后通过 Session 或 Cli 发送 `load configuration` 或 `set configuration key1 = 'value1'` 命令(SQL)至 IoTDB 使配置生效。
+
+## 2. 
环境配置项 + +### 2.1 confignode-env.sh/bat + +环境配置项主要用于对 ConfigNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。ConfigNode 启动时,此部分配置会被传给 JVM,详细配置项说明如下: + +- MEMORY_SIZE + +| 名字 | MEMORY_SIZE | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 启动时分配的内存大小 | +| 类型 | String | +| 默认值 | 取决于操作系统和机器配置。默认为机器内存的十分之三,最多会被设置为 16G。 | +| 改后生效方式 | 重启服务生效 | + +- ON_HEAP_MEMORY + +| 名字 | ON_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置。 | +| 改后生效方式 | 重启服务生效 | + +- OFF_HEAP_MEMORY + +| 名字 | OFF_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置。 | +| 改后生效方式 | 重启服务生效 | + +### 2.2 datanode-env.sh/bat + +环境配置项主要用于对 DataNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。DataNode/Standalone 启动时,此部分配置会被传给 JVM,详细配置项说明如下: + +- MEMORY_SIZE + +| 名字 | MEMORY_SIZE | +| ------------ | ---------------------------------------------------- | +| 描述 | IoTDB DataNode 启动时分配的内存大小 | +| 类型 | String | +| 默认值 | 取决于操作系统和机器配置。默认为机器内存的二分之一。 | +| 改后生效方式 | 重启服务生效 | + +- ON_HEAP_MEMORY + +| 名字 | ON_HEAP_MEMORY | +| ------------ | ---------------------------------------------------------- | +| 描述 | IoTDB DataNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置。 | +| 改后生效方式 | 重启服务生效 | + +- OFF_HEAP_MEMORY + +| 名字 | OFF_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB DataNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置 | +| 改后生效方式 | 重启服务生效 | + + +## 3. 
系统配置项(iotdb-system.properties.template) + +### 3.1 集群管理 + +- cluster_name + +| 名字 | cluster_name | +| -------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 集群名称 | +| 类型 | String | +| 默认值 | default_cluster | +| 修改方式 | CLI 中执行语句 `set configuration cluster_name = 'xxx'` (xxx为希望修改成的集群名称) | +| 注意 | 此修改通过网络分发至每个节点。在网络波动或者有节点宕机的情况下,不保证能够在全部节点修改成功。未修改成功的节点重启时无法加入集群,此时需要手动修改该节点的配置文件中的cluster_name项,再重启。正常情况下,不建议通过手动修改配置文件的方式修改集群名称,不建议通过`load configuration`的方式热加载。 | + +### 3.2 SeedConfigNode 配置 + +- cn_seed_config_node + +| 名字 | cn_seed_config_node | +| ------------ | ------------------------------------------------------------ | +| 描述 | 目标 ConfigNode 地址,ConfigNode 通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 cn_target_config_node_list | +| 类型 | String | +| 默认值 | 127.0.0.1:10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_seed_config_node + +| 名字 | dn_seed_config_node | +| ------------ | ------------------------------------------------------------ | +| 描述 | ConfigNode 地址,DataNode 启动时通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 dn_target_config_node_list | +| 类型 | String | +| 默认值 | 127.0.0.1:10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +### 3.3 Node RPC 配置 + +- cn_internal_address + +| 名字 | cn_internal_address | +| ------------ | ---------------------------- | +| 描述 | ConfigNode 集群内部地址 | +| 类型 | String | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- cn_internal_port + +| 名字 | cn_internal_port | +| ------------ | ---------------------------- | +| 描述 | ConfigNode 集群服务监听端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- cn_consensus_port + +| 名字 | cn_consensus_port | +| ------------ | ----------------------------- | +| 描述 | ConfigNode 的共识协议通信端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 10720 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_rpc_address + +| 名字 | dn_rpc_address | +| ------------ |----------------| +| 描述 | 客户端 RPC 服务监听地址 | +| 类型 | String | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_port + +| 名字 | dn_rpc_port | +| ------------ | ----------------------- | +| 描述 | Client RPC 服务监听端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 6667 | +| 改后生效方式 | 重启服务生效 | + +- dn_internal_address + +| 名字 | dn_internal_address | +| ------------ | ---------------------------- | +| 描述 | DataNode 内网通信地址 | +| 类型 | string | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_internal_port + +| 名字 | dn_internal_port | +| ------------ | ---------------------------- | +| 描述 | DataNode 内网通信端口 | +| 类型 | int | +| 默认值 | 10730 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_mpp_data_exchange_port + +| 名字 | dn_mpp_data_exchange_port | +| ------------ | ---------------------------- | +| 描述 | MPP 数据交换端口 | +| 类型 | int | +| 默认值 | 10740 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_schema_region_consensus_port + +| 名字 | dn_schema_region_consensus_port | +| ------------ | ------------------------------------- | +| 描述 | DataNode 元数据副本的共识协议通信端口 | +| 类型 | int | +| 默认值 | 10750 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_data_region_consensus_port + +| 名字 | dn_data_region_consensus_port | +| ------------ | ----------------------------------- | +| 描述 | DataNode 数据副本的共识协议通信端口 | +| 类型 | int | +| 默认值 | 10760 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_join_cluster_retry_interval_ms + +| 名字 | dn_join_cluster_retry_interval_ms | +| ------------ | --------------------------------- | +| 描述 | DataNode 再次重试加入集群等待时间 | +| 类型 | long | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +### 3.4 副本配置 + +- 
config_node_consensus_protocol_class
+
+| 名字 | config_node_consensus_protocol_class |
+| ------------ | ------------------------------------------------ |
+| 描述 | ConfigNode 副本的共识协议,仅支持 RatisConsensus |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- schema_replication_factor
+
+| 名字 | schema_replication_factor |
+| ------------ | ---------------------------------- |
+| 描述 | Database 的默认元数据副本数 |
+| 类型 | int32 |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务后对**新的 Database** 生效 |
+
+- schema_region_consensus_protocol_class
+
+| 名字 | schema_region_consensus_protocol_class |
+| ------------ | ----------------------------------------------------- |
+| 描述 | 元数据副本的共识协议,多副本时只能使用 RatisConsensus |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- data_replication_factor
+
+| 名字 | data_replication_factor |
+| ------------ | ---------------------------------- |
+| 描述 | Database 的默认数据副本数 |
+| 类型 | int32 |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务后对**新的 Database** 生效 |
+
+- data_region_consensus_protocol_class
+
+| 名字 | data_region_consensus_protocol_class |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 数据副本的共识协议,多副本时可以使用 IoTConsensus 或 RatisConsensus |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.consensus.iot.IoTConsensus |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+### 3.5 目录配置
+
+- cn_system_dir
+
+| 名字 | cn_system_dir |
+| ------------ | ----------------------------------------------------------- |
+| 描述 | ConfigNode 系统数据存储路径 |
+| 类型 | String |
+| 默认值 | data/confignode/system(Windows:data\\confignode\\system) |
+| 改后生效方式 | 重启服务生效 |
+
+- cn_consensus_dir
+
+| 名字 | cn_consensus_dir |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | ConfigNode 共识协议数据存储路径 |
+| 类型 | String |
+| 默认值 | data/confignode/consensus(Windows:data\\confignode\\consensus) |
+| 改后生效方式 | 重启服务生效 |
+
+- cn_pipe_receiver_file_dir
+
+| 名字 | cn_pipe_receiver_file_dir |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | ConfigNode中pipe接收者用于存储文件的目录路径。 |
+| 类型 | String |
+| 默认值 | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_system_dir
+
+| 名字 | dn_system_dir |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | IoTDB 元数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 |
+| 类型 | String |
+| 默认值 | data/datanode/system(Windows:data\\datanode\\system) |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_data_dirs
+
+| 名字 | dn_data_dirs |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | IoTDB 数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 |
+| 类型 | String |
+| 默认值 | data/datanode/data(Windows:data\\datanode\\data) |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_multi_dir_strategy
+
+| 名字 | dn_multi_dir_strategy |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | IoTDB 在 data_dirs 中为 TsFile 选择目录时采用的策略。可使用简单类名或类名全称。系统提供以下两种策略:
1. SequenceStrategy:IoTDB 按顺序选择目录,依次遍历 data_dirs 中的所有目录,并不断轮询;
2. MaxDiskUsableSpaceFirstStrategy:IoTDB 优先选择 data_dirs 中对应磁盘空余空间最大的目录;
您可以通过以下方法完成用户自定义策略:
1. 继承 org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy 类并实现自身的 Strategy 方法;
2. 将实现的类的完整类名(包名加类名,UserDefineStrategyPackage)填写到该配置项;
3. 将该类 jar 包添加到工程中。 | +| 类型 | String | +| 默认值 | SequenceStrategy | +| 改后生效方式 | 热加载 | + +- dn_consensus_dir + +| 名字 | dn_consensus_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 共识层日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/consensus(Windows:data\\datanode\\consensus) | +| 改后生效方式 | 重启服务生效 | + +- dn_wal_dirs + +| 名字 | dn_wal_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 写前日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/wal(Windows:data\\datanode\\wal) | +| 改后生效方式 | 重启服务生效 | + +- dn_tracing_dir + +| 名字 | dn_tracing_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 追踪根目录路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | datanode/tracing(Windows:datanode\\tracing) | +| 改后生效方式 | 重启服务生效 | + +- dn_sync_dir + +| 名字 | dn_sync_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB sync 存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/sync(Windows:data\\datanode\\sync) | +| 改后生效方式 | 重启服务生效 | + +- sort_tmp_dir + +| 名字 | sort_tmp_dir | +| ------------ | ------------------------------------------------- | +| 描述 | 用于配置排序操作的临时目录。 | +| 类型 | String | +| 默认值 | data/datanode/tmp(Windows:data\\datanode\\tmp) | +| 改后生效方式 | 重启服务生效 | + +- dn_pipe_receiver_file_dirs + +| 名字 | dn_pipe_receiver_file_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | DataNode中pipe接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- iot_consensus_v2_receiver_file_dirs + +| 名字 | iot_consensus_v2_receiver_file_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTConsensus V2中接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- iot_consensus_v2_deletion_file_dir + +| 名字 | iot_consensus_v2_deletion_file_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTConsensus V2中删除操作用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | +| 改后生效方式 | 重启服务生效 | + +### 3.6 监控配置 + +- cn_metric_reporter_list + +| 名字 | cn_metric_reporter_list | +| ------------ | -------------------------------------------------- | +| 描述 | confignode中用于配置监控模块的数据需要报告的系统。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_level + +| 名字 | cn_metric_level | +| ------------ | ------------------------------------------ | +| 描述 | confignode中控制监控模块收集数据的详细程度 | +| 类型 | String | +| 默认值 | IMPORTANT | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_async_collect_period + +| 名字 | cn_metric_async_collect_period | +| ------------ | -------------------------------------------------- | +| 描述 | confignode中某些监控数据异步收集的周期,单位是秒。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_prometheus_reporter_port + +| 名字 | cn_metric_prometheus_reporter_port | +| ------------ | ------------------------------------------------------ | +| 描述 | confignode中Prometheus报告者用于监控数据报告的端口号。 | +| 类型 | int | +| 默认值 | 9091 | +| 改后生效方式 | 重启服务生效 | + +- 
dn_metric_reporter_list + +| 名字 | dn_metric_reporter_list | +| ------------ | ------------------------------------------------ | +| 描述 | DataNode中用于配置监控模块的数据需要报告的系统。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_level + +| 名字 | dn_metric_level | +| ------------ | ---------------------------------------- | +| 描述 | DataNode中控制监控模块收集数据的详细程度 | +| 类型 | String | +| 默认值 | IMPORTANT | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_async_collect_period + +| 名字 | dn_metric_async_collect_period | +| ------------ | ------------------------------------------------ | +| 描述 | DataNode中某些监控数据异步收集的周期,单位是秒。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_prometheus_reporter_port + +| 名字 | dn_metric_prometheus_reporter_port | +| ------------ | ---------------------------------------------------- | +| 描述 | DataNode中Prometheus报告者用于监控数据报告的端口号。 | +| 类型 | int | +| 默认值 | 9092 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_internal_reporter_type + +| 名字 | dn_metric_internal_reporter_type | +| ------------ | ------------------------------------------------------------ | +| 描述 | DataNode中监控模块内部报告者的种类,用于内部监控和检查数据是否已经成功写入和刷新。 | +| 类型 | String | +| 默认值 | IOTDB | +| 改后生效方式 | 重启服务生效 | + +### 3.7 SSL 配置 + +- enable_thrift_ssl + +| 名字 | enable_thrift_ssl | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当enable_thrift_ssl配置为true时,将通过dn_rpc_port使用 SSL 加密进行通信 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- enable_https + +| 名字 | enable_https | +| ------------ | ------------------------------ | +| 描述 | REST Service 是否开启 SSL 配置 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- key_store_path + +| 名字 | key_store_path | +| ------------ | -------------- | +| 描述 | ssl证书路径 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- key_store_pwd + +| 名字 | key_store_pwd | +| ------------ | ------------- | +| 描述 | ssl证书密码 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +### 3.8 连接配置 + +- cn_rpc_thrift_compression_enable + +| 名字 | cn_rpc_thrift_compression_enable | +| ------------ | -------------------------------- | +| 描述 | 是否启用 thrift 的压缩机制。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- cn_rpc_max_concurrent_client_num + +| 名字 | cn_rpc_max_concurrent_client_num | +| ------------ |---------------------------------| +| 描述 | 最大连接数。 | +| 类型 | int | +| 默认值 | 3000 | +| 改后生效方式 | 重启服务生效 | + +- cn_connection_timeout_ms + +| 名字 | cn_connection_timeout_ms | +| ------------ | ------------------------ | +| 描述 | 节点连接超时时间 | +| 类型 | int | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- cn_selector_thread_nums_of_client_manager + +| 名字 | cn_selector_thread_nums_of_client_manager | +| ------------ | ----------------------------------------- | +| 描述 | 客户端异步线程管理的选择器线程数量 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- cn_max_client_count_for_each_node_in_client_manager + +| 名字 | cn_max_client_count_for_each_node_in_client_manager | +| ------------ | --------------------------------------------------- | +| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | +| 类型 | int | +| 默认值 | 300 | +| 改后生效方式 | 重启服务生效 | + +- dn_session_timeout_threshold + +| 名字 | dn_session_timeout_threshold | +| ------------ | ---------------------------- | +| 描述 | 最大的会话空闲时间 | +| 类型 | int | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_thrift_compression_enable + +| 名字 | dn_rpc_thrift_compression_enable | +| ------------ | -------------------------------- | +| 描述 | 是否启用 thrift 的压缩机制 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_advanced_compression_enable + 
+| 名字 | dn_rpc_advanced_compression_enable |
+| ------------ | ---------------------------------- |
+| 描述 | 是否启用 thrift 的自定制压缩机制 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_rpc_selector_thread_count
+
+| 名字 | dn_rpc_selector_thread_count |
+| ------------ | ---------------------------- |
+| 描述 | rpc 选择器线程数量 |
+| 类型 | int |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_rpc_min_concurrent_client_num
+
+| 名字 | dn_rpc_min_concurrent_client_num |
+| ------------ | -------------------------------- |
+| 描述 | 最小连接数 |
+| 类型 | Short Int : [0,65535] |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_rpc_max_concurrent_client_num
+
+| 名字 | dn_rpc_max_concurrent_client_num |
+| ------------ | -------------------------------- |
+| 描述 | 最大连接数 |
+| 类型 | Short Int : [0,65535] |
+| 默认值 | 1000 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_thrift_max_frame_size
+
+| 名字 | dn_thrift_max_frame_size |
+| ------------ | ------------------------- |
+| 描述 | RPC 请求/响应的最大字节数 |
+| 类型 | long |
+| 默认值 | 536870912(512MB) |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_thrift_init_buffer_size
+
+| 名字 | dn_thrift_init_buffer_size |
+| ------------ | --------------------------- |
+| 描述 | thrift 初始化缓冲区的字节数 |
+| 类型 | long |
+| 默认值 | 1024 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_connection_timeout_ms
+
+| 名字 | dn_connection_timeout_ms |
+| ------------ | ------------------------ |
+| 描述 | 节点连接超时时间 |
+| 类型 | int |
+| 默认值 | 60000 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_selector_thread_count_of_client_manager
+
+| 名字 | dn_selector_thread_count_of_client_manager |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | ClientManager 中用于异步线程的选择器线程(TAsyncClientManager)数量 |
+| 类型 | int |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_max_client_count_for_each_node_in_client_manager
+
+| 名字 | dn_max_client_count_for_each_node_in_client_manager |
+| ------------ | --------------------------------------------------- |
+| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 |
+| 类型 | int |
+| 默认值 | 300 |
+| 改后生效方式 | 重启服务生效 |
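+以下片段展示了在高并发客户端场景下放宽连接限制的一种配置方式(仅为示意,2000 与 1073741824 为假设的调优值,请结合实际资源评估;两个参数均需重启服务生效):
+
+```properties
+# 提高客户端最大连接数
+dn_rpc_max_concurrent_client_num=2000
+# 放宽单个 RPC 请求/响应的最大字节数(1GB)
+dn_thrift_max_frame_size=1073741824
+```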
+### 3.9 对象存储管理
+
+- remote_tsfile_cache_dirs
+
+| 名字 | remote_tsfile_cache_dirs |
+| ------------ | ------------------------ |
+| 描述 | 云端存储在本地的缓存目录 |
+| 类型 | String |
+| 默认值 | data/datanode/data/cache |
+| 改后生效方式 | 重启服务生效 |
+
+- remote_tsfile_cache_page_size_in_kb
+
+| 名字 | remote_tsfile_cache_page_size_in_kb |
+| ------------ | ----------------------------------- |
+| 描述 | 云端存储在本地缓存文件的块大小 |
+| 类型 | int |
+| 默认值 | 20480 |
+| 改后生效方式 | 重启服务生效 |
+
+- remote_tsfile_cache_max_disk_usage_in_mb
+
+| 名字 | remote_tsfile_cache_max_disk_usage_in_mb |
+| ------------ | ---------------------------------------- |
+| 描述 | 云端存储本地缓存的最大磁盘占用大小 |
+| 类型 | long |
+| 默认值 | 51200 |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_type
+
+| 名字 | object_storage_type |
+| ------------ | ------------------- |
+| 描述 | 云端存储类型 |
+| 类型 | String |
+| 默认值 | AWS_S3 |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_endpoint
+
+| 名字 | object_storage_endpoint |
+| ------------ | ----------------------- |
+| 描述 | 云端存储的 endpoint |
+| 类型 | String |
+| 默认值 | 无 |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_bucket
+
+| 名字 | object_storage_bucket |
+| ------------ | ---------------------- |
+| 描述 | 云端存储 bucket 的名称 |
+| 类型 | String |
+| 默认值 | iotdb_data |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_access_key
+
+| 名字 | object_storage_access_key |
+| ------------ | ------------------------- |
+| 描述 | 云端存储的验证信息 key |
+| 类型 | String |
+| 默认值 | 无 |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_access_secret
+
+| 名字 | object_storage_access_secret |
+| ------------ | ---------------------------- |
+| 描述 | 云端存储的验证信息 secret |
+| 类型 | String |
+| 默认值 | 无 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.10 多级管理
+
+- dn_default_space_usage_thresholds
+
+| 名字 | dn_default_space_usage_thresholds |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 定义每个层级数据目录的最小剩余空间比例;当剩余空间少于该比例时,数据会被自动迁移至下一个层级;当最后一个层级的剩余存储空间低于此阈值时,会将系统置为 READ_ONLY |
+| 类型 | double |
+| 默认值 | 0.85 |
+| 改后生效方式 | 热加载 |
+
+- dn_tier_full_policy
+
+| 名字 | dn_tier_full_policy |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 当最后一个层级的已用空间高于其 dn_default_space_usage_threshold 时,如何处理该层级的数据。 |
+| 类型 | String |
+| 默认值 | NULL |
+| 改后生效方式 | 热加载 |
+
+- migrate_thread_count
+
+| 名字 | migrate_thread_count |
+| ------------ | ---------------------------------------- |
+| 描述 | DataNode数据目录中迁移操作的线程池大小。 |
+| 类型 | int |
+| 默认值 | 1 |
+| 改后生效方式 | 热加载 |
+
+- tiered_storage_migrate_speed_limit_bytes_per_sec
+
+| 名字 | tiered_storage_migrate_speed_limit_bytes_per_sec |
+| ------------ | ------------------------------------------------ |
+| 描述 | 限制不同存储层级之间的数据迁移速度。 |
+| 类型 | int |
+| 默认值 | 10485760 |
+| 改后生效方式 | 热加载 |
+
+### 3.11 REST服务配置
+
+- enable_rest_service
+
+| 名字 | enable_rest_service |
+| ------------ | ------------------- |
+| 描述 | 是否开启Rest服务。 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- rest_service_port
+
+| 名字 | rest_service_port |
+| ------------ | ------------------ |
+| 描述 | Rest服务监听端口号 |
+| 类型 | int32 |
+| 默认值 | 18080 |
+| 改后生效方式 | 重启服务生效 |
+
+- enable_swagger
+
+| 名字 | enable_swagger |
+| ------------ | --------------------------------- |
+| 描述 | 是否启用swagger来展示rest接口信息 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- rest_query_default_row_size_limit
+
+| 名字 | rest_query_default_row_size_limit |
+| ------------ | --------------------------------- |
+| 描述 | 一次查询能返回的结果集最大行数 |
+| 类型 | int32 |
+| 默认值 | 10000 |
+| 改后生效方式 | 重启服务生效 |
+
+- cache_expire_in_seconds
+
+| 名字 | cache_expire_in_seconds |
+| ------------ | -------------------------------- |
+| 描述 | 用户登录信息缓存的过期时间(秒) |
+| 类型 | int32 |
+| 默认值 | 28800 |
+| 改后生效方式 | 重启服务生效 |
+
+- cache_max_num
+
+| 名字 | cache_max_num |
+| ------------ | ------------------------ |
+| 描述 | 缓存中存储的最大用户数量 |
+| 类型 | int32 |
+| 默认值 | 100 |
+| 改后生效方式 | 重启服务生效 |
+
+- cache_init_num
+
+| 名字 | cache_init_num |
+| ------------ | -------------- |
+| 描述 | 缓存初始容量 |
+| 类型 | int32 |
+| 默认值 | 10 |
+| 改后生效方式 | 重启服务生效 |
+
+- client_auth
+
+| 名字 | client_auth |
+| ------------ | ---------------------- |
+| 描述 | 是否需要客户端身份验证 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- trust_store_path
+
+| 名字 | trust_store_path |
+| ------------ | ------------------------- |
+| 描述 | trustStore 路径(非必填) |
+| 类型 | String |
+| 默认值 | "" |
+| 改后生效方式 | 重启服务生效 |
+
+- trust_store_pwd
+
+| 名字 | trust_store_pwd |
+| ------------ | ------------------------- |
+| 描述 | trustStore 密码(非必填) |
+| 类型 | String |
+| 默认值 | "" |
+| 改后生效方式 | 重启服务生效 |
+
+- idle_timeout_in_seconds
+
+| 名字 | idle_timeout_in_seconds |
+| ------------ | ----------------------- |
+| 描述 | SSL 超时时间,单位为秒 |
+| 类型 | int32 |
+| 默认值 | 5000 |
+| 改后生效方式 | 重启服务生效 |
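+作为参考,开启 REST 服务并启用 swagger 的最小配置片段如下(仅为示意;以上参数均需重启服务生效):
+
+```properties
+# 开启 REST 服务,默认监听 18080 端口
+enable_rest_service=true
+rest_service_port=18080
+# 通过 swagger 展示 REST 接口信息
+enable_swagger=true
+```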
+### 3.12 负载均衡配置
+
+- series_slot_num
+
+| 名字 | series_slot_num |
+| ------------ | ---------------------------- |
+| 描述 | 序列分区槽数 |
+| 类型 | int32 |
+| 默认值 | 10000 |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- series_partition_executor_class
+
+| 名字 | series_partition_executor_class |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 序列分区哈希函数 |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- schema_region_group_extension_policy
+
+| 名字 | schema_region_group_extension_policy |
+| ------------ | ------------------------------------ |
+| 描述 | SchemaRegionGroup 的扩容策略 |
+| 类型 | string |
+| 默认值 | AUTO |
+| 改后生效方式 | 重启服务生效 |
+
+- default_schema_region_group_num_per_database
+
+| 名字 | default_schema_region_group_num_per_database |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 当选用 CUSTOM-SchemaRegionGroup 扩容策略时,此参数为每个 Database 拥有的 SchemaRegionGroup 数量;当选用 AUTO-SchemaRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 SchemaRegionGroup 数量 |
+| 类型 | int |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_per_data_node
+
+| 名字 | schema_region_per_data_node |
+| ------------ | -------------------------------------------------- |
+| 描述 | 期望每个 DataNode 可管理的 SchemaRegion 的最大数量 |
+| 类型 | double |
+| 默认值 | 1.0 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_group_extension_policy
+
+| 名字 | data_region_group_extension_policy |
+| ------------ | ---------------------------------- |
+| 描述 | DataRegionGroup 的扩容策略 |
+| 类型 | string |
+| 默认值 | AUTO |
+| 改后生效方式 | 重启服务生效 |
+
+- default_data_region_group_num_per_database
+
+| 名字 | default_data_region_group_num_per_database |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 当选用 CUSTOM-DataRegionGroup 扩容策略时,此参数为每个 Database 拥有的 DataRegionGroup 数量;当选用 AUTO-DataRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 DataRegionGroup 数量 |
+| 类型 | int |
+| 默认值 | 2 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_per_data_node
+
+| 名字 | data_region_per_data_node |
+| ------------ | ------------------------------------------------ |
+| 描述 | 期望每个 DataNode 可管理的 DataRegion 的最大数量 |
+| 类型 | double |
+| 默认值 | CPU 核心数的一半 |
+| 改后生效方式 | 重启服务生效 |
+
+- enable_auto_leader_balance_for_ratis_consensus
+
+| 名字 | enable_auto_leader_balance_for_ratis_consensus |
+| ------------ | ---------------------------------------------- |
+| 描述 | 是否为 Ratis 共识协议开启自动均衡 leader 策略 |
+| 类型 | Boolean |
+| 默认值 | true |
+| 改后生效方式 | 重启服务生效 |
+
+- enable_auto_leader_balance_for_iot_consensus
+
+| 名字 | enable_auto_leader_balance_for_iot_consensus |
+| ------------ | -------------------------------------------- |
+| 描述 | 是否为 IoT 共识协议开启自动均衡 leader 策略 |
+| 类型 | Boolean |
+| 默认值 | true |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.13 集群管理
+
+- time_partition_origin
+
+| 名字 | time_partition_origin |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | Database 数据时间分区的起始点,即从哪个时间点开始计算时间分区。 |
+| 类型 | Long |
+| 单位 | 毫秒 |
+| 默认值 | 0 |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- time_partition_interval
+
+| 名字 | time_partition_interval |
+| ------------ | ------------------------------- |
+| 描述 | Database 默认的数据时间分区间隔 |
+| 类型 | Long |
+| 单位 | 毫秒 |
+| 默认值 | 604800000 |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- heartbeat_interval_in_ms
+
+| 名字 | heartbeat_interval_in_ms |
+| ------------ | ------------------------ |
+| 描述 | 集群节点间的心跳间隔 |
+| 类型 | Long |
+| 单位 | ms |
+| 默认值 | 1000 |
+| 改后生效方式 | 重启服务生效 |
+
+- disk_space_warning_threshold
+
+| 名字 | disk_space_warning_threshold |
+| ------------ | ---------------------------- |
+| 描述 | DataNode 磁盘剩余阈值 |
+| 类型 | double(percentage) |
+| 默认值 | 0.05 |
+| 改后生效方式 | 重启服务生效 |
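+例如,若希望数据按天进行时间分区,可在集群第一次启动前按如下方式配置(仅为示意,86400000 即一天对应的毫秒数;这两个参数仅允许在第一次启动服务前修改):
+
+```properties
+# 时间分区起始点(毫秒时间戳)
+time_partition_origin=0
+# 时间分区间隔:一天
+time_partition_interval=86400000
+```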
+### 3.14 内存控制配置
+
+- datanode_memory_proportion
+
+| 名字 | datanode_memory_proportion |
+| ------------ | ---------------------------------------------------- |
+| 描述 | 存储引擎、查询引擎、元数据、共识、流处理引擎和空闲内存比例 |
+| 类型 | Ratio |
+| 默认值 | 3:3:1:1:1:1 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_memory_proportion
+
+| 名字 | schema_memory_proportion |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | Schema 相关的内存如何在 SchemaRegion、SchemaCache 和 PartitionCache 之间分配 |
+| 类型 | Ratio |
+| 默认值 | 5:4:1 |
+| 改后生效方式 | 重启服务生效 |
+
+- storage_engine_memory_proportion
+
+| 名字 | storage_engine_memory_proportion |
+| ------------ | -------------------------------- |
+| 描述 | 写入和合并占存储内存比例 |
+| 类型 | Ratio |
+| 默认值 | 8:2 |
+| 改后生效方式 | 重启服务生效 |
+
+- write_memory_proportion
+
+| 名字 | write_memory_proportion |
+| ------------ | -------------------------------------------- |
+| 描述 | Memtable 和 TimePartitionInfo 占写入内存比例 |
+| 类型 | Ratio |
+| 默认值 | 19:1 |
+| 改后生效方式 | 重启服务生效 |
+
+- primitive_array_size
+
+| 名字 | primitive_array_size |
+| ------------ | ---------------------------------------- |
+| 描述 | 数组池中的原始数组大小(每个数组的长度) |
+| 类型 | int32 |
+| 默认值 | 64 |
+| 改后生效方式 | 重启服务生效 |
+
+- chunk_metadata_size_proportion
+
+| 名字 | chunk_metadata_size_proportion |
+| ------------ | -------------------------------------------- |
+| 描述 | 在数据压缩过程中,用于存储块元数据的内存比例 |
+| 类型 | Double |
+| 默认值 | 0.1 |
+| 改后生效方式 | 重启服务生效 |
+
+- flush_proportion
+
+| 名字 | flush_proportion |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 调用flush disk的写入内存比例,默认0.4,若有极高的写入负载(比如batch=1000),可以设置为低于默认值,比如0.2 |
+| 类型 | Double |
+| 默认值 | 0.4 |
+| 改后生效方式 | 重启服务生效 |
+
+- buffered_arrays_memory_proportion
+
+| 名字 | buffered_arrays_memory_proportion |
+| ------------ | --------------------------------------- |
+| 描述 | 为缓冲数组分配的写入内存比例,默认为0.6 |
+| 类型 | Double |
+| 默认值 | 0.6 |
+| 改后生效方式 | 重启服务生效 |
+
+- reject_proportion
+
+| 名字 | reject_proportion |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 拒绝插入的写入内存比例,默认0.8,若有极高的写入负载(比如batch=1000)并且物理内存足够大,可以设置为高于默认值,如0.9 |
+| 类型 | Double |
+| 默认值 | 0.8 |
+| 改后生效方式 | 重启服务生效 |
+
+- device_path_cache_proportion
+
+| 名字 | device_path_cache_proportion |
+| ------------ | --------------------------------------------------- |
+| 描述 | 在内存中分配给设备路径缓存(DevicePathCache)的比例 |
+| 类型 | Double |
+| 默认值 | 0.05 |
+| 改后生效方式 | 重启服务生效 |
+
+- write_memory_variation_report_proportion
+
+| 名字 | write_memory_variation_report_proportion |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 如果 DataRegion 的内存增加超过写入可用内存的一定比例,则向系统报告。默认值为0.001 |
+| 类型 | Double |
+| 默认值 | 0.001 |
+| 改后生效方式 | 重启服务生效 |
+
+- check_period_when_insert_blocked
+
+| 名字 | check_period_when_insert_blocked |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 当插入被拒绝时,等待时间(以毫秒为单位)去再次检查系统,默认为50。若插入被拒绝,读取负载低,可以设置大一些。 |
+| 类型 | int32 |
+| 默认值 | 50 |
+| 改后生效方式 | 重启服务生效 |
+
+- io_task_queue_size_for_flushing
+
+| 名字 | io_task_queue_size_for_flushing |
+| ------------ | -------------------------------- |
+| 描述 | ioTaskQueue 的大小。默认值为10。 |
+| 类型 | int32 |
+| 默认值 | 10 |
+| 改后生效方式 | 重启服务生效 |
+
+- enable_query_memory_estimation
+
+| 名字 | enable_query_memory_estimation |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 开启后会预估每次查询的内存使用量,如果超过可用内存,会拒绝本次查询 |
+| 类型 | bool |
+| 默认值 | true |
+| 改后生效方式 | 热加载 |
+
+### 3.15 元数据引擎配置
+
+- schema_engine_mode
+
+| 名字 | schema_engine_mode |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 元数据引擎的运行模式,支持 Memory 和 PBTree;PBTree 模式下支持将内存中暂时不用的序列元数据实时置换到磁盘上,需要使用时再加载进内存;此参数在集群中所有的 DataNode 上务必保持相同。 |
+| 类型 | string |
+| 默认值 | Memory |
+| 改后生效方式 | 
仅允许在第一次启动服务前修改 | + +- partition_cache_size + +| 名字 | partition_cache_size | +| ------------ | ------------------------------ | +| 描述 | 分区信息缓存的最大缓存条目数。 | +| 类型 | Int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- sync_mlog_period_in_ms + +| 名字 | sync_mlog_period_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | mlog定期刷新到磁盘的周期,单位毫秒。如果该参数为0,则表示每次对元数据的更新操作都会被立即写到磁盘上。 | +| 类型 | Int64 | +| 默认值 | 100 | +| 改后生效方式 | 重启服务生效 | + +- tag_attribute_flush_interval + +| 名字 | tag_attribute_flush_interval | +| ------------ | -------------------------------------------------- | +| 描述 | 标签和属性记录的间隔数,达到此记录数量时将强制刷盘 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- tag_attribute_total_size + +| 名字 | tag_attribute_total_size | +| ------------ | ---------------------------------------- | +| 描述 | 每个时间序列标签和属性的最大持久化字节数 | +| 类型 | int32 | +| 默认值 | 700 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- max_measurement_num_of_internal_request + +| 名字 | max_measurement_num_of_internal_request | +| ------------ | ------------------------------------------------------------ | +| 描述 | 一次注册序列请求中若物理量过多,在系统内部执行时将被拆分为若干个轻量级的子请求,每个子请求中的物理量数目不超过此参数设置的最大值。 | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- datanode_schema_cache_eviction_policy + +| 名字 | datanode_schema_cache_eviction_policy | +| ------------ | ----------------------------------------------------- | +| 描述 | 当 Schema 缓存达到其最大容量时,Schema 缓存的淘汰策略 | +| 类型 | String | +| 默认值 | FIFO | +| 改后生效方式 | 重启服务生效 | + +- cluster_timeseries_limit_threshold + +| 名字 | cluster_timeseries_limit_threshold | +| ------------ | ---------------------------------- | +| 描述 | 集群中可以创建的时间序列的最大数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +- cluster_device_limit_threshold + +| 名字 | cluster_device_limit_threshold | +| ------------ | ------------------------------ | +| 描述 | 集群中可以创建的最大设备数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +- database_limit_threshold + +| 名字 | database_limit_threshold | +| ------------ | ------------------------------ | +| 描述 | 集群中可以创建的最大数据库数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +### 3.16 自动推断数据类型 + +- enable_auto_create_schema + +| 名字 | enable_auto_create_schema | +| ------------ | -------------------------------------- | +| 描述 | 当写入的序列不存在时,是否自动创建序列 | +| 取值 | true or false | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- default_storage_group_level + +| 名字 | default_storage_group_level | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当写入的数据不存在且自动创建序列时,若需要创建相应的 database,将序列路径的哪一层当做 database。例如,如果我们接到一个新序列 root.sg0.d1.s2, 并且 level=1, 那么 root.sg0 被视为database(因为 root 是 level 0 层) | +| 取值 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- boolean_string_infer_type + +| 名字 | boolean_string_infer_type | +| ------------ | ------------------------------------------ | +| 描述 | "true" 或者 "false" 字符串被推断的数据类型 | +| 取值 | BOOLEAN 或者 TEXT | +| 默认值 | BOOLEAN | +| 改后生效方式 | 热加载 | + +- integer_string_infer_type + +| 名字 | integer_string_infer_type | +| ------------ | --------------------------------- | +| 描述 | 整型字符串推断的数据类型 | +| 取值 | INT32, INT64, FLOAT, DOUBLE, TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- floating_string_infer_type + +| 名字 | floating_string_infer_type | +| ------------ | ----------------------------- | +| 描述 | "6.7"等字符串被推断的数据类型 | +| 取值 | DOUBLE, FLOAT or TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- nan_string_infer_type + +| 名字 | nan_string_infer_type | +| ------------ | ---------------------------- | +| 描述 | "NaN" 字符串被推断的数据类型 | +| 取值 | DOUBLE, 
FLOAT or TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- default_boolean_encoding + +| 名字 | default_boolean_encoding | +| ------------ | ------------------------ | +| 描述 | BOOLEAN 类型编码格式 | +| 取值 | PLAIN, RLE | +| 默认值 | RLE | +| 改后生效方式 | 热加载 | + +- default_int32_encoding + +| 名字 | default_int32_encoding | +| ------------ | -------------------------------------- | +| 描述 | int32 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| 默认值 | TS_2DIFF | +| 改后生效方式 | 热加载 | + +- default_int64_encoding + +| 名字 | default_int64_encoding | +| ------------ | -------------------------------------- | +| 描述 | int64 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| 默认值 | TS_2DIFF | +| 改后生效方式 | 热加载 | + +- default_float_encoding + +| 名字 | default_float_encoding | +| ------------ | ----------------------------- | +| 描述 | float 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | +| 默认值 | GORILLA | +| 改后生效方式 | 热加载 | + +- default_double_encoding + +| 名字 | default_double_encoding | +| ------------ | ----------------------------- | +| 描述 | double 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | +| 默认值 | GORILLA | +| 改后生效方式 | 热加载 | + +- default_text_encoding + +| 名字 | default_text_encoding | +| ------------ | --------------------- | +| 描述 | text 类型编码格式 | +| 取值 | PLAIN | +| 默认值 | PLAIN | +| 改后生效方式 | 热加载 | + +* boolean_compressor + +| 名字 | boolean_compressor | +| -------------- | ----------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,BOOLEAN 数据类型的压缩方式 (V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* int32_compressor + +| 名字 | int32_compressor | +| -------------- | ------------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,INT32/DATE 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* int64_compressor + +| 名字 | int64_compressor | +| -------------- | ------------------------------------------------------------------------------ | +| 描述 | 启用自动创建模式时,INT64/TIMESTAMP 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* float_compressor + +| 名字 | float_compressor | +| -------------- | -------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,FLOAT 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* double_compressor + +| 名字 | double_compressor | +| -------------- | --------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,DOUBLE 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* text_compressor + +| 名字 | text_compressor | +| -------------- | -------------------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,TEXT/BINARY/BLOB 数据类型的压缩方式(V2.0.6 版本开始支持 ) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + + + +### 3.17 查询配置 + +- read_consistency_level + +| 名字 | read_consistency_level | +| ------------ | ------------------------------------------------------------ | +| 描述 | 查询一致性等级,取值 “strong” 时从 Leader 副本查询,取值 “weak” 时随机查询一个副本。 | +| 类型 | String | +| 默认值 | strong | +| 改后生效方式 | 重启服务生效 | + +- meta_data_cache_enable + +| 名字 | meta_data_cache_enable | +| ------------ | ------------------------------------------------------------ | +| 描述 | 是否缓存元数据(包括 BloomFilter、Chunk Metadata 和 TimeSeries Metadata。) | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- chunk_timeseriesmeta_free_memory_proportion + +| 名字 | chunk_timeseriesmeta_free_memory_proportion | +| ------------ | 
------------------------------------------------------------ | +| 描述 | 读取内存分配比例,BloomFilterCache、ChunkCache、TimeseriesMetadataCache、数据集查询的内存和可用内存的查询。参数形式为a : b : c : d : e,其中a、b、c、d、e为整数。 例如“1 : 1 : 1 : 1 : 1” ,“1 : 100 : 200 : 300 : 400” 。 | +| 类型 | String | +| 默认值 | 1 : 100 : 200 : 300 : 400 | +| 改后生效方式 | 重启服务生效 | + +- enable_last_cache + +| 名字 | enable_last_cache | +| ------------ | ------------------ | +| 描述 | 是否开启最新点缓存 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_core_pool_size + +| 名字 | mpp_data_exchange_core_pool_size | +| ------------ | -------------------------------- | +| 描述 | MPP 数据交换线程池核心线程数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_max_pool_size + +| 名字 | mpp_data_exchange_max_pool_size | +| ------------ | ------------------------------- | +| 描述 | MPP 数据交换线程池最大线程数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_keep_alive_time_in_ms + +| 名字 | mpp_data_exchange_keep_alive_time_in_ms | +| ------------ | --------------------------------------- | +| 描述 | MPP 数据交换最大等待时间 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- driver_task_execution_time_slice_in_ms + +| 名字 | driver_task_execution_time_slice_in_ms | +| ------------ | -------------------------------------- | +| 描述 | 单个 DriverTask 最长执行时间(ms) | +| 类型 | int32 | +| 默认值 | 200 | +| 改后生效方式 | 重启服务生效 | + +- max_tsblock_size_in_bytes + +| 名字 | max_tsblock_size_in_bytes | +| ------------ | ------------------------------- | +| 描述 | 单个 TsBlock 的最大容量(byte) | +| 类型 | int32 | +| 默认值 | 131072 | +| 改后生效方式 | 重启服务生效 | + +- max_tsblock_line_numbers + +| 名字 | max_tsblock_line_numbers | +| ------------ | ------------------------ | +| 描述 | 单个 TsBlock 的最大行数 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- slow_query_threshold + +| 名字 | slow_query_threshold | +| ------------ | ------------------------------ | +| 描述 | 慢查询的时间阈值。单位:毫秒。 | +| 类型 | long | +| 默认值 | 10000 | +| 改后生效方式 | 热加载 | + +- query_cost_stat_window + +| 名字 | query_cost_stat_window | +| ------------ |--------------------| +| 描述 | 查询耗时统计的窗口,单位为分钟。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 热加载 | + +- query_timeout_threshold + +| 名字 | query_timeout_threshold | +| ------------ | -------------------------------- | +| 描述 | 查询的最大执行时间。单位:毫秒。 | +| 类型 | Int32 | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- max_allowed_concurrent_queries + +| 名字 | max_allowed_concurrent_queries | +| ------------ | ------------------------------ | +| 描述 | 允许的最大并发查询数量。 | +| 类型 | Int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- query_thread_count + +| 名字 | query_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 IoTDB 对内存中的数据进行查询时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- degree_of_query_parallelism + +| 名字 | degree_of_query_parallelism | +| ------------ | ------------------------------------------------------------ | +| 描述 | 设置单个查询片段实例将创建的 pipeline 驱动程序数量,也就是查询操作的并行度。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- mode_map_size_threshold + +| 名字 | mode_map_size_threshold | +| ------------ | ---------------------------------------------- | +| 描述 | 计算 MODE 聚合函数时,计数映射可以增长到的阈值 | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- batch_size + +| 名字 | batch_size | +| ------------ | ---------------------------------------------------------- | +| 描述 | 服务器中每次迭代的数据量(数据条目,即不同时间戳的数量。) | +| 类型 | Int32 | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- sort_buffer_size_in_bytes + +| 名字 | 
sort_buffer_size_in_bytes | +| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 设置外部排序操作中使用的内存缓冲区大小 | +| 类型 | long | +| 默认值 | 1048576(V2.0.6 之前版本)
0(V2.0.6 及之后版本),当值小于等于 0 时,由系统自动进行计算,计算公式为:`sort_buffer_size_in_bytes = Math.min(32 * 1024 * 1024, 堆内内存 * 查询引擎内存比例 * 查询执行内存比例 / 查询线程数 / 2)` | +| 改后生效方式 | 热加载 | + +- merge_threshold_of_explain_analyze + +| 名字 | merge_threshold_of_explain_analyze | +| ------------ | ------------------------------------------------------------ | +| 描述 | 用于设置在 `EXPLAIN ANALYZE` 语句的结果集中操作符(operator)数量的合并阈值。 | +| 类型 | int | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +### 3.18 TTL配置 + +- ttl_check_interval + +| 名字 | ttl_check_interval | +| ------------ | -------------------------------------- | +| 描述 | ttl 检查任务的间隔,单位 ms,默认为 2h | +| 类型 | int | +| 默认值 | 7200000 | +| 改后生效方式 | 重启服务生效 | + +- max_expired_time + +| 名字 | max_expired_time | +| ------------ | ------------------------------------------------------------ | +| 描述 | 如果一个文件中存在设备已经过期超过此时间,那么这个文件将被立即整理。单位 ms,默认为一个月 | +| 类型 | int | +| 默认值 | 2592000000 | +| 改后生效方式 | 重启服务生效 | + +- expired_data_ratio + +| 名字 | expired_data_ratio | +| ------------ | ------------------------------------------------------------ | +| 描述 | 过期设备比例。如果一个文件中过期设备的比率超过这个值,那么这个文件中的过期数据将通过 compaction 清理。 | +| 类型 | float | +| 默认值 | 0.3 | +| 改后生效方式 | 重启服务生效 | + +### 3.19 存储引擎配置 + +- timestamp_precision + +| 名字 | timestamp_precision | +| ------------ | ---------------------------- | +| 描述 | 时间戳精度,支持 ms、us、ns | +| 类型 | String | +| 默认值 | ms | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- timestamp_precision_check_enabled + +| 名字 | timestamp_precision_check_enabled | +| ------------ | --------------------------------- | +| 描述 | 用于控制是否启用时间戳精度检查 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- max_waiting_time_when_insert_blocked + +| 名字 | max_waiting_time_when_insert_blocked | +| ------------ | ----------------------------------------------- | +| 描述 | 当插入请求等待超过这个时间,则抛出异常,单位 ms | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- handle_system_error + +| 名字 | handle_system_error | +| ------------ | ------------------------------------ | +| 描述 | 当系统遇到不可恢复的错误时的处理方法 | +| 类型 | String | +| 默认值 | CHANGE_TO_READ_ONLY | +| 改后生效方式 | 重启服务生效 | + +- enable_timed_flush_seq_memtable + +| 名字 | enable_timed_flush_seq_memtable | +| ------------ | ------------------------------- | +| 描述 | 是否开启定时刷盘顺序 memtable | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- seq_memtable_flush_interval_in_ms + +| 名字 | seq_memtable_flush_interval_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | +| 类型 | long | +| 默认值 | 600000 | +| 改后生效方式 | 热加载 | + +- seq_memtable_flush_check_interval_in_ms + +| 名字 | seq_memtable_flush_check_interval_in_ms | +| ------------ | ---------------------------------------- | +| 描述 | 检查顺序 memtable 是否需要刷盘的时间间隔 | +| 类型 | long | +| 默认值 | 30000 | +| 改后生效方式 | 热加载 | + +- enable_timed_flush_unseq_memtable + +| 名字 | enable_timed_flush_unseq_memtable | +| ------------ | --------------------------------- | +| 描述 | 是否开启定时刷新乱序 memtable | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- unseq_memtable_flush_interval_in_ms + +| 名字 | unseq_memtable_flush_interval_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | +| 类型 | long | +| 默认值 | 600000 | +| 改后生效方式 | 热加载 | + +- unseq_memtable_flush_check_interval_in_ms + +| 名字 | unseq_memtable_flush_check_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 检查乱序 memtable 是否需要刷盘的时间间隔 | +| 类型 | long | +| 默认值 | 30000 | +| 改后生效方式 | 热加载 | + +- 
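+**Configuration sketch**: a hypothetical `iotdb-system.properties` fragment showing how the timed-flush parameters above combine; the values are simply the documented defaults and are illustrative only:
+
+```properties
+# Enable timed flushing for both sequence and unsequence memtables
+enable_timed_flush_seq_memtable=true
+enable_timed_flush_unseq_memtable=true
+# Flush a memtable once it has existed for 10 minutes (600000 ms)
+seq_memtable_flush_interval_in_ms=600000
+unseq_memtable_flush_interval_in_ms=600000
+# Check every 30 seconds whether any memtable needs flushing
+seq_memtable_flush_check_interval_in_ms=30000
+unseq_memtable_flush_check_interval_in_ms=30000
+```
+
+- 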
tvlist_sort_algorithm + +| 名字 | tvlist_sort_algorithm | +| ------------ | ------------------------ | +| 描述 | memtable中数据的排序方法 | +| 类型 | String | +| 默认值 | TIM | +| 改后生效方式 | 重启服务生效 | + +- avg_series_point_number_threshold + +| 名字 | avg_series_point_number_threshold | +| ------------ | ------------------------------------------------ | +| 描述 | 内存中平均每个时间序列点数最大值,达到触发 flush | +| 类型 | int32 | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- flush_thread_count + +| 名字 | flush_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 IoTDB 将内存中的数据写入磁盘时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。默认值为 0。 | +| 类型 | int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- enable_partial_insert + +| 名字 | enable_partial_insert | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在一次 insert 请求中,如果部分测点写入失败,是否继续写入其他测点。 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- recovery_log_interval_in_ms + +| 名字 | recovery_log_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | data region的恢复过程中打印日志信息的间隔 | +| 类型 | Int32 | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +- 0.13_data_insert_adapt + +| 名字 | 0.13_data_insert_adapt | +| ------------ | ------------------------------------------------------- | +| 描述 | 如果 0.13 版本客户端进行写入,需要将此配置项设置为 true | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- enable_tsfile_validation + +| 名字 | enable_tsfile_validation | +| ------------ | -------------------------------------- | +| 描述 | Flush, Load 或合并后验证 tsfile 正确性 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 热加载 | + +- tier_ttl_in_ms + +| 名字 | tier_ttl_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 定义每个层级负责的数据范围,通过 TTL 表示 | +| 类型 | long | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +* max_object_file_size_in_byte + +| 名字 | max\_object\_file\_size\_in\_byte | +| -------------- |------------------------------| +| 描述 | 单对象文件的最大尺寸限制 (V2.0.8 版本起支持) | +| 类型 | long | +| 默认值 | 4294967296 | +| 改后生效方式 | 热加载 | + +* restrict_object_limit + +| 名字 | restrict\_object\_limit | +|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 对 OBJECT 类型的表名、列名和设备名称没有特殊限制。(V2.0.8 版本起支持)当设置为 true 且表中包含 OBJECT 列时,需遵循以下限制:
1. 命名规范:TAG 列的值、表名和字段名禁止使用 “.” 或 “..”,且不得包含 “./” 或 “.\” 字符,否则元数据创建将失败。若名称包含文件系统不支持的字符,则会在数据写入时报错。
2. Case sensitivity: if the underlying file system is case-insensitive, device identifiers such as 'd1' and 'D1' are treated as the same. In that case, creating devices whose names differ only in letter case may cause their OBJECT data files to overwrite each other, corrupting the data.<br>
3. 存储路径:OBJECT 类型数据的实际存储路径格式为:`${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin`。 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + + +### 3.20 合并配置 + +- enable_seq_space_compaction + +| 名字 | enable_seq_space_compaction | +| ------------ | -------------------------------------- | +| 描述 | 顺序空间内合并,开启顺序文件之间的合并 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_unseq_space_compaction + +| 名字 | enable_unseq_space_compaction | +| ------------ | -------------------------------------- | +| 描述 | 乱序空间内合并,开启乱序文件之间的合并 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_cross_space_compaction + +| 名字 | enable_cross_space_compaction | +| ------------ | ------------------------------------------ | +| 描述 | 跨空间合并,开启将乱序文件合并到顺序文件中 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_auto_repair_compaction + +| 名字 | enable_auto_repair_compaction | +| ------------ | ----------------------------- | +| 描述 | 启用通过合并操作自动修复未排序文件的功能 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- cross_selector + +| 名字 | cross_selector | +| ------------ |----------------| +| 描述 | 跨空间合并任务的选择器 | +| 类型 | String | +| 默认值 | rewrite | +| 改后生效方式 | 重启服务生效 | + +- cross_performer + +| 名字 | cross_performer | +| ------------ |-----------------------------------| +| 描述 | 跨空间合并任务的执行器,可选项:read_point 和 fast | +| 类型 | String | +| 默认值 | fast | +| 改后生效方式 | 热加载 | + +- inner_seq_selector + +| 名字 | inner_seq_selector | +| ------------ |------------------------------------------------------------------------| +| 描述 | 顺序空间内合并任务的选择器,可选 size_tiered_single_\target,size_tiered_multi_target | +| 类型 | String | +| 默认值 | size_tiered_multi_target | +| 改后生效方式 | 热加载 | + +- inner_seq_performer + +| 名字 | inner_seq_performer | +| ------------ |--------------------------------------| +| 描述 | 顺序空间内合并任务的执行器,可选项是 read_chunk 和 fast | +| 类型 | String | +| 默认值 | read_chunk | +| 改后生效方式 | 热加载 | + +- inner_unseq_selector + +| 名字 | inner_unseq_selector | +| ------------ |-------------------------------------------------------------------------| +| 描述 | 乱序空间内合并任务的选择器,可选 size_tiered_single_\target,size_tiered_multi_target | +| 类型 | String | +| 默认值 | size_tiered_multi_target | +| 改后生效方式 | 热加载 | + +- inner_unseq_performer + +| 名字 | inner_unseq_performer | +| ------------ |--------------------------------------| +| 描述 | 乱序空间内合并任务的执行器,可选项是 read_point 和 fast | +| 类型 | String | +| 默认值 | fast | +| 改后生效方式 | 热加载 | + +- compaction_priority + +| 名字 | compaction_priority | +| ------------ |-------------------------------------------------------------------------------------------| +| 描述 | 合并时的优先级。INNER_CROSS:优先执行空间内合并,优先减少文件数量;CROSS_INNER:优先执行跨空间合并,优先清理乱序文件;BALANCE:交替执行两种合并类型。 | +| 类型 | String | +| 默认值 | INNER_CROSS | +| 改后生效方式 | 重启服务生效 | + +- candidate_compaction_task_queue_size + +| 名字 | candidate_compaction_task_queue_size | +| ------------ | ------------------------------------ | +| 描述 | 待选合并任务队列容量 | +| 类型 | int32 | +| 默认值 | 50 | +| 改后生效方式 | 重启服务生效 | + +- target_compaction_file_size + +| 名字 | target_compaction_file_size | +| ------------ |-----------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 该参数作用于两个场景:1. 空间内合并的目标文件大小 2. 
跨空间合并中待选序列文件的大小需小于 target_compaction_file_size * 1.5 多数情况下,跨空间合并的目标文件大小不会超过此阈值,即便超出,幅度也不会过大 。 默认值:2GB ,单位:byte | +| 类型 | Long | +| 默认值 | 2147483648 | +| 改后生效方式 | 热加载 | + +- inner_compaction_total_file_size_threshold + +| 名字 | inner_compaction_total_file_size_threshold | +| ------------ |--------------------------------------------| +| 描述 | 空间内合并的文件总大小阈值,单位:byte | +| 类型 | Long | +| 默认值 | 10737418240 | +| 改后生效方式 | 热加载 | + +- inner_compaction_total_file_num_threshold + +| 名字 | inner_compaction_total_file_num_threshold | +| ------------ | ----------------------------------------- | +| 描述 | 空间内合并的文件总数阈值 | +| 类型 | int32 | +| 默认值 | 100 | +| 改后生效方式 | 热加载 | + +- max_level_gap_in_inner_compaction + +| 名字 | max_level_gap_in_inner_compaction | +| ------------ | -------------------------------------- | +| 描述 | 空间内合并筛选的最大层级差 | +| 类型 | int32 | +| 默认值 | 2 | +| 改后生效方式 | 热加载 | + +- target_chunk_size + +| 名字 | target_chunk_size | +| ------------ |--------------------------------------------------| +| 描述 | 刷盘与合并操作的目标数据块大小, 若内存表中某条时序数据的大小超过该值,数据会被刷盘至多个数据块 | +| 类型 | Long | +| 默认值 | 1600000 | +| 改后生效方式 | 重启服务生效 | + +- target_chunk_point_num + +| 名字 | target_chunk_point_num | +| ------------ |------------------------------------------------------| +| 描述 | 刷盘与合并操作中单个数据块的目标点数, 若内存表中某条时序数据的点数超过该值,数据会被刷盘至多个数据块中 | +| 类型 | Long | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- chunk_size_lower_bound_in_compaction + +| 名字 | chunk_size_lower_bound_in_compaction | +| ------------ |--------------------------------------| +| 描述 | 若数据块大小低于此阈值,则会被反序列化为数据点,默认值为128字节 | +| 类型 | Long | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- chunk_point_num_lower_bound_in_compaction + +| 名字 | chunk_point_num_lower_bound_in_compaction | +| ------------ |------------------------------------------| +| 描述 | 若数据块内的数据点数低于此阈值,则会被反序列化为数据点 | +| 类型 | Long | +| 默认值 | 100 | +| 改后生效方式 | 重启服务生效 | + +- inner_compaction_candidate_file_num + +| 名字 | inner_compaction_candidate_file_num | +| ------------ | ---------------------------------------- | +| 描述 | 空间内合并待选文件筛选的文件数量要求 | +| 类型 | int32 | +| 默认值 | 30 | +| 改后生效方式 | 热加载 | + +- max_cross_compaction_candidate_file_num + +| 名字 | max_cross_compaction_candidate_file_num | +| ------------ | --------------------------------------- | +| 描述 | 跨空间合并待选文件筛选的文件数量上限 | +| 类型 | int32 | +| 默认值 | 500 | +| 改后生效方式 | 热加载 | + +- max_cross_compaction_candidate_file_size + +| 名字 | max_cross_compaction_candidate_file_size | +| ------------ |------------------------------------------| +| 描述 | 跨空间合并待选文件筛选的总大小上限 | +| 类型 | Long | +| 默认值 | 5368709120 | +| 改后生效方式 | 热加载 | + +- min_cross_compaction_unseq_file_level + +| 名字 | min_cross_compaction_unseq_file_level | +| ------------ |---------------------------------------| +| 描述 | 可被选为待选文件的乱序文件的最小空间内合并层级 | +| 类型 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 热加载 | + +- compaction_thread_count + +| 名字 | compaction_thread_count | +| ------------ | ----------------------- | +| 描述 | 执行合并任务的线程数目 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +- compaction_max_aligned_series_num_in_one_batch + +| 名字 | compaction_max_aligned_series_num_in_one_batch | +| ------------ | ---------------------------------------------- | +| 描述 | 对齐序列合并一次执行时处理的值列数量 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +- compaction_schedule_interval_in_ms + +| 名字 | compaction_schedule_interval_in_ms | +| ------------ |------------------------------------| +| 描述 | 合并调度的时间间隔,单位 ms | +| 类型 | Long | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- compaction_write_throughput_mb_per_sec + +| 名字 | compaction_write_throughput_mb_per_sec | +| 
------------ |----------------------------------------| +| 描述 | 合并操作每秒可达到的写入吞吐量上限, 小于或等于 0 的取值表示无限制 | +| 类型 | int32 | +| 默认值 | 16 | +| 改后生效方式 | 重启服务生效 | + +- compaction_read_throughput_mb_per_sec + +| 名字 | compaction_read_throughput_mb_per_sec | +| --------- | ---------------------------------------------------- | +| 描述 | 合并每秒读吞吐限制,单位为 megabyte,小于或等于 0 的取值表示无限制 | +| 类型 | int32 | +| 默认值 | 0 | +| Effective | 热加载 | + +- compaction_read_operation_per_sec + +| 名字 | compaction_read_operation_per_sec | +| --------- | ------------------------------------------- | +| 描述 | 合并每秒读操作数量限制,小于或等于 0 的取值表示无限制 | +| 类型 | int32 | +| 默认值 | 0 | +| Effective | 热加载 | + +- sub_compaction_thread_count + +| 名字 | sub_compaction_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 每个合并任务的子任务线程数,只对跨空间合并和乱序空间内合并生效 | +| 类型 | int32 | +| 默认值 | 4 | +| 改后生效方式 | 热加载 | + +- inner_compaction_task_selection_disk_redundancy + +| 名字 | inner_compaction_task_selection_disk_redundancy | +| ------------ | ----------------------------------------------- | +| 描述 | 定义了磁盘可用空间的冗余值,仅用于内部压缩 | +| 类型 | double | +| 默认值 | 0.05 | +| 改后生效方式 | 热加载 | + +- inner_compaction_task_selection_mods_file_threshold + +| 名字 | inner_compaction_task_selection_mods_file_threshold | +| ------------ | --------------------------------------------------- | +| 描述 | 定义了mods文件大小的阈值,仅用于内部压缩。 | +| 类型 | long | +| 默认值 | 131072 | +| 改后生效方式 | 热加载 | + +- compaction_schedule_thread_num + +| 名字 | compaction_schedule_thread_num | +| ------------ | ------------------------------ | +| 描述 | 选择合并任务的线程数量 | +| 类型 | int32 | +| 默认值 | 4 | +| 改后生效方式 | 热加载 | + +### 3.21 写前日志配置 + +- wal_mode + +| 名字 | wal_mode | +| ------------ | ------------------------------------------------------------ | +| 描述 | 写前日志的写入模式. 
In DISABLE mode the write-ahead log is disabled; in SYNC mode write requests return only after the log has been flushed to disk; in ASYNC mode write requests may return before the log has been flushed to disk. |
+| Type | String |
+| Default | ASYNC |
+| Effective | Restart to take effect |
+
+- max_wal_nodes_num
+
+| Name | max_wal_nodes_num |
+| ------------ | ----------------- |
+| Description | Maximum number of WAL nodes; the default 0 means the number is controlled by the system. |
+| Type | int32 |
+| Default | 0 |
+| Effective | Restart to take effect |
+
+- wal_async_mode_fsync_delay_in_ms
+
+| Name | wal_async_mode_fsync_delay_in_ms |
+| ------------ | -------------------------------- |
+| Description | Wait time before the WAL calls fsync in async mode |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Hot reload |
+
+- wal_sync_mode_fsync_delay_in_ms
+
+| Name | wal_sync_mode_fsync_delay_in_ms |
+| ------------ | ------------------------------- |
+| Description | Wait time before the WAL calls fsync in sync mode |
+| Type | int32 |
+| Default | 3 |
+| Effective | Hot reload |
+
+- wal_buffer_size_in_byte
+
+| Name | wal_buffer_size_in_byte |
+| ------------ | ----------------------- |
+| Description | Buffer size of the WAL |
+| Type | int32 |
+| Default | 33554432 |
+| Effective | Restart to take effect |
+
+- wal_buffer_queue_capacity
+
+| Name | wal_buffer_queue_capacity |
+| ------------ | ------------------------- |
+| Description | Upper bound of the WAL blocking queue size |
+| Type | int32 |
+| Default | 500 |
+| Effective | Restart to take effect |
+
+- wal_file_size_threshold_in_byte
+
+| Name | wal_file_size_threshold_in_byte |
+| ------------ | ------------------------------- |
+| Description | Size threshold at which a WAL file is sealed |
+| Type | int32 |
+| Default | 31457280 |
+| Effective | Hot reload |
+
+- wal_min_effective_info_ratio
+
+| Name | wal_min_effective_info_ratio |
+| ------------ | ---------------------------- |
+| Description | Minimum ratio of effective information in the WAL |
+| Type | double |
+| Default | 0.1 |
+| Effective | Hot reload |
+
+- wal_memtable_snapshot_threshold_in_byte
+
+| Name | wal_memtable_snapshot_threshold_in_byte |
+| ------------ | --------------------------------------- |
+| Description | Memtable size threshold that triggers a memtable snapshot in the WAL |
+| Type | int64 |
+| Default | 8388608 |
+| Effective | Hot reload |
+
+- max_wal_memtable_snapshot_num
+
+| Name | max_wal_memtable_snapshot_num |
+| ------------ | ----------------------------- |
+| Description | Maximum number of memtable snapshots in the WAL |
+| Type | int32 |
+| Default | 1 |
+| Effective | Hot reload |
+
+- delete_wal_files_period_in_ms
+
+| Name | delete_wal_files_period_in_ms |
+| ------------ | ----------------------------- |
+| Description | Interval at which WAL files are checked for deletion |
+| Type | int64 |
+| Default | 20000 |
+| Effective | Hot reload |
+
+- wal_throttle_threshold_in_byte
+
+| Name | wal_throttle_threshold_in_byte |
+| ------------ | ------------------------------ |
+| Description | In IoTConsensus, once the WAL size reaches this threshold, writes are throttled to control the write rate. |
+| Type | long |
+| Default | 53687091200 |
+| Effective | Hot reload |
+
+- iot_consensus_cache_window_time_in_ms
+
+| Name | iot_consensus_cache_window_time_in_ms |
+| ------------ | ------------------------------------- |
+| Description | Maximum wait time of the write cache in IoTConsensus. |
+| Type | long |
+| Default | -1 |
+| Effective | Hot reload |
+
+- enable_wal_compression
+
+| Name | enable_wal_compression |
+| ------------ | ---------------------- |
+| Description | Controls whether WAL compression is enabled. |
+| Type | boolean |
+| Default | true |
+| Effective | Hot reload |
+
+### 3.22 IoT Consensus Protocol Configuration
+
+The following settings take effect only when a Region is configured with the IoTConsensus protocol.
+
+- data_region_iot_max_log_entries_num_per_batch
+
+| Name | data_region_iot_max_log_entries_num_per_batch |
+| ------------ | --------------------------------------------- |
+| Description | Maximum number of log entries in an IoTConsensus batch |
+| Type | int32 |
+| Default | 1024 |
+| Effective | Restart to take effect |
+
+- data_region_iot_max_size_per_batch
+
+| Name | data_region_iot_max_size_per_batch |
+| ------------ | ---------------------------------- |
+| Description | Maximum size of an IoTConsensus batch |
+| Type | int32 |
+| Default | 16777216 |
+| Effective | Restart to take effect |
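+
+**Configuration sketch**: a hypothetical `iotdb-system.properties` fragment that tunes the two batch limits above together (the values shown are the documented defaults, for illustration only):
+
+```properties
+# At most 1024 log entries per IoTConsensus batch
+data_region_iot_max_log_entries_num_per_batch=1024
+# At most 16 MB (16777216 bytes) per IoTConsensus batch
+data_region_iot_max_size_per_batch=16777216
+```
+
+- 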
data_region_iot_max_pending_batches_num + +| 名字 | data_region_iot_max_pending_batches_num | +| ------------ | --------------------------------------- | +| 描述 | IoTConsensus batch 的流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- data_region_iot_max_memory_ratio_for_queue + +| 名字 | data_region_iot_max_memory_ratio_for_queue | +| ------------ | ------------------------------------------ | +| 描述 | IoTConsensus 队列内存分配比例 | +| 类型 | double | +| 默认值 | 0.6 | +| 改后生效方式 | 重启服务生效 | + +- region_migration_speed_limit_bytes_per_second + +| 名字 | region_migration_speed_limit_bytes_per_second | +| ------------ | --------------------------------------------- | +| 描述 | 定义了在region迁移过程中,数据传输的最大速率 | +| 类型 | long | +| 默认值 | 33554432 | +| 改后生效方式 | 重启服务生效 | + +### 3.23 TsFile配置 + +- group_size_in_byte + +| 名字 | group_size_in_byte | +| ------------ | ---------------------------------------------- | +| 描述 | 每次将内存中的数据写入到磁盘时的最大写入字节数 | +| 类型 | int32 | +| 默认值 | 134217728 | +| 改后生效方式 | 热加载 | + +- page_size_in_byte + +| 名字 | page_size_in_byte | +| ------------ | ---------------------------------------------------- | +| 描述 | 内存中每个列写出时,写成的单页最大的大小,单位为字节 | +| 类型 | int32 | +| 默认值 | 65536 | +| 改后生效方式 | 热加载 | + +- max_number_of_points_in_page + +| 名字 | max_number_of_points_in_page | +| ------------ | ------------------------------------------------- | +| 描述 | 一个页中最多包含的数据点(时间戳-值的二元组)数量 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 热加载 | + +- pattern_matching_threshold + +| 名字 | pattern_matching_threshold | +| ------------ | ------------------------------ | +| 描述 | 正则表达式匹配时最大的匹配次数 | +| 类型 | int32 | +| 默认值 | 1000000 | +| 改后生效方式 | 热加载 | + +- float_precision + +| 名字 | float_precision | +| ------------ | ------------------------------------------------------------ | +| 描述 | 浮点数精度,为小数点后数字的位数 | +| 类型 | int32 | +| 默认值 | 默认为 2 位。注意:32 位浮点数的十进制精度为 7 位,64 位浮点数的十进制精度为 15 位。如果设置超过机器精度将没有实际意义。 | +| 改后生效方式 | 热加载 | + +- value_encoder + +| 名字 | value_encoder | +| ------------ | ------------------------------------- | +| 描述 | value 列编码方式 | +| 类型 | 枚举 String: “TS_2DIFF”,“PLAIN”,“RLE” | +| 默认值 | PLAIN | +| 改后生效方式 | 热加载 | + +- compressor + +| 名字 | compressor | +| ------------ | ------------------------------------------------------------ | +| 描述 | 数据压缩方法; 对齐序列中时间列的压缩方法 | +| 类型 | 枚举 String : "UNCOMPRESSED", "SNAPPY", "LZ4", "ZSTD", "LZMA2" | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +- encrypt_flag + +| 名字 | encrypt_flag | +| ------------ | ---------------------------- | +| 描述 | 用于开启或关闭数据加密功能。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- encrypt_type + +| 名字 | encrypt_type | +| ------------ | ------------------------------------- | +| 描述 | 数据加密的方法。 | +| 类型 | String | +| 默认值 | org.apache.tsfile.encrypt.UNENCRYPTED | +| 改后生效方式 | 重启服务生效 | + +- encrypt_key_path + +| 名字 | encrypt_key_path | +| ------------ | ---------------------------- | +| 描述 | 数据加密使用的密钥来源路径。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +### 3.24 授权配置 + +- authorizer_provider_class + +| 名字 | authorizer_provider_class | +| ------------ | ------------------------------------------------------------ | +| 描述 | 权限服务的类名 | +| 类型 | String | +| 默认值 | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer | +| 改后生效方式 | 重启服务生效 | +| 其他可选值 | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer | + +- openID_url + +| 名字 | openID_url | +| ------------ | ---------------------------------------------------------- | +| 描述 | openID 服务器地址 (当 OpenIdAuthorizer 被启用时必须设定) | +| 类型 | String(一个 http 地址) | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- 
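+**Configuration sketch**: a hypothetical `iotdb-system.properties` fragment that switches authorization to OpenID; the server address `http://openid.example.com/` is a made-up placeholder and must be replaced with a real one:
+
+```properties
+# Replace the default LocalFileAuthorizer with the OpenIdAuthorizer
+authorizer_provider_class=org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer
+# Mandatory when OpenIdAuthorizer is enabled (placeholder URL)
+openID_url=http://openid.example.com/
+```
+
+- 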
iotdb_server_encrypt_decrypt_provider
+
+| Name | iotdb_server_encrypt_decrypt_provider |
+| ------------ | ------------------------------------- |
+| Description | Class used to encrypt user passwords |
+| Type | String |
+| Default | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt |
+| Effective | Can only be modified before the first service start |
+
+- iotdb_server_encrypt_decrypt_provider_parameter
+
+| Name | iotdb_server_encrypt_decrypt_provider_parameter |
+| ------------ | ----------------------------------------------- |
+| Description | Parameter used to initialize the password encryption class |
+| Type | String |
+| Default | None |
+| Effective | Can only be modified before the first service start |
+
+- author_cache_size
+
+| Name | author_cache_size |
+| ------------ | ----------------- |
+| Description | Size of the user cache and role cache |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart to take effect |
+
+- author_cache_expire_time
+
+| Name | author_cache_expire_time |
+| ------------ | ------------------------ |
+| Description | Validity period of the user cache and role cache, in minutes |
+| Type | int32 |
+| Default | 30 |
+| Effective | Restart to take effect |
+
+### 3.25 UDF Configuration
+
+- udf_initial_byte_array_length_for_memory_control
+
+| Name | udf_initial_byte_array_length_for_memory_control |
+| ------------ | ------------------------------------------------ |
+| Description | Used to estimate the memory usage of text fields in UDF queries. It is recommended to set this slightly larger than the average length of all text records. |
+| Type | int32 |
+| Default | 48 |
+| Effective | Restart to take effect |
+
+- udf_memory_budget_in_mb
+
+| Name | udf_memory_budget_in_mb |
+| ------------ | ----------------------- |
+| Description | Memory budget of a single UDF query, in MB; the effective limit does not exceed 20% of the memory allocated for reads. |
+| Type | Float |
+| Default | 30.0 |
+| Effective | Restart to take effect |
+
+- udf_reader_transformer_collector_memory_proportion
+
+| Name | udf_reader_transformer_collector_memory_proportion |
+| ------------ | --------------------------------------------------- |
+| Description | UDF memory allocation ratio, in the form a : b : c, where a, b and c are integers. |
+| Type | String |
+| Default | 1:1:1 |
+| Effective | Restart to take effect |
+
+- udf_lib_dir
+
+| Name | udf_lib_dir |
+| ------------ | ----------- |
+| Description | Storage path of UDF logs and jar files |
+| Type | String |
+| Default | ext/udf (Windows: ext\\udf) |
+| Effective | Restart to take effect |
+
+### 3.26 Trigger Configuration
+
+- trigger_lib_dir
+
+| Name | trigger_lib_dir |
+| ------------ | --------------- |
+| Description | Directory where trigger JAR packages are stored |
+| Type | String |
+| Default | ext/trigger |
+| Effective | Restart to take effect |
+
+- stateful_trigger_retry_num_when_not_found
+
+| Name | stateful_trigger_retry_num_when_not_found |
+| ------------ | ----------------------------------------- |
+| Description | Number of retries when a stateful trigger fires but its instance cannot be found |
+| Type | Int32 |
+| Default | 3 |
+| Effective | Restart to take effect |
+
+### 3.27 SELECT-INTO Configuration
+
+- into_operation_buffer_size_in_byte
+
+| Name | into_operation_buffer_size_in_byte |
+| ------------ | ---------------------------------- |
+| Description | Maximum memory occupied by data waiting to be written while executing a select-into statement (unit: byte) |
+| Type | long |
+| Default | 104857600 |
+| Effective | Hot reload |
+
+- select_into_insert_tablet_plan_row_limit
+
+| Name | select_into_insert_tablet_plan_row_limit |
+| ------------ | ---------------------------------------- |
+| Description | Maximum number of rows one insert-tablet-plan can process while executing a select-into statement |
+| Type | int32 |
+| Default | 10000 |
+| Effective | Hot reload |
+
+- into_operation_execution_thread_count
+
+| Name | into_operation_execution_thread_count |
+| ------------ | ------------------------------------- |
+| Description | Number of threads in the thread pool that performs writes in SELECT INTO |
+| Type | int32 |
+| Default | 2 |
+| Effective | Restart to take effect |
+
+### 3.28 Continuous Query Configuration
+
+- continuous_query_submit_thread_count
+
+| Name | continuous_query_submit_thread_count |
+| ------------ | ------------------------------------ |
+| Description | Number of threads in the thread pool that executes continuous query tasks |
+| Type | int32 |
+| Default | 2 |
+| Effective | Restart to take effect |
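+
+**Configuration sketch**: a hypothetical `iotdb-system.properties` fragment that gathers the SELECT-INTO parameters above (values are the documented defaults, for illustration only):
+
+```properties
+# Cap the memory used by pending select-into data at 100 MB
+into_operation_buffer_size_in_byte=104857600
+# Maximum rows per insert-tablet-plan
+select_into_insert_tablet_plan_row_limit=10000
+# Threads performing the writes
+into_operation_execution_thread_count=2
+```
+
+- 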
continuous_query_min_every_interval_in_ms + +| 名字 | continuous_query_min_every_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 连续查询执行时间间隔的最小值 | +| 类型 | long (duration) | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +### 3.29 PIPE配置 + +- pipe_lib_dir + +| 名字 | pipe_lib_dir | +| ------------ | -------------------------- | +| 描述 | 自定义 Pipe 插件的存放目录 | +| 类型 | string | +| 默认值 | ext/pipe | +| 改后生效方式 | 暂不支持修改 | + +- pipe_subtask_executor_max_thread_num + +| 名字 | pipe_subtask_executor_max_thread_num | +| ------------ | ------------------------------------------------------------ | +| 描述 | pipe 子任务 processor、sink 中各自可以使用的最大线程数。实际值将是 min(pipe_subtask_executor_max_thread_num, max(1, CPU核心数 / 2))。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- pipe_sink_timeout_ms + +| 名字 | pipe_sink_timeout_ms | +| ------------ | --------------------------------------------- | +| 描述 | thrift 客户端的连接超时时间(以毫秒为单位)。 | +| 类型 | int | +| 默认值 | 900000 | +| 改后生效方式 | 重启服务生效 | + +- pipe_sink_selector_number + +| 名字 | pipe_sink_selector_number | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大执行结果处理线程数量。 建议将此值设置为小于或等于 pipe_sink_max_client_number。 | +| 类型 | int | +| 默认值 | 4 | +| 改后生效方式 | 重启服务生效 | + +- pipe_sink_max_client_number + +| 名字 | pipe_sink_max_client_number | +| ------------ | ----------------------------------------------------------- | +| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大客户端数量。 | +| 类型 | int | +| 默认值 | 16 | +| 改后生效方式 | 重启服务生效 | + +- pipe_air_gap_receiver_enabled + +| 名字 | pipe_air_gap_receiver_enabled | +| ------------ | ------------------------------------------------------------ | +| 描述 | 是否启用通过网闸接收 pipe 数据。接收器只能在 tcp 模式下返回 0 或 1,以指示数据是否成功接收。 \| | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- pipe_air_gap_receiver_port + +| 名字 | pipe_air_gap_receiver_port | +| ------------ | ------------------------------------ | +| 描述 | 服务器通过网闸接收 pipe 数据的端口。 | +| 类型 | int | +| 默认值 | 9780 | +| 改后生效方式 | 重启服务生效 | + +- pipe_all_sinks_rate_limit_bytes_per_second + +| 名字 | pipe_all_sinks_rate_limit_bytes_per_second | +| ------------ | ------------------------------------------------------------ | +| 描述 | 所有 pipe sink 每秒可以传输的总字节数。当给定的值小于或等于 0 时,表示没有限制。默认值是 -1,表示没有限制。 | +| 类型 | double | +| 默认值 | -1 | +| 改后生效方式 | 热加载 | + +### 3.30 Ratis共识协议配置 + +当Region配置了RatisConsensus共识协议之后,下述的配置项才会生效 + +- config_node_ratis_log_appender_buffer_size_max + +| 名字 | config_node_ratis_log_appender_buffer_size_max | +| ------------ | ---------------------------------------------- | +| 描述 | confignode 一次同步日志RPC最大的传输字节限制 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_appender_buffer_size_max + +| 名字 | schema_region_ratis_log_appender_buffer_size_max | +| ------------ | ------------------------------------------------ | +| 描述 | schema region 一次同步日志RPC最大的传输字节限制 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_appender_buffer_size_max + +| 名字 | data_region_ratis_log_appender_buffer_size_max | +| ------------ | ---------------------------------------------- | +| 描述 | data region 一次同步日志RPC最大的传输字节限制 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_snapshot_trigger_threshold + +| 名字 | config_node_ratis_snapshot_trigger_threshold | +| ------------ | -------------------------------------------- | +| 描述 | confignode 触发snapshot需要的日志条数 | +| 类型 | int32 | +| 默认值 | 400,000 | +| 改后生效方式 | 重启服务生效 | + +- 
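+**Configuration sketch**: a hypothetical `iotdb-system.properties` fragment showing the snapshot trigger thresholds for the three region types (the schema region and data region parameters are documented just below); note that the comma in the documented default 400,000 is typographic only, so the value would be written as 400000 in the properties file:
+
+```properties
+# Number of log entries required to trigger a snapshot
+config_node_ratis_snapshot_trigger_threshold=400000
+schema_region_ratis_snapshot_trigger_threshold=400000
+data_region_ratis_snapshot_trigger_threshold=400000
+```
+
+- 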
schema_region_ratis_snapshot_trigger_threshold + +| 名字 | schema_region_ratis_snapshot_trigger_threshold | +| ------------ | ---------------------------------------------- | +| 描述 | schema region 触发snapshot需要的日志条数 | +| 类型 | int32 | +| 默认值 | 400,000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_snapshot_trigger_threshold + +| 名字 | data_region_ratis_snapshot_trigger_threshold | +| ------------ | -------------------------------------------- | +| 描述 | data region 触发snapshot需要的日志条数 | +| 类型 | int32 | +| 默认值 | 400,000 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_log_unsafe_flush_enable + +| 名字 | config_node_ratis_log_unsafe_flush_enable | +| ------------ | ----------------------------------------- | +| 描述 | confignode 是否允许Raft日志异步刷盘 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_unsafe_flush_enable + +| 名字 | schema_region_ratis_log_unsafe_flush_enable | +| ------------ | ------------------------------------------- | +| 描述 | schema region 是否允许Raft日志异步刷盘 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_unsafe_flush_enable + +| 名字 | data_region_ratis_log_unsafe_flush_enable | +| ------------ | ----------------------------------------- | +| 描述 | data region 是否允许Raft日志异步刷盘 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_log_segment_size_max_in_byte + +| 名字 | config_node_ratis_log_segment_size_max_in_byte | +| ------------ | ---------------------------------------------- | +| 描述 | confignode 一个RaftLog日志段文件的大小 | +| 类型 | int32 | +| 默认值 | 25165824 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_segment_size_max_in_byte + +| 名字 | schema_region_ratis_log_segment_size_max_in_byte | +| ------------ | ------------------------------------------------ | +| 描述 | schema region 一个RaftLog日志段文件的大小 | +| 类型 | int32 | +| 默认值 | 25165824 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_segment_size_max_in_byte + +| 名字 | data_region_ratis_log_segment_size_max_in_byte | +| ------------ | ---------------------------------------------- | +| 描述 | data region 一个RaftLog日志段文件的大小 | +| 类型 | int32 | +| 默认值 | 25165824 | +| 改后生效方式 | 重启服务生效 | + +- config_node_simple_consensus_log_segment_size_max_in_byte + +| 名字 | data_region_ratis_log_segment_size_max_in_byte | +| ------------ | ---------------------------------------------- | +| 描述 | Confignode 简单共识协议一个Log日志段文件的大小 | +| 类型 | int32 | +| 默认值 | 25165824 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_grpc_flow_control_window + +| 名字 | config_node_ratis_grpc_flow_control_window | +| ------------ | ------------------------------------------ | +| 描述 | confignode grpc 流式拥塞窗口大小 | +| 类型 | int32 | +| 默认值 | 4194304 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_grpc_flow_control_window + +| 名字 | schema_region_ratis_grpc_flow_control_window | +| ------------ | -------------------------------------------- | +| 描述 | schema region grpc 流式拥塞窗口大小 | +| 类型 | int32 | +| 默认值 | 4194304 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_grpc_flow_control_window + +| 名字 | data_region_ratis_grpc_flow_control_window | +| ------------ | ------------------------------------------ | +| 描述 | data region grpc 流式拥塞窗口大小 | +| 类型 | int32 | +| 默认值 | 4194304 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_grpc_leader_outstanding_appends_max + +| 名字 | config_node_ratis_grpc_leader_outstanding_appends_max | +| ------------ | ----------------------------------------------------- | +| 描述 | config node grpc 流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- 
schema_region_ratis_grpc_leader_outstanding_appends_max + +| 名字 | schema_region_ratis_grpc_leader_outstanding_appends_max | +| ------------ | ------------------------------------------------------- | +| 描述 | schema region grpc 流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_grpc_leader_outstanding_appends_max + +| 名字 | data_region_ratis_grpc_leader_outstanding_appends_max | +| ------------ | ----------------------------------------------------- | +| 描述 | data region grpc 流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_log_force_sync_num + +| 名字 | config_node_ratis_log_force_sync_num | +| ------------ | ------------------------------------ | +| 描述 | config node fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_force_sync_num + +| 名字 | schema_region_ratis_log_force_sync_num | +| ------------ | -------------------------------------- | +| 描述 | schema region fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_force_sync_num + +| 名字 | data_region_ratis_log_force_sync_num | +| ------------ | ------------------------------------ | +| 描述 | data region fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | config_node_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | confignode leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | schema_region_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ------------------------------------------------------ | +| 描述 | schema region leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | data_region_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | data region leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | config_node_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | confignode leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | schema_region_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ------------------------------------------------------ | +| 描述 | schema region leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | data_region_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | data region leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_request_timeout_ms + +| 名字 | config_node_ratis_request_timeout_ms | +| ------------ | ------------------------------------ | +| 描述 | confignode Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_request_timeout_ms + +| 名字 | schema_region_ratis_request_timeout_ms | +| ------------ | -------------------------------------- | +| 描述 | schema region Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_request_timeout_ms + +| 名字 | 
data_region_ratis_request_timeout_ms | +| ------------ | ------------------------------------ | +| 描述 | data region Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_max_retry_attempts + +| 名字 | config_node_ratis_max_retry_attempts | +| ------------ | ------------------------------------ | +| 描述 | confignode Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_initial_sleep_time_ms + +| 名字 | config_node_ratis_initial_sleep_time_ms | +| ------------ | --------------------------------------- | +| 描述 | confignode Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_max_sleep_time_ms + +| 名字 | config_node_ratis_max_sleep_time_ms | +| ------------ | ------------------------------------- | +| 描述 | confignode Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_max_retry_attempts + +| 名字 | schema_region_ratis_max_retry_attempts | +| ------------ | -------------------------------------- | +| 描述 | schema region Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_initial_sleep_time_ms + +| 名字 | schema_region_ratis_initial_sleep_time_ms | +| ------------ | ----------------------------------------- | +| 描述 | schema region Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_max_sleep_time_ms + +| 名字 | schema_region_ratis_max_sleep_time_ms | +| ------------ | ---------------------------------------- | +| 描述 | schema region Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_max_retry_attempts + +| 名字 | data_region_ratis_max_retry_attempts | +| ------------ | ------------------------------------ | +| 描述 | data region Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_initial_sleep_time_ms + +| 名字 | data_region_ratis_initial_sleep_time_ms | +| ------------ | --------------------------------------- | +| 描述 | data region Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_max_sleep_time_ms + +| 名字 | data_region_ratis_max_sleep_time_ms | +| ------------ | -------------------------------------- | +| 描述 | data region Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- ratis_first_election_timeout_min_ms + +| 名字 | ratis_first_election_timeout_min_ms | +| ------------ | ----------------------------------- | +| 描述 | Ratis协议首次选举最小超时时间 | +| 类型 | int64 | +| 默认值 | 50 (ms) | +| 改后生效方式 | 重启服务生效 | + +- ratis_first_election_timeout_max_ms + +| 名字 | ratis_first_election_timeout_max_ms | +| ------------ | ----------------------------------- | +| 描述 | Ratis协议首次选举最大超时时间 | +| 类型 | int64 | +| 默认值 | 150 (ms) | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_preserve_logs_num_when_purge + +| 名字 | config_node_ratis_preserve_logs_num_when_purge | +| ------------ | ---------------------------------------------- | +| 描述 | confignode snapshot后保持一定数量日志不删除 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_preserve_logs_num_when_purge + +| 名字 | schema_region_ratis_preserve_logs_num_when_purge | +| ------------ | ------------------------------------------------ | +| 描述 | schema region snapshot后保持一定数量日志不删除 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_preserve_logs_num_when_purge + +| 名字 | data_region_ratis_preserve_logs_num_when_purge | +| ------------ | 
---------------------------------------------- |
+| Description | Number of logs a data region keeps after a snapshot instead of deleting them |
+| Type | int32 |
+| Default | 1000 |
+| Effective | Restart to take effect |
+
+- config_node_ratis_log_max_size
+
+| Name | config_node_ratis_log_max_size |
+| ------------ | ------------------------------ |
+| Description | Maximum disk space of the Raft Log on a config node |
+| Type | int64 |
+| Default | 2147483648 (2GB) |
+| Effective | Restart to take effect |
+
+- schema_region_ratis_log_max_size
+
+| Name | schema_region_ratis_log_max_size |
+| ------------ | -------------------------------- |
+| Description | Maximum disk space of the Raft Log of a schema region |
+| Type | int64 |
+| Default | 2147483648 (2GB) |
+| Effective | Restart to take effect |
+
+- data_region_ratis_log_max_size
+
+| Name | data_region_ratis_log_max_size |
+| ------------ | ------------------------------ |
+| Description | Maximum disk space of the Raft Log of a data region |
+| Type | int64 |
+| Default | 21474836480 (20GB) |
+| Effective | Restart to take effect |
+
+- config_node_ratis_periodic_snapshot_interval
+
+| Name | config_node_ratis_periodic_snapshot_interval |
+| ------------ | -------------------------------------------- |
+| Description | Interval between periodic snapshots on a config node |
+| Type | int64 |
+| Default | 86400 (seconds) |
+| Effective | Restart to take effect |
+
+- schema_region_ratis_periodic_snapshot_interval
+
+| Name | schema_region_ratis_periodic_snapshot_interval |
+| ------------ | ---------------------------------------------- |
+| Description | Interval between periodic snapshots of a schema region |
+| Type | int64 |
+| Default | 86400 (seconds) |
+| Effective | Restart to take effect |
+
+- data_region_ratis_periodic_snapshot_interval
+
+| Name | data_region_ratis_periodic_snapshot_interval |
+| ------------ | -------------------------------------------- |
+| Description | Interval between periodic snapshots of a data region |
+| Type | int64 |
+| Default | 86400 (seconds) |
+| Effective | Restart to take effect |
+
+### 3.31 IoTConsensusV2 Configuration
+
+- iot_consensus_v2_pipeline_size
+
+| Name | iot_consensus_v2_pipeline_size |
+| ------------ | ------------------------------ |
+| Description | Default event buffer size of the connector and receiver in IoTConsensus V2. |
+| Type | int |
+| Default | 5 |
+| Effective | Restart to take effect |
+
+- iot_consensus_v2_mode
+
+| Name | iot_consensus_v2_mode |
+| ------------ | --------------------- |
+| Description | Consensus protocol mode used by IoTConsensus V2. |
+| Type | String |
+| Default | batch |
+| Effective | Restart to take effect |
+
+### 3.32 Procedure Configuration
+
+- procedure_core_worker_thread_count
+
+| Name | procedure_core_worker_thread_count |
+| ------------ | ---------------------------------- |
+| Description | Number of worker threads |
+| Type | int32 |
+| Default | 4 |
+| Effective | Restart to take effect |
+
+- procedure_completed_clean_interval
+
+| Name | procedure_completed_clean_interval |
+| ------------ | ---------------------------------- |
+| Description | Interval at which completed procedures are cleaned up |
+| Type | int32 |
+| Default | 30(s) |
+| Effective | Restart to take effect |
+
+- procedure_completed_evict_ttl
+
+| Name | procedure_completed_evict_ttl |
+| ------------ | ----------------------------- |
+| Description | Data retention time of completed procedures |
+| Type | int32 |
+| Default | 60(s) |
+| Effective | Restart to take effect |
+
+### 3.33 MQTT Broker Configuration
+
+- enable_mqtt_service
+
+| Name | enable_mqtt_service |
+| ------------ | ------------------- |
+| Description | Whether to enable the MQTT service |
+| Type | Boolean |
+| Default | false |
+| Effective | Hot reload |
+
+- mqtt_host
+
+| Name | mqtt_host |
+| ------------ | --------- |
+| Description | Host to which the MQTT service binds. |
+| Type | String |
+| Default | 127.0.0.1 |
+| Effective | Hot reload |
+
+- mqtt_port
+
+| Name | mqtt_port |
+| ------------ | --------- |
+| Description | Port to which the MQTT service binds. |
+| Type | int32 |
+| Default | 1883 |
+| Effective | Hot reload |
+
+- mqtt_handler_pool_size
+
+| Name | mqtt_handler_pool_size |
+| ------------ | ---------------------- |
+| Description | Size of the handler pool used to process MQTT messages. |
+| Type | 
int32 | +| 默认值 | 1 | +| 改后生效方式 | 热加载 | + +- mqtt_payload_formatter + +| 名字 | mqtt_payload_formatter | +| ------------ | ---------------------------- | +| 描述 | MQTT消息有效负载格式化程序。 | +| 类型 | String | +| 默认值 | json | +| 改后生效方式 | 热加载 | + +- mqtt_max_message_size + +| 名字 | mqtt_max_message_size | +| ------------ | ------------------------------------ | +| 描述 | MQTT消息的最大长度(以字节为单位)。 | +| 类型 | int32 | +| 默认值 | 1048576 | +| 改后生效方式 | 热加载 | + +### 3.34 审计日志配置 + +- enable_audit_log + +| 名字 | enable_audit_log | +| ------------ | ------------------------------ | +| 描述 | 用于控制是否启用审计日志功能。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- audit_log_storage + +| 名字 | audit_log_storage | +| ------------ | -------------------------- | +| 描述 | 定义了审计日志的输出位置。 | +| 类型 | String | +| 默认值 | IOTDB,LOGGER | +| 改后生效方式 | 重启服务生效 | + +- audit_log_operation + +| 名字 | audit_log_operation | +| ------------ | -------------------------------------- | +| 描述 | 定义了哪些类型的操作需要记录审计日志。 | +| 类型 | String | +| 默认值 | DML,DDL,QUERY | +| 改后生效方式 | 重启服务生效 | + +- enable_audit_log_for_native_insert_api + +| 名字 | enable_audit_log_for_native_insert_api | +| ------------ | -------------------------------------- | +| 描述 | 用于控制本地写入API是否记录审计日志。 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +### 3.35 白名单配置 +- enable_white_list + +| 名字 | enable_white_list | +| ------------ | ----------------- | +| 描述 | 是否启用白名单。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 热加载 | + +### 3.36 IoTDB-AI 配置 + +- model_inference_execution_thread_count + +| 名字 | model_inference_execution_thread_count | +| ------------ | -------------------------------------- | +| 描述 | 用于模型推理操作的线程数。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +### 3.37 TsFile 主动监听&加载功能配置 + +- load_clean_up_task_execution_delay_time_seconds + +| 名字 | load_clean_up_task_execution_delay_time_seconds | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在加载TsFile失败后,系统将等待多长时间才会执行清理任务来清除这些未成功加载的TsFile。 | +| 类型 | int | +| 默认值 | 1800 | +| 改后生效方式 | 热加载 | + +- load_write_throughput_bytes_per_second + +| 名字 | load_write_throughput_bytes_per_second | +| ------------ | -------------------------------------- | +| 描述 | 加载TsFile时磁盘写入的最大字节数每秒。 | +| 类型 | int | +| 默认值 | -1 | +| 改后生效方式 | 热加载 | + +- load_active_listening_enable + +| 名字 | load_active_listening_enable | +| ------------ | ------------------------------------------------------------ | +| 描述 | 是否开启 DataNode 主动监听并且加载 tsfile 的功能(默认开启)。 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- load_active_listening_dirs + +| 名字 | load_active_listening_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | 需要监听的目录(自动包括目录中的子目录),如有多个使用 “,“ 隔开默认的目录为 ext/load/pending(支持热装载)。 | +| 类型 | String | +| 默认值 | ext/load/pending | +| 改后生效方式 | 热加载 | + +- load_active_listening_fail_dir + +| 名字 | load_active_listening_fail_dir | +| ------------ | ---------------------------------------------------------- | +| 描述 | 执行加载 tsfile 文件失败后将文件转存的目录,只能配置一个。 | +| 类型 | String | +| 默认值 | ext/load/failed | +| 改后生效方式 | 热加载 | + +- load_active_listening_max_thread_num + +| 名字 | load_active_listening_max_thread_num | +| ------------ | ------------------------------------------------------------ | +| 描述 | 同时执行加载 tsfile 任务的最大线程数,参数被注释掉时的默值为 max(1, CPU 核心数 / 2),当用户设置的值不在这个区间[1, CPU核心数 /2]内时,会设置为默认值 (1, CPU 核心数 / 2)。 | +| 类型 | Long | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- load_active_listening_check_interval_seconds + +| 名字 | load_active_listening_check_interval_seconds | +| ------------ | 
------------------------------------------------------------ | +| 描述 | 主动监听轮询间隔,单位秒。主动监听 tsfile 的功能是通过轮询检查文件夹实现的。该配置指定了两次检查 load_active_listening_dirs 的时间间隔,每次检查完成 load_active_listening_check_interval_seconds 秒后,会执行下一次检查。当用户设置的轮询间隔小于 1 时,会被设置为默认值 5 秒。 | +| 类型 | Long | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + + +* last_cache_operation_on_load + +|名字| last_cache_operation_on_load | +|:---:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|描述| 当成功加载一个 TsFile 时,对 LastCache 执行的操作。`UPDATE`:使用 TsFile 中的数据更新 LastCache;`UPDATE_NO_BLOB`:与 UPDATE 类似,但会使 blob 序列的 LastCache 失效;`CLEAN_DEVICE`:使 TsFile 中包含的设备的 LastCache 失效;`CLEAN_ALL`:清空整个 LastCache。 | +|类型| String | +|默认值| UPDATE_NO_BLOB | +|改后生效方式| 重启后生效 | + +* cache_last_values_for_load + +|名字| cache_last_values_for_load | +|:---:|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|描述| 在加载 TsFile 之前是否缓存最新值(last values)。仅在 `last_cache_operation_on_load=UPDATE_NO_BLOB` 或 `last_cache_operation_on_load=UPDATE` 时生效。当设置为 true 时,即使 `last_cache_operation_on_load=UPDATE`,也会忽略 blob 序列。启用此选项会在加载 TsFile 期间增加内存占用。 | +|类型| Boolean | +|默认值| true | +|改后生效方式| 重启后生效 | + +* cache_last_values_memory_budget_in_byte + +|名字| cache_last_values_memory_budget_in_byte | +|:---:|:----------------------------------------------------------------------------------------------------| +|描述| 当 `cache_last_values_for_load=true` 时,用于缓存最新值的最大内存大小(以字节为单位)。如果超过该值,缓存的值将被丢弃,并以流式方式直接从 TsFile 中读取最新值。 | +|类型| int32 | +|默认值| 4194304 | +|改后生效方式| 重启后生效 | + + +### 3.38 分发重试配置 + +- enable_retry_for_unknown_error + +| 名字 | enable_retry_for_unknown_error | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在遇到未知错误时,写请求远程分发的最大重试时间,单位是毫秒。 | +| 类型 | Long | +| 默认值 | 60000 | +| 改后生效方式 | 热加载 | + +- enable_retry_for_unknown_error + +| 名字 | enable_retry_for_unknown_error | +| ------------ | -------------------------------- | +| 描述 | 用于控制是否对未知错误进行重试。 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 热加载 | \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function.md index 16a9bc3f0..46a8529cd 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function.md @@ -1,3 +1,6 @@ +--- +redirectTo: Basis-Function_apache.html +--- - -# 基础函数 - -## 1. 比较函数和运算符 - -### 1.1 基本比较运算符 - -比较运算符用于比较两个值,并返回比较结果(true或false)。 - -| 运算符 | 描述 | -| ------ | ---------- | -| < | 小于 | -| > | 大于 | -| <= | 小于或等于 | -| >= | 大于或等于 | -| = | 等于 | -| <> | 不等于 | -| != | 不等于 | - -#### 1.1.1 比较规则: - -1. 所有类型都可以与自身进行比较 -2. 数值类型(INT32, INT64, FLOAT, DOUBLE, TIMESTAMP)之间可以相互比较 -3. 字符类型(STRING, TEXT)之间也可以相互比较 -4. 除上述规则外的类型进行比较时,均会报错。 - -### 1.2 BETWEEN 运算符 - -1. `BETWEEN` 操作符用于判断一个值是否在指定的范围内。 -2. `NOT BETWEEN`操作符用于判断一个值是否不在指定范围内。 -3. `BETWEEN` 和 `NOT BETWEEN` 操作符可用于评估任何可排序的类型。 -4. 
`BETWEEN` 和 `NOT BETWEEN` 的值、最小值和最大值参数必须是同一类型,否则会报错。 - -**语法**: - -```SQL - value BETWEEN min AND max: - value NOT BETWEEN min AND max: -``` - -示例 1 :BETWEEN - -```SQL --- 查询 temperature 在 85.0 和 90.0 之间的记录 -SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0; -``` - -示例 2 :NOT BETWEEN - -```SQL -3-- 查询 humidity 不在 35.0 和 40.0 之间的记录 -SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0; -``` - -### 1.3 IS NULL 运算符 - -1. `IS NULL` 和 `IS NOT NULL` 运算符用于判断一个值是否为 NULL。 -2. 这两个运算符适用于所有数据类型。 - -示例1:查询 temperature 为 NULL 的记录 - -```SQL -SELECT * FROM table1 WHERE temperature IS NULL; -``` - -示例2:查询 humidity 不为 NULL 的记录 - -```SQL -SELECT * FROM table1 WHERE humidity IS NOT NULL; -``` - -### 1.4 IN 运算符 - -1. `IN` 操作符可用于 `WHERE` 子句中,比较一列中的一些值。 -2. 这些值可以由静态数组、标量表达式。 - -**语法:** - -```SQL -... WHERE column [NOT] IN ('value1','value2', expression1) -``` - -示例 1:静态数组:查询 region 为 '北京' 或 '上海' 的记录 - -```SQL -SELECT * FROM table1 WHERE region IN ('北京', '上海'); ---等价于 -SELECT * FROM region WHERE name = '北京' OR name = '上海'; -``` - -示例 2:标量表达式:查询 temperature 在特定值中的记录 - -```SQL -SELECT * FROM table1 WHERE temperature IN (85.0, 90.0); -``` - -示例 3:查询 region 不为 '北京' 或 '上海' 的记录 - -```SQL -SELECT * FROM table1 WHERE region NOT IN ('北京', '上海'); -``` - -### 1.5 GREATEST 和 LEAST - -`Greatest` 函数用于返回参数列表中的最大值,`Least` 函数用于返回参数列表中的最小值,返回数据类型与输入类型相同。 -1. 空值处理:若所有参数均为 NULL,则返回 NULL。 -2. 参数要求:必须提供 至少 2 个参数。 -3. 类型约束:仅支持 相同数据类型 的参数比较。 -4. 支持类型: `BOOLEAN`、`FLOAT`、`DOUBLE`、`INT32`、`INT64`、`STRING`、`TEXT`、`TIMESTAMP`、`DATE` - -**语法:** - -```sql - greatest(value1, value2, ..., valueN) - least(value1, value2, ..., valueN) -``` - -**示例:** - -```sql --- 查询 table2 中 temperature 和 humidity 的最大记录 -SELECT GREATEST(temperature,humidity) FROM table2; - --- 查询 table2 中 temperature 和 humidity 的最小记录 -SELECT LEAST(temperature,humidity) FROM table2; -``` - - -## 2. 聚合函数 - -### 2.1 概述 - -1. 聚合函数是多对一函数。它们对一组值进行聚合计算,得到单个聚合结果。 -2. 除了 `COUNT()`之外,其他所有聚合函数都忽略空值,并在没有输入行或所有值为空时返回空值。 例如,`SUM()` 返回 null 而不是零,而 `AVG()` 在计数中不包括 null 值。 - -### 2.2 支持的聚合函数 - -| 函数名 | 功能描述 | 允许的输入类型 | 输出类型 | -|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------|------------------| -| COUNT | 计算数据点数。 | 所有类型 | INT64 | -| COUNT_IF | COUNT_IF(exp) 用于统计满足指定布尔表达式的记录行数 | exp 必须是一个布尔类型的表达式,例如 count_if(temperature>20) | INT64 | -| APPROX_COUNT_DISTINCT | APPROX_COUNT_DISTINCT(x[,maxStandardError]) 函数提供 COUNT(DISTINCT x) 的近似值,返回不同输入值的近似个数。 | `x`:待计算列,支持所有类型;
`maxStandardError`:指定该函数应产生的最大标准误差,取值范围[0.0040625, 0.26],未指定值时默认0.023。 | INT64 | -| APPROX_MOST_FREQUENT | APPROX_MOST_FREQUENT(x, k, capacity) 函数用于近似计算数据集中出现频率最高的前 k 个元素。它返回一个JSON 格式的字符串,其中键是该元素的值,值是该元素对应的近似频率。(V 2.0.5.1 及以后版本支持) | `x`:待计算列,支持 IoTDB 现有所有的数据类型;
`k`:返回出现频率最高的 k 个值;
`capacity`: 用于计算的桶的数量,跟内存占用相关:其值越大误差越小,但占用内存更大,反之capacity值越小误差越大,但占用内存更小。 | STRING | -| SUM | 求和。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| AVG | 求平均值。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| MAX | 求最大值。 | 所有类型 | 与输入类型一致 | -| MIN | 求最小值。 | 所有类型 | 与输入类型一致 | -| FIRST | 求时间戳最小且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | -| LAST | 求时间戳最大且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | -| STDDEV | STDDEV_SAMP 的别名,求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| STDDEV_POP | 求总体标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| STDDEV_SAMP | 求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| VARIANCE | VAR_SAMP 的别名,求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| VAR_POP | 求总体方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| VAR_SAMP | 求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| EXTREME | 求具有最大绝对值的值。如果正值和负值的最大绝对值相等,则返回正值。 | INT32 INT64 FLOAT DOUBLE | 与输入类型一致 | -| MODE | 求众数。注意: 1.输入序列的不同值个数过多时会有内存异常风险; 2.如果所有元素出现的频次相同,即没有众数,则随机返回一个元素; 3.如果有多个众数,则随机返回一个众数; 4. NULL 值也会被统计频次,所以即使输入序列的值不全为 NULL,最终结果也可能为 NULL。 | 所有类型 | 与输入类型一致 | -| MAX_BY | MAX_BY(x, y) 求二元输入 x 和 y 在 y 最大时对应的 x 的值。MAX_BY(time, x) 返回 x 取最大值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | -| MIN_BY | MIN_BY(x, y) 求二元输入 x 和 y 在 y 最小时对应的 x 的值。MIN_BY(time, x) 返回 x 取最小值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | -| FIRST_BY | FIRST_BY(x, y) 求当 y 为第一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | -| LAST_BY | LAST_BY(x, y) 求当 y 为最后一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | - - -### 2.3 示例 - -#### 2.3.1 示例数据 - -在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 - -#### 2.3.2 Count - -统计的是整张表的行数和 `temperature` 列非 NULL 值的数量。 - -```SQL -IoTDB> select count(*), count(temperature) from table1; -``` - -执行结果如下: - -> 注意:只有COUNT函数可以与*一起使用,否则将抛出错误。 - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 18| 12| -+-----+-----+ -Total line number = 1 -It costs 0.834s -``` - - -#### 2.3.3 Count_if - -统计 `table2` 中 到达时间 `arrival_time` 不是 `null` 的记录行数。 - -```sql -IoTDB> select count_if(arrival_time is not null) from table2; -``` - -执行结果如下: - -```sql -+-----+ -|_col0| -+-----+ -| 4| -+-----+ -Total line number = 1 -It costs 0.047s -``` - -#### 2.3.4 Approx_count_distinct - -查询 `table1` 中 `temperature` 列不同值的个数。 - -```sql -IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1; -IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1; -``` - -执行结果如下: - -```sql -+------+------+ -|origin|approx| -+------+------+ -| 3| 3| -+------+------+ -Total line number = 1 -It costs 0.022s -``` - -#### 2.3.5 Approx_most_frequent - -查询 `table1` 中 `temperature` 列出现频次最高的2个值 - -```sql -IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1; -``` - -执行结果如下: - -```sql -+-------------------+ -| topk| -+-------------------+ -|{"85.0":6,"90.0":5}| -+-------------------+ -Total line number = 1 -It costs 0.064s -``` - - -#### 2.3.6 First - -查询`temperature`列、`humidity`列时间戳最小且不为 NULL 的值。 - -```SQL -IoTDB> select first(temperature), first(humidity) from table1; -``` - -执行结果如下: - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 90.0| 35.1| -+-----+-----+ -Total line number = 1 -It costs 0.170s -``` - -#### 2.3.7 Last - -查询`temperature`列、`humidity`列时间戳最大且不为 NULL 的值。 - -```SQL -IoTDB> select last(temperature), last(humidity) from table1; -``` - -执行结果如下: - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 90.0| 34.8| -+-----+-----+ -Total line number = 1 -It costs 0.211s 
-``` - -#### 2.3.8 First_by - -查询 `temperature` 列中非 NULL 且时间戳最小的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最小的行的 `humidity` 值。 - -```SQL -IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1; -``` - -执行结果如下: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-26T13:37:00.000+08:00| 35.1| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.269s -``` - -#### 2.3.9 Last_by - -查询`temperature` 列中非 NULL 且时间戳最大的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最大的行的 `humidity` 值。 - -```SQL -IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1; -``` - -执行结果如下: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-30T14:30:00.000+08:00| 34.8| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.070s -``` - -#### 2.3.10 Max_by - -查询`temperature` 列中最大值所在行的 `time` 值,以及`temperature` 列中最大值所在行的 `humidity` 值。 - -```SQL -IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; -``` - -执行结果如下: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-30T09:30:00.000+08:00| 35.2| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.172s -``` - -#### 2.3.11 Min_by - -查询`temperature` 列中最小值所在行的 `time` 值,以及`temperature` 列中最小值所在行的 `humidity` 值。 - -```SQL -select min_by(time, temperature), min_by(humidity, temperature) from table1; -``` - -执行结果如下: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-29T10:00:00.000+08:00| null| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.244s -``` - - -## 3. 逻辑运算符 - -### 3.1 概述 - -逻辑运算符用于组合条件或否定条件,返回布尔结果(`true` 或 `false`)。 - -以下是常用的逻辑运算符及其描述: - -| 运算符 | 描述 | 示例 | -| ------ | ----------------------------- | ------- | -| AND | 仅当两个值都为 true 时为 true | a AND b | -| OR | 任一值为 true 时为 true | a OR b | -| NOT | 当值为 false 时为 true | NOT a | - -### 3.2 NULL 对逻辑运算符的影响 - -#### 3.2.1 AND 运算符 - -- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。 -- 如果 `AND` 运算符的一侧为 `FALSE`,则表达式结果为 `FALSE`。 - -示例: - -```SQL -NULL AND true -- null -NULL AND false -- false -NULL AND NULL -- null -``` - -#### 3.2.2 OR 运算符 - -- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。 -- 如果 `OR` 运算符的一侧为 `TRUE`,则表达式结果为 `TRUE`。 - -示例: - -```SQL -NULL OR NULL -- null -NULL OR false -- null -NULL OR true -- true -``` - -##### 3.2.2.1 真值表 - -以下真值表展示了 `NULL` 在 `AND` 和 `OR` 运算符中的处理方式: - -| a | b | a AND b | a OR b | -| ----- | ----- | ------- | ------ | -| TRUE | TRUE | TRUE | TRUE | -| TRUE | FALSE | FALSE | TRUE | -| TRUE | NULL | NULL | TRUE | -| FALSE | TRUE | FALSE | TRUE | -| FALSE | FALSE | FALSE | FALSE | -| FALSE | NULL | FALSE | NULL | -| NULL | TRUE | NULL | TRUE | -| NULL | FALSE | FALSE | NULL | -| NULL | NULL | NULL | NULL | - -#### 3.2.3 NOT 运算符 - -NULL 的逻辑否定仍然是 NULL - -示例: - -```SQL -NOT NULL -- null -``` - -##### 3.2.3.1真值表 - -以下真值表展示了 `NULL` 在 `NOT` 运算符中的处理方式: - -| a | NOT a | -| ----- | ----- | -| TRUE | FALSE | -| FALSE | TRUE | -| NULL | NULL | - - -## 4. 
日期和时间函数和运算符 - -### 4.1 now() -> Timestamp - -返回当前时间的时间戳。 - -### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp - -`date_bin` 函数是一种用于处理时间数据的函数,作用是将一个时间戳(Timestamp)舍入到指定的时间间隔(interval)的边界上。 - -**语法:** - -```SQL --- 从时间戳为 0 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 -date_bin(interval,source) - --- 从时间戳为 origin 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 -date_bin(interval,source,origin) - --- interval支持的时间单位有: --- 年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 --- source必须为时间戳类型。 -``` - -**参数:** - -| 参数 | 含义 | -| -------- | ------------------------------------------------------------ | -| interval | 时间间隔支持的时间单位有:年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 | -| source | 待计算时间列,也可以是表达式。必须为时间戳类型。 | -| origin | 起始时间戳 | - -#### 4.2.1 语法约定: - -1. 不传入 `origin` 时,起始时间戳从 1970-01-01T00:00:00Z 开始计算(北京时间为 1970-01-01 08:00:00)。 -2. `interval` 为一个非负数,且必须带上时间单位。`interval` 为 0ms 时,不进行计算,直接返回 `source`。 -3. 当传入 `origin` 或 `source` 为负时,表示纪元时间之前的某个时间点,`date_bin` 会正常计算并返回与该时间点相关的时间段。 -4. 如果 `source` 中的值为 `null`,则返回 `null`。 -5. 不支持月份和非月份时间单位混用,例如 `1 MONTH 1 DAY`,这种时间间隔有歧义。 - -> 假设是起始时间是 2000 年 4 月 30 日进行计算,那么在一个时间间隔后,如果是先算 DAY再算MONTH,则会得到 2000 年 6 月 1 日,如果先算 MONTH 再算 DAY 则会得到 2000 年 5 月 31 日,二者得出的时间日期不同。 - -#### 4.2.2 示例 - -##### 示例数据 - -在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 - -示例 1:不指定起始时间戳 - -```SQL -SELECT - time, - date_bin(1h,time) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.683s -``` - -示例 2:指定起始时间戳 - -```SQL -SELECT - time, - date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| 
-|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.056s -``` - -示例 3:`origin` 为负数的情况 - -```SQL -SELECT - time, - date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.203s -``` - -示例 4:`interval` 为 0 的情况 - -```SQL -SELECT - time, - date_bin(0ms, time) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| 
-|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.107s -``` - -示例 5:`source` 为 null 的情况 - -```SQL -SELECT - arrival_time, - date_bin(1h,arrival_time) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| arrival_time| time_bin| -+-----------------------------+-----------------------------+ -| null| null| -|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -| null| null| -|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| -| null| null| -|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.319s -``` - -### 4.3 Extract 函数 - -该函数用于提取日期对应部分的值。(V2.0.6 版本起支持) - -#### 4.3.1 语法定义 - -```SQL -EXTRACT (identifier FROM expression) -``` -* 参数说明 - * **expression**: `TIMESTAMP` 类型或时间常量 - * **identifier** :取值范围及对应的返回值见下表 - - | 取值范围 | 返回值类型 | 返回值范围 | - | -------------------------- | ------------- | ------------- | - | `YEAR` | `INT64` | `/` | - | `QUARTER` | `INT64` | `1-4` | - | `MONTH` | `INT64` | `1-12` | - | `WEEK` | `INT64` | `1-53` | - | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` | - | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` | - | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` | - | `HOUR` | `INT64` | `0-23` | - | `MINUTE` | `INT64` | `0-59` | - | `SECOND` | `INT64` | `0-59` | - | `MS` | `INT64` | `0-999` | - | `US` | `INT64` | `0-999` | - | `NS` | `INT64` | `0-999` | - - -#### 4.3.2 使用示例 - -以[示例数据](../Reference/Sample-Data.md)中的 table1 为源数据,查询某段时间每天前12个小时的温度平均值 - -```SQL -IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time) -+----------+-----+ -| fmtdate|avgtp| -+----------+-----+ -|2024-11-28| 86.0| -|2024-11-29| 85.0| -|2024-11-30| 90.0| -+----------+-----+ -Total line number = 3 -It costs 0.041s -``` - -`Format` 函数介绍:[Format 函数](../SQL-Manual/Basis-Function.md#_7-2-format-函数) - -`Date_bin` 函数介绍:[Date_bin 函数](../SQL-Manual/Basis-Function.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp) - - -## 5. 
数学函数和运算符 - -### 5.1 数学运算符 - -| **运算符** | **描述** | -| ---------- | ------------------------ | -| + | 加法 | -| - | 减法 | -| * | 乘法 | -| / | 除法(整数除法执行截断) | -| % | 模(余数) | -| - | 取反 | - -### 5.2 数学函数 - -| 函数名 | 描述 | 输入 | 输出 | 用法 | -|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------| ---------------------- | ---------- | -| sin | 正弦函数 | double、float、INT64、INT32 | double | sin(x) | -| cos | 余弦函数 | double、float、INT64、INT32 | double | cos(x) | -| tan | 正切函数 | double、float、INT64、INT32 | double | tan(x) | -| asin | 反正弦函数 | double、float、INT64、INT32 | double | asin(x) | -| acos | 反余弦函数 | double、float、INT64、INT32 | double | acos(x) | -| atan | 反正切函数 | double、float、INT64、INT32 | double | atan(x) | -| sinh | 双曲正弦函数 | double、float、INT64、INT32 | double | sinh(x) | -| cosh | 双曲余弦函数 | double、float、INT64、INT32 | double | cosh(x) | -| tanh | 双曲正切函数 | double、float、INT64、INT32 | double | tanh(x) | -| degrees | 将弧度角 x 转换为度 | double、float、INT64、INT32 | double | degrees(x) | -| radians | 将度转换为弧度 | double、float、INT64、INT32 | double | radians(x) | -| abs | 绝对值 | double、float、INT64、INT32 | 返回与输入类型相同的值 | abs(x) | -| sign | 返回 x 的符号函数,即:如果参数为 0,则返回 0,如果参数大于 0,则返回 1,如果参数小于 0,则返回 -1。对于 double/float 类型的参数,函数还会返回:如果参数为 NaN,则返回 NaN,如果参数为 +Infinity,则返回 1.0,如果参数为 -Infinity,则返回 -1.0。 | double、float、INT64、INT32 | 返回与输入类型相同的值 | sign(x) | -| ceil | 返回 x 向上取整到最近的整数。 | double、float、INT64、INT32 | double | ceil(x) | -| floor | 返回 x 向下取整到最近的整数。 | double、float、INT64、INT32 | double | floor(x) | -| exp | 返回欧拉数 e 的 x 次幂。 | double、float、INT64、INT32 | double | exp(x) | -| ln | 返回 x 的自然对数。 | double、float、INT64、INT32 | double | ln(x) | -| log10 | 返回 x 的以 10 为底的对数。 | double、float、INT64、INT32 | double | log10(x) | -| round | 返回 x 四舍五入到最近的整数。 | double、float、INT64、INT32 | double | round(x) | -| round | 返回 x 四舍五入到 d 位小数。 | double、float、INT64、INT32 | double | round(x, d) | -| sqrt | 返回 x 的平方根。 | double、float、INT64、INT32 | double | sqrt(x) | -| e | 自然指数 | | double | e() | -| pi | π | | double | pi() | - - -## 6. 位运算函数 - -> V 2.0.6 版本起支持 - -示例原始数据如下: - -```SQL -IoTDB:database1> select * from bit_table -+-----------------------------+---------+------+-----+ -| time|device_id|length|width| -+-----------------------------+---------+------+-----+ -|2025-10-29T15:59:42.957+08:00| d1| 14| 12| -|2025-10-29T15:58:59.399+08:00| d3| 15| 10| -|2025-10-29T15:59:32.769+08:00| d2| 13| 12| -+-----------------------------+---------+------+-----+ - ---建表语句 -CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD); - ---写入数据 -INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12); -``` - -### 6.1 bit\_count(num, bits) - -`bit_count(num, bits)` 函数用于统计整数 `num`在指定位宽 `bits`下的二进制表示中 1 的个数。 - -#### 6.1.1 语法定义 - -```SQL -bit_count(num, bits) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * **​num:​**任意整型数值(int32 或者 int64) - * **​bits:​**整型数值,取值范围为2\~64 - -注意:如果 bits 位数不够表示 num,会报错(此处是​**有符号补码**​):`Argument exception, the scalar function num must be representable with the bits specified. 
[num] cannot be represented with [bits] bits.` - -* 调用方式 - * 两个具体数值:`bit_count(9, 64)` - * 列与数值:`bit_count(column1, 64)` - * 两列之间:`bit_count(column1, column2)` - -#### 6.1.2 使用示例 - -```SQL --- 两个具体数值 -IoTDB:database1> select distinct bit_count(2,8) from bit_table -+-----+ -|_col0| -+-----+ -| 1| -+-----+ --- 两个具体数值 -IoTDB:database1> select distinct bit_count(-5,8) from bit_table -+-----+ -|_col0| -+-----+ -| 7| -+-----+ --- 列与数值 -IoTDB:database1> select length,bit_count(length,8) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 3| -| 15| 4| -| 13| 3| -+------+-----+ --- bits 位数不够 -IoTDB:database1> select length,bit_count(length,2) from bit_table -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits. -``` - -### 6.2 bitwise\_and(x, y) - -`bitwise_and(x, y)` 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑与操作,并返回其按位与(bitwise AND)的运算结果。 - -#### 6.2.1 语法定义 - -```SQL -bitwise_and(x, y) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * **x, y**: 必须是 Int32 或 Int64 数据类型的整数值 -* 调用方式 - * 两个具体数值:`bitwise_and(19, 25)` - * 列与数值:`bitwise_and(column1, 25)` - * 两列之间:`bitwise_and(column1, column2)` - -#### 6.2.2 使用示例 - -```SQL --- 两个具体数值 -IoTDB:database1> select distinct bitwise_and(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 17| -+-----+ --- 列与数值 -IoTDB:database1> select length, bitwise_and(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 8| -| 15| 9| -| 13| 9| -+------+-----+ --- 两列之间 -IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 12| -| 15| 10| 10| -| 13| 12| 12| -+------+-----+-----+ -``` - -### 6.3 bitwise\_not(x) - -`bitwise_not(x)` 函数基于二进制补码表示法,对整数 x 的每一位进行逻辑非操作,并返回其按位取反(bitwise NOT)的运算结果。 - -#### 6.3.1 语法定义 - -```SQL -bitwise_not(x) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * **x**: 必须是 Int32 或 Int64 数据类型的整数值 -* 调用方式 - * 具体数值:`bitwise_not(5)` - * 单列操作:`bitwise_not(column1)` - -#### 6.3.2 使用示例 - -```SQL --- 具体数值 -IoTDB:database1> select distinct bitwise_not(5) from bit_table -+-----+ -|_col0| -+-----+ -| -6| -+-----+ --- 单列 -IoTDB:database1> select length, bitwise_not(length) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| -15| -| 15| -16| -| 13| -14| -+------+-----+ -``` - -### 6.4 bitwise\_or(x, y) - -`bitwise_or(x, y)` 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑或操作,并返回其按位或(bitwise OR)的运算结果。 - -#### 6.4.1 语法定义 - -```SQL -bitwise_or(x, y) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * **x, y**: 必须是 Int32 或 Int64 数据类型的整数值 -* 调用方式 - * 两个具体数值:`bitwise_or(19, 25)` - * 列与数值:`bitwise_or(column1, 25)` - * 两列之间:`bitwise_or(column1, column2)` - -#### 6.4.2 使用示例 - -```SQL --- 两个具体数值 -IoTDB:database1> select distinct bitwise_or(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 27| -+-----+ --- 列与数值 -IoTDB:database1> select length,bitwise_or(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 31| -| 15| 31| -| 13| 29| -+------+-----+ --- 两列之间 -IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 14| -| 15| 10| 15| -| 13| 12| 13| -+------+-----+-----+ -``` - -### 6.5 bitwise\_xor(x, y) - -`bitwise_xor(x, y)` 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑异或操作,并返回其按位异或(bitwise XOR)的运算结果。异或规则:相同为 0,不同为 1。 - -#### 6.5.1 语法定义 - -```SQL -bitwise_xor(x, y) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * **x, y**: 必须是 Int32 或 
Int64 数据类型的整数值 -* 调用方式 - * 两个具体数值:`bitwise_xor(19, 25)` - * 列与数值:`bitwise_xor(column1, 25)` - * 两列之间:`bitwise_xor(column1, column2)` - -#### 6.5.2 使用示例 - -```SQL --- 两个具体数值 -IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 10| -+-----+ --- 列与数值 -IoTDB:database1> select length,bitwise_xor(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 23| -| 15| 22| -| 13| 20| -+------+-----+ --- 两列之间 -IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 2| -| 15| 10| 5| -| 13| 12| 1| -+------+-----+-----+ -``` - -### 6.6 bitwise\_left\_shift(value, shift) - -`bitwise_left_shift(value, shift)` 函数返回将整数 `value`的二进制表示左移 `shift`位后的结果。左移操作将二进制位向高位方向移动,右侧空出的位用 0 填充,左侧溢出的位直接丢弃。等价于: `value << shift`。 - -#### 6.6.1 语法定义 - -```SQL -bitwise_left_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 -``` - -* 参数说明 - * ​**value**​: 要左移的整数值,必须是 Int32 或 Int64 数据类型 - * ​**shift**​: 左移的位数,必须是 Int32 或 Int64 数据类型 -* 调用方式 - * 两个具体数值:`bitwise_left_shift(1, 2)` - * 列与数值:`bitwise_left_shift(column1, 2)` - * 两列之间:`bitwise_left_shift(column1, column2)` - -#### 6.6.2 使用示例 - -```SQL ---两个具体数值 -IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table -+-----+ -|_col0| -+-----+ -| 4| -+-----+ --- 列与数值 -IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 56| -| 15| 60| -| 13| 52| -+------+-----+ --- 两列之间 -IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -+------+-----+-----+ -``` - -### 6.7 bitwise\_right\_shift(value, shift) - -`bitwise_right_shift(value, shift)`函数返回将整数 `value`的二进制表示逻辑右移(无符号右移) `shift`位后的结果。逻辑右移操作将二进制位向低位方向移动,左侧空出的高位用 0 填充,右侧溢出的低位直接丢弃。 - -#### 6.7.1 语法定义 - -```SQL -bitwise_right_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 -``` - -* 参数说明 - * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 - * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 -* 调用方式 - * 两个具体数值:`bitwise_right_shift(8, 3)` - * 列与数值:`bitwise_right_shift(column1, 3)` - * 两列之间:`bitwise_right_shift(column1, column2)` - -#### 6.7.2 使用示例 - -```SQL ---两个具体数值 -IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table -+-----+ -|_col0| -+-----+ -| 1| -+-----+ ---列与数值 -IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 1| -| 15| 1| -| 13| 1| -+------+-----+ ---两列之间 -IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -``` - -### 6.8 bitwise\_right\_shift\_arithmetic(value, shift) - -`bitwise_right_shift_arithmetic(value, shift)`函数返回将整数 `value`的二进制表示算术右移 `shift`位后的结果。算术右移操作将二进制位向低位方向移动,右侧溢出的低位直接丢弃,左侧空出的高位用符号位填充(正数补0,负数补1),以保持数值的符号不变。 - -#### 6.8.1 语法定义 - -```SQL -bitwise_right_shift_arithmetic(value, shift) -> [same as value]--返回结果类型与value数据类型相同 -``` - -* 参数说明 - * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 - * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 -* 调用方式: - * 两个具体数值:`bitwise_right_shift_arithmetic(12, 2)` - * 列与数值:`bitwise_right_shift_arithmetic(column1, 64)` - * 两列之间:`bitwise_right_shift_arithmetic(column1, column2)` - -#### 6.8.2 使用示例 - -```SQL ---两个具体数值 -IoTDB:database1> select distinct 
bitwise_right_shift_arithmetic(12,2) from bit_table -+-----+ -|_col0| -+-----+ -| 3| -+-----+ --- 列与数值 -IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 1| -| 15| 1| -| 13| 1| -+------+-----+ ---两列之间 -IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -+------+-----+-----+ -``` - -## 7. 条件表达式 - -### 7.1 CASE 表达式 - -CASE 表达式有两种形式:简单形式、搜索形式 - -#### 7.1.1 简单形式 - -简单形式从左到右搜索每个值表达式,直到找到一个与表达式相等的值: - -```SQL -CASE expression - WHEN value THEN result - [ WHEN ... ] - [ ELSE result ] -END -``` - -如果找到匹配的值,则返回相应的结果。如果没有找到匹配项,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: - -```SQL -SELECT a, - CASE a - WHEN 1 THEN 'one' - WHEN 2 THEN 'two' - ELSE 'many' - END -``` - -#### 7.1.2 搜索形式 - -搜索形式从左到右评估每个布尔条件,直到找到一个为真的条件,并返回相应的结果: - -```SQL -CASE - WHEN condition THEN result - [ WHEN ... ] - [ ELSE result ] -END -``` - -如果没有条件为真,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: - -```SQL -SELECT a, b, - CASE - WHEN a = 1 THEN 'aaa' - WHEN b = 2 THEN 'bbb' - ELSE 'ccc' - END -``` - -### 7.2 COALESCE 函数 - -返回参数列表中的第一个非空值。 - -```SQL -coalesce(value1, value2[, ...]) -``` - -## 8. 转换函数 - -### 8.1 转换函数 - -#### 8.1.1 cast(value AS type) → type - -1. 显式地将一个值转换为指定类型。 -2. 可以用于将字符串(varchar)转换为数值类型,或数值转换为字符串类型,V2.0.8-beta 版本起支持 OBJECT 类型强转成 STRING 类型。 -3. 如果转换失败,将抛出运行时错误。 - -示例: - -```SQL -SELECT * - FROM table1 - WHERE CAST(time AS DATE) - IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE)); -``` - -#### 8.1.2 try_cast(value AS type) → type - -1. 与 `cast()` 类似。 -2. 如果转换失败,则返回 `null`。 - -示例: - -```SQL -SELECT * - FROM table1 - WHERE try_cast(time AS DATE) - IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE)); -``` - -### 8.2 Format 函数 -该函数基于指定的格式字符串与输入参数,生成并返回格式化后的字符串输出。其功能与 Java 语言中的`String.format` 方法及 C 语言中的`printf`函数相类似,支持开发者通过占位符语法构建动态字符串模板,其中预设的格式标识符将被传入的对应参数值精准替换,最终形成符合特定格式要求的完整字符串。 - -#### 8.2.1 语法介绍 - -```SQL -format(pattern,...args) -> String -``` - -**参数定义** - -* `pattern`: 格式字符串,可包含静态文本及一个或多个格式说明符(如 `%s`, `%d` 等),或任意返回类型为 `STRING/TEXT` 的表达式。 -* `args`: 用于替换格式说明符的输入参数。需满足以下条件: - * 参数数量 ≥ 1 - * 若存在多个参数,以逗号`,`分隔(如 `arg1,arg2`) - * 参数总数可多于 `pattern` 中的占位符数量,但不可少于,否则触发异常 - -**返回值** - -* 类型为 `STRING` 的格式化结果字符串 - -#### 8.2.2 使用示例 - -1. 格式化浮点数 - -```SQL -IoTDB:database1> select format('%.5f',humidity) from table1 where humidity = 35.4 -+--------+ -| _col0| -+--------+ -|35.40000| -+--------+ -``` - -2. 格式化整数 - -```SQL -IoTDB:database1> select format('%03d',8) from table1 limit 1 -+-----+ -|_col0| -+-----+ -| 008| -+-----+ -``` - -3. 
格式化日期和时间戳 - -* Locale-specific日期 - -```SQL -IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) from table1 limit 1 -+--------------------+ -| _col0| -+--------------------+ -|星期一, 一月 1, 2024| -+--------------------+ -``` - -* 去除时区信息 - -```SQL -IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 -+-----------------------+ -| _col0| -+-----------------------+ -|2024-01-01 00:00:00.000| -+-----------------------+ -``` - -* 获取秒级时间戳精度 - -```SQL -IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 -+-------------------+ -| _col0| -+-------------------+ -|2024-01-01 00:00:00| -+-------------------+ -``` - -* 日期符号说明如下 - -| **符号** | **​ 描述** | -| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| 'H' | 24 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 00 - 23。 | -| 'I' | 12 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 01 - 12。 | -| 'k' | 24 小时制的小时数,i.e. 0 - 23。 | -| 'l' | 12 小时制的小时数,i.e. 1 - 12。 | -| 'M' | 小时内的分钟,格式为两位数,必要时加上前导零,i.e. 00 - 59。 | -| 'S' | 分钟内的秒数,格式为两位数,必要时加上前导零,i.e. 00 - 60(“60 ”是支持闰秒所需的特殊值)。 | -| 'L' | 秒内毫秒,格式为三位数,必要时加前导零,i.e. 000 - 999。 | -| 'N' | 秒内的纳秒,格式为九位数,必要时加前导零,i.e. 000000000 - 999999999。 | -| 'p' | 当地特定的[上午或下午](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getAmPmStrings())标记,小写,如 “am ”或 “pm”。使用转换前缀 “T ”会强制输出为大写。 | -| 'z' | 从格林尼治标准时间偏移的[RFC 822](http://www.ietf.org/rfc/rfc0822.txt)式数字时区,例如 -0800。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是 Java 虚拟机此实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。 | -| 'Z' | 表示时区缩写的字符串。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是此 Java 虚拟机实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。Formatter 的时区将取代参数的时区(如果有)。 | -| 's' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的秒数,i.e. Long.MIN\_VALUE/1000 至 Long.MAX\_VALUE/1000。 | -| 'Q' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的毫秒数,i.e. 
Long.MIN\_VALUE 至 Long.MAX\_VALUE。 | - -* 用于格式化常见的日期/时间组成的转换字符说明如下 - -| **符号** | **描述** | -| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 'B' | 特定于区域设置[的完整月份名称](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getMonths()),例如 “January”、“February”。 | -| 'b' | 当地特定月份的缩写名称,如"1 月"、"2 月"。 | -| 'h' | 与"b "相同。 | -| 'A' | 一周中某一天在当地的全称,如"星期日"、"星期一"。 | -| 'a' | 当地特有的星期简短名称,例如"星期日"、"星期一 | -| 'C' | 四位数年份除以100,格式为两位数,必要时加上前导零,即00 - 99 | -| 'Y' | 年份,格式为至少四位数,必要时加上前导零,例如0092相当于公历92年。 | -| 'y' | 年份的最后两位数,格式为必要的前导零,即00 - 99。 | -| 'j' | 年号,格式为三位数,必要时加前导零,例如公历为001 - 366。 | -| 'm' | 月份,格式为两位数,必要时加前导零,即01 - 13。 | -| 'd' | 月日,格式为两位数,必要时加前导零,即01 - 31 | -| 'e' | 月日,格式为两位数,即1 - 31。 | - -4. 格式化字符串 - -```SQL -IoTDB:database1> SELECT format('The measurement status is :%s',status) FROM table2 limit 1 -+-------------------------------+ -| _col0| -+-------------------------------+ -|The measurement status is :true| -+-------------------------------+ -``` - -5. 格式化百分号 - -```SQL -IoTDB:database1> SELECT format('%s%%', 99.9) from table1 limit 1 -+-----+ -|_col0| -+-----+ -|99.9%| -+-----+ -``` - -#### 8.2.3 **格式转换失败场景说明** - -1. 类型不匹配错误 - -* 时间戳类型冲突 若格式说明符中包含时间相关标记(如 `%Y-%m-%d`),但参数提供: - * 非 `DATE`/`TIMESTAMP` 类型值 - * 或涉及日期细粒度单位(如 `%H` 小时、`%M` 分钟)时,参数仅支持 `TIMESTAMP` 类型,否则将抛出类型异常 - -```SQL --- 示例1 -IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float) - --- 示例2 -IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float) -``` - -* 浮点数类型冲突 若使用 `%f` 等浮点格式说明符,但参数提供非数值类型(如字符串、布尔值),将触发类型转换错误 - -```SQL -IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean) -``` - -2. 参数数量不匹配错误 - -* 实际提供的参数数量 必须等于或大于 格式字符串中格式说明符的数量 -* 若参数数量少于格式说明符数量,将抛出 `ArgumentCountMismatch` 异常 - -```SQL -IoTDB:database1> select format('%.5f %03d', humidity) from table1 where humidity = 35.4 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d') -``` - -3. 无效调用错误 - -* 当函数参数满足以下任一条件时,视为非法调用: - * 参数总数 小于 2(必须包含格式字符串及至少一个参数) - * 格式字符串(`pattern`)类型非 `STRING/TEXT` - -```SQL --- 示例1 -IoTDB:database1> select format('%s') from table1 limit 1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. - ---示例2 -IoTDB:database1> select format(123, humidity) from table1 limit 1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. -``` - - - -## 9. 
字符串函数和操作符 - -### 9.1 字符串操作符 - -#### 9.1.1 || 操作符 - -`||` 操作符用于字符串连接,功能与 `concat` 函数相同。 - -#### 9.1.2 LIKE 语句 - -`LIKE` 语句用于模式匹配,具体用法在[模式匹配:LIKE](#_10-1-like-运算符) 中有详细文档。 - -### 9.2 字符串函数 - -| 函数名 | 描述 | 输入 | 输出 | 用法 | -| ----------- |--------------------------------------------------|--------------------------------------------------| ------------------------------ | ------------------------------ | -| length | 返回字符串的字符长度,而不是字符数组的长度。 | 支持一个参数,类型可以是字符串或文本。**string**:要计算长度的字符串。 | INT32 | length(string) | -| upper | 将字符串中的字母转换为大写。 | 支持一个参数,类型可以是字符串或文本。**string**:要转换的字符串。 | String | upper(string) | -| lower | 将字符串中的字母转换为小写。 | 支持一个参数,类型可以是字符串或文本。**string**:要转换的字符串。 | String | lower(string) | -| trim | 从源字符串中删除指定的开头和/或结尾字符。 | 支持三个参数。**specification(可选)**:指定从哪边去掉字符,可以是:`BOTH`:两边都去掉(默认)。`LEADING`:只去掉开头的字符。`TRAILING`:只去掉结尾的字符。**trimcharacter(可选)**:要去掉的字符,如果没指定,默认去掉空格。**string**:要处理的字符串。 | String | trim([ [ specification ] [ trimcharacter ] FROM ] string) 示例:`trim('!' FROM '!foo!');` —— `'foo'` | -| strpos | 返回子字符串在字符串中第一次出现的起始位置。位置从 1 开始计数。如果未找到,返回 0。注意:起始位置是基于字符而不是字节数组确定的。 | 仅支持两个参数,类型可以是字符串或文本。**sourceStr**:要搜索的字符串。**subStr**:要找的子字符串。 | INT32 | strpos(sourceStr, subStr) | -| starts_with | 测试子字符串是否是字符串的前缀。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串。**prefix**:前缀子字符串。 | Boolean | starts_with(sourceStr, prefix) | -| ends_with | 测试字符串是否以指定的后缀结束。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串。**suffix**:后缀子字符串。 | Boolean | ends_with(sourceStr, suffix) | -| concat | 返回字符串 `string1`、`string2`、...、`stringN` 的连接结果。功能与连接操作符 `\|\|` 相同。 | 至少两个参数,所有参数类型必须是字符串或文本。 | String | concat(str1, str2, ...) 或 str1 \|\| str2 ... | -| strcmp | 比较两个字符串的字母序。 | 支持两个参数,两个参数类型必须是字符串或文本。**string1**:第一个要比较的字符串。**string2**:第二个要比较的字符串。 | 返回一个整数值(INT32):如果 `str1 < str2`,返回 `-1`;如果 `str1 = str2`,返回 `0`;如果 `str1 > str2`,返回 `1`;如果 `str1` 或 `str2` 为 `NULL`,返回 `NULL` | strcmp(str1, str2) | -| replace | 从字符串中删除所有 `search` 的实例。 | 支持两个参数,可以是字符串或文本类型。**string**:原始字符串,要从中删除内容的字符串。**search**:要删除的子字符串。 | String | replace(string, string) | -| replace | 将字符串中所有 `search` 的实例替换为 `replace`。 | 支持三个参数,可以是字符串或文本类型。**string**:原始字符串,要从中替换内容的字符串。**search**:要替换掉的子字符串。**replace**:用来替换的新字符串。 | String | replace(string, string, string) | -| substring | 从指定位置提取字符到字符串末尾。需要注意的是,起始位置是基于字符而不是字节数组确定的。`start_index` 从 1 开始计数。 | 支持两个参数。**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。 | String:返回一个字符串,从 `start_index` 位置开始到字符串末尾的所有字符。**注意事项**:`start_index` 从 1 开始,即第 1 个字符的位置是 1;参数为 null 时,返回 `null`;`start_index` 大于字符串长度时,结果报错。 | substring(string from start_index) 或 substring(string, start_index) | -| substring | 从一个字符串中提取从指定位置开始、指定长度的子字符串。注意:起始位置和长度是基于字符而不是字节数组确定的。`start_index` 从 1 开始计数,长度从 `start_index` 位置计算。 | 支持三个参数。**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。**length**:要提取的子字符串的长度。 | String:返回一个字符串,从 `start_index` 位置开始,提取 `length` 个字符。**注意事项**:参数为 null 时,返回 `null`;如果 `start_index` 大于字符串的长度,结果报错;如果 `length` 小于 0,结果报错;极端情况下,`start_index + length` 超过 `int.MAX` 并变成负数,将导致异常结果。 | substring(string from start_index for length) 或 substring(string, start_index, length) | - -## 10. 模式匹配函数 - -### 10.1 LIKE 运算符 - -#### 10.1.1 用途 - -`LIKE` 运算符用于将值与模式进行比较。它通常用于 `WHERE` 子句中,用于匹配字符串中的特定模式。 - -#### 10.1.2 语法 - -```SQL -... 
column [NOT] LIKE 'pattern' [ESCAPE 'character']; -``` - -#### 10.1.3 匹配规则 - -- 匹配字符是区分大小写的。 -- 模式支持两个匹配符号: - - `_`:匹配任意单个字符。 - - `%`:匹配 0 个或多个字符。 - -#### 10.1.4 注意事项 - -- `LIKE` 模式匹配总是覆盖整个字符串。如果需要匹配字符串中的任意位置,模式必须以 `%` 开头和结尾。 -- 如果需要匹配 `%` 或 `_` 作为普通字符,必须使用转义字符(由可选的 `ESCAPE` 子句指定)。 - -#### 10.1.5 示例 - -示例 1:匹配以特定字符开头的字符串 - -- **说明**:查找所有以字母 `E` 开头的名称,例如 `Europe`。 - -```SQL -SELECT * FROM table1 WHERE continent LIKE 'E%'; -``` - -示例 2:排除特定模式 - -- **说明**:查找所有不以字母 `E` 开头的名称。 - -```SQL -SELECT * FROM table1 WHERE continent NOT LIKE 'E%'; -``` - -示例 3:匹配特定长度的字符串 - -- **说明**:查找所有以 `A` 开头、以 `a` 结尾且中间有两个字符的名称,例如 `Asia`。 - -```SQL -SELECT * FROM table1 WHERE continent LIKE 'A__a'; -``` - -示例 4:转义特殊字符 - -- **说明**:查找所有以 `South_` 开头的名称。这里使用了转义字符 `\` 来转义 `_` 等特殊字符,例如 `South_America`。 - -```SQL -SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\'; -``` - -示例 5:匹配转义字符本身 - -- **说明**:如果需要匹配转义字符本身,可以使用双转义字符 `\\`。 - -```SQL -SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\'; -``` - -### 10.2 regexp_like 函数 - -#### 10.2.1 用途 - -`regexp_like` 函数用于评估正则表达式模式,并确定该模式是否包含在字符串中。 - -#### 10.2.2 语法 - -```SQL -regexp_like(string, pattern); -``` - -#### 10.2.3 注意事项 - -- `regexp_like` 的模式只需包含在字符串中,而不需要匹配整个字符串。 -- 如果需要匹配整个字符串,可以使用正则表达式的锚点 `^` 和 `$`。 -- `^` 表示“字符串的开头”,`$` 表示“字符串的结尾”。 -- 正则表达式采用 Java 定义的正则语法,但存在以下需要注意的例外情况: - - **多行模式** - 1. 启用方式:`(?m)`。 - 2. 只识别 `\n` 作为行终止符。 - 3. 不支持 `(?d)` 标志,且禁止使用。 - - **不区分大小写匹配** - 1. 启用方式:`(?i)`。 - 2. 基于 Unicode 规则,不支持上下文相关和本地化匹配。 - 3. 不支持 `(?u)` 标志,且禁止使用。 - - **字符类** - 1. 在字符类(如 `[A-Z123]`)中,`\Q` 和 `\E` 不被支持,被视为普通字面量。 - - **Unicode 字符类(**`\p{prop}`**)** - 1. **名称下划线**:名称中的所有下划线必须删除(如 `OldItalic` 而非 `Old_Italic`)。 - 2. **文字(Scripts)**:直接指定,无需 `Is`、`script=` 或 `sc=` 前缀(如 `\p{Hiragana}`)。 - 3. **区块(Blocks)**:必须使用 `In` 前缀,不支持 `block=` 或 `blk=` 前缀(如 `\p{InMongolian}`)。 - 4. **类别(Categories)**:直接指定,无需 `Is`、`general_category=` 或 `gc=` 前缀(如 `\p{L}`)。 - 5. **二元属性(Binary Properties)**:直接指定,无需 `Is`(如 `\p{NoncharacterCodePoint}`)。 - -#### 10.2.4 示例 - -示例 1:匹配包含特定模式的字符串 - -```SQL -SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true -``` - -- **说明**:检查字符串 `'1a 2b 14m'` 是否包含模式 `\d+b`。 - - `\d+` 表示“一个或多个数字”。 - - `b` 表示字母 `b`。 - - 在 `'1a 2b 14m'` 中,`2b` 符合这个模式,所以返回 `true`。 - -示例 2:匹配整个字符串 - -```SQL -SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false -``` - -- **说明**:检查字符串 `'1a 2b 14m'` 是否完全匹配模式 `^\\d+b$`。 - - `\d+` 表示“一个或多个数字”。 - - `b` 表示字母 `b`。 - - 整个字符串必须是“一个或多个数字后紧跟字母 `b`”的形式,而 `'1a 2b 14m'` 含有其他字符且不以 `b` 结尾,所以返回 `false`。 - -## 11. 
时序分窗函数 - -原始示例数据如下: - -```SQL -IoTDB> SELECT * FROM bid; -+-----------------------------+--------+-----+ -| time|stock_id|price| -+-----------------------------+--------+-----+ -|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -|2021-01-01T09:15:00.000+08:00| TESL|195.0| -+-----------------------------+--------+-----+ - --- 创建语句 -CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD); --- 插入数据 -INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0); -``` - -### 11.1 HOP - -#### 11.1.1 功能描述 - -HOP 函数用于按时间分段分窗分析,识别每一行数据所属的时间窗口。该函数通过指定固定窗口大小(size)和窗口滑动步长(SLIDE),将数据按时间戳分配到所有与其时间戳重叠的窗口中。若窗口之间存在重叠(步长 < 窗口大小),数据会自动复制到多个窗口。 - -#### 11.1.2 函数定义 - -```SQL -HOP(data, timecol, size, slide[, origin]) -``` - -#### 11.1.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| --------- | ---------- | --------------------------------- | -------------------- | -| DATA | 表参数 | ROW SEMANTICPASS THROUGH | 输入表 | -| TIMECOL | 标量参数 | 字符串类型默认值:time | 时间列 | -| SIZE | 标量参数 | 长整数类型 | 窗口大小 | -| SLIDE | 标量参数 | 长整数类型 | 窗口滑动步长 | -| ORIGIN | 标量参数 | 时间戳类型默认值:Unix 纪元时间 | 第一个窗口起始时间 | - -#### 11.1.4 返回结果 - -HOP 函数的返回结果列包含: - -* window\_start: 窗口开始时间(闭区间) -* window\_end: 窗口结束时间(开区间) -* 映射列:DATA 参数的所有输入列 - -#### 11.1.5 使用示例 - -```SQL -IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id; 
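--- 注:由于 SLIDE(5m)小于 SIZE(10m),窗口互相重叠,每行数据会被分配到两个窗口,因此下方每个 stock 在相邻窗口中的均值成对出现。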
-+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 201.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00| TESL| 195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.2 SESSION - -#### 11.2.1 功能描述 - -SESSION 函数用于按会话间隔对数据进行分窗。系统逐行检查与前一行的时间间隔,小于阈值(GAP)则归入当前窗口,超过则归入下一个窗口。 - -#### 11.2.2 函数定义 - -```SQL -SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap) -``` -#### 11.2.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| --------- | ---------- | -------------------------- | ---------------------------------------- | -| DATA | 表参数 | SET SEMANTICPASS THROUGH | 输入表通过 pkeys、okeys 指定分区和排序 | -| TIMECOL | 标量参数 | 字符串类型默认值:'time' | 时间列名 -| -| GAP | 标量参数 | 长整数类型 | 会话间隔阈值 | - -#### 11.2.4 返回结果 - -SESSION 函数的返回结果列包含: - -* window\_start: 会话窗口内的第一条数据的时间 -* window\_end: 会话窗口内的最后一条数据的时间 -* 映射列:DATA 参数的所有输入列 - -#### 11.2.5 使用示例 - -```SQL -IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY SESSION -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.3 VARIATION - -#### 11.3.1 功能描述 - -VARIATION 
函数用于按数据差值分窗,将第一条数据作为首个窗口的基准值,每个数据点会与基准值进行差值运算,如果差值小于给定的阈值(delta)则加入当前窗口;如果超过阈值,则分为下一个窗口,将该值作为下一个窗口的基准值。 - -#### 11.3.2 函数定义 - -```sql -VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta) -``` - -#### 11.3.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| -------- | ---------- | -------------------------- | ---------------------------------------- | -| DATA | 表参数 | SET SEMANTICPASS THROUGH | 输入表通过 pkeys、okeys 指定分区和排序 | -| COL | 标量参数 | 字符串类型 | 标识对哪一列计算差值 | -| DELTA | 标量参数 | 浮点数类型 | 差值阈值 | - -#### 11.3.4 返回结果 - -VARIATION 函数的返回结果列包含: - -* window\_index: 窗口编号 -* 映射列:DATA 参数的所有输入列 - -#### 11.3.5 使用示例 - -```sql -IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0); -+------------+-----------------------------+--------+-----+ -|window_index| time|stock_id|price| -+------------+-----------------------------+--------+-----+ -| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| -| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| -| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| -| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY VARIATION -IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id; -+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5| -+-----------------------------+-----------------------------+--------+-----+ -``` - -### 11.4 CAPACITY - -#### 11.4.1 功能描述 - -CAPACITY 函数用于按数据点数(行数)分窗,每个窗口最多有 SIZE 行数据。 - -#### 11.4.2 函数定义 - -```sql -CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size) -``` - -#### 11.4.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| -------- | ---------- | -------------------------- | ---------------------------------------- | -| DATA | 表参数 | SET SEMANTICPASS THROUGH | 输入表通过 pkeys、okeys 指定分区和排序 | -| SIZE | 标量参数 | 长整数类型 | 窗口大小 | - -#### 11.4.4 返回结果 - -CAPACITY 函数的返回结果列包含: - -* window\_index: 窗口编号 -* 映射列:DATA 参数的所有输入列 - -#### 11.4.5 使用示例 - -```sql -IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2); -+------------+-----------------------------+--------+-----+ -|window_index| time|stock_id|price| -+------------+-----------------------------+--------+-----+ -| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| -| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| -| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| -| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY COUNT -IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id; 
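--- 注:每个 stock_id 分区内按每 2 行(SIZE => 2)划分一个窗口,末尾不足 2 行的窗口按实际行数聚合,如下方 AAPL 的第二个窗口仅含 1 行。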
-+-----------------------------+-----------------------------+--------+-----+ -|                   start_time|                     end_time|stock_id|  avg| -+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|101.5| -|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0| -+-----------------------------+-----------------------------+--------+-----+ -``` - -### 11.5 TUMBLE - -#### 11.5.1 功能描述 - -TUMBLE 函数用于通过时间属性字段为每行数据分配一个窗口,滚动窗口的大小固定且不重复。 - -#### 11.5.2 函数定义 - -```sql -TUMBLE(data, timecol, size[, origin]) -``` - -#### 11.5.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| --------- | ---------- | --------------------------------- | -------------------- | -| DATA | 表参数 | ROW SEMANTICPASS THROUGH | 输入表 | -| TIMECOL | 标量参数 | 字符串类型默认值:time | 时间列 | -| SIZE | 标量参数 | 长整数类型 | 窗口大小,需为正数 | -| ORIGIN | 标量参数 | 时间戳类型默认值:Unix 纪元时间 | 第一个窗口起始时间 | - -#### 11.5.4 返回结果 - -TUMBLE 函数的返回结果列包含: - -* window\_start: 窗口开始时间(闭区间) -* window\_end: 窗口结束时间(开区间) -* 映射列:DATA 参数的所有输入列 - -#### 11.5.5 使用示例 - -```SQL -IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|                 window_start|                   window_end|                         time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -|                 window_start|                   window_end|stock_id|               avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.6 CUMULATE - -#### 11.6.1 功能描述 - -CUMULATE 函数用于从初始的窗口开始,创建相同窗口开始但窗口结束步长不同的窗口,直到达到最大的窗口大小。每个窗口包含其区间内的元素。例如:1 小时步长、24 小时大小的累计窗口,每天可以获得如下这些窗口:`[00:00, 01:00)`,`[00:00, 02:00)`,`[00:00, 03:00)`, …, `[00:00, 24:00)` - -#### 11.6.2 函数定义 - -```sql -CUMULATE(data, timecol, size, step[, origin]) -``` - -#### 11.6.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| --------- | ---------- | --------------------------------- | -------------------------------------------- | -| DATA | 表参数 | ROW 
SEMANTICPASS THROUGH | 输入表 | -| TIMECOL | 标量参数 | 字符串类型默认值:time | 时间列 | -| SIZE | 标量参数 | 长整数类型 | 窗口大小,SIZE必须是STEP的整数倍,需为正数 | -| STEP | 标量参数 | 长整数类型 | 窗口步长,需为正数 | -| ORIGIN | 标量参数 | 时间戳类型默认值:Unix 纪元时间 | 第一个窗口起始时间 | - -> 注意:size 如果不是 step 的整数倍,则会报错`Cumulative table function requires size must be an integral multiple of step` - -#### 11.6.4 返回结果 - -CUMULATE函数的返回结果列包含: - -* window\_start: 窗口开始时间(闭区间) -* window\_end: 窗口结束时间(开区间) -* 映射列:DATA 参数的所有输入列 - -#### 11.6.5 使用示例 - -```sql -IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| TESL| 201.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00| TESL| 195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00| TESL| 195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00| AAPL| 100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| AAPL| 101.5| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function_apache.md 
b/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function_apache.md new file mode 100644 index 000000000..a3e47d977 --- /dev/null +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function_apache.md @@ -0,0 +1,2019 @@ + + +# 基础函数 + +## 1. 比较函数和运算符 + +### 1.1 基本比较运算符 + +比较运算符用于比较两个值,并返回比较结果(true 或 false)。 + +| 运算符 | 描述 | +| ------ | ---------- | +| < | 小于 | +| > | 大于 | +| <= | 小于或等于 | +| >= | 大于或等于 | +| = | 等于 | +| <> | 不等于 | +| != | 不等于 | + +#### 1.1.1 比较规则: + +1. 所有类型都可以与自身进行比较。 +2. 数值类型(INT32, INT64, FLOAT, DOUBLE, TIMESTAMP)之间可以相互比较。 +3. 字符类型(STRING, TEXT)之间也可以相互比较。 +4. 除上述规则外的类型进行比较时,均会报错。 + +### 1.2 BETWEEN 运算符 + +1. `BETWEEN` 操作符用于判断一个值是否在指定的范围内。 +2. `NOT BETWEEN` 操作符用于判断一个值是否不在指定范围内。 +3. `BETWEEN` 和 `NOT BETWEEN` 操作符可用于评估任何可排序的类型。 +4. `BETWEEN` 和 `NOT BETWEEN` 的值、最小值和最大值参数必须是同一类型,否则会报错。 + +**语法**: + +```SQL + value BETWEEN min AND max + value NOT BETWEEN min AND max +``` + +示例 1:BETWEEN + +```SQL +-- 查询 temperature 在 85.0 和 90.0 之间的记录 +SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0; +``` + +示例 2:NOT BETWEEN + +```SQL +-- 查询 humidity 不在 35.0 和 40.0 之间的记录 +SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0; +``` + +### 1.3 IS NULL 运算符 + +1. `IS NULL` 和 `IS NOT NULL` 运算符用于判断一个值是否为 NULL。 +2. 这两个运算符适用于所有数据类型。 + +示例 1:查询 temperature 为 NULL 的记录 + +```SQL +SELECT * FROM table1 WHERE temperature IS NULL; +``` + +示例 2:查询 humidity 不为 NULL 的记录 + +```SQL +SELECT * FROM table1 WHERE humidity IS NOT NULL; +``` + +### 1.4 IN 运算符 + +1. `IN` 操作符可用于 `WHERE` 子句中,判断某一列的值是否在给定的集合内。 +2. 这些值可以是静态数组或标量表达式。 + +**语法:** + +```SQL +... WHERE column [NOT] IN ('value1','value2', expression1) +``` + +示例 1:静态数组:查询 region 为 '北京' 或 '上海' 的记录 + +```SQL +SELECT * FROM table1 WHERE region IN ('北京', '上海'); +-- 等价于 +SELECT * FROM table1 WHERE region = '北京' OR region = '上海'; +``` + +示例 2:标量表达式:查询 temperature 在特定值中的记录 + +```SQL +SELECT * FROM table1 WHERE temperature IN (85.0, 90.0); +``` + +示例 3:查询 region 不为 '北京' 或 '上海' 的记录 + +```SQL +SELECT * FROM table1 WHERE region NOT IN ('北京', '上海'); +``` + +### 1.5 GREATEST 和 LEAST + +`Greatest` 函数用于返回参数列表中的最大值,`Least` 函数用于返回参数列表中的最小值,返回数据类型与输入类型相同。 +1. 空值处理:若所有参数均为 NULL,则返回 NULL。 +2. 参数要求:必须提供至少 2 个参数。 +3. 类型约束:仅支持相同数据类型的参数比较。 +4. 支持类型:`BOOLEAN`、`FLOAT`、`DOUBLE`、`INT32`、`INT64`、`STRING`、`TEXT`、`TIMESTAMP`、`DATE` + +**语法:** + +```sql + greatest(value1, value2, ..., valueN) + least(value1, value2, ..., valueN) +``` + +**示例:** + +```sql +-- 查询 table2 中 temperature 和 humidity 的最大记录 +SELECT GREATEST(temperature,humidity) FROM table2; + +-- 查询 table2 中 temperature 和 humidity 的最小记录 +SELECT LEAST(temperature,humidity) FROM table2; +``` + + +## 2. 聚合函数 + +### 2.1 概述 + +1. 聚合函数是多对一函数。它们对一组值进行聚合计算,得到单个聚合结果。 +2. 除了 `COUNT()` 之外,其他所有聚合函数都忽略空值,并在没有输入行或所有值为空时返回空值。例如,`SUM()` 返回 null 而不是零,而 `AVG()` 在计数中不包括 null 值。 + +### 2.2 支持的聚合函数 + +| 函数名 | 功能描述 | 允许的输入类型 | 输出类型 | +|-----------------------|------------------------------------------------------------|--------------------------------------------|------------------| +| COUNT | 计算数据点数。 | 所有类型 | INT64 | +| COUNT_IF | COUNT_IF(exp) 用于统计满足指定布尔表达式的记录行数。 | exp 必须是一个布尔类型的表达式,例如 count_if(temperature>20) | INT64 | +| APPROX_COUNT_DISTINCT | APPROX_COUNT_DISTINCT(x[,maxStandardError]) 函数提供 COUNT(DISTINCT x) 的近似值,返回不同输入值的近似个数。 | `x`:待计算列,支持所有类型;
`maxStandardError`:指定该函数应产生的最大标准误差,取值范围[0.0040625, 0.26],未指定值时默认0.023。 | INT64 | +| APPROX_MOST_FREQUENT | APPROX_MOST_FREQUENT(x, k, capacity) 函数用于近似计算数据集中出现频率最高的前 k 个元素。它返回一个JSON 格式的字符串,其中键是该元素的值,值是该元素对应的近似频率。(V 2.0.5.1 及以后版本支持) | `x`:待计算列,支持 IoTDB 现有所有的数据类型;
`k`:返回出现频率最高的 k 个值;
`capacity`: 用于计算的桶的数量,跟内存占用相关:其值越大误差越小,但占用内存更大,反之capacity值越小误差越大,但占用内存更小。 | STRING | +| SUM | 求和。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| AVG | 求平均值。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| MAX | 求最大值。 | 所有类型 | 与输入类型一致 | +| MIN | 求最小值。 | 所有类型 | 与输入类型一致 | +| FIRST | 求时间戳最小且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | +| LAST | 求时间戳最大且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | +| STDDEV | STDDEV_SAMP 的别名,求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_POP | 求总体标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_SAMP | 求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VARIANCE | VAR_SAMP 的别名,求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_POP | 求总体方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_SAMP | 求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| EXTREME | 求具有最大绝对值的值。如果正值和负值的最大绝对值相等,则返回正值。 | INT32 INT64 FLOAT DOUBLE | 与输入类型一致 | +| MODE | 求众数。注意: 1.输入序列的不同值个数过多时会有内存异常风险; 2.如果所有元素出现的频次相同,即没有众数,则随机返回一个元素; 3.如果有多个众数,则随机返回一个众数; 4. NULL 值也会被统计频次,所以即使输入序列的值不全为 NULL,最终结果也可能为 NULL。 | 所有类型 | 与输入类型一致 | +| MAX_BY | MAX_BY(x, y) 求二元输入 x 和 y 在 y 最大时对应的 x 的值。MAX_BY(time, x) 返回 x 取最大值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | +| MIN_BY | MIN_BY(x, y) 求二元输入 x 和 y 在 y 最小时对应的 x 的值。MIN_BY(time, x) 返回 x 取最小值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | +| FIRST_BY | FIRST_BY(x, y) 求当 y 为第一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | +| LAST_BY | LAST_BY(x, y) 求当 y 为最后一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | + + +### 2.3 示例 + +#### 2.3.1 示例数据 + +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 + +#### 2.3.2 Count + +统计的是整张表的行数和 `temperature` 列非 NULL 值的数量。 + +```SQL +IoTDB> select count(*), count(temperature) from table1; +``` + +执行结果如下: + +> 注意:只有COUNT函数可以与*一起使用,否则将抛出错误。 + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 18| 12| ++-----+-----+ +Total line number = 1 +It costs 0.834s +``` + + +#### 2.3.3 Count_if + +统计 `table2` 中 到达时间 `arrival_time` 不是 `null` 的记录行数。 + +```sql +IoTDB> select count_if(arrival_time is not null) from table2; +``` + +执行结果如下: + +```sql ++-----+ +|_col0| ++-----+ +| 4| ++-----+ +Total line number = 1 +It costs 0.047s +``` + +#### 2.3.4 Approx_count_distinct + +查询 `table1` 中 `temperature` 列不同值的个数。 + +```sql +IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1; +IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1; +``` + +执行结果如下: + +```sql ++------+------+ +|origin|approx| ++------+------+ +| 3| 3| ++------+------+ +Total line number = 1 +It costs 0.022s +``` + +#### 2.3.5 Approx_most_frequent + +查询 `table1` 中 `temperature` 列出现频次最高的2个值 + +```sql +IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1; +``` + +执行结果如下: + +```sql ++-------------------+ +| topk| ++-------------------+ +|{"85.0":6,"90.0":5}| ++-------------------+ +Total line number = 1 +It costs 0.064s +``` + + +#### 2.3.6 First + +查询`temperature`列、`humidity`列时间戳最小且不为 NULL 的值。 + +```SQL +IoTDB> select first(temperature), first(humidity) from table1; +``` + +执行结果如下: + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 90.0| 35.1| ++-----+-----+ +Total line number = 1 +It costs 0.170s +``` + +#### 2.3.7 Last + +查询`temperature`列、`humidity`列时间戳最大且不为 NULL 的值。 + +```SQL +IoTDB> select last(temperature), last(humidity) from table1; +``` + +执行结果如下: + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 90.0| 34.8| ++-----+-----+ +Total line number = 1 +It costs 0.211s 
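+ +-- 注(补充说明):除 COUNT 外,聚合函数均忽略 NULL 值;若 temperature 列的值全部为 NULL,则 last(temperature) 返回 NULL(参见 2.1 概述)。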
+``` + +#### 2.3.8 First_by + +查询 `temperature` 列中非 NULL 且时间戳最小的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最小的行的 `humidity` 值。 + +```SQL +IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1; +``` + +执行结果如下: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-26T13:37:00.000+08:00| 35.1| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.269s +``` + +#### 2.3.9 Last_by + +查询`temperature` 列中非 NULL 且时间戳最大的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最大的行的 `humidity` 值。 + +```SQL +IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1; +``` + +执行结果如下: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-30T14:30:00.000+08:00| 34.8| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.070s +``` + +#### 2.3.10 Max_by + +查询`temperature` 列中最大值所在行的 `time` 值,以及`temperature` 列中最大值所在行的 `humidity` 值。 + +```SQL +IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; +``` + +执行结果如下: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-30T09:30:00.000+08:00| 35.2| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.172s +``` + +#### 2.3.11 Min_by + +查询`temperature` 列中最小值所在行的 `time` 值,以及`temperature` 列中最小值所在行的 `humidity` 值。 + +```SQL +select min_by(time, temperature), min_by(humidity, temperature) from table1; +``` + +执行结果如下: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-29T10:00:00.000+08:00| null| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.244s +``` + + +## 3. 逻辑运算符 + +### 3.1 概述 + +逻辑运算符用于组合条件或否定条件,返回布尔结果(`true` 或 `false`)。 + +以下是常用的逻辑运算符及其描述: + +| 运算符 | 描述 | 示例 | +| ------ | ----------------------------- | ------- | +| AND | 仅当两个值都为 true 时为 true | a AND b | +| OR | 任一值为 true 时为 true | a OR b | +| NOT | 当值为 false 时为 true | NOT a | + +### 3.2 NULL 对逻辑运算符的影响 + +#### 3.2.1 AND 运算符 + +- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。 +- 如果 `AND` 运算符的一侧为 `FALSE`,则表达式结果为 `FALSE`。 + +示例: + +```SQL +NULL AND true -- null +NULL AND false -- false +NULL AND NULL -- null +``` + +#### 3.2.2 OR 运算符 + +- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。 +- 如果 `OR` 运算符的一侧为 `TRUE`,则表达式结果为 `TRUE`。 + +示例: + +```SQL +NULL OR NULL -- null +NULL OR false -- null +NULL OR true -- true +``` + +##### 3.2.2.1 真值表 + +以下真值表展示了 `NULL` 在 `AND` 和 `OR` 运算符中的处理方式: + +| a | b | a AND b | a OR b | +| ----- | ----- | ------- | ------ | +| TRUE | TRUE | TRUE | TRUE | +| TRUE | FALSE | FALSE | TRUE | +| TRUE | NULL | NULL | TRUE | +| FALSE | TRUE | FALSE | TRUE | +| FALSE | FALSE | FALSE | FALSE | +| FALSE | NULL | FALSE | NULL | +| NULL | TRUE | NULL | TRUE | +| NULL | FALSE | FALSE | NULL | +| NULL | NULL | NULL | NULL | + +#### 3.2.3 NOT 运算符 + +NULL 的逻辑否定仍然是 NULL + +示例: + +```SQL +NOT NULL -- null +``` + +##### 3.2.3.1真值表 + +以下真值表展示了 `NULL` 在 `NOT` 运算符中的处理方式: + +| a | NOT a | +| ----- | ----- | +| TRUE | FALSE | +| FALSE | TRUE | +| NULL | NULL | + + +## 4. 
日期和时间函数和运算符 + +### 4.1 now() -> Timestamp + +返回当前时间的时间戳。 + +### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp + +`date_bin` 函数是一种用于处理时间数据的函数,作用是将一个时间戳(Timestamp)舍入到指定的时间间隔(interval)的边界上。 + +**语法:** + +```SQL +-- 从时间戳为 0 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 +date_bin(interval,source) + +-- 从时间戳为 origin 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 +date_bin(interval,source,origin) + +-- interval支持的时间单位有: +-- 年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 +-- source必须为时间戳类型。 +``` + +**参数:** + +| 参数 | 含义 | +| -------- | ------------------------------------------------------------ | +| interval | 时间间隔支持的时间单位有:年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 | +| source | 待计算时间列,也可以是表达式。必须为时间戳类型。 | +| origin | 起始时间戳 | + +#### 4.2.1 语法约定: + +1. 不传入 `origin` 时,起始时间戳从 1970-01-01T00:00:00Z 开始计算(北京时间为 1970-01-01 08:00:00)。 +2. `interval` 为一个非负数,且必须带上时间单位。`interval` 为 0ms 时,不进行计算,直接返回 `source`。 +3. 当传入 `origin` 或 `source` 为负时,表示纪元时间之前的某个时间点,`date_bin` 会正常计算并返回与该时间点相关的时间段。 +4. 如果 `source` 中的值为 `null`,则返回 `null`。 +5. 不支持月份和非月份时间单位混用,例如 `1 MONTH 1 DAY`,这种时间间隔有歧义。 + +> 假设是起始时间是 2000 年 4 月 30 日进行计算,那么在一个时间间隔后,如果是先算 DAY再算MONTH,则会得到 2000 年 6 月 1 日,如果先算 MONTH 再算 DAY 则会得到 2000 年 5 月 31 日,二者得出的时间日期不同。 + +#### 4.2.2 示例 + +##### 示例数据 + +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 + +示例 1:不指定起始时间戳 + +```SQL +SELECT + time, + date_bin(1h,time) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.683s +``` + +示例 2:指定起始时间戳 + +```SQL +SELECT + time, + date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| 
+|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.056s +``` + +示例 3:`origin` 为负数的情况 + +```SQL +SELECT + time, + date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.203s +``` + +示例 4:`interval` 为 0 的情况 + +```SQL +SELECT + time, + date_bin(0ms, time) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| 
+|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.107s +``` + +示例 5:`source` 为 null 的情况 + +```SQL +SELECT + arrival_time, + date_bin(1h,arrival_time) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| arrival_time| time_bin| ++-----------------------------+-----------------------------+ +| null| null| +|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +| null| null| +|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| +| null| null| +|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.319s +``` + +### 4.3 Extract 函数 + +该函数用于提取日期对应部分的值。(V2.0.6 版本起支持) + +#### 4.3.1 语法定义 + +```SQL +EXTRACT (identifier FROM expression) +``` +* 参数说明 + * **expression**: `TIMESTAMP` 类型或时间常量 + * **identifier** :取值范围及对应的返回值见下表 + + | 取值范围 | 返回值类型 | 返回值范围 | + | -------------------------- | ------------- | ------------- | + | `YEAR` | `INT64` | `/` | + | `QUARTER` | `INT64` | `1-4` | + | `MONTH` | `INT64` | `1-12` | + | `WEEK` | `INT64` | `1-53` | + | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` | + | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` | + | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` | + | `HOUR` | `INT64` | `0-23` | + | `MINUTE` | `INT64` | `0-59` | + | `SECOND` | `INT64` | `0-59` | + | `MS` | `INT64` | `0-999` | + | `US` | `INT64` | `0-999` | + | `NS` | `INT64` | `0-999` | + + +#### 4.3.2 使用示例 + +以[示例数据](../Reference/Sample-Data.md)中的 table1 为源数据,查询某段时间每天前12个小时的温度平均值 + +```SQL +IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time) ++----------+-----+ +| fmtdate|avgtp| ++----------+-----+ +|2024-11-28| 86.0| +|2024-11-29| 85.0| +|2024-11-30| 90.0| ++----------+-----+ +Total line number = 3 +It costs 0.041s +``` + +`Format` 函数介绍:[Format 函数](../SQL-Manual/Basis-Function_apache.md#_7-2-format-函数) + +`Date_bin` 函数介绍:[Date_bin 函数](../SQL-Manual/Basis-Function_apache.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp) + + +## 5. 
数学函数和运算符 + +### 5.1 数学运算符 + +| **运算符** | **描述** | +| ---------- | ------------------------ | +| + | 加法 | +| - | 减法 | +| * | 乘法 | +| / | 除法(整数除法执行截断) | +| % | 模(余数) | +| - | 取反 | + +### 5.2 数学函数 + +| 函数名 | 描述 | 输入 | 输出 | 用法 | +|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------| ---------------------- | ---------- | +| sin | 正弦函数 | double、float、INT64、INT32 | double | sin(x) | +| cos | 余弦函数 | double、float、INT64、INT32 | double | cos(x) | +| tan | 正切函数 | double、float、INT64、INT32 | double | tan(x) | +| asin | 反正弦函数 | double、float、INT64、INT32 | double | asin(x) | +| acos | 反余弦函数 | double、float、INT64、INT32 | double | acos(x) | +| atan | 反正切函数 | double、float、INT64、INT32 | double | atan(x) | +| sinh | 双曲正弦函数 | double、float、INT64、INT32 | double | sinh(x) | +| cosh | 双曲余弦函数 | double、float、INT64、INT32 | double | cosh(x) | +| tanh | 双曲正切函数 | double、float、INT64、INT32 | double | tanh(x) | +| degrees | 将弧度角 x 转换为度 | double、float、INT64、INT32 | double | degrees(x) | +| radians | 将度转换为弧度 | double、float、INT64、INT32 | double | radians(x) | +| abs | 绝对值 | double、float、INT64、INT32 | 返回与输入类型相同的值 | abs(x) | +| sign | 返回 x 的符号函数,即:如果参数为 0,则返回 0,如果参数大于 0,则返回 1,如果参数小于 0,则返回 -1。对于 double/float 类型的参数,函数还会返回:如果参数为 NaN,则返回 NaN,如果参数为 +Infinity,则返回 1.0,如果参数为 -Infinity,则返回 -1.0。 | double、float、INT64、INT32 | 返回与输入类型相同的值 | sign(x) | +| ceil | 返回 x 向上取整到最近的整数。 | double、float、INT64、INT32 | double | ceil(x) | +| floor | 返回 x 向下取整到最近的整数。 | double、float、INT64、INT32 | double | floor(x) | +| exp | 返回欧拉数 e 的 x 次幂。 | double、float、INT64、INT32 | double | exp(x) | +| ln | 返回 x 的自然对数。 | double、float、INT64、INT32 | double | ln(x) | +| log10 | 返回 x 的以 10 为底的对数。 | double、float、INT64、INT32 | double | log10(x) | +| round | 返回 x 四舍五入到最近的整数。 | double、float、INT64、INT32 | double | round(x) | +| round | 返回 x 四舍五入到 d 位小数。 | double、float、INT64、INT32 | double | round(x, d) | +| sqrt | 返回 x 的平方根。 | double、float、INT64、INT32 | double | sqrt(x) | +| e | 自然指数 | | double | e() | +| pi | π | | double | pi() | + + +## 6. 位运算函数 + +> V 2.0.6 版本起支持 + +示例原始数据如下: + +```SQL +IoTDB:database1> select * from bit_table ++-----------------------------+---------+------+-----+ +| time|device_id|length|width| ++-----------------------------+---------+------+-----+ +|2025-10-29T15:59:42.957+08:00| d1| 14| 12| +|2025-10-29T15:58:59.399+08:00| d3| 15| 10| +|2025-10-29T15:59:32.769+08:00| d2| 13| 12| ++-----------------------------+---------+------+-----+ + +--建表语句 +CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD); + +--写入数据 +INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12); +``` + +### 6.1 bit\_count(num, bits) + +`bit_count(num, bits)` 函数用于统计整数 `num`在指定位宽 `bits`下的二进制表示中 1 的个数。 + +#### 6.1.1 语法定义 + +```SQL +bit_count(num, bits) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * **​num:​**任意整型数值(int32 或者 int64) + * **​bits:​**整型数值,取值范围为2\~64 + +注意:如果 bits 位数不够表示 num,会报错(此处是​**有符号补码**​):`Argument exception, the scalar function num must be representable with the bits specified. 
[num] cannot be represented with [bits] bits.` + +* 调用方式 + * 两个具体数值:`bit_count(9, 64)` + * 列与数值:`bit_count(column1, 64)` + * 两列之间:`bit_count(column1, column2)` + +#### 6.1.2 使用示例 + +```SQL +-- 两个具体数值 +IoTDB:database1> select distinct bit_count(2,8) from bit_table ++-----+ +|_col0| ++-----+ +| 1| ++-----+ +-- 两个具体数值 +IoTDB:database1> select distinct bit_count(-5,8) from bit_table ++-----+ +|_col0| ++-----+ +| 7| ++-----+ +--列与数值 +IoTDB:database1> select length,bit_count(length,8) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 3| +| 15| 4| +| 13| 3| ++------+-----+ +--bits位数不够 +IoTDB:database1> select length,bit_count(length,2) from bit_table +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits. +``` + +### 6.2 bitwise\_and(x, y) + +`bitwise_and(x, y)`函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑与操作,并返回其按位与(bitwise AND)的运算结果。 + +#### 6.2.1 语法定义 + +```SQL +bitwise_and(x, y) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x, y**​: 必须是 Int32 或 Int64 数据类型的整数值 +* 调用方式 + * 两个具体数值:`bitwise_and(19, 25)` + * 列与数值:`bitwise_and(column1, 25)` + * 两列之间:`bitwise_and(column1, column2)` + +#### 6.2.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct bitwise_and(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 17| ++-----+ +--列与数值 +IoTDB:database1> select length, bitwise_and(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 8| +| 15| 9| +| 13| 9| ++------+-----+ +--俩列之间 +IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 12| +| 15| 10| 10| +| 13| 12| 12| ++------+-----+-----+ +``` + +### 6.3 bitwise\_not(x) + +`bitwise_not(x)` 函数基于二进制补码表示法,对整数 x 的每一位进行逻辑非操作,并返回其按位取反(bitwise NOT)的运算结果。 + +#### 6.3.1 语法定义 + +```SQL +bitwise_not(x) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x**​: 必须是 Int32 或 Int64 数据类型的整数值 +* 调用方式 + * 具体数值:`bitwise_not(5)` + * 单列操作:`bitwise_not(column1)` + +#### 6.3.2 使用示例 + +```SQL +-- 具体数值 +IoTDB:database1> select distinct bitwise_not(5) from bit_table ++-----+ +|_col0| ++-----+ +| -6| ++-----+ +-- 单列 +IoTDB:database1> select length, bitwise_not(length) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| -15| +| 15| -16| +| 13| -14| ++------+-----+ +``` + +### 6.4 bitwise\_or(x, y) + +`bitwise_or(x,y)` 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑或操作,并返回其按位或(bitwise OR)的运算结果。 + +#### 6.4.1 语法定义 + +```SQL +bitwise_or(x, y) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x, y**​: 必须是 Int32 或 Int64 数据类型的整数值 +* 调用方式 + * 两个具体数值:`bitwise_or(19, 25)` + * 列与数值:`bitwise_or(column1, 25)` + * 两列之间:`bitwise_or(column1, column2)` + +#### 6.4.2 使用示例 + +```SQL +-- 两个具体数值 +IoTDB:database1> select distinct bitwise_or(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 27| ++-----+ +-- 列与数值 +IoTDB:database1> select length,bitwise_or(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 31| +| 15| 31| +| 13| 29| ++------+-----+ +-- 两列之间 +IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 14| +| 15| 10| 15| +| 13| 12| 13| ++------+-----+-----+ +``` + +### 6.5 bitwise\_xor(x, y) + +bitwise\_xor(x,y) 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑异或操作,并返回其按位异或(bitwise XOR)的运算结果。异或规则:相同为0,不同为1。 + +#### 6.5.1 语法定义 + +```SQL +bitwise_xor(x, y) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x, y**​: 必须是 Int32 或 
Int64 数据类型的整数值 +* 调用方式 + * 两个具体数值:`bitwise_xor(19, 25)` + * 列与数值:`bitwise_xor(column1, 25)` + * 两列之间:`bitwise_xor(column1, column2)` + +#### 6.5.2 使用示例 + +```SQL +-- 两个具体数值 +IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 10| ++-----+ +-- 列与数值 +IoTDB:database1> select length,bitwise_xor(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 23| +| 15| 22| +| 13| 20| ++------+-----+ +-- 两列之间 +IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 2| +| 15| 10| 5| +| 13| 12| 1| ++------+-----+-----+ +``` + +### 6.6 bitwise\_left\_shift(value, shift) + +`bitwise_left_shift(value, shift)` 函数返回将整数 `value`的二进制表示左移 `shift`位后的结果。左移操作将二进制位向高位方向移动,右侧空出的位用 0 填充,左侧溢出的位直接丢弃。等价于: `value << shift`。 + +#### 6.6.1 语法定义 + +```SQL +bitwise_left_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 +``` + +* 参数说明 + * ​**value**​: 要左移的整数值,必须是 Int32 或 Int64 数据类型 + * ​**shift**​: 左移的位数,必须是 Int32 或 Int64 数据类型 +* 调用方式 + * 两个具体数值:`bitwise_left_shift(1, 2)` + * 列与数值:`bitwise_left_shift(column1, 2)` + * 两列之间:`bitwise_left_shift(column1, column2)` + +#### 6.6.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table ++-----+ +|_col0| ++-----+ +| 4| ++-----+ +-- 列与数值 +IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 56| +| 15| 60| +| 13| 52| ++------+-----+ +-- 两列之间 +IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| ++------+-----+-----+ +``` + +### 6.7 bitwise\_right\_shift(value, shift) + +`bitwise_right_shift(value, shift)`函数返回将整数 `value`的二进制表示逻辑右移(无符号右移) `shift`位后的结果。逻辑右移操作将二进制位向低位方向移动,左侧空出的高位用 0 填充,右侧溢出的低位直接丢弃。 + +#### 6.7.1 语法定义 + +```SQL +bitwise_right_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 +``` + +* 参数说明 + * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 + * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 +* 调用方式 + * 两个具体数值:`bitwise_right_shift(8, 3)` + * 列与数值:`bitwise_right_shift(column1, 3)` + * 两列之间:`bitwise_right_shift(column1, column2)` + +#### 6.7.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table ++-----+ +|_col0| ++-----+ +| 1| ++-----+ +--列与数值 +IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--两列之间 +IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| +``` + +### 6.8 bitwise\_right\_shift\_arithmetic(value, shift) + +`bitwise_right_shift_arithmetic(value, shift)`函数返回将整数 `value`的二进制表示算术右移 `shift`位后的结果。算术右移操作将二进制位向低位方向移动,右侧溢出的低位直接丢弃,左侧空出的高位用符号位填充(正数补0,负数补1),以保持数值的符号不变。 + +#### 6.8.1 语法定义 + +```SQL +bitwise_right_shift_arithmetic(value, shift) -> [same as value]--返回结果类型与value数据类型相同 +``` + +* 参数说明 + * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 + * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 +* 调用方式: + * 两个具体数值:`bitwise_right_shift_arithmetic(12, 2)` + * 列与数值:`bitwise_right_shift_arithmetic(column1, 64)` + * 两列之间:`bitwise_right_shift_arithmetic(column1, column2)` + +#### 6.8.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct 
bitwise_right_shift_arithmetic(12,2) from bit_table ++-----+ +|_col0| ++-----+ +| 3| ++-----+ +-- 列与数值 +IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--两列之间 +IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| ++------+-----+-----+ +``` + +## 7. 条件表达式 + +### 7.1 CASE 表达式 + +CASE 表达式有两种形式:简单形式、搜索形式 + +#### 7.1.1 简单形式 + +简单形式从左到右搜索每个值表达式,直到找到一个与表达式相等的值: + +```SQL +CASE expression + WHEN value THEN result + [ WHEN ... ] + [ ELSE result ] +END +``` + +如果找到匹配的值,则返回相应的结果。如果没有找到匹配项,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: + +```SQL +SELECT a, + CASE a + WHEN 1 THEN 'one' + WHEN 2 THEN 'two' + ELSE 'many' + END +``` + +#### 7.1.2 搜索形式 + +搜索形式从左到右评估每个布尔条件,直到找到一个为真的条件,并返回相应的结果: + +```SQL +CASE + WHEN condition THEN result + [ WHEN ... ] + [ ELSE result ] +END +``` + +如果没有条件为真,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: + +```SQL +SELECT a, b, + CASE + WHEN a = 1 THEN 'aaa' + WHEN b = 2 THEN 'bbb' + ELSE 'ccc' + END +``` + +### 7.2 COALESCE 函数 + +返回参数列表中的第一个非空值。 + +```SQL +coalesce(value1, value2[, ...]) +``` + +## 8. 转换函数 + +### 8.1 转换函数 + +#### 8.1.1 cast(value AS type) → type + +1. 显式地将一个值转换为指定类型。 +2. 可以用于将字符串(varchar)转换为数值类型,或数值转换为字符串类型,V2.0.8-beta 版本起支持 OBJECT 类型强转成 STRING 类型。 +3. 如果转换失败,将抛出运行时错误。 + +示例: + +```SQL +SELECT * + FROM table1 + WHERE CAST(time AS DATE) + IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE)); +``` + +#### 8.1.2 try_cast(value AS type) → type + +1. 与 `cast()` 类似。 +2. 如果转换失败,则返回 `null`。 + +示例: + +```SQL +SELECT * + FROM table1 + WHERE try_cast(time AS DATE) + IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE)); +``` + +### 8.2 Format 函数 +该函数基于指定的格式字符串与输入参数,生成并返回格式化后的字符串输出。其功能与 Java 语言中的`String.format` 方法及 C 语言中的`printf`函数相类似,支持开发者通过占位符语法构建动态字符串模板,其中预设的格式标识符将被传入的对应参数值精准替换,最终形成符合特定格式要求的完整字符串。 + +#### 8.2.1 语法介绍 + +```SQL +format(pattern,...args) -> String +``` + +**参数定义** + +* `pattern`: 格式字符串,可包含静态文本及一个或多个格式说明符(如 `%s`, `%d` 等),或任意返回类型为 `STRING/TEXT` 的表达式。 +* `args`: 用于替换格式说明符的输入参数。需满足以下条件: + * 参数数量 ≥ 1 + * 若存在多个参数,以逗号`,`分隔(如 `arg1,arg2`) + * 参数总数可多于 `pattern` 中的占位符数量,但不可少于,否则触发异常 + +**返回值** + +* 类型为 `STRING` 的格式化结果字符串 + +#### 8.2.2 使用示例 + +1. 格式化浮点数 + +```SQL +IoTDB:database1> select format('%.5f',humidity) from table1 where humidity = 35.4 ++--------+ +| _col0| ++--------+ +|35.40000| ++--------+ +``` + +2. 格式化整数 + +```SQL +IoTDB:database1> select format('%03d',8) from table1 limit 1 ++-----+ +|_col0| ++-----+ +| 008| ++-----+ +``` + +3. 
格式化日期和时间戳 + +* Locale-specific日期 + +```SQL +IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) from table1 limit 1 ++--------------------+ +| _col0| ++--------------------+ +|星期一, 一月 1, 2024| ++--------------------+ +``` + +* 去除时区信息 + +```SQL +IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 ++-----------------------+ +| _col0| ++-----------------------+ +|2024-01-01 00:00:00.000| ++-----------------------+ +``` + +* 获取秒级时间戳精度 + +```SQL +IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 ++-------------------+ +| _col0| ++-------------------+ +|2024-01-01 00:00:00| ++-------------------+ +``` + +* 日期符号说明如下 + +| **符号** | **​ 描述** | +| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 'H' | 24 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 00 - 23。 | +| 'I' | 12 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 01 - 12。 | +| 'k' | 24 小时制的小时数,i.e. 0 - 23。 | +| 'l' | 12 小时制的小时数,i.e. 1 - 12。 | +| 'M' | 小时内的分钟,格式为两位数,必要时加上前导零,i.e. 00 - 59。 | +| 'S' | 分钟内的秒数,格式为两位数,必要时加上前导零,i.e. 00 - 60(“60 ”是支持闰秒所需的特殊值)。 | +| 'L' | 秒内毫秒,格式为三位数,必要时加前导零,i.e. 000 - 999。 | +| 'N' | 秒内的纳秒,格式为九位数,必要时加前导零,i.e. 000000000 - 999999999。 | +| 'p' | 当地特定的[上午或下午](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getAmPmStrings())标记,小写,如 “am ”或 “pm”。使用转换前缀 “T ”会强制输出为大写。 | +| 'z' | 从格林尼治标准时间偏移的[RFC 822](http://www.ietf.org/rfc/rfc0822.txt)式数字时区,例如 -0800。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是 Java 虚拟机此实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。 | +| 'Z' | 表示时区缩写的字符串。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是此 Java 虚拟机实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。Formatter 的时区将取代参数的时区(如果有)。 | +| 's' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的秒数,i.e. Long.MIN\_VALUE/1000 至 Long.MAX\_VALUE/1000。 | +| 'Q' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的毫秒数,i.e. 
Long.MIN\_VALUE 至 Long.MAX\_VALUE。 | + +* 用于格式化常见的日期/时间组成的转换字符说明如下 + +| **符号** | **描述** | +| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 'B' | 特定于区域设置[的完整月份名称](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getMonths()),例如 “January”、“February”。 | +| 'b' | 当地特定月份的缩写名称,如"1 月"、"2 月"。 | +| 'h' | 与"b "相同。 | +| 'A' | 一周中某一天在当地的全称,如"星期日"、"星期一"。 | +| 'a' | 当地特有的星期简短名称,例如"星期日"、"星期一 | +| 'C' | 四位数年份除以100,格式为两位数,必要时加上前导零,即00 - 99 | +| 'Y' | 年份,格式为至少四位数,必要时加上前导零,例如0092相当于公历92年。 | +| 'y' | 年份的最后两位数,格式为必要的前导零,即00 - 99。 | +| 'j' | 年号,格式为三位数,必要时加前导零,例如公历为001 - 366。 | +| 'm' | 月份,格式为两位数,必要时加前导零,即01 - 13。 | +| 'd' | 月日,格式为两位数,必要时加前导零,即01 - 31 | +| 'e' | 月日,格式为两位数,即1 - 31。 | + +4. 格式化字符串 + +```SQL +IoTDB:database1> SELECT format('The measurement status is :%s',status) FROM table2 limit 1 ++-------------------------------+ +| _col0| ++-------------------------------+ +|The measurement status is :true| ++-------------------------------+ +``` + +5. 格式化百分号 + +```SQL +IoTDB:database1> SELECT format('%s%%', 99.9) from table1 limit 1 ++-----+ +|_col0| ++-----+ +|99.9%| ++-----+ +``` + +#### 8.2.3 **格式转换失败场景说明** + +1. 类型不匹配错误 + +* 时间戳类型冲突 若格式说明符中包含时间相关标记(如 `%Y-%m-%d`),但参数提供: + * 非 `DATE`/`TIMESTAMP` 类型值 + * 或涉及日期细粒度单位(如 `%H` 小时、`%M` 分钟)时,参数仅支持 `TIMESTAMP` 类型,否则将抛出类型异常 + +```SQL +-- 示例1 +IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float) + +-- 示例2 +IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float) +``` + +* 浮点数类型冲突 若使用 `%f` 等浮点格式说明符,但参数提供非数值类型(如字符串、布尔值),将触发类型转换错误 + +```SQL +IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean) +``` + +2. 参数数量不匹配错误 + +* 实际提供的参数数量 必须等于或大于 格式字符串中格式说明符的数量 +* 若参数数量少于格式说明符数量,将抛出 `ArgumentCountMismatch` 异常 + +```SQL +IoTDB:database1> select format('%.5f %03d', humidity) from table1 where humidity = 35.4 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d') +``` + +3. 无效调用错误 + +* 当函数参数满足以下任一条件时,视为非法调用: + * 参数总数 小于 2(必须包含格式字符串及至少一个参数) + * 格式字符串(`pattern`)类型非 `STRING/TEXT` + +```SQL +-- 示例1 +IoTDB:database1> select format('%s') from table1 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. + +--示例2 +IoTDB:database1> select format(123, humidity) from table1 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. +``` + + + +## 9. 
字符串函数和操作符 + +### 9.1 字符串操作符 + +#### 9.1.1 || 操作符 + +`||` 操作符用于字符串连接,功能与 `concat` 函数相同。 + +#### 9.1.2 LIKE 语句 + +`LIKE` 语句用于模式匹配,具体用法在[模式匹配:LIKE](#1-like-运算符) 中有详细文档。 + +### 9.2 字符串函数 + +| 函数名 | 描述 | 输入 | 输出 | 用法 | +| ----------- |---------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| ------------------------------------------------------------ | ------------------------------------------------------------ | +| length | 返回字符串的字符长度,而不是字符数组的长度。 | 支持一个参数,类型可以是字符串或文本。**string**:要计算长度的字符串。 | INT32 | length(string) | +| upper | 将字符串中的字母转换为大写。 | 支持一个参数,类型可以是字符串或文本。**string**:要计算长度的字符串。 | String | upper(string) | +| lower | 将字符串中的字母转换为小写。 | 支持一个参数,类型可以是字符串或文本。**string**:要计算长度的字符串。 | String | lower(string) | +| trim | 从源字符串中删除指定的开头和/或结尾字符。 | 支持三个参数**specification(可选)**:指定从哪边去掉字符,可以是:`BOTH`:两边都去掉(默认)。`LEADING`:只去掉开头的字符。`TRAILING`:只去掉结尾的字符。**trimcharacter(可选)**:要去掉的字符,如果没指定,默认去掉空格。**string**:要处理的字符串。 | String | trim([ [ specification ] [ trimcharacter ] FROM ] string) 示例:`trim('!' FROM '!foo!');` —— `'foo'` | +| strpos | 返回子字符串在字符串中第一次出现的起始位置。位置从 1 开始计数。如果未找到,返回 0。注意:起始位置是基于字符而不是字节数组确定的。 | 仅支持两个参数,类型可以是字符串或文本。**sourceStr**:要搜索的字符串。**subStr**:要找的子字符串。 | INT32 | strpos(sourceStr, subStr) | +| starts_with | 测试子字符串是否是字符串的前缀。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串,类型可以是字符串或文本。**prefix**:前缀子字符串,类型可以是字符串或文本。 | Boolean | starts_with(sourceStr, prefix) | +| ends_with | 测试字符串是否以指定的后缀结束。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串。**suffix**:后缀子字符串。 | Boolean | ends_with(sourceStr, suffix) | +| concat | 返回字符串 `string1`、`string2`、...、`stringN` 的连接结果。功能与连接操作符 `\|\|` 相同。 | 至少两个参数,所有参数类型必须是字符串或文本。 | String | concat(str1, str2, ...) 或 str1 \|\| str2 ... | +| strcmp | 比较两个字符串的字母序。 | 支持两个参数,两个参数类型必须是字符串或文本。**string1**:第一个要比较的字符串。**string2**:第二个要比较的字符串。 | 返回一个整数值INT32如果 `str1 < str2`,返回 `-1`如果 `str1 = str2`,返回 `0`如果 `str1 > str2`,返回 `1`如果 `str1` 或 `str2` 为 `NULL`,返回 `NULL` | strcmp(str1, str2) | +| replace | 从字符串中删除所有 `search` 的实例。 | 支持两个参数,可以是字符串或文本类型。**string**:原始字符串,要从中删除内容的字符串。**search**:要删除的子字符串。 | String | replace(string, string) | +| replace | 将字符串中所有 `search` 的实例替换为 `replace`。 | 支持三个参数,可以是字符串或文本类型。**string**:原始字符串,要从中替换内容的字符串。**search**:要替换掉的子字符串。**replace**:用来替换的新字符串。 | String | replace(string, string, string) | +| substring | 从指定位置提取字符到字符串末尾。需要注意的是,起始位置是基于字符而不是字节数组确定的。`start_index` 从 1 开始计数,长度从 `start_index` 位置计算。 | 支持两个参数**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。 | String:返回一个字符串,从 `start_index` 位置开始到字符串末尾的所有字符。**注意事项**:`start_index` 从 1 开始,即数组的第 0 个位置是 1参数为 null时,返回 `null`start_index 大于字符串长度时,结果报错。 | substring(string from start_index)或 substring(string, start_index) | +| substring | 从一个字符串中提取从指定位置开始、指定长度的子字符串注意:起始位置和长度是基于字符而不是字节数组确定的。`start_index` 从 1 开始计数,长度从 `start_index` 位置计算。 | 支持三个参数**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。**length**:要提取的子字符串的长度。 | String:返回一个字符串,从 `start_index` 位置开始,提取 `length` 个字符。**注意事项**:参数为 null时,返回 `null`如果 `start_index` 大于字符串的长度,结果报错。如果 `length` 小于 0,结果报错。极端情况,`start_index + length` 超过 `int.MAX` 并变成负数,将导致异常结果。 | substring(string from start_index for length) 或 substring(string, start_index, length) | + +## 10. 模式匹配函数 + +### 10.1 LIKE 运算符 + +#### 10.1.1 用途 + +`LIKE` 运算符用于将值与模式进行比较。它通常用于 `WHERE` 子句中,用于匹配字符串中的特定模式。 + +#### 10.1.2 语法 + +```SQL +... 
column [NOT] LIKE 'pattern' ESCAPE 'character'; +``` + +#### 10.1.3 匹配规则 + +- 匹配字符是区分大小写的。 +- 模式支持两个匹配符号: + - `_`:匹配任意单个字符。 + - `%`:匹配0个或多个字符。 + +#### 10.1.4 注意事项 + +- `LIKE` 模式匹配总是覆盖整个字符串。如果需要匹配字符串中的任意位置,模式必须以 `%` 开头和结尾。 +- 如果需要匹配 `%` 或 `_` 作为普通字符,必须使用转义字符。 + +#### 10.1.5 示例 + +示例 1:匹配以特定字符开头的字符串 + +- **说明**:查找所有以字母 `E` 开头的名称,例如 `Europe`。 + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'E%'; +``` + +示例 2:排除特定模式 + +- **说明**:查找所有不以字母 `E` 开头的名称。 + +```SQL +SELECT * FROM table1 WHERE continent NOT LIKE 'E%'; +``` + +示例 3:匹配特定长度的字符串 + +- **说明**:查找所有以 `A` 开头、以 `a` 结尾且中间有两个字符的名称,例如 `Asia`。 + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'A__a'; +``` + +示例 4:转义特殊字符 + +- **说明**:查找所有以 `South_` 开头的名称。这里使用了转义字符 `\` 来转义 `_` 等特殊字符,例如`South_America`。 + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\'; +``` + +示例 5:匹配转义字符本身 + +- **说明**:如果需要匹配转义字符本身,可以使用双转义字符 `\\`。 + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\'; +``` + +### 10.2 regexp_like 函数 + +#### 10.2.1 用途 + +`regexp_like` 函数用于评估正则表达式模式,并确定该模式是否包含在字符串中。 + +#### 10.2.2 语法 + +```SQL +regexp_like(string, pattern); +``` + +#### 10.2.3 注意事项 + +- `regexp_like` 的模式只需包含在字符串中,而不需要匹配整个字符串。 +- 如果需要匹配整个字符串,可以使用正则表达式的锚点 `^` 和 `$`。 +- `^` 表示“字符串的开头”,`$` 表示“字符串的结尾”。 +- 正则表达式采用 Java 定义的正则语法,但存在以下需要注意的例外情况: + - **多行模式** + 1. 启用方式:`(?m)`。 + 2. 只识别`\n`作为行终止符。 + 3. 不支持`(?d)`标志,且禁止使用。 + - **不区分大小写匹配** + 1. 启用方式:`(?i)`。 + 2. 基于Unicode规则,不支持上下文相关和本地化匹配。 + 3. 不支持`(?u)`标志,且禁止使用。 + - **字符类** + 1. 在字符类(如`[A-Z123]`)中,`\Q`和`\E`不被支持,被视为普通字面量。 + - **Unicode字符类(**`\p{prop}`**)** + 1. **名称下划线**:名称中的所有下划线必须删除(如`OldItalic`而非`Old_Italic`)。 + 2. **文字(Scripts)**:直接指定,无需`Is`、`script=`或`sc=`前缀(如`\p{Hiragana}`)。 + 3. **区块(Blocks)**:必须使用`In`前缀,不支持`block=`或`blk=`前缀(如`\p{InMongolian}`)。 + 4. **类别(Categories)**:直接指定,无需`Is`、`general_category=`或`gc=`前缀(如`\p{L}`)。 + 5. **二元属性(Binary Properties)**:直接指定,无需`Is`(如`\p{NoncharacterCodePoint}`)。 + +#### 10.2.4 示例 + +示例 1:匹配包含特定模式的字符串 + +```SQL +SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true +``` + +- **说明**:检查字符串 `'1a 2b 14m'` 是否包含模式 `\d+b`。 + - `\d+` 表示“一个或多个数字”。 + - `b` 表示字母 `b`。 + - 在 `'1a 2b 14m'` 中,`2b` 符合这个模式,所以返回 `true`。 + +示例 2:匹配整个字符串 + +```SQL +SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false +``` + +- **说明**:检查字符串 `'1a 2b 14m'` 是否完全匹配模式 `^\\d+b$`。 + - `\d+` 表示“一个或多个数字”。 + - `b` 表示字母 `b`。 + - `'1a 2b 14m'` 并不符合这个模式,因为它不是从数字开始,也不是以 `b` 结束,所以返回 `false`。 + +## 11. 
时序分窗函数 + +原始示例数据如下: + +```SQL +IoTDB> SELECT * FROM bid; ++-----------------------------+--------+-----+ +| time|stock_id|price| ++-----------------------------+--------+-----+ +|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:15:00.000+08:00| TESL|195.0| ++-----------------------------+--------+-----+ + +-- 创建语句 +CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD); +-- 插入数据 +INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0); +``` + +### 11.1 HOP + +#### 11.1.1 功能描述 + +HOP 函数用于按时间分段分窗分析,识别每一行数据所属的时间窗口。该函数通过指定固定窗口大小(size)和窗口滑动步长(SLIDE),将数据按时间戳分配到所有与其时间戳重叠的窗口中。若窗口之间存在重叠(步长 < 窗口大小),数据会自动复制到多个窗口。 + +#### 11.1.2 函数定义 + +```SQL +HOP(data, timecol, size, slide[, origin]) +``` + +#### 11.1.3 参数说明 + +| 参数名 | 参数类型 | 参数属性 | 描述 | +| --------- | ---------- | --------------------------------- | -------------------- | +| DATA | 表参数 | ROW SEMANTICPASS THROUGH | 输入表 | +| TIMECOL | 标量参数 | 字符串类型默认值:time | 时间列 | +| SIZE | 标量参数 | 长整数类型 | 窗口大小 | +| SLIDE | 标量参数 | 长整数类型 | 窗口滑动步长 | +| ORIGIN | 标量参数 | 时间戳类型默认值:Unix 纪元时间 | 第一个窗口起始时间 | + +#### 11.1.4 返回结果 + +HOP 函数的返回结果列包含: + +* window\_start: 窗口开始时间(闭区间) +* window\_end: 窗口结束时间(开区间) +* 映射列:DATA 参数的所有输入列 + +#### 11.1.5 使用示例 + +```SQL +IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id; 
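+-- 注:本例中 SLIDE(5m)小于 SIZE(10m),窗口互相重叠,同一行数据会被分配到所有覆盖其时间戳的窗口,因此同一时间段会对应多行聚合结果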
++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 201.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.2 SESSION + +#### 11.2.1 功能描述 + +SESSION 函数用于按会话间隔对数据进行分窗。系统逐行检查当前行与前一行的时间间隔:小于阈值(GAP)则归入当前窗口,超过阈值则开启下一个窗口。 + +#### 11.2.2 函数定义 + +```SQL +SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap) +``` + +#### 11.2.3 参数说明 + +| 参数名 | 参数类型 | 参数属性 | 描述 | +| --------- | ---------- | -------------------------- | ---------------------------------------- | +| DATA | 表参数 | SET SEMANTIC
PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 | +| TIMECOL | 标量参数 | 字符串类型
默认值:'time' | 时间列名 | +| GAP | 标量参数 | 长整数类型 | 会话间隔阈值 | + +#### 11.2.4 返回结果 + +SESSION 函数的返回结果列包含: + +* window\_start: 会话窗口内的第一条数据的时间 +* window\_end: 会话窗口内的最后一条数据的时间 +* 映射列:DATA 参数的所有输入列 + +#### 11.2.5 使用示例 + +```SQL +IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY SESSION +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.3 VARIATION + +#### 11.3.1 功能描述 + +VARIATION 
函数用于按数据差值分窗,将第一条数据作为首个窗口的基准值,每个数据点会与基准值进行差值运算,如果差值小于给定的阈值(delta)则加入当前窗口;如果超过阈值,则分为下一个窗口,将该值作为下一个窗口的基准值。 + +#### 11.3.2 函数定义 + +```sql +VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta) +``` + +#### 11.3.3 参数说明 + +| 参数名 | 参数类型 | 参数属性 | 描述 | +| -------- | ---------- | -------------------------- | ---------------------------------------- | +| DATA | 表参数 | SET SEMANTICPASS THROUGH | 输入表通过 pkeys、okeys 指定分区和排序 | +| COL | 标量参数 | 字符串类型 | 标识对哪一列计算差值 | +| DELTA | 标量参数 | 浮点数类型 | 差值阈值 | + +#### 11.3.4 返回结果 + +VARIATION 函数的返回结果列包含: + +* window\_index: 窗口编号 +* 映射列:DATA 参数的所有输入列 + +#### 11.3.5 使用示例 + +```sql +IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY VARIATION +IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id; ++-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.4 CAPACITY + +#### 11.4.1 功能描述 + +CAPACITY 函数用于按数据点数(行数)分窗,每个窗口最多有 SIZE 行数据。 + +#### 11.4.2 函数定义 + +```sql +CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size) +``` + +#### 11.4.3 参数说明 + +| 参数名 | 参数类型 | 参数属性 | 描述 | +| -------- | ---------- | -------------------------- | ---------------------------------------- | +| DATA | 表参数 | SET SEMANTICPASS THROUGH | 输入表通过 pkeys、okeys 指定分区和排序 | +| SIZE | 标量参数 | 长整数类型 | 窗口大小 | + +#### 11.4.4 返回结果 + +CAPACITY 函数的返回结果列包含: + +* window\_index: 窗口编号 +* 映射列:DATA 参数的所有输入列 + +#### 11.4.5 使用示例 + +```sql +IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2); ++------------+-----------------------------+--------+-----+ +|window_index| time|stock_id|price| ++------------+-----------------------------+--------+-----+ +| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| +| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| +| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| +| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++------------+-----------------------------+--------+-----+ + +-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY COUNT +IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id; 
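+-- 注:SIZE => 2 表示每个分区内按行数切分,每个窗口至多 2 行;分区末尾不足 2 行的数据自成一个窗口,如下表中各 stock_id 的最后一个窗口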
++-----------------------------+-----------------------------+--------+-----+ +| start_time| end_time|stock_id| avg| ++-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| +|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|101.5| +|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+--------+-----+ +``` + +### 11.5 TUMBLE + +#### 11.5.1 功能描述 + +TUMBLE 函数通过时间属性字段为每行数据分配一个窗口,滚动窗口大小固定,且窗口之间互不重叠。 + +#### 11.5.2 函数定义 + +```sql +TUMBLE(data, timecol, size[, origin]) +``` + +#### 11.5.3 参数说明 + +| 参数名 | 参数类型 | 参数属性 | 描述 | +| --------- | ---------- | --------------------------------- | -------------------- | +| DATA | 表参数 | ROW SEMANTIC
PASS THROUGH | 输入表 | +| TIMECOL | 标量参数 | 字符串类型
默认值:time | 时间列 | +| SIZE | 标量参数 | 长整数类型 | 窗口大小,需为正数 | +| ORIGIN | 标量参数 | 时间戳类型
默认值:Unix 纪元时间 | 第一个窗口起始时间 | + +#### 11.5.4 返回结果 + +TUMBLE 函数的返回结果列包含: + +* window\_start: 窗口开始时间(闭区间) +* window\_end: 窗口结束时间(开区间) +* 映射列:DATA 参数的所有输入列 + +#### 11.5.5 使用示例 + +```SQL +IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` + +### 11.6 CUMULATE + +#### 11.6.1 功能描述 + +CUMULATE 函数从初始窗口开始,创建起始时间相同、结束时间按步长递增的一系列窗口,直到达到最大窗口大小,每个窗口包含其区间内的元素。例如:1小时步长,24小时大小的累计窗口,每天可以获得如下这些窗口:`[00:00, 01:00)`,`[00:00, 02:00)`,`[00:00, 03:00)`, …, `[00:00, 24:00)` + +#### 11.6.2 函数定义 + +```sql +CUMULATE(data, timecol, size, step[, origin]) +``` + +#### 11.6.3 参数说明 + +| 参数名 | 参数类型 | 参数属性 | 描述 | +| --------- | ---------- | --------------------------------- | -------------------------------------------- | +| DATA | 表参数 | ROW 
SEMANTICPASS THROUGH | 输入表 | +| TIMECOL | 标量参数 | 字符串类型默认值:time | 时间列 | +| SIZE | 标量参数 | 长整数类型 | 窗口大小,SIZE必须是STEP的整数倍,需为正数 | +| STEP | 标量参数 | 长整数类型 | 窗口步长,需为正数 | +| ORIGIN | 标量参数 | 时间戳类型默认值:Unix 纪元时间 | 第一个窗口起始时间 | + +> 注意:size 如果不是 step 的整数倍,则会报错`Cumulative table function requires size must be an integral multiple of step` + +#### 11.6.4 返回结果 + +CUMULATE函数的返回结果列包含: + +* window\_start: 窗口开始时间(闭区间) +* window\_end: 窗口结束时间(开区间) +* 映射列:DATA 参数的所有输入列 + +#### 11.6.5 使用示例 + +```sql +IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m); ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +| window_start| window_end| time|stock_id|price| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| ++-----------------------------+-----------------------------+-----------------------------+--------+-----+ + +-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME +IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id; ++-----------------------------+-----------------------------+--------+------------------+ +| window_start| window_end|stock_id| avg| ++-----------------------------+-----------------------------+--------+------------------+ +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| TESL| 201.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00| TESL| 195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00| TESL| 195.0| +|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00| AAPL| 100.0| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| AAPL| 101.5| +|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| ++-----------------------------+-----------------------------+--------+------------------+ +``` diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function_timecho.md 
b/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function_timecho.md
new file mode 100644
index 000000000..210eb2613
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Basis-Function_timecho.md
@@ -0,0 +1,2019 @@
+
+
+# 基础函数
+
+## 1. 比较函数和运算符
+
+### 1.1 基本比较运算符
+
+比较运算符用于比较两个值,并返回比较结果(true 或 false)。
+
+| 运算符 | 描述 |
+| ------ | ---------- |
+| < | 小于 |
+| > | 大于 |
+| <= | 小于或等于 |
+| >= | 大于或等于 |
+| = | 等于 |
+| <> | 不等于 |
+| != | 不等于 |
+
+#### 1.1.1 比较规则
+
+1. 所有类型都可以与自身进行比较
+2. 数值类型(INT32, INT64, FLOAT, DOUBLE, TIMESTAMP)之间可以相互比较
+3. 字符类型(STRING, TEXT)之间也可以相互比较
+4. 除上述规则外的类型进行比较时,均会报错。
+
+### 1.2 BETWEEN 运算符
+
+1. `BETWEEN` 操作符用于判断一个值是否在指定的范围内。
+2. `NOT BETWEEN` 操作符用于判断一个值是否不在指定范围内。
+3. `BETWEEN` 和 `NOT BETWEEN` 操作符可用于评估任何可排序的类型。
+4. `BETWEEN` 和 `NOT BETWEEN` 的值、最小值和最大值参数必须是同一类型,否则会报错。
+
+**语法**:
+
+```SQL
+value BETWEEN min AND max
+value NOT BETWEEN min AND max
+```
+
+示例 1:BETWEEN
+
+```SQL
+-- 查询 temperature 在 85.0 和 90.0 之间的记录
+SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0;
+```
+
+示例 2:NOT BETWEEN
+
+```SQL
+-- 查询 humidity 不在 35.0 和 40.0 之间的记录
+SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0;
+```
+
+### 1.3 IS NULL 运算符
+
+1. `IS NULL` 和 `IS NOT NULL` 运算符用于判断一个值是否为 NULL。
+2. 这两个运算符适用于所有数据类型。
+
+示例 1:查询 temperature 为 NULL 的记录
+
+```SQL
+SELECT * FROM table1 WHERE temperature IS NULL;
+```
+
+示例 2:查询 humidity 不为 NULL 的记录
+
+```SQL
+SELECT * FROM table1 WHERE humidity IS NOT NULL;
+```
+
+### 1.4 IN 运算符
+
+1. `IN` 操作符可用于 `WHERE` 子句中,将一列的值与一组给定的值进行比较。
+2. 这些值可以是静态数组中的常量,也可以是标量表达式。
+
+**语法:**
+
+```SQL
+... WHERE column [NOT] IN ('value1','value2', expression1)
+```
+
+示例 1:静态数组:查询 region 为 '北京' 或 '上海' 的记录
+
+```SQL
+SELECT * FROM table1 WHERE region IN ('北京', '上海');
+--等价于
+SELECT * FROM table1 WHERE region = '北京' OR region = '上海';
+```
+
+示例 2:标量表达式:查询 temperature 在特定值中的记录
+
+```SQL
+SELECT * FROM table1 WHERE temperature IN (85.0, 90.0);
+```
+
+示例 3:查询 region 不为 '北京' 或 '上海' 的记录
+
+```SQL
+SELECT * FROM table1 WHERE region NOT IN ('北京', '上海');
+```
+
+### 1.5 GREATEST 和 LEAST
+
+`Greatest` 函数用于返回参数列表中的最大值,`Least` 函数用于返回参数列表中的最小值,返回数据类型与输入类型相同。
+
+1. 空值处理:若所有参数均为 NULL,则返回 NULL。
+2. 参数要求:必须提供至少 2 个参数。
+3. 类型约束:仅支持相同数据类型的参数比较。
+4. 支持类型:`BOOLEAN`、`FLOAT`、`DOUBLE`、`INT32`、`INT64`、`STRING`、`TEXT`、`TIMESTAMP`、`DATE`
+
+**语法:**
+
+```sql
+greatest(value1, value2, ..., valueN)
+least(value1, value2, ..., valueN)
+```
+
+**示例:**
+
+```sql
+-- 查询 table2 中 temperature 和 humidity 的最大记录
+SELECT GREATEST(temperature,humidity) FROM table2;
+
+-- 查询 table2 中 temperature 和 humidity 的最小记录
+SELECT LEAST(temperature,humidity) FROM table2;
+```
+
+
+## 2. 聚合函数
+
+### 2.1 概述
+
+1. 聚合函数是多对一函数。它们对一组值进行聚合计算,得到单个聚合结果。
+2. 除了 `COUNT()` 之外,其他所有聚合函数都忽略空值,并在没有输入行或所有值为空时返回空值。例如,`SUM()` 返回 null 而不是零,而 `AVG()` 在计数中不包括 null 值。
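+
+下面用一个简单的查询示意上述空值语义(基于[示例数据页面](../Reference/Sample-Data.md)中的 table1;WHERE 条件刻意只保留 temperature 为 NULL 的行,属于演示用的假设场景):
+
+```SQL
+-- 所选行的 temperature 全为 NULL:COUNT 返回 0,SUM 与 AVG 返回 NULL
+SELECT count(temperature), sum(temperature), avg(temperature)
+  FROM table1
+  WHERE temperature IS NULL;
+```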
+
+### 2.2 支持的聚合函数
+
+| 函数名 | 功能描述 | 允许的输入类型 | 输出类型 |
+|-----------------------|------------------------------------------|------------------------------------------|------------------|
+| COUNT | 计算数据点数。 | 所有类型 | INT64 |
+| COUNT_IF | COUNT_IF(exp) 用于统计满足指定布尔表达式的记录行数 | exp 必须是一个布尔类型的表达式,例如 count_if(temperature>20) | INT64 |
+| APPROX_COUNT_DISTINCT | APPROX_COUNT_DISTINCT(x[,maxStandardError]) 函数提供 COUNT(DISTINCT x) 的近似值,返回不同输入值的近似个数。 | `x`:待计算列,支持所有类型;<br>`maxStandardError`:指定该函数应产生的最大标准误差,取值范围[0.0040625, 0.26],未指定值时默认0.023。 | INT64 |
+| APPROX_MOST_FREQUENT | APPROX_MOST_FREQUENT(x, k, capacity) 函数用于近似计算数据集中出现频率最高的前 k 个元素。它返回一个 JSON 格式的字符串,其中键是该元素的值,值是该元素对应的近似频率。(V 2.0.5.1 及以后版本支持) | `x`:待计算列,支持 IoTDB 现有所有的数据类型;<br>`k`:返回出现频率最高的 k 个值;<br>
`capacity`: 用于计算的桶的数量,跟内存占用相关:其值越大误差越小,但占用内存更大,反之capacity值越小误差越大,但占用内存更小。 | STRING | +| SUM | 求和。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| AVG | 求平均值。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| MAX | 求最大值。 | 所有类型 | 与输入类型一致 | +| MIN | 求最小值。 | 所有类型 | 与输入类型一致 | +| FIRST | 求时间戳最小且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | +| LAST | 求时间戳最大且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | +| STDDEV | STDDEV_SAMP 的别名,求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_POP | 求总体标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_SAMP | 求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VARIANCE | VAR_SAMP 的别名,求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_POP | 求总体方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_SAMP | 求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| EXTREME | 求具有最大绝对值的值。如果正值和负值的最大绝对值相等,则返回正值。 | INT32 INT64 FLOAT DOUBLE | 与输入类型一致 | +| MODE | 求众数。注意: 1.输入序列的不同值个数过多时会有内存异常风险; 2.如果所有元素出现的频次相同,即没有众数,则随机返回一个元素; 3.如果有多个众数,则随机返回一个众数; 4. NULL 值也会被统计频次,所以即使输入序列的值不全为 NULL,最终结果也可能为 NULL。 | 所有类型 | 与输入类型一致 | +| MAX_BY | MAX_BY(x, y) 求二元输入 x 和 y 在 y 最大时对应的 x 的值。MAX_BY(time, x) 返回 x 取最大值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | +| MIN_BY | MIN_BY(x, y) 求二元输入 x 和 y 在 y 最小时对应的 x 的值。MIN_BY(time, x) 返回 x 取最小值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | +| FIRST_BY | FIRST_BY(x, y) 求当 y 为第一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | +| LAST_BY | LAST_BY(x, y) 求当 y 为最后一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | + + +### 2.3 示例 + +#### 2.3.1 示例数据 + +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 + +#### 2.3.2 Count + +统计的是整张表的行数和 `temperature` 列非 NULL 值的数量。 + +```SQL +IoTDB> select count(*), count(temperature) from table1; +``` + +执行结果如下: + +> 注意:只有COUNT函数可以与*一起使用,否则将抛出错误。 + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 18| 12| ++-----+-----+ +Total line number = 1 +It costs 0.834s +``` + + +#### 2.3.3 Count_if + +统计 `table2` 中 到达时间 `arrival_time` 不是 `null` 的记录行数。 + +```sql +IoTDB> select count_if(arrival_time is not null) from table2; +``` + +执行结果如下: + +```sql ++-----+ +|_col0| ++-----+ +| 4| ++-----+ +Total line number = 1 +It costs 0.047s +``` + +#### 2.3.4 Approx_count_distinct + +查询 `table1` 中 `temperature` 列不同值的个数。 + +```sql +IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1; +IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1; +``` + +执行结果如下: + +```sql ++------+------+ +|origin|approx| ++------+------+ +| 3| 3| ++------+------+ +Total line number = 1 +It costs 0.022s +``` + +#### 2.3.5 Approx_most_frequent + +查询 `table1` 中 `temperature` 列出现频次最高的2个值 + +```sql +IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1; +``` + +执行结果如下: + +```sql ++-------------------+ +| topk| ++-------------------+ +|{"85.0":6,"90.0":5}| ++-------------------+ +Total line number = 1 +It costs 0.064s +``` + + +#### 2.3.6 First + +查询`temperature`列、`humidity`列时间戳最小且不为 NULL 的值。 + +```SQL +IoTDB> select first(temperature), first(humidity) from table1; +``` + +执行结果如下: + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 90.0| 35.1| ++-----+-----+ +Total line number = 1 +It costs 0.170s +``` + +#### 2.3.7 Last + +查询`temperature`列、`humidity`列时间戳最大且不为 NULL 的值。 + +```SQL +IoTDB> select last(temperature), last(humidity) from table1; +``` + +执行结果如下: + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 90.0| 34.8| ++-----+-----+ +Total line number = 1 +It costs 0.211s 
+``` + +#### 2.3.8 First_by + +查询 `temperature` 列中非 NULL 且时间戳最小的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最小的行的 `humidity` 值。 + +```SQL +IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1; +``` + +执行结果如下: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-26T13:37:00.000+08:00| 35.1| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.269s +``` + +#### 2.3.9 Last_by + +查询`temperature` 列中非 NULL 且时间戳最大的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最大的行的 `humidity` 值。 + +```SQL +IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1; +``` + +执行结果如下: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-30T14:30:00.000+08:00| 34.8| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.070s +``` + +#### 2.3.10 Max_by + +查询`temperature` 列中最大值所在行的 `time` 值,以及`temperature` 列中最大值所在行的 `humidity` 值。 + +```SQL +IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; +``` + +执行结果如下: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-30T09:30:00.000+08:00| 35.2| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.172s +``` + +#### 2.3.11 Min_by + +查询`temperature` 列中最小值所在行的 `time` 值,以及`temperature` 列中最小值所在行的 `humidity` 值。 + +```SQL +select min_by(time, temperature), min_by(humidity, temperature) from table1; +``` + +执行结果如下: + +```SQL ++-----------------------------+-----+ +| _col0|_col1| ++-----------------------------+-----+ +|2024-11-29T10:00:00.000+08:00| null| ++-----------------------------+-----+ +Total line number = 1 +It costs 0.244s +``` + + +## 3. 逻辑运算符 + +### 3.1 概述 + +逻辑运算符用于组合条件或否定条件,返回布尔结果(`true` 或 `false`)。 + +以下是常用的逻辑运算符及其描述: + +| 运算符 | 描述 | 示例 | +| ------ | ----------------------------- | ------- | +| AND | 仅当两个值都为 true 时为 true | a AND b | +| OR | 任一值为 true 时为 true | a OR b | +| NOT | 当值为 false 时为 true | NOT a | + +### 3.2 NULL 对逻辑运算符的影响 + +#### 3.2.1 AND 运算符 + +- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。 +- 如果 `AND` 运算符的一侧为 `FALSE`,则表达式结果为 `FALSE`。 + +示例: + +```SQL +NULL AND true -- null +NULL AND false -- false +NULL AND NULL -- null +``` + +#### 3.2.2 OR 运算符 + +- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。 +- 如果 `OR` 运算符的一侧为 `TRUE`,则表达式结果为 `TRUE`。 + +示例: + +```SQL +NULL OR NULL -- null +NULL OR false -- null +NULL OR true -- true +``` + +##### 3.2.2.1 真值表 + +以下真值表展示了 `NULL` 在 `AND` 和 `OR` 运算符中的处理方式: + +| a | b | a AND b | a OR b | +| ----- | ----- | ------- | ------ | +| TRUE | TRUE | TRUE | TRUE | +| TRUE | FALSE | FALSE | TRUE | +| TRUE | NULL | NULL | TRUE | +| FALSE | TRUE | FALSE | TRUE | +| FALSE | FALSE | FALSE | FALSE | +| FALSE | NULL | FALSE | NULL | +| NULL | TRUE | NULL | TRUE | +| NULL | FALSE | FALSE | NULL | +| NULL | NULL | NULL | NULL | + +#### 3.2.3 NOT 运算符 + +NULL 的逻辑否定仍然是 NULL + +示例: + +```SQL +NOT NULL -- null +``` + +##### 3.2.3.1真值表 + +以下真值表展示了 `NULL` 在 `NOT` 运算符中的处理方式: + +| a | NOT a | +| ----- | ----- | +| TRUE | FALSE | +| FALSE | TRUE | +| NULL | NULL | + + +## 4. 
日期和时间函数和运算符 + +### 4.1 now() -> Timestamp + +返回当前时间的时间戳。 + +### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp + +`date_bin` 函数是一种用于处理时间数据的函数,作用是将一个时间戳(Timestamp)舍入到指定的时间间隔(interval)的边界上。 + +**语法:** + +```SQL +-- 从时间戳为 0 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 +date_bin(interval,source) + +-- 从时间戳为 origin 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 +date_bin(interval,source,origin) + +-- interval支持的时间单位有: +-- 年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 +-- source必须为时间戳类型。 +``` + +**参数:** + +| 参数 | 含义 | +| -------- | ------------------------------------------------------------ | +| interval | 时间间隔支持的时间单位有:年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 | +| source | 待计算时间列,也可以是表达式。必须为时间戳类型。 | +| origin | 起始时间戳 | + +#### 4.2.1 语法约定: + +1. 不传入 `origin` 时,起始时间戳从 1970-01-01T00:00:00Z 开始计算(北京时间为 1970-01-01 08:00:00)。 +2. `interval` 为一个非负数,且必须带上时间单位。`interval` 为 0ms 时,不进行计算,直接返回 `source`。 +3. 当传入 `origin` 或 `source` 为负时,表示纪元时间之前的某个时间点,`date_bin` 会正常计算并返回与该时间点相关的时间段。 +4. 如果 `source` 中的值为 `null`,则返回 `null`。 +5. 不支持月份和非月份时间单位混用,例如 `1 MONTH 1 DAY`,这种时间间隔有歧义。 + +> 假设是起始时间是 2000 年 4 月 30 日进行计算,那么在一个时间间隔后,如果是先算 DAY再算MONTH,则会得到 2000 年 6 月 1 日,如果先算 MONTH 再算 DAY 则会得到 2000 年 5 月 31 日,二者得出的时间日期不同。 + +#### 4.2.2 示例 + +##### 示例数据 + +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 + +示例 1:不指定起始时间戳 + +```SQL +SELECT + time, + date_bin(1h,time) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.683s +``` + +示例 2:指定起始时间戳 + +```SQL +SELECT + time, + date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| 
+|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.056s +``` + +示例 3:`origin` 为负数的情况 + +```SQL +SELECT + time, + date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.203s +``` + +示例 4:`interval` 为 0 的情况 + +```SQL +SELECT + time, + date_bin(0ms, time) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| 
+|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.107s +``` + +示例 5:`source` 为 null 的情况 + +```SQL +SELECT + arrival_time, + date_bin(1h,arrival_time) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| arrival_time| time_bin| ++-----------------------------+-----------------------------+ +| null| null| +|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +| null| null| +|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| +| null| null| +|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| +| null| null| +|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.319s +``` + +### 4.3 Extract 函数 + +该函数用于提取日期对应部分的值。(V2.0.6 版本起支持) + +#### 4.3.1 语法定义 + +```SQL +EXTRACT (identifier FROM expression) +``` +* 参数说明 + * **expression**: `TIMESTAMP` 类型或时间常量 + * **identifier** :取值范围及对应的返回值见下表 + + | 取值范围 | 返回值类型 | 返回值范围 | + | -------------------------- | ------------- | ------------- | + | `YEAR` | `INT64` | `/` | + | `QUARTER` | `INT64` | `1-4` | + | `MONTH` | `INT64` | `1-12` | + | `WEEK` | `INT64` | `1-53` | + | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` | + | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` | + | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` | + | `HOUR` | `INT64` | `0-23` | + | `MINUTE` | `INT64` | `0-59` | + | `SECOND` | `INT64` | `0-59` | + | `MS` | `INT64` | `0-999` | + | `US` | `INT64` | `0-999` | + | `NS` | `INT64` | `0-999` | + + +#### 4.3.2 使用示例 + +以[示例数据](../Reference/Sample-Data.md)中的 table1 为源数据,查询某段时间每天前12个小时的温度平均值 + +```SQL +IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time) ++----------+-----+ +| fmtdate|avgtp| ++----------+-----+ +|2024-11-28| 86.0| +|2024-11-29| 85.0| +|2024-11-30| 90.0| ++----------+-----+ +Total line number = 3 +It costs 0.041s +``` + +`Format` 函数介绍:[Format 函数](../SQL-Manual/Basis-Function_timecho.md#_7-2-format-函数) + +`Date_bin` 函数介绍:[Date_bin 函数](../SQL-Manual/Basis-Function_timecho.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp) + + +## 5. 
数学函数和运算符 + +### 5.1 数学运算符 + +| **运算符** | **描述** | +| ---------- | ------------------------ | +| + | 加法 | +| - | 减法 | +| * | 乘法 | +| / | 除法(整数除法执行截断) | +| % | 模(余数) | +| - | 取反 | + +### 5.2 数学函数 + +| 函数名 | 描述 | 输入 | 输出 | 用法 | +|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------| ---------------------- | ---------- | +| sin | 正弦函数 | double、float、INT64、INT32 | double | sin(x) | +| cos | 余弦函数 | double、float、INT64、INT32 | double | cos(x) | +| tan | 正切函数 | double、float、INT64、INT32 | double | tan(x) | +| asin | 反正弦函数 | double、float、INT64、INT32 | double | asin(x) | +| acos | 反余弦函数 | double、float、INT64、INT32 | double | acos(x) | +| atan | 反正切函数 | double、float、INT64、INT32 | double | atan(x) | +| sinh | 双曲正弦函数 | double、float、INT64、INT32 | double | sinh(x) | +| cosh | 双曲余弦函数 | double、float、INT64、INT32 | double | cosh(x) | +| tanh | 双曲正切函数 | double、float、INT64、INT32 | double | tanh(x) | +| degrees | 将弧度角 x 转换为度 | double、float、INT64、INT32 | double | degrees(x) | +| radians | 将度转换为弧度 | double、float、INT64、INT32 | double | radians(x) | +| abs | 绝对值 | double、float、INT64、INT32 | 返回与输入类型相同的值 | abs(x) | +| sign | 返回 x 的符号函数,即:如果参数为 0,则返回 0,如果参数大于 0,则返回 1,如果参数小于 0,则返回 -1。对于 double/float 类型的参数,函数还会返回:如果参数为 NaN,则返回 NaN,如果参数为 +Infinity,则返回 1.0,如果参数为 -Infinity,则返回 -1.0。 | double、float、INT64、INT32 | 返回与输入类型相同的值 | sign(x) | +| ceil | 返回 x 向上取整到最近的整数。 | double、float、INT64、INT32 | double | ceil(x) | +| floor | 返回 x 向下取整到最近的整数。 | double、float、INT64、INT32 | double | floor(x) | +| exp | 返回欧拉数 e 的 x 次幂。 | double、float、INT64、INT32 | double | exp(x) | +| ln | 返回 x 的自然对数。 | double、float、INT64、INT32 | double | ln(x) | +| log10 | 返回 x 的以 10 为底的对数。 | double、float、INT64、INT32 | double | log10(x) | +| round | 返回 x 四舍五入到最近的整数。 | double、float、INT64、INT32 | double | round(x) | +| round | 返回 x 四舍五入到 d 位小数。 | double、float、INT64、INT32 | double | round(x, d) | +| sqrt | 返回 x 的平方根。 | double、float、INT64、INT32 | double | sqrt(x) | +| e | 自然指数 | | double | e() | +| pi | π | | double | pi() | + + +## 6. 位运算函数 + +> V 2.0.6 版本起支持 + +示例原始数据如下: + +```SQL +IoTDB:database1> select * from bit_table ++-----------------------------+---------+------+-----+ +| time|device_id|length|width| ++-----------------------------+---------+------+-----+ +|2025-10-29T15:59:42.957+08:00| d1| 14| 12| +|2025-10-29T15:58:59.399+08:00| d3| 15| 10| +|2025-10-29T15:59:32.769+08:00| d2| 13| 12| ++-----------------------------+---------+------+-----+ + +--建表语句 +CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD); + +--写入数据 +INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12); +``` + +### 6.1 bit\_count(num, bits) + +`bit_count(num, bits)` 函数用于统计整数 `num`在指定位宽 `bits`下的二进制表示中 1 的个数。 + +#### 6.1.1 语法定义 + +```SQL +bit_count(num, bits) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * **​num:​**任意整型数值(int32 或者 int64) + * **​bits:​**整型数值,取值范围为2\~64 + +注意:如果 bits 位数不够表示 num,会报错(此处是​**有符号补码**​):`Argument exception, the scalar function num must be representable with the bits specified. 
[num] cannot be represented with [bits] bits.` + +* 调用方式 + * 两个具体数值:`bit_count(9, 64)` + * 列与数值:`bit_count(column1, 64)` + * 两列之间:`bit_count(column1, column2)` + +#### 6.1.2 使用示例 + +```SQL +-- 两个具体数值 +IoTDB:database1> select distinct bit_count(2,8) from bit_table ++-----+ +|_col0| ++-----+ +| 1| ++-----+ +-- 两个具体数值 +IoTDB:database1> select distinct bit_count(-5,8) from bit_table ++-----+ +|_col0| ++-----+ +| 7| ++-----+ +--列与数值 +IoTDB:database1> select length,bit_count(length,8) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 3| +| 15| 4| +| 13| 3| ++------+-----+ +--bits位数不够 +IoTDB:database1> select length,bit_count(length,2) from bit_table +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits. +``` + +### 6.2 bitwise\_and(x, y) + +`bitwise_and(x, y)`函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑与操作,并返回其按位与(bitwise AND)的运算结果。 + +#### 6.2.1 语法定义 + +```SQL +bitwise_and(x, y) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x, y**​: 必须是 Int32 或 Int64 数据类型的整数值 +* 调用方式 + * 两个具体数值:`bitwise_and(19, 25)` + * 列与数值:`bitwise_and(column1, 25)` + * 两列之间:`bitwise_and(column1, column2)` + +#### 6.2.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct bitwise_and(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 17| ++-----+ +--列与数值 +IoTDB:database1> select length, bitwise_and(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 8| +| 15| 9| +| 13| 9| ++------+-----+ +--俩列之间 +IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 12| +| 15| 10| 10| +| 13| 12| 12| ++------+-----+-----+ +``` + +### 6.3 bitwise\_not(x) + +`bitwise_not(x)` 函数基于二进制补码表示法,对整数 x 的每一位进行逻辑非操作,并返回其按位取反(bitwise NOT)的运算结果。 + +#### 6.3.1 语法定义 + +```SQL +bitwise_not(x) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x**​: 必须是 Int32 或 Int64 数据类型的整数值 +* 调用方式 + * 具体数值:`bitwise_not(5)` + * 单列操作:`bitwise_not(column1)` + +#### 6.3.2 使用示例 + +```SQL +-- 具体数值 +IoTDB:database1> select distinct bitwise_not(5) from bit_table ++-----+ +|_col0| ++-----+ +| -6| ++-----+ +-- 单列 +IoTDB:database1> select length, bitwise_not(length) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| -15| +| 15| -16| +| 13| -14| ++------+-----+ +``` + +### 6.4 bitwise\_or(x, y) + +`bitwise_or(x,y)` 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑或操作,并返回其按位或(bitwise OR)的运算结果。 + +#### 6.4.1 语法定义 + +```SQL +bitwise_or(x, y) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x, y**​: 必须是 Int32 或 Int64 数据类型的整数值 +* 调用方式 + * 两个具体数值:`bitwise_or(19, 25)` + * 列与数值:`bitwise_or(column1, 25)` + * 两列之间:`bitwise_or(column1, column2)` + +#### 6.4.2 使用示例 + +```SQL +-- 两个具体数值 +IoTDB:database1> select distinct bitwise_or(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 27| ++-----+ +-- 列与数值 +IoTDB:database1> select length,bitwise_or(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 31| +| 15| 31| +| 13| 29| ++------+-----+ +-- 两列之间 +IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 14| +| 15| 10| 15| +| 13| 12| 13| ++------+-----+-----+ +``` + +### 6.5 bitwise\_xor(x, y) + +bitwise\_xor(x,y) 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑异或操作,并返回其按位异或(bitwise XOR)的运算结果。异或规则:相同为0,不同为1。 + +#### 6.5.1 语法定义 + +```SQL +bitwise_xor(x, y) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x, y**​: 必须是 Int32 或 
Int64 数据类型的整数值 +* 调用方式 + * 两个具体数值:`bitwise_xor(19, 25)` + * 列与数值:`bitwise_xor(column1, 25)` + * 两列之间:`bitwise_xor(column1, column2)` + +#### 6.5.2 使用示例 + +```SQL +-- 两个具体数值 +IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 10| ++-----+ +-- 列与数值 +IoTDB:database1> select length,bitwise_xor(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 23| +| 15| 22| +| 13| 20| ++------+-----+ +-- 两列之间 +IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 2| +| 15| 10| 5| +| 13| 12| 1| ++------+-----+-----+ +``` + +### 6.6 bitwise\_left\_shift(value, shift) + +`bitwise_left_shift(value, shift)` 函数返回将整数 `value`的二进制表示左移 `shift`位后的结果。左移操作将二进制位向高位方向移动,右侧空出的位用 0 填充,左侧溢出的位直接丢弃。等价于: `value << shift`。 + +#### 6.6.1 语法定义 + +```SQL +bitwise_left_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 +``` + +* 参数说明 + * ​**value**​: 要左移的整数值,必须是 Int32 或 Int64 数据类型 + * ​**shift**​: 左移的位数,必须是 Int32 或 Int64 数据类型 +* 调用方式 + * 两个具体数值:`bitwise_left_shift(1, 2)` + * 列与数值:`bitwise_left_shift(column1, 2)` + * 两列之间:`bitwise_left_shift(column1, column2)` + +#### 6.6.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table ++-----+ +|_col0| ++-----+ +| 4| ++-----+ +-- 列与数值 +IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 56| +| 15| 60| +| 13| 52| ++------+-----+ +-- 两列之间 +IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| ++------+-----+-----+ +``` + +### 6.7 bitwise\_right\_shift(value, shift) + +`bitwise_right_shift(value, shift)`函数返回将整数 `value`的二进制表示逻辑右移(无符号右移) `shift`位后的结果。逻辑右移操作将二进制位向低位方向移动,左侧空出的高位用 0 填充,右侧溢出的低位直接丢弃。 + +#### 6.7.1 语法定义 + +```SQL +bitwise_right_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 +``` + +* 参数说明 + * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 + * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 +* 调用方式 + * 两个具体数值:`bitwise_right_shift(8, 3)` + * 列与数值:`bitwise_right_shift(column1, 3)` + * 两列之间:`bitwise_right_shift(column1, column2)` + +#### 6.7.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table ++-----+ +|_col0| ++-----+ +| 1| ++-----+ +--列与数值 +IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--两列之间 +IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| +``` + +### 6.8 bitwise\_right\_shift\_arithmetic(value, shift) + +`bitwise_right_shift_arithmetic(value, shift)`函数返回将整数 `value`的二进制表示算术右移 `shift`位后的结果。算术右移操作将二进制位向低位方向移动,右侧溢出的低位直接丢弃,左侧空出的高位用符号位填充(正数补0,负数补1),以保持数值的符号不变。 + +#### 6.8.1 语法定义 + +```SQL +bitwise_right_shift_arithmetic(value, shift) -> [same as value]--返回结果类型与value数据类型相同 +``` + +* 参数说明 + * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 + * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 +* 调用方式: + * 两个具体数值:`bitwise_right_shift_arithmetic(12, 2)` + * 列与数值:`bitwise_right_shift_arithmetic(column1, 64)` + * 两列之间:`bitwise_right_shift_arithmetic(column1, column2)` + +#### 6.8.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct 
bitwise_right_shift_arithmetic(12,2) from bit_table ++-----+ +|_col0| ++-----+ +| 3| ++-----+ +-- 列与数值 +IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--两列之间 +IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| ++------+-----+-----+ +``` + +## 7. 条件表达式 + +### 7.1 CASE 表达式 + +CASE 表达式有两种形式:简单形式、搜索形式 + +#### 7.1.1 简单形式 + +简单形式从左到右搜索每个值表达式,直到找到一个与表达式相等的值: + +```SQL +CASE expression + WHEN value THEN result + [ WHEN ... ] + [ ELSE result ] +END +``` + +如果找到匹配的值,则返回相应的结果。如果没有找到匹配项,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: + +```SQL +SELECT a, + CASE a + WHEN 1 THEN 'one' + WHEN 2 THEN 'two' + ELSE 'many' + END +``` + +#### 7.1.2 搜索形式 + +搜索形式从左到右评估每个布尔条件,直到找到一个为真的条件,并返回相应的结果: + +```SQL +CASE + WHEN condition THEN result + [ WHEN ... ] + [ ELSE result ] +END +``` + +如果没有条件为真,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: + +```SQL +SELECT a, b, + CASE + WHEN a = 1 THEN 'aaa' + WHEN b = 2 THEN 'bbb' + ELSE 'ccc' + END +``` + +### 7.2 COALESCE 函数 + +返回参数列表中的第一个非空值。 + +```SQL +coalesce(value1, value2[, ...]) +``` + +## 8. 转换函数 + +### 8.1 转换函数 + +#### 8.1.1 cast(value AS type) → type + +1. 显式地将一个值转换为指定类型。 +2. 可以用于将字符串(varchar)转换为数值类型,或数值转换为字符串类型,V2.0.8 版本起支持 OBJECT 类型强转成 STRING 类型。 +3. 如果转换失败,将抛出运行时错误。 + +示例: + +```SQL +SELECT * + FROM table1 + WHERE CAST(time AS DATE) + IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE)); +``` + +#### 8.1.2 try_cast(value AS type) → type + +1. 与 `cast()` 类似。 +2. 如果转换失败,则返回 `null`。 + +示例: + +```SQL +SELECT * + FROM table1 + WHERE try_cast(time AS DATE) + IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE)); +``` + +### 8.2 Format 函数 +该函数基于指定的格式字符串与输入参数,生成并返回格式化后的字符串输出。其功能与 Java 语言中的`String.format` 方法及 C 语言中的`printf`函数相类似,支持开发者通过占位符语法构建动态字符串模板,其中预设的格式标识符将被传入的对应参数值精准替换,最终形成符合特定格式要求的完整字符串。 + +#### 8.2.1 语法介绍 + +```SQL +format(pattern,...args) -> String +``` + +**参数定义** + +* `pattern`: 格式字符串,可包含静态文本及一个或多个格式说明符(如 `%s`, `%d` 等),或任意返回类型为 `STRING/TEXT` 的表达式。 +* `args`: 用于替换格式说明符的输入参数。需满足以下条件: + * 参数数量 ≥ 1 + * 若存在多个参数,以逗号`,`分隔(如 `arg1,arg2`) + * 参数总数可多于 `pattern` 中的占位符数量,但不可少于,否则触发异常 + +**返回值** + +* 类型为 `STRING` 的格式化结果字符串 + +#### 8.2.2 使用示例 + +1. 格式化浮点数 + +```SQL +IoTDB:database1> select format('%.5f',humidity) from table1 where humidity = 35.4 ++--------+ +| _col0| ++--------+ +|35.40000| ++--------+ +``` + +2. 格式化整数 + +```SQL +IoTDB:database1> select format('%03d',8) from table1 limit 1 ++-----+ +|_col0| ++-----+ +| 008| ++-----+ +``` + +3. 
格式化日期和时间戳 + +* Locale-specific日期 + +```SQL +IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) from table1 limit 1 ++--------------------+ +| _col0| ++--------------------+ +|星期一, 一月 1, 2024| ++--------------------+ +``` + +* 去除时区信息 + +```SQL +IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 ++-----------------------+ +| _col0| ++-----------------------+ +|2024-01-01 00:00:00.000| ++-----------------------+ +``` + +* 获取秒级时间戳精度 + +```SQL +IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 ++-------------------+ +| _col0| ++-------------------+ +|2024-01-01 00:00:00| ++-------------------+ +``` + +* 日期符号说明如下 + +| **符号** | **​ 描述** | +| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 'H' | 24 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 00 - 23。 | +| 'I' | 12 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 01 - 12。 | +| 'k' | 24 小时制的小时数,i.e. 0 - 23。 | +| 'l' | 12 小时制的小时数,i.e. 1 - 12。 | +| 'M' | 小时内的分钟,格式为两位数,必要时加上前导零,i.e. 00 - 59。 | +| 'S' | 分钟内的秒数,格式为两位数,必要时加上前导零,i.e. 00 - 60(“60 ”是支持闰秒所需的特殊值)。 | +| 'L' | 秒内毫秒,格式为三位数,必要时加前导零,i.e. 000 - 999。 | +| 'N' | 秒内的纳秒,格式为九位数,必要时加前导零,i.e. 000000000 - 999999999。 | +| 'p' | 当地特定的[上午或下午](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getAmPmStrings())标记,小写,如 “am ”或 “pm”。使用转换前缀 “T ”会强制输出为大写。 | +| 'z' | 从格林尼治标准时间偏移的[RFC 822](http://www.ietf.org/rfc/rfc0822.txt)式数字时区,例如 -0800。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是 Java 虚拟机此实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。 | +| 'Z' | 表示时区缩写的字符串。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是此 Java 虚拟机实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。Formatter 的时区将取代参数的时区(如果有)。 | +| 's' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的秒数,i.e. Long.MIN\_VALUE/1000 至 Long.MAX\_VALUE/1000。 | +| 'Q' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的毫秒数,i.e. 
Long.MIN\_VALUE 至 Long.MAX\_VALUE。 | + +* 用于格式化常见的日期/时间组成的转换字符说明如下 + +| **符号** | **描述** | +| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 'B' | 特定于区域设置[的完整月份名称](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getMonths()),例如 “January”、“February”。 | +| 'b' | 当地特定月份的缩写名称,如"1 月"、"2 月"。 | +| 'h' | 与"b "相同。 | +| 'A' | 一周中某一天在当地的全称,如"星期日"、"星期一"。 | +| 'a' | 当地特有的星期简短名称,例如"星期日"、"星期一 | +| 'C' | 四位数年份除以100,格式为两位数,必要时加上前导零,即00 - 99 | +| 'Y' | 年份,格式为至少四位数,必要时加上前导零,例如0092相当于公历92年。 | +| 'y' | 年份的最后两位数,格式为必要的前导零,即00 - 99。 | +| 'j' | 年号,格式为三位数,必要时加前导零,例如公历为001 - 366。 | +| 'm' | 月份,格式为两位数,必要时加前导零,即01 - 13。 | +| 'd' | 月日,格式为两位数,必要时加前导零,即01 - 31 | +| 'e' | 月日,格式为两位数,即1 - 31。 | + +4. 格式化字符串 + +```SQL +IoTDB:database1> SELECT format('The measurement status is :%s',status) FROM table2 limit 1 ++-------------------------------+ +| _col0| ++-------------------------------+ +|The measurement status is :true| ++-------------------------------+ +``` + +5. 格式化百分号 + +```SQL +IoTDB:database1> SELECT format('%s%%', 99.9) from table1 limit 1 ++-----+ +|_col0| ++-----+ +|99.9%| ++-----+ +``` + +#### 8.2.3 **格式转换失败场景说明** + +1. 类型不匹配错误 + +* 时间戳类型冲突 若格式说明符中包含时间相关标记(如 `%Y-%m-%d`),但参数提供: + * 非 `DATE`/`TIMESTAMP` 类型值 + * 或涉及日期细粒度单位(如 `%H` 小时、`%M` 分钟)时,参数仅支持 `TIMESTAMP` 类型,否则将抛出类型异常 + +```SQL +-- 示例1 +IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float) + +-- 示例2 +IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float) +``` + +* 浮点数类型冲突 若使用 `%f` 等浮点格式说明符,但参数提供非数值类型(如字符串、布尔值),将触发类型转换错误 + +```SQL +IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean) +``` + +2. 参数数量不匹配错误 + +* 实际提供的参数数量 必须等于或大于 格式字符串中格式说明符的数量 +* 若参数数量少于格式说明符数量,将抛出 `ArgumentCountMismatch` 异常 + +```SQL +IoTDB:database1> select format('%.5f %03d', humidity) from table1 where humidity = 35.4 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d') +``` + +3. 无效调用错误 + +* 当函数参数满足以下任一条件时,视为非法调用: + * 参数总数 小于 2(必须包含格式字符串及至少一个参数) + * 格式字符串(`pattern`)类型非 `STRING/TEXT` + +```SQL +-- 示例1 +IoTDB:database1> select format('%s') from table1 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. + +--示例2 +IoTDB:database1> select format(123, humidity) from table1 limit 1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. +``` + + + +## 9. 
字符串函数和操作符
+
+### 9.1 字符串操作符
+
+#### 9.1.1 || 操作符
+
+`||` 操作符用于字符串连接,功能与 `concat` 函数相同。
+
+#### 9.1.2 LIKE 语句
+
+`LIKE` 语句用于模式匹配,具体用法在[模式匹配:LIKE](#1-like-运算符) 中有详细文档。
+
+### 9.2 字符串函数
+
+| 函数名 | 描述 | 输入 | 输出 | 用法 |
+| ----------- | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ |
+| length | 返回字符串的字符长度,而不是字符数组的长度。 | 支持一个参数,类型可以是字符串或文本。<br>**string**:要计算长度的字符串。 | INT32 | length(string) |
+| upper | 将字符串中的字母转换为大写。 | 支持一个参数,类型可以是字符串或文本。<br>**string**:要转换为大写的字符串。 | String | upper(string) |
+| lower | 将字符串中的字母转换为小写。 | 支持一个参数,类型可以是字符串或文本。<br>**string**:要转换为小写的字符串。 | String | lower(string) |
+| trim | 从源字符串中删除指定的开头和/或结尾字符。 | 支持三个参数:<br>**specification(可选)**:指定从哪边去掉字符,可以是 `BOTH`(两边都去掉,默认)、`LEADING`(只去掉开头的字符)、`TRAILING`(只去掉结尾的字符)。<br>**trimcharacter(可选)**:要去掉的字符,如果没指定,默认去掉空格。<br>**string**:要处理的字符串。 | String | trim([ [ specification ] [ trimcharacter ] FROM ] string)<br>示例:`trim('!' FROM '!foo!');` —— `'foo'` |
+| strpos | 返回子字符串在字符串中第一次出现的起始位置。位置从 1 开始计数。如果未找到,返回 0。注意:起始位置是基于字符而不是字节数组确定的。 | 仅支持两个参数,类型可以是字符串或文本。<br>**sourceStr**:要搜索的字符串。<br>**subStr**:要找的子字符串。 | INT32 | strpos(sourceStr, subStr) |
+| starts_with | 测试子字符串是否是字符串的前缀。 | 支持两个参数,类型可以是字符串或文本。<br>**sourceStr**:要检查的字符串。<br>**prefix**:前缀子字符串。 | Boolean | starts_with(sourceStr, prefix) |
+| ends_with | 测试字符串是否以指定的后缀结束。 | 支持两个参数,类型可以是字符串或文本。<br>**sourceStr**:要检查的字符串。<br>**suffix**:后缀子字符串。 | Boolean | ends_with(sourceStr, suffix) |
+| concat | 返回字符串 `string1`、`string2`、...、`stringN` 的连接结果。功能与连接操作符 `\|\|` 相同。 | 至少两个参数,所有参数类型必须是字符串或文本。 | String | concat(str1, str2, ...) 或 str1 \|\| str2 ... |
+| strcmp | 比较两个字符串的字母序。 | 支持两个参数,两个参数类型必须是字符串或文本。<br>**string1**:第一个要比较的字符串。<br>**string2**:第二个要比较的字符串。 | INT32:<br>如果 `str1 < str2`,返回 `-1`;<br>如果 `str1 = str2`,返回 `0`;<br>如果 `str1 > str2`,返回 `1`;<br>如果 `str1` 或 `str2` 为 `NULL`,返回 `NULL`。 | strcmp(str1, str2) |
+| replace | 从字符串中删除所有 `search` 的实例。 | 支持两个参数,可以是字符串或文本类型。<br>**string**:原始字符串,要从中删除内容的字符串。<br>**search**:要删除的子字符串。 | String | replace(string, string) |
+| replace | 将字符串中所有 `search` 的实例替换为 `replace`。 | 支持三个参数,可以是字符串或文本类型。<br>**string**:原始字符串,要从中替换内容的字符串。<br>**search**:要替换掉的子字符串。<br>**replace**:用来替换的新字符串。 | String | replace(string, string, string) |
+| substring | 从指定位置提取字符到字符串末尾。注意:起始位置是基于字符而不是字节数组确定的,`start_index` 从 1 开始计数。 | 支持两个参数:<br>**string**:要提取子字符串的源字符串,可以是字符串或文本类型。<br>**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。 | String:返回从 `start_index` 位置开始到字符串末尾的所有字符。<br>**注意事项**:<br>`start_index` 从 1 开始计数,即第一个字符的位置为 1;<br>参数为 null 时,返回 `null`;<br>`start_index` 大于字符串长度时,结果报错。 | substring(string from start_index) 或 substring(string, start_index) |
+| substring | 从一个字符串中提取从指定位置开始、指定长度的子字符串。注意:起始位置和长度是基于字符而不是字节数组确定的,`start_index` 从 1 开始计数,长度从 `start_index` 位置计算。 | 支持三个参数:<br>**string**:要提取子字符串的源字符串,可以是字符串或文本类型。<br>**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。<br>**length**:要提取的子字符串的长度。 | String:返回从 `start_index` 位置开始的 `length` 个字符。<br>**注意事项**:<br>参数为 null 时,返回 `null`;<br>`start_index` 大于字符串长度时,结果报错;<br>`length` 小于 0 时,结果报错;<br>极端情况下,`start_index + length` 超过 `int.MAX` 并变成负数时,将导致异常结果。 | substring(string from start_index for length) 或 substring(string, start_index, length) |
+
+## 10. 模式匹配函数
+
+### 10.1 LIKE 运算符
+
+#### 10.1.1 用途
+
+`LIKE` 运算符用于将值与模式进行比较。它通常用于 `WHERE` 子句中,用于匹配字符串中的特定模式。
+
+#### 10.1.2 语法
+
+```SQL
+... 
column [NOT] LIKE 'pattern' ESCAPE 'character'; +``` + +#### 10.1.3 匹配规则 + +- 匹配字符是区分大小写的。 +- 模式支持两个匹配符号: + - `_`:匹配任意单个字符。 + - `%`:匹配0个或多个字符。 + +#### 10.1.4 注意事项 + +- `LIKE` 模式匹配总是覆盖整个字符串。如果需要匹配字符串中的任意位置,模式必须以 `%` 开头和结尾。 +- 如果需要匹配 `%` 或 `_` 作为普通字符,必须使用转义字符。 + +#### 10.1.5 示例 + +示例 1:匹配以特定字符开头的字符串 + +- **说明**:查找所有以字母 `E` 开头的名称,例如 `Europe`。 + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'E%'; +``` + +示例 2:排除特定模式 + +- **说明**:查找所有不以字母 `E` 开头的名称。 + +```SQL +SELECT * FROM table1 WHERE continent NOT LIKE 'E%'; +``` + +示例 3:匹配特定长度的字符串 + +- **说明**:查找所有以 `A` 开头、以 `a` 结尾且中间有两个字符的名称,例如 `Asia`。 + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'A__a'; +``` + +示例 4:转义特殊字符 + +- **说明**:查找所有以 `South_` 开头的名称。这里使用了转义字符 `\` 来转义 `_` 等特殊字符,例如`South_America`。 + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\'; +``` + +示例 5:匹配转义字符本身 + +- **说明**:如果需要匹配转义字符本身,可以使用双转义字符 `\\`。 + +```SQL +SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\'; +``` + +### 10.2 regexp_like 函数 + +#### 10.2.1 用途 + +`regexp_like` 函数用于评估正则表达式模式,并确定该模式是否包含在字符串中。 + +#### 10.2.2 语法 + +```SQL +regexp_like(string, pattern); +``` + +#### 10.2.3 注意事项 + +- `regexp_like` 的模式只需包含在字符串中,而不需要匹配整个字符串。 +- 如果需要匹配整个字符串,可以使用正则表达式的锚点 `^` 和 `$`。 +- `^` 表示“字符串的开头”,`$` 表示“字符串的结尾”。 +- 正则表达式采用 Java 定义的正则语法,但存在以下需要注意的例外情况: + - **多行模式** + 1. 启用方式:`(?m)`。 + 2. 只识别`\n`作为行终止符。 + 3. 不支持`(?d)`标志,且禁止使用。 + - **不区分大小写匹配** + 1. 启用方式:`(?i)`。 + 2. 基于Unicode规则,不支持上下文相关和本地化匹配。 + 3. 不支持`(?u)`标志,且禁止使用。 + - **字符类** + 1. 在字符类(如`[A-Z123]`)中,`\Q`和`\E`不被支持,被视为普通字面量。 + - **Unicode字符类(**`\p{prop}`**)** + 1. **名称下划线**:名称中的所有下划线必须删除(如`OldItalic`而非`Old_Italic`)。 + 2. **文字(Scripts)**:直接指定,无需`Is`、`script=`或`sc=`前缀(如`\p{Hiragana}`)。 + 3. **区块(Blocks)**:必须使用`In`前缀,不支持`block=`或`blk=`前缀(如`\p{InMongolian}`)。 + 4. **类别(Categories)**:直接指定,无需`Is`、`general_category=`或`gc=`前缀(如`\p{L}`)。 + 5. **二元属性(Binary Properties)**:直接指定,无需`Is`(如`\p{NoncharacterCodePoint}`)。 + +#### 10.2.4 示例 + +示例 1:匹配包含特定模式的字符串 + +```SQL +SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true +``` + +- **说明**:检查字符串 `'1a 2b 14m'` 是否包含模式 `\d+b`。 + - `\d+` 表示“一个或多个数字”。 + - `b` 表示字母 `b`。 + - 在 `'1a 2b 14m'` 中,`2b` 符合这个模式,所以返回 `true`。 + +示例 2:匹配整个字符串 + +```SQL +SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false +``` + +- **说明**:检查字符串 `'1a 2b 14m'` 是否完全匹配模式 `^\\d+b$`。 + - `\d+` 表示“一个或多个数字”。 + - `b` 表示字母 `b`。 + - `'1a 2b 14m'` 并不符合这个模式,因为它不是从数字开始,也不是以 `b` 结束,所以返回 `false`。 + +## 11. 
时序分窗函数
+
+原始示例数据如下:
+
+```SQL
+IoTDB> SELECT * FROM bid;
++-----------------------------+--------+-----+
+| time|stock_id|price|
++-----------------------------+--------+-----+
+|2021-01-01T09:05:00.000+08:00| AAPL|100.0|
+|2021-01-01T09:06:00.000+08:00| TESL|200.0|
+|2021-01-01T09:07:00.000+08:00| AAPL|103.0|
+|2021-01-01T09:07:00.000+08:00| TESL|202.0|
+|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
+|2021-01-01T09:15:00.000+08:00| TESL|195.0|
++-----------------------------+--------+-----+
+
+-- 创建语句
+CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD);
+-- 插入数据
+INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0);
+```
+
+### 11.1 HOP
+
+#### 11.1.1 功能描述
+
+HOP 函数用于按时间分段分窗分析,识别每一行数据所属的时间窗口。该函数通过指定固定窗口大小(SIZE)和窗口滑动步长(SLIDE),将数据按时间戳分配到所有与其时间戳重叠的窗口中。若窗口之间存在重叠(步长 < 窗口大小),数据会自动复制到多个窗口。
+
+#### 11.1.2 函数定义
+
+```SQL
+HOP(data, timecol, size, slide[, origin])
+```
+
+#### 11.1.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| --------- | ---------- | --------------------------------- | -------------------- |
+| DATA | 表参数 | ROW SEMANTIC<br>PASS THROUGH | 输入表 |
+| TIMECOL | 标量参数 | 字符串类型<br>默认值:time | 时间列 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小 |
+| SLIDE | 标量参数 | 长整数类型 | 窗口滑动步长 |
+| ORIGIN | 标量参数 | 时间戳类型<br>默认值:Unix 纪元时间 | 第一个窗口起始时间 |
+
+#### 11.1.4 返回结果
+
+HOP 函数的返回结果列包含:
+
+* window\_start: 窗口开始时间(闭区间)
+* window\_end: 窗口结束时间(开区间)
+* 映射列:DATA 参数的所有输入列
+
+#### 11.1.5 使用示例
+
+```SQL
+IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+| window_start| window_end| time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+| window_start| window_end|stock_id| avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00| TESL| 195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
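+
+上述示例未使用可选的 ORIGIN 参数。若需让滑动窗口从非默认的时间点对齐,可按如下方式调用(仅为用法示意,起始时间 09:05 为演示用的假设值,此处不展示结果):
+
+```SQL
+-- 将滑动窗口的对齐起点改为 2021-01-01T09:05:00(示意)
+SELECT * FROM HOP(
+  DATA    => bid,
+  TIMECOL => 'time',
+  SLIDE   => 5m,
+  SIZE    => 10m,
+  ORIGIN  => 2021-01-01T09:05:00.000);
+```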
+
+### 11.2 SESSION
+
+#### 11.2.1 功能描述
+
+SESSION 函数用于按会话间隔对数据进行分窗。系统逐行检查与前一行的时间间隔,小于阈值(GAP)则归入当前窗口,超过则归入下一个窗口。
+
+#### 11.2.2 函数定义
+
+```SQL
+SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap)
+```
+
+#### 11.2.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| --------- | ---------- | -------------------------- | ---------------------------------------- |
+| DATA | 表参数 | SET SEMANTIC<br>PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 |
+| TIMECOL | 标量参数 | 字符串类型<br>默认值:'time' | 时间列名 |
+| GAP | 标量参数 | 长整数类型 | 会话间隔阈值 |
+
+#### 11.2.4 返回结果
+
+SESSION 函数的返回结果列包含:
+
+* window\_start: 会话窗口内的第一条数据的时间
+* window\_end: 会话窗口内的最后一条数据的时间
+* 映射列:DATA 参数的所有输入列
+
+#### 11.2.5 使用示例
+
+```SQL
+IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+| window_start| window_end| time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0|
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY SESSION
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+| window_start| window_end|stock_id| avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
+
+### 11.3 VARIATION
+
+#### 11.3.1 功能描述
+
+VARIATION 函数用于按数据差值分窗:将第一条数据作为首个窗口的基准值,每个数据点与基准值进行差值运算,如果差值小于给定的阈值(DELTA)则归入当前窗口;如果超过阈值,则归入下一个窗口,并将该值作为下一个窗口的基准值。
+
+#### 11.3.2 函数定义
+
+```sql
+VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta)
+```
+
+#### 11.3.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| -------- | ---------- | -------------------------- | ---------------------------------------- |
+| DATA | 表参数 | SET SEMANTIC<br>PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 |
+| COL | 标量参数 | 字符串类型 | 标识对哪一列计算差值 |
+| DELTA | 标量参数 | 浮点数类型 | 差值阈值 |
+
+#### 11.3.4 返回结果
+
+VARIATION 函数的返回结果列包含:
+
+* window\_index: 窗口编号
+* 映射列:DATA 参数的所有输入列
+
+#### 11.3.5 使用示例
+
+```sql
+IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0);
++------------+-----------------------------+--------+-----+
+|window_index| time|stock_id|price|
++------------+-----------------------------+--------+-----+
+| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0|
+| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0|
+| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0|
+| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0|
+| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0|
+| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
++------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY VARIATION
+IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id;
++-----------------------------+-----------------------------+--------+-----+
+| window_start| window_end|stock_id| avg|
++-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0|
+|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5|
++-----------------------------+-----------------------------+--------+-----+
+```
+
+### 11.4 CAPACITY
+
+#### 11.4.1 功能描述
+
+CAPACITY 函数用于按数据点数(行数)分窗,每个窗口最多包含 SIZE 行数据。
+
+#### 11.4.2 函数定义
+
+```sql
+CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size)
+```
+
+#### 11.4.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| -------- | ---------- | -------------------------- | ---------------------------------------- |
+| DATA | 表参数 | SET SEMANTIC<br>PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小 |
+
+#### 11.4.4 返回结果
+
+CAPACITY 函数的返回结果列包含:
+
+* window\_index: 窗口编号
+* 映射列:DATA 参数的所有输入列
+
+#### 11.4.5 使用示例
+
+```sql
+IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2);
++------------+-----------------------------+--------+-----+
+|window_index| time|stock_id|price|
++------------+-----------------------------+--------+-----+
+| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0|
+| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0|
+| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0|
+| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0|
+| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0|
+| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
++------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY COUNT
+IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id;
++-----------------------------+-----------------------------+--------+-----+
+| start_time| end_time|stock_id| avg|
++-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|101.5|
+|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
++-----------------------------+-----------------------------+--------+-----+
+```
+
+### 11.5 TUMBLE
+
+#### 11.5.1 功能描述
+
+TUMBLE 函数用于通过时间属性字段为每行数据分配一个窗口,滚动窗口的大小固定且互不重叠。
+
+#### 11.5.2 函数定义
+
+```sql
+TUMBLE(data, timecol, size[, origin])
+```
+
+#### 11.5.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| --------- | ---------- | --------------------------------- | -------------------- |
+| DATA | 表参数 | ROW SEMANTIC<br>PASS THROUGH | 输入表 |
+| TIMECOL | 标量参数 | 字符串类型<br>默认值:time | 时间列 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小,需为正数 |
+| ORIGIN | 标量参数 | 时间戳类型<br>默认值:Unix 纪元时间 | 第一个窗口起始时间 |
+
+#### 11.5.4 返回结果
+
+TUMBLE 函数的返回结果列包含:
+
+* window\_start: 窗口开始时间(闭区间)
+* window\_end: 窗口结束时间(开区间)
+* 映射列:DATA 参数的所有输入列
+
+#### 11.5.5 使用示例
+
+```SQL
+IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+| window_start| window_end| time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+| window_start| window_end|stock_id| avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
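+
+TUMBLE 同样支持可选的 ORIGIN 参数来调整窗口对齐起点,用法与上文 HOP 的 ORIGIN 示意相同(起始时间为演示用的假设值,此处不展示结果):
+
+```SQL
+-- 第一个窗口从 2021-01-01T09:05:00 开始,窗口大小 10 分钟(示意)
+SELECT * FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m, ORIGIN => 2021-01-01T09:05:00.000);
+```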
+| DATA | 表参数 | ROW SEMANTIC、PASS THROUGH | 输入表 |
+| TIMECOL | 标量参数 | 字符串类型,默认值:time | 时间列 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小,SIZE 必须是 STEP 的整数倍,需为正数 |
+| STEP | 标量参数 | 长整数类型 | 窗口步长,需为正数 |
+| ORIGIN | 标量参数 | 时间戳类型,默认值:Unix 纪元时间 | 第一个窗口起始时间 |
+
+> 注意:size 如果不是 step 的整数倍,则会报错 `Cumulative table function requires size must be an integral multiple of step`
+
+#### 11.6.4 返回结果
+
+CUMULATE 函数的返回结果列包含:
+
+* window\_start: 窗口开始时间(闭区间)
+* window\_end: 窗口结束时间(开区间)
+* 映射列:DATA 参数的所有输入列
+
+#### 11.6.5 使用示例
+
+```sql
+IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|    AAPL|             100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|    AAPL|             101.5|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Featured-Functions_timecho.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Featured-Functions_timecho.md
index d9dd63cac..308c527a4 100644
--- a/src/zh/UserGuide/Master/Table/SQL-Manual/Featured-Functions_timecho.md
+++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Featured-Functions_timecho.md
@@ -822,7 +822,7 @@ IoTDB> SELECT *,count(flow) OVER(PARTITION BY device ORDER BY flow RANGE BETWEEN
 ## 5. Object 类型读取函数
 描述:用于读取 OBJECT 对象的二进制内容。返回 BLOB 类型(对象的二进制内容)。
-> V2.0.8-beta 版本起支持
+> V2.0.8 版本起支持
 语法:
diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md
index 7b05473a5..1d89c3fc7 100644
--- a/src/zh/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md
+++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md
@@ -64,7 +64,7 @@ intervalField
 IoTDB 支持以下三种空值填充方式:
-1. **`PREVIOUS`填充**:使用该列前一个非空值进行填充,V2.0.8-beta 版本起仅该方式支持支持 OBJECT 类型。
+1. **`PREVIOUS`填充**:使用该列前一个非空值进行填充,V2.0.8 版本起仅该方式支持 OBJECT 类型。
 2. **`LINEAR`填充**:使用该列前一个非空值和下一个非空值的线性插值进行填充。
 3. **`Constant`填充**:使用指定的常量值进行填充。
diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements.md b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements.md
index 4793ddd1f..35fee992c 100644
--- a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements.md
+++ b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements.md
@@ -1,3 +1,6 @@
+---
+redirectTo: SQL-Maintenance-Statements_apache.html
+---
-
-# 运维语句
-
-## 1. 状态查看
-
-### 1.1 查看当前的树/表模型
-
-**语法:**
-
-```SQL
-showCurrentSqlDialectStatement
-    : SHOW CURRENT_SQL_DIALECT
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW CURRENT_SQL_DIALECT
-+-----------------+
-|CurrentSqlDialect|
-+-----------------+
-|            TABLE|
-+-----------------+
-```
-
-### 1.2 查看登录的用户名
-
-**语法:**
-
-```SQL
-showCurrentUserStatement
-    : SHOW CURRENT_USER
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW CURRENT_USER
-+-----------+
-|CurrentUser|
-+-----------+
-|       root|
-+-----------+
-```
-
-### 1.3 查看连接的数据库名
-
-**语法:**
-
-```SQL
-showCurrentDatabaseStatement
-    : SHOW CURRENT_DATABASE
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW CURRENT_DATABASE;
-+---------------+
-|CurrentDatabase|
-+---------------+
-|           null|
-+---------------+
-
-IoTDB> USE test;
-
-IoTDB> SHOW CURRENT_DATABASE;
-+---------------+
-|CurrentDatabase|
-+---------------+
-|           test|
-+---------------+
-```
-
-### 1.4 查看集群版本
-
-**语法:**
-
-```SQL
-showVersionStatement
-    : SHOW VERSION
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW VERSION
-+-------+---------+
-|Version|BuildInfo|
-+-------+---------+
-|2.0.1.2|  1ca4008|
-+-------+---------+
-```
-
-### 1.5 查看集群关键参数
-
-**语法:**
-
-```SQL
-showVariablesStatement
-    : SHOW VARIABLES
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW VARIABLES
-+----------------------------------+-----------------------------------------------------------------+
-|                          Variable|                                                            Value|
-+----------------------------------+-----------------------------------------------------------------+
-|                       ClusterName|                                                   defaultCluster|
-|             DataReplicationFactor|                                                                1|
-|           SchemaReplicationFactor|                                                                1|
-|  DataRegionConsensusProtocolClass|                      org.apache.iotdb.consensus.iot.IoTConsensus|
-|SchemaRegionConsensusProtocolClass|                  org.apache.iotdb.consensus.ratis.RatisConsensus|
-| ConfigNodeConsensusProtocolClass|                   org.apache.iotdb.consensus.ratis.RatisConsensus|
-|               TimePartitionOrigin|                                                                0|
-|             TimePartitionInterval|                                                        604800000|
-|              ReadConsistencyLevel|                                                           strong|
-|           SchemaRegionPerDataNode|                                                                1|
-|             DataRegionPerDataNode|                                                                0|
-|                     SeriesSlotNum|                                                             1000|
-|           
SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| -| DiskSpaceWarningThreshold| 0.05| -| TimestampPrecision| ms| -+----------------------------------+-----------------------------------------------------------------+ -``` - -### 1.6 查看集群ID - -**语法:** - -```SQL -showClusterIdStatement - : SHOW (CLUSTERID | CLUSTER_ID) - ; -``` - -**示例:** - -```SQL -IoTDB> SHOW CLUSTER_ID -+------------------------------------+ -| ClusterId| -+------------------------------------+ -|40163007-9ec1-4455-aa36-8055d740fcda| -``` - -### 1.7 查看服务器的时间 - -查看客户端直连的 DataNode 进程所在的服务器的时间 - -**语法:** - -```SQL -showCurrentTimestampStatement - : SHOW CURRENT_TIMESTAMP - ; -``` - -**示例:** - -```SQL -IoTDB> SHOW CURRENT_TIMESTAMP -+-----------------------------+ -| CurrentTimestamp| -+-----------------------------+ -|2025-02-17T11:11:52.987+08:00| -+-----------------------------+ -``` - -### 1.8 查看分区信息 - -**含义**:返回当前集群的分区信息。 - -#### 语法: - -```SQL -showRegionsStatement - : SHOW REGIONS - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW REGIONS -``` - -执行结果如下: - -```SQL -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | -| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| -| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -``` - -### 1.9 查看可用节点 - -**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 - -> V2.0.8 起支持该功能 - -#### 语法: - -```SQL -showAvailableUrlsStatement - : SHOW AVAILABLE URLS - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW AVAILABLE URLS -``` - -执行结果如下: - -```SQL -+----------+-------+ -|RpcAddress|RpcPort| -+----------+-------+ -| 0.0.0.0| 6667| -+----------+-------+ -``` - - -## 2. 状态设置 - -### 2.1 设置连接的树/表模型 - -**语法:** - -```SQL -SET SQL_DIALECT EQ (TABLE | TREE) -``` - -**示例:** - -```SQL -IoTDB> SET SQL_DIALECT=TABLE -IoTDB> SHOW CURRENT_SQL_DIALECT -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TABLE| -+-----------------+ -``` - -### 2.2 更新配置项 - -**语法:** - -```SQL -setConfigurationStatement - : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? - ; - -propertyAssignments - : property (',' property)* - ; - -property - : identifier EQ propertyValue - ; - -propertyValue - : DEFAULT - | expression - ; -``` - -**示例:** - -```SQL -IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; -``` - -### 2.3 读取手动修改的配置文件 - -**语法:** - -```SQL -loadConfigurationStatement - : LOAD CONFIGURATION localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> LOAD CONFIGURATION ON LOCAL; -``` - -### 2.4 设置系统的状态 - -**语法:** - -```SQL -setSystemStatusStatement - : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? 
- ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> SET SYSTEM TO READONLY ON CLUSTER; -``` - -## 3. 数据管理 - -### 3.1 将内存表中的数据刷到磁盘 - -**语法:** - -```SQL -flushStatement - : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? - ; - -booleanValue - : TRUE | FALSE - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> FLUSH test_db TRUE ON LOCAL; -``` - -### 3.2 清除 DataNode 上的缓存 - -**语法:** - -```SQL -clearCacheStatement - : CLEAR clearCacheOptions? CACHE localOrClusterMode? - ; - -clearCacheOptions - : ATTRIBUTE - | QUERY - | ALL - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> CLEAR ALL CACHE ON LOCAL; -``` - -## 4. 数据修复 - -### 4.1 启动后台扫描并修复 tsfile 任务 - -**语法:** - -```SQL -startRepairDataStatement - : START REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> START REPAIR DATA ON CLUSTER; -``` - -### 4.2 暂停后台修复 tsfile 任务 - -**语法:** - -```SQL -stopRepairDataStatement - : STOP REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> STOP REPAIR DATA ON CLUSTER; -``` - -## 5. 查询相关 - -### 5.1 查看正在执行的查询 - -**语法:** - -```SQL -showQueriesStatement - : SHOW (QUERIES | QUERY PROCESSLIST) - (WHERE where=booleanExpression)? - (ORDER BY sortItem (',' sortItem)*)? - limitOffsetClause - ; -``` - -**示例:** - -```SQL -IoTDB> SHOW QUERIES WHERE elapsed_time > 30 -+-----------------------+-----------------------------+-----------+------------+------------+----+ -| query_id| start_time|datanode_id|elapsed_time| statement|user| -+-----------------------+-----------------------------+-----------+------------+------------+----+ -|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00| 1| 32.283|show queries|root| -+-----------------------+-----------------------------+-----------+------------+------------+----+ -``` - -### 5.2 主动终止查询 - -**语法:** - -```SQL -killQueryStatement - : KILL (QUERY queryId=string | ALL QUERIES) - ; -``` - -**示例:** - -```SQL -IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query -IoTDB> KILL ALL QUERIES; -- 终止所有query -``` - -### 5.3 查询性能分析 - -#### 5.3.1 查看执行计划 - -**语法:** - -```SQL -EXPLAIN -``` - -更多详细语法说明请参考:[EXPLAIN 语句](../User-Manual/Query-Performance-Analysis.md#_1-explain-语句) - -**示例:** - -```SQL -IoTDB> explain select * from t1 -+-----------------------------------------------------------------------------------------------+ -| distribution plan| -+-----------------------------------------------------------------------------------------------+ -| ┌─────────────────────────────────────────────┐ | -| │OutputNode-4 │ | -| │OutputColumns-[time, device_id, type, speed] │ | -| │OutputSymbols: [time, device_id, type, speed]│ | -| └─────────────────────────────────────────────┘ | -| │ | -| │ | -| ┌─────────────────────────────────────────────┐ | -| │Collect-21 │ | -| │OutputSymbols: [time, device_id, type, speed]│ | -| └─────────────────────────────────────────────┘ | -| ┌───────────────────────┴───────────────────────┐ | -| │ │ | -|┌─────────────────────────────────────────────┐ ┌───────────┐ | -|│TableScan-19 │ │Exchange-28│ | -|│QualifiedTableName: test.t1 │ └───────────┘ | -|│OutputSymbols: [time, device_id, type, speed]│ │ | -|│DeviceNumber: 1 │ │ | -|│ScanOrder: ASC │ ┌─────────────────────────────────────────────┐| -|│PushDownOffset: 0 │ │TableScan-20 │| -|│PushDownLimit: 0 │ │QualifiedTableName: test.t1 │| 
-|│PushDownLimitToEachDevice: false │ │OutputSymbols: [time, device_id, type, speed]│| -|│RegionId: 2 │ │DeviceNumber: 1 │| -|└─────────────────────────────────────────────┘ │ScanOrder: ASC │| -| │PushDownOffset: 0 │| -| │PushDownLimit: 0 │| -| │PushDownLimitToEachDevice: false │| -| │RegionId: 1 │| -| └─────────────────────────────────────────────┘| -+-----------------------------------------------------------------------------------------------+ -``` - -#### 5.3.2 查询性能分析 - -**语法:** - -```SQL -EXPLAIN ANALYZE [VERBOSE] -``` - -更多详细语法说明请参考:[EXPLAIN ANALYZE 语句](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-语句) - -**示例:** - -```SQL -IoTDB> explain analyze verbose select * from t1 -+-----------------------------------------------------------------------------------------------+ -| Explain Analyze| -+-----------------------------------------------------------------------------------------------+ -|Analyze Cost: 38.860 ms | -|Fetch Partition Cost: 9.888 ms | -|Fetch Schema Cost: 54.046 ms | -|Logical Plan Cost: 10.102 ms | -|Logical Optimization Cost: 17.396 ms | -|Distribution Plan Cost: 2.508 ms | -|Dispatch Cost: 22.126 ms | -|Fragment Instances Count: 2 | -| | -|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| -| Total Wall Time: 18 ms | -| Cost of initDataQuerySource: 6.153 ms | -| Seq File(unclosed): 1, Seq File(closed): 0 | -| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | -| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | -| Query Statistics: | -| loadBloomFilterFromCacheCount: 0 | -| loadBloomFilterFromDiskCount: 0 | -| loadBloomFilterActualIOSize: 0 | -| loadBloomFilterTime: 0.000 | -| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | -| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | -| loadTimeSeriesMetadataFromCacheCount: 0 | -| loadTimeSeriesMetadataFromDiskCount: 0 | -| loadTimeSeriesMetadataActualIOSize: 0 | -| constructAlignedChunkReadersMemCount: 1 | -| constructAlignedChunkReadersMemTime: 0.294 | -| loadChunkFromCacheCount: 0 | -| loadChunkFromDiskCount: 0 | -| loadChunkActualIOSize: 0 | -| pageReadersDecodeAlignedMemCount: 1 | -| pageReadersDecodeAlignedMemTime: 0.047 | -| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | -| CPU Time: 5.523 ms | -| output: 2 rows | -| HasNext() Called Count: 6 | -| Next() Called Count: 5 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 31]: CollectNode(CollectOperator) | -| CPU Time: 5.512 ms | -| output: 2 rows | -| HasNext() Called Count: 6 | -| Next() Called Count: 5 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 29]: TableScanNode(TableScanOperator) | -| CPU Time: 5.439 ms | -| output: 1 rows | -| HasNext() Called Count: 3 -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| DeviceNumber: 1 | -| CurrentDeviceIndex: 0 | -| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | -| CPU Time: 0.053 ms | -| output: 1 rows | -| HasNext() Called Count: 2 | -| Next() Called Count: 1 | -| Estimated Memory Size: : 131072 | -| | -|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| -| Total Wall Time: 13 ms | -| Cost of initDataQuerySource: 5.725 ms | -| Seq File(unclosed): 1, Seq File(closed): 0 | -| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | -| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | -| Query Statistics: | -| loadBloomFilterFromCacheCount: 0 | -| loadBloomFilterFromDiskCount: 0 | -| loadBloomFilterActualIOSize: 0 | -| loadBloomFilterTime: 0.000 | -| 
loadTimeSeriesMetadataAlignedMemSeqCount: 1 | -| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | -| loadTimeSeriesMetadataFromCacheCount: 0 | -| loadTimeSeriesMetadataFromDiskCount: 0 | -| loadTimeSeriesMetadataActualIOSize: 0 | -| constructAlignedChunkReadersMemCount: 1 | -| constructAlignedChunkReadersMemTime: 0.001 | -| loadChunkFromCacheCount: 0 | -| loadChunkFromDiskCount: 0 | -| loadChunkActualIOSize: 0 | -| pageReadersDecodeAlignedMemCount: 1 | -| pageReadersDecodeAlignedMemTime: 0.007 | -| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | -| CPU Time: 0.270 ms | -| output: 1 rows | -| HasNext() Called Count: 3 | -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 30]: TableScanNode(TableScanOperator) | -| CPU Time: 0.250 ms | -| output: 1 rows | -| HasNext() Called Count: 3 | -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| DeviceNumber: 1 | -| CurrentDeviceIndex: 0 | -+-----------------------------------------------------------------------------------------------+ -``` diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_apache.md b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_apache.md new file mode 100644 index 000000000..00efaff14 --- /dev/null +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_apache.md @@ -0,0 +1,651 @@ + + +# 运维语句 + +## 1. 状态查看 + +### 1.1 查看当前的树/表模型 + +**语法:** + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 1.2 查看登录的用户名 + +**语法:** + +```SQL +showCurrentUserStatement + : SHOW CURRENT_USER + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_USER ++-----------+ +|CurrentUser| ++-----------+ +| root| ++-----------+ +``` + +### 1.3 查看连接的数据库名 + +**语法:** + +```SQL +showCurrentDatabaseStatement + : SHOW CURRENT_DATABASE + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| null| ++---------------+ + +IoTDB> USE test; + +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| test| ++---------------+ +``` + +### 1.4 查看集群版本 + +**语法:** + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW VERSION ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.5 查看集群关键参数 + +**语法:** + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW VARIABLES ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| 
TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.6 查看集群ID + +**语法:** + +```SQL +showClusterIdStatement + : SHOW (CLUSTERID | CLUSTER_ID) + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CLUSTER_ID ++------------------------------------+ +| ClusterId| ++------------------------------------+ +|40163007-9ec1-4455-aa36-8055d740fcda| +``` + +### 1.7 查看服务器的时间 + +查看客户端直连的 DataNode 进程所在的服务器的时间 + +**语法:** + +```SQL +showCurrentTimestampStatement + : SHOW CURRENT_TIMESTAMP + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_TIMESTAMP ++-----------------------------+ +| CurrentTimestamp| ++-----------------------------+ +|2025-02-17T11:11:52.987+08:00| ++-----------------------------+ +``` + +### 1.8 查看分区信息 + +**含义**:返回当前集群的分区信息。 + +#### 语法: + +```SQL +showRegionsStatement + : SHOW REGIONS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW REGIONS +``` + +执行结果如下: + +```SQL ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | +| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| +| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +``` + +### 1.9 查看可用节点 + +**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 + +> V2.0.8-beta 起支持该功能 + +#### 语法: + +```SQL +showAvailableUrlsStatement + : SHOW AVAILABLE URLS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW AVAILABLE URLS +``` + +执行结果如下: + +```SQL ++----------+-------+ +|RpcAddress|RpcPort| ++----------+-------+ +| 0.0.0.0| 6667| ++----------+-------+ +``` + + +## 2. 状态设置 + +### 2.1 设置连接的树/表模型 + +**语法:** + +```SQL +SET SQL_DIALECT EQ (TABLE | TREE) +``` + +**示例:** + +```SQL +IoTDB> SET SQL_DIALECT=TABLE +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 2.2 更新配置项 + +**语法:** + +```SQL +setConfigurationStatement + : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? + ; + +propertyAssignments + : property (',' property)* + ; + +property + : identifier EQ propertyValue + ; + +propertyValue + : DEFAULT + | expression + ; +``` + +**示例:** + +```SQL +IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; +``` + +### 2.3 读取手动修改的配置文件 + +**语法:** + +```SQL +loadConfigurationStatement + : LOAD CONFIGURATION localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> LOAD CONFIGURATION ON LOCAL; +``` + +### 2.4 设置系统的状态 + +**语法:** + +```SQL +setSystemStatusStatement + : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> SET SYSTEM TO READONLY ON CLUSTER; +``` + +## 3. 
数据管理 + +### 3.1 将内存表中的数据刷到磁盘 + +**语法:** + +```SQL +flushStatement + : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? + ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> FLUSH test_db TRUE ON LOCAL; +``` + +### 3.2 清除 DataNode 上的缓存 + +**语法:** + +```SQL +clearCacheStatement + : CLEAR clearCacheOptions? CACHE localOrClusterMode? + ; + +clearCacheOptions + : ATTRIBUTE + | QUERY + | ALL + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> CLEAR ALL CACHE ON LOCAL; +``` + +## 4. 数据修复 + +### 4.1 启动后台扫描并修复 tsfile 任务 + +**语法:** + +```SQL +startRepairDataStatement + : START REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> START REPAIR DATA ON CLUSTER; +``` + +### 4.2 暂停后台修复 tsfile 任务 + +**语法:** + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. 查询相关 + +### 5.1 查看正在执行的查询 + +**语法:** + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)? + limitOffsetClause + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW QUERIES WHERE elapsed_time > 30 ++-----------------------+-----------------------------+-----------+------------+------------+----+ +| query_id| start_time|datanode_id|elapsed_time| statement|user| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00| 1| 32.283|show queries|root| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +``` + +### 5.2 主动终止查询 + +**语法:** + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**示例:** + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query +IoTDB> KILL ALL QUERIES; -- 终止所有query +``` + +### 5.3 查询性能分析 + +#### 5.3.1 查看执行计划 + +**语法:** + +```SQL +EXPLAIN +``` + +更多详细语法说明请参考:[EXPLAIN 语句](../User-Manual/Query-Performance-Analysis.md#_1-explain-语句) + +**示例:** + +```SQL +IoTDB> explain select * from t1 ++-----------------------------------------------------------------------------------------------+ +| distribution plan| ++-----------------------------------------------------------------------------------------------+ +| ┌─────────────────────────────────────────────┐ | +| │OutputNode-4 │ | +| │OutputColumns-[time, device_id, type, speed] │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| │ | +| │ | +| ┌─────────────────────────────────────────────┐ | +| │Collect-21 │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| ┌───────────────────────┴───────────────────────┐ | +| │ │ | +|┌─────────────────────────────────────────────┐ ┌───────────┐ | +|│TableScan-19 │ │Exchange-28│ | +|│QualifiedTableName: test.t1 │ └───────────┘ | +|│OutputSymbols: [time, device_id, type, speed]│ │ | +|│DeviceNumber: 1 │ │ | +|│ScanOrder: ASC │ ┌─────────────────────────────────────────────┐| +|│PushDownOffset: 0 │ │TableScan-20 │| +|│PushDownLimit: 0 │ │QualifiedTableName: test.t1 │| +|│PushDownLimitToEachDevice: false │ │OutputSymbols: [time, device_id, type, speed]│| +|│RegionId: 2 │ │DeviceNumber: 1 │| 
+|└─────────────────────────────────────────────┘ │ScanOrder: ASC │| +| │PushDownOffset: 0 │| +| │PushDownLimit: 0 │| +| │PushDownLimitToEachDevice: false │| +| │RegionId: 1 │| +| └─────────────────────────────────────────────┘| ++-----------------------------------------------------------------------------------------------+ +``` + +#### 5.3.2 查询性能分析 + +**语法:** + +```SQL +EXPLAIN ANALYZE [VERBOSE] +``` + +更多详细语法说明请参考:[EXPLAIN ANALYZE 语句](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-语句) + +**示例:** + +```SQL +IoTDB> explain analyze verbose select * from t1 ++-----------------------------------------------------------------------------------------------+ +| Explain Analyze| ++-----------------------------------------------------------------------------------------------+ +|Analyze Cost: 38.860 ms | +|Fetch Partition Cost: 9.888 ms | +|Fetch Schema Cost: 54.046 ms | +|Logical Plan Cost: 10.102 ms | +|Logical Optimization Cost: 17.396 ms | +|Distribution Plan Cost: 2.508 ms | +|Dispatch Cost: 22.126 ms | +|Fragment Instances Count: 2 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| +| Total Wall Time: 18 ms | +| Cost of initDataQuerySource: 6.153 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.294 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.047 | +| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 5.523 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 31]: CollectNode(CollectOperator) | +| CPU Time: 5.512 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 29]: TableScanNode(TableScanOperator) | +| CPU Time: 5.439 ms | +| output: 1 rows | +| HasNext() Called Count: 3 +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | +| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | +| CPU Time: 0.053 ms | +| output: 1 rows | +| HasNext() Called Count: 2 | +| Next() Called Count: 1 | +| Estimated Memory Size: : 131072 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| +| Total Wall Time: 13 ms | +| Cost of initDataQuerySource: 5.725 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | +| loadTimeSeriesMetadataFromCacheCount: 0 | 
+| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.001 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.007 | +| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 0.270 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 30]: TableScanNode(TableScanOperator) | +| CPU Time: 0.250 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | ++-----------------------------------------------------------------------------------------------+ +``` diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md new file mode 100644 index 000000000..4793ddd1f --- /dev/null +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md @@ -0,0 +1,651 @@ + + +# 运维语句 + +## 1. 状态查看 + +### 1.1 查看当前的树/表模型 + +**语法:** + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 1.2 查看登录的用户名 + +**语法:** + +```SQL +showCurrentUserStatement + : SHOW CURRENT_USER + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_USER ++-----------+ +|CurrentUser| ++-----------+ +| root| ++-----------+ +``` + +### 1.3 查看连接的数据库名 + +**语法:** + +```SQL +showCurrentDatabaseStatement + : SHOW CURRENT_DATABASE + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| null| ++---------------+ + +IoTDB> USE test; + +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| test| ++---------------+ +``` + +### 1.4 查看集群版本 + +**语法:** + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW VERSION ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.5 查看集群关键参数 + +**语法:** + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW VARIABLES ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.6 
查看集群ID + +**语法:** + +```SQL +showClusterIdStatement + : SHOW (CLUSTERID | CLUSTER_ID) + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CLUSTER_ID ++------------------------------------+ +| ClusterId| ++------------------------------------+ +|40163007-9ec1-4455-aa36-8055d740fcda| +``` + +### 1.7 查看服务器的时间 + +查看客户端直连的 DataNode 进程所在的服务器的时间 + +**语法:** + +```SQL +showCurrentTimestampStatement + : SHOW CURRENT_TIMESTAMP + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_TIMESTAMP ++-----------------------------+ +| CurrentTimestamp| ++-----------------------------+ +|2025-02-17T11:11:52.987+08:00| ++-----------------------------+ +``` + +### 1.8 查看分区信息 + +**含义**:返回当前集群的分区信息。 + +#### 语法: + +```SQL +showRegionsStatement + : SHOW REGIONS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW REGIONS +``` + +执行结果如下: + +```SQL ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | +| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| +| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +``` + +### 1.9 查看可用节点 + +**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 + +> V2.0.8 起支持该功能 + +#### 语法: + +```SQL +showAvailableUrlsStatement + : SHOW AVAILABLE URLS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW AVAILABLE URLS +``` + +执行结果如下: + +```SQL ++----------+-------+ +|RpcAddress|RpcPort| ++----------+-------+ +| 0.0.0.0| 6667| ++----------+-------+ +``` + + +## 2. 状态设置 + +### 2.1 设置连接的树/表模型 + +**语法:** + +```SQL +SET SQL_DIALECT EQ (TABLE | TREE) +``` + +**示例:** + +```SQL +IoTDB> SET SQL_DIALECT=TABLE +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 2.2 更新配置项 + +**语法:** + +```SQL +setConfigurationStatement + : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? + ; + +propertyAssignments + : property (',' property)* + ; + +property + : identifier EQ propertyValue + ; + +propertyValue + : DEFAULT + | expression + ; +``` + +**示例:** + +```SQL +IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; +``` + +### 2.3 读取手动修改的配置文件 + +**语法:** + +```SQL +loadConfigurationStatement + : LOAD CONFIGURATION localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> LOAD CONFIGURATION ON LOCAL; +``` + +### 2.4 设置系统的状态 + +**语法:** + +```SQL +setSystemStatusStatement + : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> SET SYSTEM TO READONLY ON CLUSTER; +``` + +## 3. 数据管理 + +### 3.1 将内存表中的数据刷到磁盘 + +**语法:** + +```SQL +flushStatement + : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? 
+ ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> FLUSH test_db TRUE ON LOCAL; +``` + +### 3.2 清除 DataNode 上的缓存 + +**语法:** + +```SQL +clearCacheStatement + : CLEAR clearCacheOptions? CACHE localOrClusterMode? + ; + +clearCacheOptions + : ATTRIBUTE + | QUERY + | ALL + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> CLEAR ALL CACHE ON LOCAL; +``` + +## 4. 数据修复 + +### 4.1 启动后台扫描并修复 tsfile 任务 + +**语法:** + +```SQL +startRepairDataStatement + : START REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> START REPAIR DATA ON CLUSTER; +``` + +### 4.2 暂停后台修复 tsfile 任务 + +**语法:** + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. 查询相关 + +### 5.1 查看正在执行的查询 + +**语法:** + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)? + limitOffsetClause + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW QUERIES WHERE elapsed_time > 30 ++-----------------------+-----------------------------+-----------+------------+------------+----+ +| query_id| start_time|datanode_id|elapsed_time| statement|user| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00| 1| 32.283|show queries|root| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +``` + +### 5.2 主动终止查询 + +**语法:** + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**示例:** + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query +IoTDB> KILL ALL QUERIES; -- 终止所有query +``` + +### 5.3 查询性能分析 + +#### 5.3.1 查看执行计划 + +**语法:** + +```SQL +EXPLAIN +``` + +更多详细语法说明请参考:[EXPLAIN 语句](../User-Manual/Query-Performance-Analysis.md#_1-explain-语句) + +**示例:** + +```SQL +IoTDB> explain select * from t1 ++-----------------------------------------------------------------------------------------------+ +| distribution plan| ++-----------------------------------------------------------------------------------------------+ +| ┌─────────────────────────────────────────────┐ | +| │OutputNode-4 │ | +| │OutputColumns-[time, device_id, type, speed] │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| │ | +| │ | +| ┌─────────────────────────────────────────────┐ | +| │Collect-21 │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| ┌───────────────────────┴───────────────────────┐ | +| │ │ | +|┌─────────────────────────────────────────────┐ ┌───────────┐ | +|│TableScan-19 │ │Exchange-28│ | +|│QualifiedTableName: test.t1 │ └───────────┘ | +|│OutputSymbols: [time, device_id, type, speed]│ │ | +|│DeviceNumber: 1 │ │ | +|│ScanOrder: ASC │ ┌─────────────────────────────────────────────┐| +|│PushDownOffset: 0 │ │TableScan-20 │| +|│PushDownLimit: 0 │ │QualifiedTableName: test.t1 │| +|│PushDownLimitToEachDevice: false │ │OutputSymbols: [time, device_id, type, speed]│| +|│RegionId: 2 │ │DeviceNumber: 1 │| +|└─────────────────────────────────────────────┘ │ScanOrder: ASC │| +| │PushDownOffset: 0 │| +| │PushDownLimit: 0 │| +| │PushDownLimitToEachDevice: false │| +| 
│RegionId: 1 │| +| └─────────────────────────────────────────────┘| ++-----------------------------------------------------------------------------------------------+ +``` + +#### 5.3.2 查询性能分析 + +**语法:** + +```SQL +EXPLAIN ANALYZE [VERBOSE] +``` + +更多详细语法说明请参考:[EXPLAIN ANALYZE 语句](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-语句) + +**示例:** + +```SQL +IoTDB> explain analyze verbose select * from t1 ++-----------------------------------------------------------------------------------------------+ +| Explain Analyze| ++-----------------------------------------------------------------------------------------------+ +|Analyze Cost: 38.860 ms | +|Fetch Partition Cost: 9.888 ms | +|Fetch Schema Cost: 54.046 ms | +|Logical Plan Cost: 10.102 ms | +|Logical Optimization Cost: 17.396 ms | +|Distribution Plan Cost: 2.508 ms | +|Dispatch Cost: 22.126 ms | +|Fragment Instances Count: 2 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| +| Total Wall Time: 18 ms | +| Cost of initDataQuerySource: 6.153 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.294 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.047 | +| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 5.523 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 31]: CollectNode(CollectOperator) | +| CPU Time: 5.512 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 29]: TableScanNode(TableScanOperator) | +| CPU Time: 5.439 ms | +| output: 1 rows | +| HasNext() Called Count: 3 +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | +| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | +| CPU Time: 0.053 ms | +| output: 1 rows | +| HasNext() Called Count: 2 | +| Next() Called Count: 1 | +| Estimated Memory Size: : 131072 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| +| Total Wall Time: 13 ms | +| Cost of initDataQuerySource: 5.725 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| 
constructAlignedChunkReadersMemTime: 0.001 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.007 | +| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 0.270 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 30]: TableScanNode(TableScanOperator) | +| CPU Time: 0.250 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | ++-----------------------------------------------------------------------------------------------+ +``` diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause.md index a1ede9228..1a6da4ae1 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause.md @@ -1,3 +1,6 @@ +--- +redirectTo: Select-Clause_apache.html +--- - -# SELECT 子句 - -## 1. 语法概览 - -```sql -SELECT setQuantifier? selectItem (',' selectItem)* - -selectItem - : expression (AS? identifier)? #selectSingle - | tableName '.' ASTERISK (AS columnAliases)? #selectAll - | ASTERISK #selectAll - ; -setQuantifier - : DISTINCT - | ALL - ; -``` - -- **SELECT 子句**: 指定了查询结果应包含的列,包含聚合函数(如 SUM、AVG、COUNT 等)以及窗口函数,在逻辑上最后执行。 -- **DISTINCT 关键字**: `SELECT DISTINCT column_name` 确保查询结果中的值是唯一的,去除重复项。 -- **COLUMNS 函数**:SELECT 子句中支持使用 COLUMNS 函数进行列筛选,并支持和表达式结合使用,使表达式的效果对所有筛选出的列生效。 - -## 2. 语法详释: - -每个 `selectItem` 可以是以下形式之一: - -- **表达式**: `expression [ [ AS ] column_alias ]` 定义单个输出列,可以指定列别名。 -- **选择某个关系的所有列**: `relation.*` 选择某个关系的所有列,不允许使用列别名。 -- **选择结果集中的所有列**: `*` 选择查询的所有列,不允许使用列别名。 - -`DISTINCT` 的使用场景: - -- **SELECT 语句**:在 SELECT 语句中使用 DISTINCT,查询结果去除重复项。 -- **聚合函数**:与聚合函数一起使用时,DISTINCT 只处理输入数据集中的非重复行。 -- **GROUP BY 子句**:在 GROUP BY 子句中使用 ALL 和 DISTINCT 量词,决定是否每个重复的分组集产生不同的输出行。 - -`COLUMNS` 函数: -- **`COLUMNS(*)`**: 匹配所有列,支持结合表达式进行使用。 -- **`COLUMNS(regexStr) ? AS identifier`**:正则匹配 - - 匹配所有列名满足正则表达式的列,支持结合表达式进行使用。 - - 支持引用正则表达式捕获到的 groups 对列进行重命名,不写 AS 时展示原始列名(即 _coln_原始列名,其中 n 为列在结果表中的 position)。 - - 重命名用法简述: - - regexStr 中使用圆括号设置要捕获的组; - - 在 identifier 中使用 `'$index'` 引用捕获到的组。 - - 注意:使用该功能时,identifier 中会包含特殊字符 '$',所以整个 identifier 要用双引号引起来。 - -## 3. 
示例数据 - -在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 - -### 3.1 选择列表 - -#### 3.1.1 星表达式 - -使用星号(*)可以选取表中的所有列,**注意**,星号表达式不能被大多数函数转换,除了`count(*)`的情况。 - -示例:从表中选择所有列 - -```sql -SELECT * FROM table1; -``` - -执行结果如下: - -```sql -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| -|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| -|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| -|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| -|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -Total line number = 18 -It costs 0.653s -``` - -#### 3.1.2 聚合函数 - -聚合函数将多行数据汇总为单个值。当 SELECT 子句中存在聚合函数时,查询将被视为聚合查询。在聚合查询中,所有表达式必须是聚合函数的一部分或由[GROUP BY子句](../SQL-Manual/GroupBy-Clause.md)指定的分组的一部分。 - -示例1:返回地址表中的总行数: - -```sql -SELECT count(*) FROM table1; -``` - -执行结果如下: - -```sql -+-----+ -|_col0| -+-----+ -| 18| -+-----+ -Total line number = 1 -It costs 0.091s -``` - -示例2:返回按城市分组的地址表中的总行数: - -```sql -SELECT region, count(*) - FROM table1 - GROUP BY region; -``` - -执行结果如下: - -```sql -+------+-----+ -|region|_col1| -+------+-----+ -| 上海| 9| -| 北京| 9| -+------+-----+ -Total line number = 2 -It costs 0.071s -``` - -#### 3.1.3 别名 - -关键字`AS`:为选定的列指定别名,别名将覆盖已存在的列名,以提高查询结果的可读性。 - -示例1:原始表格: - -```sql -IoTDB> SELECT * FROM table1; -``` - -执行结果如下: - -```sql -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| 
-+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| -|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| -|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| -|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| -|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -Total line number = 18 -It costs 0.653s -``` - -示例2:单列设置别名: - -```sql -IoTDB> SELECT device_id - AS device - FROM table1; -``` - -执行结果如下: - -```sql -+------+ -|device| -+------+ -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -+------+ -Total line number = 18 -It costs 0.053s -``` - -示例3:所有列的别名: - -```sql -IoTDB> SELECT table1.* - AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) - FROM table1; -``` - -执行结果如下: - -```sql -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| -|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| -|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| 
-|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| -|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| -|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| -|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| -|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -Total line number = 18 -It costs 0.189s -``` - -#### 3.1.4 Object 类型查询 - -> V2.0.8-beta 版本起支持 - -示例一:直接查询 object 类型数据 - -```SQL -IoTDB:database1> select s1 from table1 where device_id = 'tag1' -+------------+ -| s1| -+------------+ -|(Object) 5 B| -+------------+ -Total line number = 1 -It costs 0.428s -``` - -示例二:通过 read\_object 函数查询 Object 类型数据的真实内容 - -```SQL -IoTDB:database1> select read_object(s1) from table1 where device_id = 'tag1' -+------------+ -| _col0| -+------------+ -|0x696f746462| -+------------+ -Total line number = 1 -It costs 0.188s -``` - - -### 3.2 Columns 函数 - -1. 不结合表达式 -```sql --- 查询列名以 'm' 开头的列的数据 -IoTDB:database1> select columns('^m.*') from table1 limit 5 -+--------+-----------+ -|model_id|maintenance| -+--------+-----------+ -| E| 180| -| E| 180| -| C| 90| -| C| 90| -| C| 90| -+--------+-----------+ - - --- 查询列名以 'o' 开头的列,未匹配到任何列,抛出异常 -IoTDB:database1> select columns('^o.*') from table1 limit 5 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' - - --- 查询列名以 'm' 开头的列的数据,并重命名以 'series_' 开头 -IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 -+---------------+------------------+ -|series_model_id|series_maintenance| -+---------------+------------------+ -| E| 180| -| E| 180| -| C| 90| -| C| 90| -| C| 90| -+---------------+------------------+ -``` - -2. 
结合表达式 - -- 单个 COLUMNS 函数 -```sql --- 查询所有列的最小值 -IoTDB:database1> select min(columns(*)) from table1 -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -``` - -- 多个 COLUMNS 函数,出现在同一表达式 - -> 使用限制:出现多个 COLUMNS 函数时,多个 COLUMNS 函数的参数要完全相同 - -```sql --- 查询 'h' 开头列的最小值和最大值之和 -IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 -+--------------+ -|_col0_humidity| -+--------------+ -| 79.899994| -+--------------+ - --- 错误查询,两个 COLUMNS 函数不完全相同 -IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported -``` - -- 多个 COLUMNS 函数,出现在不同表达式 - -```sql --- 分别查询 'h' 开头列的最小值和最大值 -IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 -+--------------+--------------+ -|_col0_humidity|_col1_humidity| -+--------------+--------------+ -| 34.8| 45.1| -+--------------+--------------+ - --- 分别查询 'h' 开头列的最小值和 'te'开头列的最大值 -IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 -+--------------+-----------------+ -|_col0_humidity|_col1_temperature| -+--------------+-----------------+ -| 34.8| 90.0| -+--------------+-----------------+ -``` - -3. 
在 WHERE 子句中使用
-
-```sql
--- 查询数据,所有 'h' 开头列的数据必须要大于 40
-IoTDB:database1> select * from table1 where columns('^h.*') > 40
-+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
-|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time|
-+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
-|2024-11-29T11:00:00.000+08:00|    上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null|
-|2024-11-28T09:00:00.000+08:00|    上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null|
-|2024-11-28T11:00:00.000+08:00|    上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00|
-+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
-
---等价于
-IoTDB:database1> select * from table1 where humidity > 40
-+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
-|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                 arrival_time|
-+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
-|2024-11-29T11:00:00.000+08:00|    上海|    3002|      100|       E|        180|       null|    45.1|  true|                         null|
-|2024-11-28T09:00:00.000+08:00|    上海|    3001|      100|       C|         90|       null|    40.9|  true|                         null|
-|2024-11-28T11:00:00.000+08:00|    上海|    3001|      100|       C|         90|       88.0|    45.1|  true|2024-11-28T11:00:12.000+08:00|
-+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
-```
-
-## 4. 结果集列顺序
-
-- **列顺序**: 结果集中的列顺序与 SELECT 子句中指定的顺序相同。
-- **多列排序**: 如果选择表达式返回多个列,它们的排序方式与源关系中的排序方式相同
\ No newline at end of file
diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause_apache.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause_apache.md
new file mode 100644
index 000000000..a1ede9228
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause_apache.md
@@ -0,0 +1,414 @@
+
+
+# SELECT 子句
+
+## 1. 语法概览
+
+```sql
+SELECT setQuantifier? selectItem (',' selectItem)*
+
+selectItem
+    : expression (AS? identifier)?                #selectSingle
+    | tableName '.' ASTERISK (AS columnAliases)?  #selectAll
+    | ASTERISK                                    #selectAll
+    ;
+setQuantifier
+    : DISTINCT
+    | ALL
+    ;
+```
+
+- **SELECT 子句**: 指定了查询结果应包含的列,包含聚合函数(如 SUM、AVG、COUNT 等)以及窗口函数,在逻辑上最后执行。
+- **DISTINCT 关键字**: `SELECT DISTINCT column_name` 确保查询结果中的值是唯一的,去除重复项。
+- **COLUMNS 函数**:SELECT 子句中支持使用 COLUMNS 函数进行列筛选,并支持和表达式结合使用,使表达式的效果对所有筛选出的列生效。
+
+## 2. 语法详释:
+
+每个 `selectItem` 可以是以下形式之一:
+
+- **表达式**: `expression [ [ AS ] column_alias ]` 定义单个输出列,可以指定列别名。
+- **选择某个关系的所有列**: `relation.*` 选择某个关系的所有列,不允许使用列别名。
+- **选择结果集中的所有列**: `*` 选择查询的所有列,不允许使用列别名。
+
+`DISTINCT` 的使用场景:
+
+- **SELECT 语句**:在 SELECT 语句中使用 DISTINCT,查询结果去除重复项。
+- **聚合函数**:与聚合函数一起使用时,DISTINCT 只处理输入数据集中的非重复行。
+- **GROUP BY 子句**:在 GROUP BY 子句中使用 ALL 和 DISTINCT 量词,决定是否每个重复的分组集产生不同的输出行。
+
+`COLUMNS` 函数:
+- **`COLUMNS(*)`**: 匹配所有列,支持结合表达式进行使用。
+- **`COLUMNS(regexStr) (AS identifier)?`**:正则匹配
+  - 匹配所有列名满足正则表达式的列,支持结合表达式进行使用。
+  - 支持引用正则表达式捕获到的 groups 对列进行重命名,不写 AS 时展示原始列名(即 _coln_原始列名,其中 n 为该列在结果表中的位置)。
+  - 重命名用法简述:
+    - regexStr 中使用圆括号设置要捕获的组;
+    - 在 identifier 中使用 `'$index'` 引用捕获到的组。
+
+  注意:使用该功能时,identifier 中会包含特殊字符 '$',所以整个 identifier 要用双引号引起来。
+
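+下面补充一个 `DISTINCT` 的最小示例(基于下文第 3 节的示例数据,其中 region 列只有"北京"和"上海"两个取值;写法遵循上文语法,仅作示意):
+
+```sql
+-- 对 region 列去重:18 行数据经去重后只返回"上海"和"北京"两行
+SELECT DISTINCT region FROM table1;
+```
+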
+## 3. 示例数据
+
+在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的 SQL 语句。下载并在 IoTDB CLI 中执行这些语句,即可将数据导入 IoTDB;您可以使用这些数据来测试和执行示例中的 SQL 语句,并获得相应的结果。
+
+### 3.1 选择列表
+
+#### 3.1.1 星表达式
+
+使用星号(*)可以选取表中的所有列。**注意**:星号表达式不能作为大多数函数的参数,`count(*)` 是例外。
+
+示例:从表中选择所有列
+
+```sql
+SELECT * FROM table1;
+```
+
+执行结果如下:
+
+```sql
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+
+|                         time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status|                   modifytime|
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +示例2:单列设置别名: + +```sql +IoTDB> SELECT device_id + AS device + FROM table1; +``` + +执行结果如下: + +```sql ++------+ +|device| ++------+ +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| ++------+ +Total line number = 18 +It costs 0.053s +``` + +示例3:所有列的别名: + +```sql +IoTDB> SELECT table1.* + AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) + FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| +|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| +|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| 
+|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| +|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| +|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| +|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| +|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +Total line number = 18 +It costs 0.189s +``` + +#### 3.1.4 Object 类型查询 + +> V2.0.8-beta 版本起支持 + +示例一:直接查询 object 类型数据 + +```SQL +IoTDB:database1> select s1 from table1 where device_id = 'tag1' ++------------+ +| s1| ++------------+ +|(Object) 5 B| ++------------+ +Total line number = 1 +It costs 0.428s +``` + +示例二:通过 read\_object 函数查询 Object 类型数据的真实内容 + +```SQL +IoTDB:database1> select read_object(s1) from table1 where device_id = 'tag1' ++------------+ +| _col0| ++------------+ +|0x696f746462| ++------------+ +Total line number = 1 +It costs 0.188s +``` + + +### 3.2 Columns 函数 + +1. 不结合表达式 +```sql +-- 查询列名以 'm' 开头的列的数据 +IoTDB:database1> select columns('^m.*') from table1 limit 5 ++--------+-----------+ +|model_id|maintenance| ++--------+-----------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++--------+-----------+ + + +-- 查询列名以 'o' 开头的列,未匹配到任何列,抛出异常 +IoTDB:database1> select columns('^o.*') from table1 limit 5 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' + + +-- 查询列名以 'm' 开头的列的数据,并重命名以 'series_' 开头 +IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 ++---------------+------------------+ +|series_model_id|series_maintenance| ++---------------+------------------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++---------------+------------------+ +``` + +2. 
结合表达式 + +- 单个 COLUMNS 函数 +```sql +-- 查询所有列的最小值 +IoTDB:database1> select min(columns(*)) from table1 ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +``` + +- 多个 COLUMNS 函数,出现在同一表达式 + +> 使用限制:出现多个 COLUMNS 函数时,多个 COLUMNS 函数的参数要完全相同 + +```sql +-- 查询 'h' 开头列的最小值和最大值之和 +IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 ++--------------+ +|_col0_humidity| ++--------------+ +| 79.899994| ++--------------+ + +-- 错误查询,两个 COLUMNS 函数不完全相同 +IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported +``` + +- 多个 COLUMNS 函数,出现在不同表达式 + +```sql +-- 分别查询 'h' 开头列的最小值和最大值 +IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 ++--------------+--------------+ +|_col0_humidity|_col1_humidity| ++--------------+--------------+ +| 34.8| 45.1| ++--------------+--------------+ + +-- 分别查询 'h' 开头列的最小值和 'te'开头列的最大值 +IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 ++--------------+-----------------+ +|_col0_humidity|_col1_temperature| ++--------------+-----------------+ +| 34.8| 90.0| ++--------------+-----------------+ +``` + +3. 
在 WHERE 子句中使用 + +```sql +-- 查询数据,所有 'h' 开头列的数据必须要大于 40 +IoTDB:database1> select * from table1 where columns('^h.*') > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ + +--等价于 +IoTDB:database1> select * from table1 where humidity > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +``` + +## 4. 结果集列顺序 + +- **列顺序**: 结果集中的列顺序与 SELECT 子句中指定的顺序相同。 +- **多列排序**: 如果选择表达式返回多个列,它们的排序方式与源关系中的排序方式相同 \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause_timecho.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause_timecho.md new file mode 100644 index 000000000..3f2c476b1 --- /dev/null +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause_timecho.md @@ -0,0 +1,414 @@ + + +# SELECT 子句 + +## 1. 语法概览 + +```sql +SELECT setQuantifier? selectItem (',' selectItem)* + +selectItem + : expression (AS? identifier)? #selectSingle + | tableName '.' ASTERISK (AS columnAliases)? #selectAll + | ASTERISK #selectAll + ; +setQuantifier + : DISTINCT + | ALL + ; +``` + +- **SELECT 子句**: 指定了查询结果应包含的列,包含聚合函数(如 SUM、AVG、COUNT 等)以及窗口函数,在逻辑上最后执行。 +- **DISTINCT 关键字**: `SELECT DISTINCT column_name` 确保查询结果中的值是唯一的,去除重复项。 +- **COLUMNS 函数**:SELECT 子句中支持使用 COLUMNS 函数进行列筛选,并支持和表达式结合使用,使表达式的效果对所有筛选出的列生效。 + +## 2. 语法详释: + +每个 `selectItem` 可以是以下形式之一: + +- **表达式**: `expression [ [ AS ] column_alias ]` 定义单个输出列,可以指定列别名。 +- **选择某个关系的所有列**: `relation.*` 选择某个关系的所有列,不允许使用列别名。 +- **选择结果集中的所有列**: `*` 选择查询的所有列,不允许使用列别名。 + +`DISTINCT` 的使用场景: + +- **SELECT 语句**:在 SELECT 语句中使用 DISTINCT,查询结果去除重复项。 +- **聚合函数**:与聚合函数一起使用时,DISTINCT 只处理输入数据集中的非重复行。 +- **GROUP BY 子句**:在 GROUP BY 子句中使用 ALL 和 DISTINCT 量词,决定是否每个重复的分组集产生不同的输出行。 + +`COLUMNS` 函数: +- **`COLUMNS(*)`**: 匹配所有列,支持结合表达式进行使用。 +- **`COLUMNS(regexStr) ? AS identifier`**:正则匹配 + - 匹配所有列名满足正则表达式的列,支持结合表达式进行使用。 + - 支持引用正则表达式捕获到的 groups 对列进行重命名,不写 AS 时展示原始列名(即 _coln_原始列名,其中 n 为列在结果表中的 position)。 + - 重命名用法简述: + - regexStr 中使用圆括号设置要捕获的组; + - 在 identifier 中使用 `'$index'` 引用捕获到的组。 + + 注意:使用该功能时,identifier 中会包含特殊字符 '$',所以整个 identifier 要用双引号引起来。 + +## 3. 
示例数据 + +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 + +### 3.1 选择列表 + +#### 3.1.1 星表达式 + +使用星号(*)可以选取表中的所有列,**注意**,星号表达式不能被大多数函数转换,除了`count(*)`的情况。 + +示例:从表中选择所有列 + +```sql +SELECT * FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +#### 3.1.2 聚合函数 + +聚合函数将多行数据汇总为单个值。当 SELECT 子句中存在聚合函数时,查询将被视为聚合查询。在聚合查询中,所有表达式必须是聚合函数的一部分或由[GROUP BY子句](../SQL-Manual/GroupBy-Clause.md)指定的分组的一部分。 + +示例1:返回地址表中的总行数: + +```sql +SELECT count(*) FROM table1; +``` + +执行结果如下: + +```sql ++-----+ +|_col0| ++-----+ +| 18| ++-----+ +Total line number = 1 +It costs 0.091s +``` + +示例2:返回按城市分组的地址表中的总行数: + +```sql +SELECT region, count(*) + FROM table1 + GROUP BY region; +``` + +执行结果如下: + +```sql ++------+-----+ +|region|_col1| ++------+-----+ +| 上海| 9| +| 北京| 9| ++------+-----+ +Total line number = 2 +It costs 0.071s +``` + +#### 3.1.3 别名 + +关键字`AS`:为选定的列指定别名,别名将覆盖已存在的列名,以提高查询结果的可读性。 + +示例1:原始表格: + +```sql +IoTDB> SELECT * FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| 
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +示例2:单列设置别名: + +```sql +IoTDB> SELECT device_id + AS device + FROM table1; +``` + +执行结果如下: + +```sql ++------+ +|device| ++------+ +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| ++------+ +Total line number = 18 +It costs 0.053s +``` + +示例3:所有列的别名: + +```sql +IoTDB> SELECT table1.* + AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) + FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| +|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| +|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| 
+|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| +|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| +|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| +|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| +|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +Total line number = 18 +It costs 0.189s +``` + +#### 3.1.4 Object 类型查询 + +> V2.0.8 版本起支持 + +示例一:直接查询 object 类型数据 + +```SQL +IoTDB:database1> select s1 from table1 where device_id = 'tag1' ++------------+ +| s1| ++------------+ +|(Object) 5 B| ++------------+ +Total line number = 1 +It costs 0.428s +``` + +示例二:通过 read\_object 函数查询 Object 类型数据的真实内容 + +```SQL +IoTDB:database1> select read_object(s1) from table1 where device_id = 'tag1' ++------------+ +| _col0| ++------------+ +|0x696f746462| ++------------+ +Total line number = 1 +It costs 0.188s +``` + + +### 3.2 Columns 函数 + +1. 不结合表达式 +```sql +-- 查询列名以 'm' 开头的列的数据 +IoTDB:database1> select columns('^m.*') from table1 limit 5 ++--------+-----------+ +|model_id|maintenance| ++--------+-----------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++--------+-----------+ + + +-- 查询列名以 'o' 开头的列,未匹配到任何列,抛出异常 +IoTDB:database1> select columns('^o.*') from table1 limit 5 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' + + +-- 查询列名以 'm' 开头的列的数据,并重命名以 'series_' 开头 +IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 ++---------------+------------------+ +|series_model_id|series_maintenance| ++---------------+------------------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++---------------+------------------+ +``` + +2. 
结合表达式 + +- 单个 COLUMNS 函数 +```sql +-- 查询所有列的最小值 +IoTDB:database1> select min(columns(*)) from table1 ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +``` + +- 多个 COLUMNS 函数,出现在同一表达式 + +> 使用限制:出现多个 COLUMNS 函数时,多个 COLUMNS 函数的参数要完全相同 + +```sql +-- 查询 'h' 开头列的最小值和最大值之和 +IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 ++--------------+ +|_col0_humidity| ++--------------+ +| 79.899994| ++--------------+ + +-- 错误查询,两个 COLUMNS 函数不完全相同 +IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported +``` + +- 多个 COLUMNS 函数,出现在不同表达式 + +```sql +-- 分别查询 'h' 开头列的最小值和最大值 +IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 ++--------------+--------------+ +|_col0_humidity|_col1_humidity| ++--------------+--------------+ +| 34.8| 45.1| ++--------------+--------------+ + +-- 分别查询 'h' 开头列的最小值和 'te'开头列的最大值 +IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 ++--------------+-----------------+ +|_col0_humidity|_col1_temperature| ++--------------+-----------------+ +| 34.8| 90.0| ++--------------+-----------------+ +``` + +3. 
在 WHERE 子句中使用 + +```sql +-- 查询数据,所有 'h' 开头列的数据必须要大于 40 +IoTDB:database1> select * from table1 where columns('^h.*') > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ + +--等价于 +IoTDB:database1> select * from table1 where humidity > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +``` + +## 4. 结果集列顺序 + +- **列顺序**: 结果集中的列顺序与 SELECT 子句中指定的顺序相同。 +- **多列排序**: 如果选择表达式返回多个列,它们的排序方式与源关系中的排序方式相同 \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/overview_apache.md b/src/zh/UserGuide/Master/Table/SQL-Manual/overview_apache.md index ea5a1a9cf..4f8ca7579 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/overview_apache.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/overview_apache.md @@ -37,7 +37,7 @@ SELECT ⟨select_list⟩ IoTDB 查询语法提供以下子句: -- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause.md) +- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause_apache.md) - FROM 子句:指出查询的数据源,可以是单个表、多个通过 `JOIN` 子句连接的表,或者是一个子查询。详细语法见:[FROM & JOIN 子句](../SQL-Manual/From-Join-Clause.md) - WHERE 子句:用于过滤数据,只选择满足特定条件的数据行。这个子句在逻辑上紧跟在 FROM 子句之后执行。详细语法见:[WHERE 子句](../SQL-Manual/Where-Clause.md) - GROUP BY 子句:当需要对数据进行聚合时使用,指定了用于分组的列。详细语法见:[GROUP BY 子句](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/overview_timecho.md b/src/zh/UserGuide/Master/Table/SQL-Manual/overview_timecho.md index 7b6fcb458..0cb0fd1d9 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/overview_timecho.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/overview_timecho.md @@ -38,7 +38,7 @@ SELECT ⟨select_list⟩ IoTDB 查询语法提供以下子句: -- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause.md) +- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause_timecho.md) - FROM 子句:指出查询的数据源,可以是单个表、多个通过 `JOIN` 子句连接的表,或者是一个子查询。详细语法见:[FROM & JOIN 子句](../SQL-Manual/From-Join-Clause.md) - WHERE 子句:用于过滤数据,只选择满足特定条件的数据行。这个子句在逻辑上紧跟在 FROM 子句之后执行。详细语法见:[WHERE 子句](../SQL-Manual/Where-Clause.md) - GROUP BY 子句:当需要对数据进行聚合时使用,指定了用于分组的列。详细语法见:[GROUP BY 子句](../SQL-Manual/GroupBy-Clause.md) diff --git 
a/src/zh/UserGuide/Master/Table/User-Manual/Black-White-List_timecho.md b/src/zh/UserGuide/Master/Table/User-Manual/Black-White-List_timecho.md index 740828f99..483be04cd 100644 --- a/src/zh/UserGuide/Master/Table/User-Manual/Black-White-List_timecho.md +++ b/src/zh/UserGuide/Master/Table/User-Manual/Black-White-List_timecho.md @@ -39,7 +39,7 @@ IoTDB 是一款针对物联网场景设计的时间序列数据库,支持高 * 编辑配置文件 `iotdb-system.properties`进行维护 * 通过 set configuration 语句进行维护 - * 表模型请参考:[set configuration](../SQL-Manual/SQL-Maintenance-Statements.md#_2-2-更新配置项) + * 表模型请参考:[set configuration](../SQL-Manual/SQL-Maintenance-Statements_timecho.md#_2-2-更新配置项) 相关参数如下: @@ -60,7 +60,7 @@ IoTDB 是一款针对物联网场景设计的时间序列数据库,支持高 * 编辑配置文件 `iotdb-system.properties`进行维护 * 通过 set configuration 语句进行维护 - * 表模型请参考:[set configuration](../SQL-Manual/SQL-Maintenance-Statements.md#_2-2-更新配置项) + * 表模型请参考:[set configuration](../SQL-Manual/SQL-Maintenance-Statements_timecho.md#_2-2-更新配置项) 相关参数如下: diff --git a/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md b/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md index dd6fa3a4f..213d04f8e 100644 --- a/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md +++ b/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_apache.md @@ -705,7 +705,7 @@ It costs 0.021s ### 3.1 时间过滤条件 -使用时间过滤条件可以筛选特定时间范围的数据。对于时间戳支持的格式,请参考 [时间戳类型](../Background-knowledge/Data-Type.md) 。 +使用时间过滤条件可以筛选特定时间范围的数据。对于时间戳支持的格式,请参考 [时间戳类型](../Background-knowledge/Data-Type_apache.md) 。 示例如下: diff --git a/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md b/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md index 8d8792e24..00becb9df 100644 --- a/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md +++ b/src/zh/UserGuide/Master/Tree/Basic-Concept/Query-Data_timecho.md @@ -705,7 +705,7 @@ It costs 0.021s ### 3.1 时间过滤条件 -使用时间过滤条件可以筛选特定时间范围的数据。对于时间戳支持的格式,请参考 [时间戳类型](../Background-knowledge/Data-Type.md) 。 +使用时间过滤条件可以筛选特定时间范围的数据。对于时间戳支持的格式,请参考 [时间戳类型](../Background-knowledge/Data-Type_timecho.md) 。 示例如下: @@ -2975,7 +2975,7 @@ select s1, s2 into root.sg_copy.d1(t1, t2), aligned root.sg_copy.d2(t1, t2) from #### 其他要注意的点 - 对于一般的聚合查询,时间戳是无意义的,约定使用 0 来存储。 -- 当目标序列存在时,需要保证源序列和目标时间序列的数据类型兼容。关于数据类型的兼容性,查看文档 [数据类型](../Background-knowledge/Data-Type.md#数据类型兼容性)。 +- 当目标序列存在时,需要保证源序列和目标时间序列的数据类型兼容。关于数据类型的兼容性,查看文档 [数据类型](../Background-knowledge/Data-Type_timecho.md#数据类型兼容性)。 - 当目标序列不存在时,系统将自动创建目标序列(包括 database)。 - 当查询的序列不存在或查询的序列不存在数据,则不会自动创建目标序列。 diff --git a/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement.md b/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement.md index 93259a49a..2e0d90de0 100644 --- a/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement.md +++ b/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement.md @@ -1,3 +1,6 @@ +--- +redirectTo: Maintenance-statement_apache.html +--- -# 运维语句 - -## 1. 
状态查看 - -### 1.1 查看连接的模型 - -**含义**:返回当前连接的 sql_dialect 是树模型/表模型。 - -#### 语法: - -```SQL -showCurrentSqlDialectStatement - : SHOW CURRENT_SQL_DIALECT - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW CURRENT_SQL_DIALECT -``` - -执行结果如下: - -```SQL -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TREE| -+-----------------+ -``` - -### 1.2 查看集群版本 - -**含义**:返回当前集群的版本。 - -#### 语法: - -```SQL -showVersionStatement - : SHOW VERSION - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW VERSION -``` - -执行结果如下: - -```SQL -+-------+---------+ -|Version|BuildInfo| -+-------+---------+ -|2.0.1.2| 1ca4008| -+-------+---------+ -``` - -### 1.3 查看集群关键参数 - -**含义**:返回当前集群的关键参数。 - -#### 语法: - -```SQL -showVariablesStatement - : SHOW VARIABLES - ; -``` - -关键参数如下: - -1. **ClusterName**:当前集群的名称。 -2. **DataReplicationFactor**:数据副本的数量,表示每个数据分区(DataRegion)的副本数。 -3. **SchemaReplicationFactor**:元数据副本的数量,表示每个元数据分区(SchemaRegion)的副本数。 -4. **DataRegionConsensusProtocolClass**:数据分区(DataRegion)使用的共识协议类。 -5. **SchemaRegionConsensusProtocolClass**:元数据分区(SchemaRegion)使用的共识协议类。 -6. **ConfigNodeConsensusProtocolClass**:配置节点(ConfigNode)使用的共识协议类。 -7. **TimePartitionOrigin**:数据库时间分区的起始时间戳。 -8. **TimePartitionInterval**:数据库的时间分区间隔(单位:毫秒)。 -9. **ReadConsistencyLevel**:读取操作的一致性级别。 -10. **SchemaRegionPerDataNode**:数据节点(DataNode)上的元数据分区(SchemaRegion)数量。 -11. **DataRegionPerDataNode**:数据节点(DataNode)上的数据分区(DataRegion)数量。 -12. **SeriesSlotNum**:数据分区(DataRegion)的序列槽(SeriesSlot)数量。 -13. **SeriesSlotExecutorClass**:序列槽的实现类。 -14. **DiskSpaceWarningThreshold**:磁盘空间告警阈值(单位:百分比)。 -15. **TimestampPrecision**:时间戳精度。 - -#### 示例: - -```SQL -IoTDB> SHOW VARIABLES -``` - -执行结果如下: - -```SQL -+----------------------------------+-----------------------------------------------------------------+ -| Variable| Value| -+----------------------------------+-----------------------------------------------------------------+ -| ClusterName| defaultCluster| -| DataReplicationFactor| 1| -| SchemaReplicationFactor| 1| -| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| -|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| TimePartitionOrigin| 0| -| TimePartitionInterval| 604800000| -| ReadConsistencyLevel| strong| -| SchemaRegionPerDataNode| 1| -| DataRegionPerDataNode| 0| -| SeriesSlotNum| 1000| -| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| -| DiskSpaceWarningThreshold| 0.05| -| TimestampPrecision| ms| -+----------------------------------+-----------------------------------------------------------------+ -``` - -### 1.4 查看数据库当前时间 - -#### 语法: - -**含义**:返回数据库当前时间。 - -```SQL -showCurrentTimestampStatement - : SHOW CURRENT_TIMESTAMP - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW CURRENT_TIMESTAMP -``` - -执行结果如下: - -```SQL -+-----------------------------+ -| CurrentTimestamp| -+-----------------------------+ -|2025-02-17T11:11:52.987+08:00| -+-----------------------------+ -``` - -### 1.5 查看正在执行的查询信息 - -**含义**:用于显示所有正在执行的查询信息。 - -#### 语法: - -```SQL -showQueriesStatement - : SHOW (QUERIES | QUERY PROCESSLIST) - (WHERE where=booleanExpression)? - (ORDER BY sortItem (',' sortItem)*)? - limitOffsetClause - ; -``` - -**参数解释**: - -1. **WHERE** 子句:需保证过滤的目标列是结果集中存在的列 -2. **ORDER BY** 子句:需保证`sortKey`是结果集中存在的列 -3. **limitOffsetClause**: - - **含义**:用于限制结果集的返回数量。 - - **格式**:`LIMIT , `, `` 是偏移量,`` 是返回的行数。 -4. 
**QUERIES** 表中的列: - - **time**:查询开始的时间戳,时间戳精度与系统精度一致 - - **queryid**:查询语句的 ID - - **datanodeid**:发起查询语句的 DataNode 的ID - - **elapsedtime**:查询的执行耗时,单位是秒 - - **statement**:查询的 SQL 语句 - - -#### 示例: - -```SQL -IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003 -``` - -执行结果如下: - -```SQL -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -| Time| QueryId|DataNodeId|ElapsedTime| Statement| -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1| 1| 0.006|SHOW QUERIES WHERE elapsedtime > 0.003| -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -``` - - -### 1.6 查看分区信息 - -**含义**:返回当前集群的分区信息。 - -#### 语法: - -```SQL -showRegionsStatement - : SHOW REGIONS - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW REGIONS -``` - -执行结果如下: - -```SQL -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -| 9|SchemaRegion|Running|root.__system| 21| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.555| | -| 10| DataRegion|Running|root.__system| 21| 21| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.556| 8.27 KB| -| 65|SchemaRegion|Running| root.ln| 1| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.113| | -| 66| DataRegion|Running| root.ln| 1| 1| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.425| 524 B| -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -``` - -### 1.7 查看可用节点 - -**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 - -> V2.0.8 起支持该功能 - -#### 语法: - -```SQL -showAvailableUrlsStatement - : SHOW AVAILABLE URLS - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW AVAILABLE URLS -``` - -执行结果如下: - -```SQL -+----------+-------+ -|RpcAddress|RpcPort| -+----------+-------+ -| 0.0.0.0| 6667| -+----------+-------+ -``` - - -## 2. 状态设置 - -### 2.1 设置连接的模型 - -**含义**:将当前连接的 sql_dialect 置为树模型/表模型,在树模型和表模型中均可使用该命令。 - -#### 语法: - -```SQL -SET SQL_DIALECT EQ (TABLE | TREE) -``` - -#### 示例: - -```SQL -IoTDB> SET SQL_DIALECT=TREE -IoTDB> SHOW CURRENT_SQL_DIALECT -``` - -执行结果如下: - -```SQL -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TREE| -+-----------------+ -``` - -### 2.2 更新配置项 - -**含义**:用于更新配置项,执行完成后会进行配置项的热加载,对于支持热修改的配置项会立即生效。 - -#### 语法: - -```SQL -setConfigurationStatement - : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? - ; - -propertyAssignments - : property (',' property)* - ; - -property - : identifier EQ propertyValue - ; - -propertyValue - : DEFAULT - | expression - ; -``` - -**参数解释**: - -1. **propertyAssignments** - - **含义**:更新的配置列表,由多个 `property` 组成。 - - 可以更新多个配置列表,用逗号分隔。 - - **取值**: - - `DEFAULT`:将配置项恢复为默认值。 - - `expression`:具体的值,必须是一个字符串。 -2. 
**ON INTEGER_VALUE** - - **含义**:指定要更新配置的节点 ID。 - - **可选性**:可选。如果不指定或指定的值低于 0,则更新所有 ConfigNode 和 DataNode 的配置。 - -#### 示例: - -```SQL -IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1; -``` - -### 2.3 读取手动修改的配置文件 - -**含义**:用于读取手动修改过的配置文件,并对配置项进行热加载,对于支持热修改的配置项会立即生效。 - -#### 语法: - -```SQL -loadConfigurationStatement - : LOAD CONFIGURATION localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **localOrClusterMode** - - **含义**:指定配置热加载的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `LOCAL`:只对客户端直连的 DataNode 进行配置热加载。 - - `CLUSTER`:对集群中所有 DataNode 进行配置热加载。 - -#### 示例: - -```SQL -IoTDB> LOAD CONFIGURATION ON LOCAL; -``` - -### 2.4 设置系统的状态 - -**含义**:用于设置系统的状态。 - -#### 语法: - -```SQL -setSystemStatusStatement - : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **RUNNING | READONLY** - - **含义**:指定系统的新状态。 - - **取值**: - - `RUNNING`:将系统设置为运行状态,允许读写操作。 - - `READONLY`:将系统设置为只读状态,只允许读取操作,禁止写入操作。 -2. **localOrClusterMode** - - **含义**:指定状态变更的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `LOCAL`:仅对客户端直连的 DataNode 生效。 - - `CLUSTER`:对集群中所有 DataNode 生效。 - -#### 示例: - -```SQL -IoTDB> SET SYSTEM TO READONLY ON CLUSTER; -``` - - -## 3. 数据管理 - -### 3.1 刷写内存表中的数据到磁盘 - -**含义**:将内存表中的数据刷写到磁盘上。 - -#### 语法: - -```SQL -flushStatement - : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? - ; - -booleanValue - : TRUE | FALSE - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **identifier** - - **含义**:指定要刷写的路径名称。 - - **可选性**:可选。如果不指定,则默认刷写所有路径。 - - **多个路径**:可以指定多个路径名称,用逗号分隔。例如:`FLUSH root.ln, root.lnm`。 -2. **booleanValue** - - **含义**:指定刷写的内容。 - - **可选性**:可选。如果不指定,则默认刷写顺序和乱序空间的内存。 - - **取值**: - - `TRUE`:只刷写顺序空间的内存表。 - - `FALSE`:只刷写乱序空间的MemTable。 -3. **localOrClusterMode** - - **含义**:指定刷写的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `ON LOCAL`:只刷写客户端直连的 DataNode 上的内存表。 - - `ON CLUSTER`:刷写集群中所有 DataNode 上的内存表。 - -#### 示例: - -```SQL -IoTDB> FLUSH root.ln TRUE ON LOCAL; -``` - -## 4. 数据修复 - -### 4.1 启动后台扫描并修复 tsfile 任务 - -**含义**:启动一个后台任务,开始扫描并修复 tsfile,能够修复数据文件内的时间戳乱序类异常。 - -#### 语法: - -```SQL -startRepairDataStatement - : START REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **localOrClusterMode** - - **含义**:指定数据修复的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `ON LOCAL`:仅对客户端直连的 DataNode 执行。 - - `ON CLUSTER`:对集群中所有 DataNode 执行。 - -#### 示例: - -```SQL -IoTDB> START REPAIR DATA ON CLUSTER; -``` - -### 4.2 暂停后台修复 tsfile 任务 - -**含义**:暂停后台的修复任务,暂停中的任务可通过再次执行 start repair data 命令恢复。 - -#### 语法: - -```SQL -stopRepairDataStatement - : STOP REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **localOrClusterMode** - - **含义**:指定数据修复的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `ON LOCAL`:仅对客户端直连的 DataNode 执行。 - - `ON CLUSTER`:对集群中所有 DataNode 执行。 - -#### 示例: - -```SQL -IoTDB> STOP REPAIR DATA ON CLUSTER; -``` - -## 5. 终止查询 - -### 5.1 主动终止查询 - -**含义**:使用该命令主动地终止查询。 - -#### 语法: - -```SQL -killQueryStatement - : KILL (QUERY queryId=string | ALL QUERIES) - ; -``` - -**参数解释**: - -1. **QUERY queryId=string** - - **含义**:指定要终止的查询的 ID。 `` 是正在执行的查询的唯一标识符。 - - **获取查询 ID**:可以通过 `SHOW QUERIES` 命令获取所有正在执行的查询及其 ID。 -2. 
**ALL QUERIES** - - **含义**:终止所有正在执行的查询。 - -#### 示例: - -通过指定 `queryId` 可以中止指定的查询,为了获取正在执行的查询 id,用户可以使用 show queries 命令,该命令将显示所有正在执行的查询列表。 - -```SQL -IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query -IoTDB> KILL ALL QUERIES; -- 终止所有query -``` \ No newline at end of file +--> \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement_apache.md b/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement_apache.md new file mode 100644 index 000000000..45b37b014 --- /dev/null +++ b/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement_apache.md @@ -0,0 +1,558 @@ + +# 运维语句 + +## 1. 状态查看 + +### 1.1 查看连接的模型 + +**含义**:返回当前连接的 sql_dialect 是树模型/表模型。 + +#### 语法: + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT +``` + +执行结果如下: + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 1.2 查看集群版本 + +**含义**:返回当前集群的版本。 + +#### 语法: + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW VERSION +``` + +执行结果如下: + +```SQL ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.3 查看集群关键参数 + +**含义**:返回当前集群的关键参数。 + +#### 语法: + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +关键参数如下: + +1. **ClusterName**:当前集群的名称。 +2. **DataReplicationFactor**:数据副本的数量,表示每个数据分区(DataRegion)的副本数。 +3. **SchemaReplicationFactor**:元数据副本的数量,表示每个元数据分区(SchemaRegion)的副本数。 +4. **DataRegionConsensusProtocolClass**:数据分区(DataRegion)使用的共识协议类。 +5. **SchemaRegionConsensusProtocolClass**:元数据分区(SchemaRegion)使用的共识协议类。 +6. **ConfigNodeConsensusProtocolClass**:配置节点(ConfigNode)使用的共识协议类。 +7. **TimePartitionOrigin**:数据库时间分区的起始时间戳。 +8. **TimePartitionInterval**:数据库的时间分区间隔(单位:毫秒)。 +9. **ReadConsistencyLevel**:读取操作的一致性级别。 +10. **SchemaRegionPerDataNode**:数据节点(DataNode)上的元数据分区(SchemaRegion)数量。 +11. **DataRegionPerDataNode**:数据节点(DataNode)上的数据分区(DataRegion)数量。 +12. **SeriesSlotNum**:数据分区(DataRegion)的序列槽(SeriesSlot)数量。 +13. **SeriesSlotExecutorClass**:序列槽的实现类。 +14. **DiskSpaceWarningThreshold**:磁盘空间告警阈值(单位:百分比)。 +15. 
**TimestampPrecision**:时间戳精度。
+
+#### 示例:
+
+```SQL
+IoTDB> SHOW VARIABLES
+```
+
+执行结果如下:
+
+```SQL
++----------------------------------+-----------------------------------------------------------------+
+|                          Variable|                                                            Value|
++----------------------------------+-----------------------------------------------------------------+
+|                       ClusterName|                                                   defaultCluster|
+|             DataReplicationFactor|                                                                1|
+|           SchemaReplicationFactor|                                                                1|
+|  DataRegionConsensusProtocolClass|                      org.apache.iotdb.consensus.iot.IoTConsensus|
+|SchemaRegionConsensusProtocolClass|                  org.apache.iotdb.consensus.ratis.RatisConsensus|
+|  ConfigNodeConsensusProtocolClass|                  org.apache.iotdb.consensus.ratis.RatisConsensus|
+|               TimePartitionOrigin|                                                                0|
+|             TimePartitionInterval|                                                        604800000|
+|              ReadConsistencyLevel|                                                           strong|
+|           SchemaRegionPerDataNode|                                                                1|
+|             DataRegionPerDataNode|                                                                0|
+|                     SeriesSlotNum|                                                             1000|
+|           SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor|
+|         DiskSpaceWarningThreshold|                                                             0.05|
+|                TimestampPrecision|                                                               ms|
++----------------------------------+-----------------------------------------------------------------+
+```
+
+### 1.4 查看数据库当前时间
+
+**含义**:返回数据库当前时间。
+
+#### 语法:
+
+```SQL
+showCurrentTimestampStatement
+    : SHOW CURRENT_TIMESTAMP
+    ;
+```
+
+#### 示例:
+
+```SQL
+IoTDB> SHOW CURRENT_TIMESTAMP
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+
+|             CurrentTimestamp|
++-----------------------------+
+|2025-02-17T11:11:52.987+08:00|
++-----------------------------+
+```
+
+### 1.5 查看正在执行的查询信息
+
+**含义**:用于显示所有正在执行的查询信息。
+
+#### 语法:
+
+```SQL
+showQueriesStatement
+    : SHOW (QUERIES | QUERY PROCESSLIST)
+        (WHERE where=booleanExpression)?
+        (ORDER BY sortItem (',' sortItem)*)?
+        limitOffsetClause
+    ;
+```
+
+**参数解释**:
+
+1. **WHERE** 子句:需保证过滤的目标列是结果集中存在的列
+2. **ORDER BY** 子句:需保证`sortKey`是结果集中存在的列
+3. **limitOffsetClause**:
+   - **含义**:用于限制结果集的返回数量。
+   - **格式**:`LIMIT <offset>, <row_count>`,`<offset>` 是偏移量,`<row_count>` 是返回的行数。
+4. **QUERIES** 表中的列:
+   - **time**:查询开始的时间戳,时间戳精度与系统精度一致
+   - **queryid**:查询语句的 ID
+   - **datanodeid**:发起查询语句的 DataNode 的 ID
+   - **elapsedtime**:查询的执行耗时,单位是秒
+   - **statement**:查询的 SQL 语句
+
+#### 示例:
+
+```SQL
+IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+-----------------------+----------+-----------+--------------------------------------+
+|                         Time|                QueryId|DataNodeId|ElapsedTime|                             Statement|
++-----------------------------+-----------------------+----------+-----------+--------------------------------------+
+|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1|         1|      0.006|SHOW QUERIES WHERE elapsedtime > 0.003|
++-----------------------------+-----------------------+----------+-----------+--------------------------------------+
+```
+
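+补充一个结合 ORDER BY 与 LIMIT 的示例(按上述语法拼写;此处假定 sortItem 支持常规的 DESC 写法,结果内容取决于执行时正在运行的查询,仅作示意):
+
+```SQL
+-- 按执行耗时降序排序,跳过 0 行,只返回最慢的 5 条正在执行的查询
+IoTDB> SHOW QUERIES ORDER BY elapsedtime DESC LIMIT 0, 5
+```
+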
+### 1.6 查看分区信息
+
+**含义**:返回当前集群的分区信息。
+
+#### 语法:
+
+```SQL
+showRegionsStatement
+    : SHOW REGIONS
+    ;
+```
+
+#### 示例:
+
+```SQL
+IoTDB> SHOW REGIONS
+```
+
+执行结果如下:
+
+```SQL
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|RegionId|        Type| Status|     Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress|  Role|             CreateTime|TsFileSize|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|       9|SchemaRegion|Running|root.__system|           21|          0|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.555|          |
+|      10|  DataRegion|Running|root.__system|           21|         21|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.556|   8.27 KB|
+|      65|SchemaRegion|Running|      root.ln|            1|          0|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-25T14:46:50.113|          |
+|      66|  DataRegion|Running|      root.ln|            1|          1|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-25T14:46:50.425|     524 B|
++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+```
+
+### 1.7 查看可用节点
+
+**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于"可用"的定义为:处于非 REMOVING 状态的 DataNode 节点。
+
+> V2.0.8-beta 起支持该功能
+
+#### 语法:
+
+```SQL
+showAvailableUrlsStatement
+    : SHOW AVAILABLE URLS
+    ;
+```
+
+#### 示例:
+
+```SQL
+IoTDB> SHOW AVAILABLE URLS
+```
+
+执行结果如下:
+
+```SQL
++----------+-------+
+|RpcAddress|RpcPort|
++----------+-------+
+|   0.0.0.0|   6667|
++----------+-------+
+```
+
+
+## 2. 状态设置
+
+### 2.1 设置连接的模型
+
+**含义**:将当前连接的 sql_dialect 置为树模型/表模型,在树模型和表模型中均可使用该命令。
+
+#### 语法:
+
+```SQL
+SET SQL_DIALECT EQ (TABLE | TREE)
+```
+
+#### 示例:
+
+```SQL
+IoTDB> SET SQL_DIALECT=TREE
+IoTDB> SHOW CURRENT_SQL_DIALECT
+```
+
+执行结果如下:
+
+```SQL
++-----------------+
+|CurrentSqlDialect|
++-----------------+
+|             TREE|
++-----------------+
+```
+
+### 2.2 更新配置项
+
+**含义**:用于更新配置项,执行完成后会进行配置项的热加载,对于支持热修改的配置项会立即生效。
+
+#### 语法:
+
+```SQL
+setConfigurationStatement
+    : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)?
+    ;
+
+propertyAssignments
+    : property (',' property)*
+    ;
+
+property
+    : identifier EQ propertyValue
+    ;
+
+propertyValue
+    : DEFAULT
+    | expression
+    ;
+```
+
+**参数解释**:
+
+1. **propertyAssignments**
+   - **含义**:更新的配置列表,由多个 `property` 组成。
+   - 可以一次更新多个配置项,用逗号分隔。
+   - **取值**:
+     - `DEFAULT`:将配置项恢复为默认值。
+     - `expression`:具体的值,必须是一个字符串。
+2. **ON INTEGER_VALUE**
+   - **含义**:指定要更新配置的节点 ID。
+   - **可选性**:可选。如果不指定或指定的值低于 0,则更新所有 ConfigNode 和 DataNode 的配置。
+
+#### 示例:
+
+```SQL
+IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1;
+```
+
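+`DEFAULT` 取值的补充示例(配置项名称沿用上例;不指定 ON 时按上文所述对所有节点生效,仅作示意):
+
+```SQL
+-- 将心跳间隔恢复为默认值,并热加载到所有 ConfigNode 和 DataNode
+IoTDB> SET CONFIGURATION 'heartbeat_interval_in_ms'=DEFAULT;
+```
+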
+### 2.3 读取手动修改的配置文件
+
+**含义**:用于读取手动修改过的配置文件,并对配置项进行热加载,对于支持热修改的配置项会立即生效。
+
+#### 语法:
+
+```SQL
+loadConfigurationStatement
+    : LOAD CONFIGURATION localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**参数解释**:
+
+1. **localOrClusterMode**
+   - **含义**:指定配置热加载的范围。
+   - **可选性**:可选。默认值为 `CLUSTER`。
+   - **取值**:
+     - `LOCAL`:只对客户端直连的 DataNode 进行配置热加载。
+     - `CLUSTER`:对集群中所有 DataNode 进行配置热加载。
+
+#### 示例:
+
+```SQL
+IoTDB> LOAD CONFIGURATION ON LOCAL;
+```
+
+### 2.4 设置系统的状态
+
+**含义**:用于设置系统的状态。
+
+#### 语法:
+
+```SQL
+setSystemStatusStatement
+    : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**参数解释**:
+
+1. **RUNNING | READONLY**
+   - **含义**:指定系统的新状态。
+   - **取值**:
+     - `RUNNING`:将系统设置为运行状态,允许读写操作。
+     - `READONLY`:将系统设置为只读状态,只允许读取操作,禁止写入操作。
+2. **localOrClusterMode**
+   - **含义**:指定状态变更的范围。
+   - **可选性**:可选。默认值为 `CLUSTER`。
+   - **取值**:
+     - `LOCAL`:仅对客户端直连的 DataNode 生效。
+     - `CLUSTER`:对集群中所有 DataNode 生效。
+
+#### 示例:
+
+```SQL
+IoTDB> SET SYSTEM TO READONLY ON CLUSTER;
+```
+
+
+## 3. 数据管理
+
+### 3.1 刷写内存表中的数据到磁盘
+
+**含义**:将内存表中的数据刷写到磁盘上。
+
+#### 语法:
+
+```SQL
+flushStatement
+    : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode?
+    ;
+
+booleanValue
+    : TRUE | FALSE
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**参数解释**:
+
+1. **identifier**
+   - **含义**:指定要刷写的路径名称。
+   - **可选性**:可选。如果不指定,则默认刷写所有路径。
+   - **多个路径**:可以指定多个路径名称,用逗号分隔。例如:`FLUSH root.ln, root.lnm`。
+2. **booleanValue**
+   - **含义**:指定刷写的内容。
+   - **可选性**:可选。如果不指定,则默认刷写顺序和乱序空间的内存表。
+   - **取值**:
+     - `TRUE`:只刷写顺序空间的内存表。
+     - `FALSE`:只刷写乱序空间的内存表。
+3. **localOrClusterMode**
+   - **含义**:指定刷写的范围。
+   - **可选性**:可选。默认值为 `CLUSTER`。
+   - **取值**:
+     - `ON LOCAL`:只刷写客户端直连的 DataNode 上的内存表。
+     - `ON CLUSTER`:刷写集群中所有 DataNode 上的内存表。
+
+#### 示例:
+
+```SQL
+IoTDB> FLUSH root.ln TRUE ON LOCAL;
+```
+
+## 4. 数据修复
+
+### 4.1 启动后台扫描并修复 tsfile 任务
+
+**含义**:启动一个后台任务,开始扫描并修复 tsfile,能够修复数据文件内的时间戳乱序类异常。
+
+#### 语法:
+
+```SQL
+startRepairDataStatement
+    : START REPAIR DATA localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**参数解释**:
+
+1. **localOrClusterMode**
+   - **含义**:指定数据修复的范围。
+   - **可选性**:可选。默认值为 `CLUSTER`。
+   - **取值**:
+     - `ON LOCAL`:仅对客户端直连的 DataNode 执行。
+     - `ON CLUSTER`:对集群中所有 DataNode 执行。
+
+#### 示例:
+
+```SQL
+IoTDB> START REPAIR DATA ON CLUSTER;
+```
+
+### 4.2 暂停后台修复 tsfile 任务
+
+**含义**:暂停后台的修复任务,暂停中的任务可通过再次执行 start repair data 命令恢复。
+
+#### 语法:
+
+```SQL
+stopRepairDataStatement
+    : STOP REPAIR DATA localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**参数解释**:
+
+1. **localOrClusterMode**
+   - **含义**:指定数据修复的范围。
+   - **可选性**:可选。默认值为 `CLUSTER`。
+   - **取值**:
+     - `ON LOCAL`:仅对客户端直连的 DataNode 执行。
+     - `ON CLUSTER`:对集群中所有 DataNode 执行。
+
+#### 示例:
+
+```SQL
+IoTDB> STOP REPAIR DATA ON CLUSTER;
+```
+
+## 5. 终止查询
+
+### 5.1 主动终止查询
+
+**含义**:使用该命令主动地终止查询。
+
+#### 语法:
+
+```SQL
+killQueryStatement
+    : KILL (QUERY queryId=string | ALL QUERIES)
+    ;
+```
+
+**参数解释**:
+
+1. **QUERY queryId=string**
+   - **含义**:指定要终止的查询的 ID。`<queryId>` 是正在执行的查询的唯一标识符。
+   - **获取查询 ID**:可以通过 `SHOW QUERIES` 命令获取所有正在执行的查询及其 ID。
+2. 
**ALL QUERIES** + - **含义**:终止所有正在执行的查询。 + +#### 示例: + +通过指定 `queryId` 可以中止指定的查询,为了获取正在执行的查询 id,用户可以使用 show queries 命令,该命令将显示所有正在执行的查询列表。 + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query +IoTDB> KILL ALL QUERIES; -- 终止所有query +``` \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement_timecho.md b/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement_timecho.md new file mode 100644 index 000000000..93259a49a --- /dev/null +++ b/src/zh/UserGuide/Master/Tree/User-Manual/Maintenance-statement_timecho.md @@ -0,0 +1,558 @@ + +# 运维语句 + +## 1. 状态查看 + +### 1.1 查看连接的模型 + +**含义**:返回当前连接的 sql_dialect 是树模型/表模型。 + +#### 语法: + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT +``` + +执行结果如下: + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 1.2 查看集群版本 + +**含义**:返回当前集群的版本。 + +#### 语法: + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW VERSION +``` + +执行结果如下: + +```SQL ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.3 查看集群关键参数 + +**含义**:返回当前集群的关键参数。 + +#### 语法: + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +关键参数如下: + +1. **ClusterName**:当前集群的名称。 +2. **DataReplicationFactor**:数据副本的数量,表示每个数据分区(DataRegion)的副本数。 +3. **SchemaReplicationFactor**:元数据副本的数量,表示每个元数据分区(SchemaRegion)的副本数。 +4. **DataRegionConsensusProtocolClass**:数据分区(DataRegion)使用的共识协议类。 +5. **SchemaRegionConsensusProtocolClass**:元数据分区(SchemaRegion)使用的共识协议类。 +6. **ConfigNodeConsensusProtocolClass**:配置节点(ConfigNode)使用的共识协议类。 +7. **TimePartitionOrigin**:数据库时间分区的起始时间戳。 +8. **TimePartitionInterval**:数据库的时间分区间隔(单位:毫秒)。 +9. **ReadConsistencyLevel**:读取操作的一致性级别。 +10. **SchemaRegionPerDataNode**:数据节点(DataNode)上的元数据分区(SchemaRegion)数量。 +11. **DataRegionPerDataNode**:数据节点(DataNode)上的数据分区(DataRegion)数量。 +12. **SeriesSlotNum**:数据分区(DataRegion)的序列槽(SeriesSlot)数量。 +13. **SeriesSlotExecutorClass**:序列槽的实现类。 +14. **DiskSpaceWarningThreshold**:磁盘空间告警阈值(单位:百分比)。 +15. 
**TimestampPrecision**:时间戳精度。
+
+#### 示例:
+
+```SQL
+IoTDB> SHOW VARIABLES
+```
+
+执行结果如下:
+
+```SQL
++----------------------------------+-----------------------------------------------------------------+
+| Variable| Value|
++----------------------------------+-----------------------------------------------------------------+
+| ClusterName| defaultCluster|
+| DataReplicationFactor| 1|
+| SchemaReplicationFactor| 1|
+| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus|
+|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus|
+| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus|
+| TimePartitionOrigin| 0|
+| TimePartitionInterval| 604800000|
+| ReadConsistencyLevel| strong|
+| SchemaRegionPerDataNode| 1|
+| DataRegionPerDataNode| 0|
+| SeriesSlotNum| 1000|
+| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor|
+| DiskSpaceWarningThreshold| 0.05|
+| TimestampPrecision| ms|
++----------------------------------+-----------------------------------------------------------------+
+```
+
+### 1.4 查看数据库当前时间
+
+**含义**:返回数据库当前时间。
+
+#### 语法:
+
+```SQL
+showCurrentTimestampStatement
+ : SHOW CURRENT_TIMESTAMP
+ ;
+```
+
+#### 示例:
+
+```SQL
+IoTDB> SHOW CURRENT_TIMESTAMP
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+
+| CurrentTimestamp|
++-----------------------------+
+|2025-02-17T11:11:52.987+08:00|
++-----------------------------+
+```
+
+### 1.5 查看正在执行的查询信息
+
+**含义**:用于显示所有正在执行的查询信息。
+
+#### 语法:
+
+```SQL
+showQueriesStatement
+ : SHOW (QUERIES | QUERY PROCESSLIST)
+ (WHERE where=booleanExpression)?
+ (ORDER BY sortItem (',' sortItem)*)?
+ limitOffsetClause
+ ;
+```
+
+**参数解释**:
+
+1. **WHERE** 子句:需保证过滤的目标列是结果集中存在的列。
+2. **ORDER BY** 子句:需保证`sortKey`是结果集中存在的列。
+3. **limitOffsetClause**:
+ - **含义**:用于限制结果集的返回数量。
+ - **格式**:`LIMIT <offset>, <row_count>`,其中 `<offset>` 是偏移量,`<row_count>` 是返回的行数。
+4. 
**QUERIES** 表中的列: + - **time**:查询开始的时间戳,时间戳精度与系统精度一致 + - **queryid**:查询语句的 ID + - **datanodeid**:发起查询语句的 DataNode 的ID + - **elapsedtime**:查询的执行耗时,单位是秒 + - **statement**:查询的 SQL 语句 + + +#### 示例: + +```SQL +IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003 +``` + +执行结果如下: + +```SQL ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +| Time| QueryId|DataNodeId|ElapsedTime| Statement| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1| 1| 0.006|SHOW QUERIES WHERE elapsedtime > 0.003| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +``` + + +### 1.6 查看分区信息 + +**含义**:返回当前集群的分区信息。 + +#### 语法: + +```SQL +showRegionsStatement + : SHOW REGIONS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW REGIONS +``` + +执行结果如下: + +```SQL ++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| ++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +| 9|SchemaRegion|Running|root.__system| 21| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.555| | +| 10| DataRegion|Running|root.__system| 21| 21| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.556| 8.27 KB| +| 65|SchemaRegion|Running| root.ln| 1| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.113| | +| 66| DataRegion|Running| root.ln| 1| 1| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.425| 524 B| ++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +``` + +### 1.7 查看可用节点 + +**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 + +> V2.0.8 起支持该功能 + +#### 语法: + +```SQL +showAvailableUrlsStatement + : SHOW AVAILABLE URLS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW AVAILABLE URLS +``` + +执行结果如下: + +```SQL ++----------+-------+ +|RpcAddress|RpcPort| ++----------+-------+ +| 0.0.0.0| 6667| ++----------+-------+ +``` + + +## 2. 状态设置 + +### 2.1 设置连接的模型 + +**含义**:将当前连接的 sql_dialect 置为树模型/表模型,在树模型和表模型中均可使用该命令。 + +#### 语法: + +```SQL +SET SQL_DIALECT EQ (TABLE | TREE) +``` + +#### 示例: + +```SQL +IoTDB> SET SQL_DIALECT=TREE +IoTDB> SHOW CURRENT_SQL_DIALECT +``` + +执行结果如下: + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 2.2 更新配置项 + +**含义**:用于更新配置项,执行完成后会进行配置项的热加载,对于支持热修改的配置项会立即生效。 + +#### 语法: + +```SQL +setConfigurationStatement + : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? + ; + +propertyAssignments + : property (',' property)* + ; + +property + : identifier EQ propertyValue + ; + +propertyValue + : DEFAULT + | expression + ; +``` + +**参数解释**: + +1. **propertyAssignments** + - **含义**:更新的配置列表,由多个 `property` 组成。 + - 可以更新多个配置列表,用逗号分隔。 + - **取值**: + - `DEFAULT`:将配置项恢复为默认值。 + - `expression`:具体的值,必须是一个字符串。 +2. 
**ON INTEGER_VALUE**
+    - **含义**:指定要更新配置的节点 ID。
+    - **可选性**:可选。如果不指定或指定的值小于 0,则更新所有 ConfigNode 和 DataNode 的配置。
+
+#### 示例:
+
+```SQL
+IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1;
+```
+
+### 2.3 读取手动修改的配置文件
+
+**含义**:用于读取手动修改过的配置文件,并对配置项进行热加载,对于支持热修改的配置项会立即生效。
+
+#### 语法:
+
+```SQL
+loadConfigurationStatement
+ : LOAD CONFIGURATION localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**参数解释**:
+
+1. **localOrClusterMode**
+ - **含义**:指定配置热加载的范围。
+ - **可选性**:可选。默认值为 `CLUSTER`。
+ - **取值**:
+ - `LOCAL`:只对客户端直连的 DataNode 进行配置热加载。
+ - `CLUSTER`:对集群中所有 DataNode 进行配置热加载。
+
+#### 示例:
+
+```SQL
+IoTDB> LOAD CONFIGURATION ON LOCAL;
+```
+
+### 2.4 设置系统的状态
+
+**含义**:用于设置系统的状态。
+
+#### 语法:
+
+```SQL
+setSystemStatusStatement
+ : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**参数解释**:
+
+1. **RUNNING | READONLY**
+ - **含义**:指定系统的新状态。
+ - **取值**:
+ - `RUNNING`:将系统设置为运行状态,允许读写操作。
+ - `READONLY`:将系统设置为只读状态,只允许读取操作,禁止写入操作。
+2. **localOrClusterMode**
+ - **含义**:指定状态变更的范围。
+ - **可选性**:可选。默认值为 `CLUSTER`。
+ - **取值**:
+ - `LOCAL`:仅对客户端直连的 DataNode 生效。
+ - `CLUSTER`:对集群中所有 DataNode 生效。
+
+#### 示例:
+
+```SQL
+IoTDB> SET SYSTEM TO READONLY ON CLUSTER;
+```
+
+
+## 3. 数据管理
+
+### 3.1 刷写内存表中的数据到磁盘
+
+**含义**:将内存表中的数据刷写到磁盘上。
+
+#### 语法:
+
+```SQL
+flushStatement
+ : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode?
+ ;
+
+booleanValue
+ : TRUE | FALSE
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**参数解释**:
+
+1. **identifier**
+ - **含义**:指定要刷写的路径名称。
+ - **可选性**:可选。如果不指定,则默认刷写所有路径。
+ - **多个路径**:可以指定多个路径名称,用逗号分隔。例如:`FLUSH root.ln, root.lnm`。
+2. **booleanValue**
+ - **含义**:指定刷写的内容。
+ - **可选性**:可选。如果不指定,则默认刷写顺序和乱序空间的内存表。
+ - **取值**:
+ - `TRUE`:只刷写顺序空间的内存表。
+ - `FALSE`:只刷写乱序空间的内存表。
+3. **localOrClusterMode**
+ - **含义**:指定刷写的范围。
+ - **可选性**:可选。默认值为 `CLUSTER`。
+ - **取值**:
+ - `ON LOCAL`:只刷写客户端直连的 DataNode 上的内存表。
+ - `ON CLUSTER`:刷写集群中所有 DataNode 上的内存表。
+
+#### 示例:
+
+```SQL
+IoTDB> FLUSH root.ln TRUE ON LOCAL;
+```
+
+## 4. 数据修复
+
+### 4.1 启动后台扫描并修复 tsfile 任务
+
+**含义**:启动一个后台任务,开始扫描并修复 tsfile,能够修复数据文件内的时间戳乱序类异常。
+
+#### 语法:
+
+```SQL
+startRepairDataStatement
+ : START REPAIR DATA localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**参数解释**:
+
+1. **localOrClusterMode**
+ - **含义**:指定数据修复的范围。
+ - **可选性**:可选。默认值为 `CLUSTER`。
+ - **取值**:
+ - `ON LOCAL`:仅对客户端直连的 DataNode 执行。
+ - `ON CLUSTER`:对集群中所有 DataNode 执行。
+
+#### 示例:
+
+```SQL
+IoTDB> START REPAIR DATA ON CLUSTER;
+```
+
+### 4.2 暂停后台修复 tsfile 任务
+
+**含义**:暂停后台的修复任务,暂停中的任务可通过再次执行 `START REPAIR DATA` 命令恢复。
+
+#### 语法:
+
+```SQL
+stopRepairDataStatement
+ : STOP REPAIR DATA localOrClusterMode?
+ ;
+
+localOrClusterMode
+ : (ON (LOCAL | CLUSTER))
+ ;
+```
+
+**参数解释**:
+
+1. **localOrClusterMode**
+ - **含义**:指定数据修复的范围。
+ - **可选性**:可选。默认值为 `CLUSTER`。
+ - **取值**:
+ - `ON LOCAL`:仅对客户端直连的 DataNode 执行。
+ - `ON CLUSTER`:对集群中所有 DataNode 执行。
+
+#### 示例:
+
+```SQL
+IoTDB> STOP REPAIR DATA ON CLUSTER;
+```
+
+## 5. 终止查询
+
+### 5.1 主动终止查询
+
+**含义**:使用该命令主动地终止查询。
+
+#### 语法:
+
+```SQL
+killQueryStatement
+ : KILL (QUERY queryId=string | ALL QUERIES)
+ ;
+```
+
+**参数解释**:
+
+1. **QUERY queryId=string**
+ - **含义**:指定要终止的查询的 ID。`<queryId>` 是正在执行的查询的唯一标识符。
+ - **获取查询 ID**:可以通过 `SHOW QUERIES` 命令获取所有正在执行的查询及其 ID。
+2. 
**ALL QUERIES** + - **含义**:终止所有正在执行的查询。 + +#### 示例: + +通过指定 `queryId` 可以中止指定的查询,为了获取正在执行的查询 id,用户可以使用 show queries 命令,该命令将显示所有正在执行的查询列表。 + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query +IoTDB> KILL ALL QUERIES; -- 终止所有query +``` \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/API/Programming-Java-Native-API_timecho.md b/src/zh/UserGuide/latest-Table/API/Programming-Java-Native-API_timecho.md index e71d71f94..bb3134ee6 100644 --- a/src/zh/UserGuide/latest-Table/API/Programming-Java-Native-API_timecho.md +++ b/src/zh/UserGuide/latest-Table/API/Programming-Java-Native-API_timecho.md @@ -68,7 +68,7 @@ ITableSession接口定义了与IoTDB交互的基本操作,可以执行数据 **关于 Object 数据类型的说明:** -自 V2.0.8-beta 起,`iTableSession.insert(Tablet tablet)`接口支持将单个 Object 类文件拆成多段后按顺序分段写入。当 Tablet 数据结构中列数据类型为 **`TSDataType.Object`​ ​**时,需要使用如下方法向 Tablet 填值。 +自 V2.0.8 起,`iTableSession.insert(Tablet tablet)`接口支持将单个 Object 类文件拆成多段后按顺序分段写入。当 Tablet 数据结构中列数据类型为 **`TSDataType.Object`​ ​**时,需要使用如下方法向 Tablet 填值。 ```Java /* diff --git a/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type.md b/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type.md index 1f2347c2c..49bc408ab 100644 --- a/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type.md +++ b/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type.md @@ -1,3 +1,6 @@ +--- +redirectTo: Data-Type_apache.html +--- - -# 数据类型 - -## 1. 基本数据类型 - -IoTDB 支持以下十种数据类型: - -* BOOLEAN(布尔值) -* INT32(整型) -* INT64(长整型) -* FLOAT(单精度浮点数) -* DOUBLE(双精度浮点数) -* TEXT(长字符串) -* STRING(字符串) -* BLOB(大二进制对象) -* OBJECT(大二进制对象) - > V2.0.8-beta 版本起支持 -* TIMESTAMP(时间戳) -* DATE(日期) - -其中: -1. STRING 和 TEXT 类型的区别在于,STRING 类型具有更多的统计信息,能够用于优化值过滤查询。TEXT 类型适合用于存储长字符串。 -2. OBJECT 和 BLOB 类型的区别如下: - - | | **OBJECT** | **BLOB** | - | ---------------------- |-------------------------------------------------------------------------------------------------------------------------| -------------------------------------------- | - | 写放大(越低越好) | 低(写放大系数永远为 1) | 高(写放大系数为 2 + 合并次数) | - | 空间放大(越低越好) | 低(merge & release on write) | 高(merge on read and release on compact) | - | 查询结果 | 默认查询 OBJECT 列时,返回结果如`(Object) XX.XX KB)`。
真实 OBJECT 数据存储路径位于:`${data_dir}/object_data`,可通过 `READ_OBJECT` 函数读取其真实内容 | 直接返回真实的二进制内容 | - - -### 1.1 浮点数精度配置 - -对于 **FLOAT** 与 **DOUBLE** 类型的序列,如果编码方式采用 `RLE`或 `TS_2DIFF`,可以在创建序列时通过 `MAX_POINT_NUMBER` 属性指定浮点数的小数点后位数。 - -例如, -```sql -CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; -``` - -若不指定,系统会按照配置文件 `iotdb-system.properties` 中的 [float_precision](../Reference/System-Config-Manual.md) 项配置(默认为 2 位)。 - -### 1.2 数据类型兼容性 - -当写入数据的类型与序列注册的数据类型不一致时, -- 如果序列数据类型不兼容写入数据类型,系统会给出错误提示。 -- 如果序列数据类型兼容写入数据类型,系统会进行数据类型的自动转换,将写入的数据类型更正为注册序列的类型。 - -各数据类型的兼容情况如下表所示: - -| 序列数据类型 | 支持的写入数据类型 | -|-----------|------------------------------------| -| BOOLEAN | BOOLEAN | -| INT32 | INT32 | -| INT64 | INT32 INT64 TIMESTAMP | -| FLOAT | INT32 FLOAT | -| DOUBLE | INT32 INT64 FLOAT DOUBLE TIMESTAMP | -| TEXT | TEXT STRING | -| STRING | TEXT STRING | -| BLOB | TEXT STRING BLOB | -| OBJECT | OBJECT | -| TIMESTAMP | INT32 INT64 TIMESTAMP | -| DATE | DATE | - -## 2. 时间戳类型 - -时间戳是一个数据到来的时间点,其中包括绝对时间戳和相对时间戳。 - -### 2.1 绝对时间戳 - -IOTDB 中绝对时间戳分为二种,一种为 LONG 类型,一种为 DATETIME 类型(包含 DATETIME-INPUT, DATETIME-DISPLAY 两个小类)。 - -在用户在输入时间戳时,可以使用 LONG 类型的时间戳或 DATETIME-INPUT 类型的时间戳,其中 DATETIME-INPUT 类型的时间戳支持格式如表所示: - -
- -**DATETIME-INPUT 类型支持格式** - - -| format | -| :--------------------------- | -| yyyy-MM-dd HH:mm:ss | -| yyyy/MM/dd HH:mm:ss | -| yyyy.MM.dd HH:mm:ss | -| yyyy-MM-dd HH:mm:ssZZ | -| yyyy/MM/dd HH:mm:ssZZ | -| yyyy.MM.dd HH:mm:ssZZ | -| yyyy/MM/dd HH:mm:ss.SSS | -| yyyy-MM-dd HH:mm:ss.SSS | -| yyyy.MM.dd HH:mm:ss.SSS | -| yyyy-MM-dd HH:mm:ss.SSSZZ | -| yyyy/MM/dd HH:mm:ss.SSSZZ | -| yyyy.MM.dd HH:mm:ss.SSSZZ | -| ISO8601 standard time format | - - -
- - -IoTDB 在显示时间戳时可以支持 LONG 类型以及 DATETIME-DISPLAY 类型,其中 DATETIME-DISPLAY 类型可以支持用户自定义时间格式。自定义时间格式的语法如表所示: - -
- -**DATETIME-DISPLAY 自定义时间格式的语法** - - -| Symbol | Meaning | Presentation | Examples | -| :----: | :-------------------------: | :----------: | :--------------------------------: | -| G | era | era | era | -| C | century of era (>=0) | number | 20 | -| Y | year of era (>=0) | year | 1996 | -| | | | | -| x | weekyear | year | 1996 | -| w | week of weekyear | number | 27 | -| e | day of week | number | 2 | -| E | day of week | text | Tuesday; Tue | -| | | | | -| y | year | year | 1996 | -| D | day of year | number | 189 | -| M | month of year | month | July; Jul; 07 | -| d | day of month | number | 10 | -| | | | | -| a | halfday of day | text | PM | -| K | hour of halfday (0~11) | number | 0 | -| h | clockhour of halfday (1~12) | number | 12 | -| | | | | -| H | hour of day (0~23) | number | 0 | -| k | clockhour of day (1~24) | number | 24 | -| m | minute of hour | number | 30 | -| s | second of minute | number | 55 | -| S | fraction of second | millis | 978 | -| | | | | -| z | time zone | text | Pacific Standard Time; PST | -| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | -| | | | | -| ' | escape for text | delimiter | | -| '' | single quote | literal | ' | - -
- -### 2.2 相对时间戳 - - 相对时间是指与服务器时间```now()```和```DATETIME```类型时间相差一定时间间隔的时间。 - 形式化定义为: - - ``` - Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ - RelativeTime = (now() | DATETIME) ((+|-) Duration)+ - ``` - -
- - **The syntax of the duration unit** - - - | Symbol | Meaning | Presentation | Examples | - | :----: | :---------: | :----------------------: | :------: | - | y | year | 1y=365 days | 1y | - | mo | month | 1mo=30 days | 1mo | - | w | week | 1w=7 days | 1w | - | d | day | 1d=1 day | 1d | - | | | | | - | h | hour | 1h=3600 seconds | 1h | - | m | minute | 1m=60 seconds | 1m | - | s | second | 1s=1 second | 1s | - | | | | | - | ms | millisecond | 1ms=1000_000 nanoseconds | 1ms | - | us | microsecond | 1us=1000 nanoseconds | 1us | - | ns | nanosecond | 1ns=1 nanosecond | 1ns | - -
- - 例子: - - ``` - now() - 1d2h //比服务器时间早 1 天 2 小时的时间 - now() - 1w //比服务器时间早 1 周的时间 - ``` - - > 注意:'+'和'-'的左右两边必须有空格 \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type_apache.md b/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type_apache.md new file mode 100644 index 000000000..1ee9da48d --- /dev/null +++ b/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type_apache.md @@ -0,0 +1,200 @@ + + +# 数据类型 + +## 1. 基本数据类型 + +IoTDB 支持以下十种数据类型: + +* BOOLEAN(布尔值) +* INT32(整型) +* INT64(长整型) +* FLOAT(单精度浮点数) +* DOUBLE(双精度浮点数) +* TEXT(长字符串) +* STRING(字符串) +* BLOB(大二进制对象) +* OBJECT(大二进制对象) + > V2.0.8-beta 版本起支持 +* TIMESTAMP(时间戳) +* DATE(日期) + +其中: +1. STRING 和 TEXT 类型的区别在于,STRING 类型具有更多的统计信息,能够用于优化值过滤查询。TEXT 类型适合用于存储长字符串。 +2. OBJECT 和 BLOB 类型的区别如下: + + | | **OBJECT** | **BLOB** | + | ---------------------- |-------------------------------------------------------------------------------------------------------------------------| -------------------------------------------- | + | 写放大(越低越好) | 低(写放大系数永远为 1) | 高(写放大系数为 2 + 合并次数) | + | 空间放大(越低越好) | 低(merge & release on write) | 高(merge on read and release on compact) | + | 查询结果 | 默认查询 OBJECT 列时,返回结果如`(Object) XX.XX KB)`。
真实 OBJECT 数据存储路径位于:`${data_dir}/object_data`,可通过 `READ_OBJECT` 函数读取其真实内容 | 直接返回真实的二进制内容 | + + +### 1.1 浮点数精度配置 + +对于 **FLOAT** 与 **DOUBLE** 类型的序列,如果编码方式采用 `RLE`或 `TS_2DIFF`,可以在创建序列时通过 `MAX_POINT_NUMBER` 属性指定浮点数的小数点后位数。 + +例如, +```sql +CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; +``` + +若不指定,系统会按照配置文件 `iotdb-system.properties` 中的 [float_precision](../Reference/System-Config-Manual_apache.md) 项配置(默认为 2 位)。 + +### 1.2 数据类型兼容性 + +当写入数据的类型与序列注册的数据类型不一致时, +- 如果序列数据类型不兼容写入数据类型,系统会给出错误提示。 +- 如果序列数据类型兼容写入数据类型,系统会进行数据类型的自动转换,将写入的数据类型更正为注册序列的类型。 + +各数据类型的兼容情况如下表所示: + +| 序列数据类型 | 支持的写入数据类型 | +|-----------|------------------------------------| +| BOOLEAN | BOOLEAN | +| INT32 | INT32 | +| INT64 | INT32 INT64 TIMESTAMP | +| FLOAT | INT32 FLOAT | +| DOUBLE | INT32 INT64 FLOAT DOUBLE TIMESTAMP | +| TEXT | TEXT STRING | +| STRING | TEXT STRING | +| BLOB | TEXT STRING BLOB | +| OBJECT | OBJECT | +| TIMESTAMP | INT32 INT64 TIMESTAMP | +| DATE | DATE | + +## 2. 时间戳类型 + +时间戳是一个数据到来的时间点,其中包括绝对时间戳和相对时间戳。 + +### 2.1 绝对时间戳 + +IOTDB 中绝对时间戳分为二种,一种为 LONG 类型,一种为 DATETIME 类型(包含 DATETIME-INPUT, DATETIME-DISPLAY 两个小类)。 + +在用户在输入时间戳时,可以使用 LONG 类型的时间戳或 DATETIME-INPUT 类型的时间戳,其中 DATETIME-INPUT 类型的时间戳支持格式如表所示: + +
+ +**DATETIME-INPUT 类型支持格式** + + +| format | +| :--------------------------- | +| yyyy-MM-dd HH:mm:ss | +| yyyy/MM/dd HH:mm:ss | +| yyyy.MM.dd HH:mm:ss | +| yyyy-MM-dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ssZZ | +| yyyy.MM.dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSS | +| yyyy.MM.dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSSZZ | +| yyyy/MM/dd HH:mm:ss.SSSZZ | +| yyyy.MM.dd HH:mm:ss.SSSZZ | +| ISO8601 standard time format | + + +
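+
+为了更直观,下面给出一个示意性的写入示例(沿用上文的序列 `root.vehicle.d0.s0`,并假设系统时间精度为毫秒、时区为 +08:00):两条语句分别使用 DATETIME-INPUT 与 LONG 类型时间戳,写入的是同一时间点。
+
+```sql
+-- 使用 DATETIME-INPUT(ISO8601)格式的时间戳写入
+INSERT INTO root.vehicle.d0(timestamp, s0) VALUES (2025-01-01T00:00:00.000+08:00, 1.0);
+-- 使用等价的 LONG 类型时间戳写入(对应 1735660800000 毫秒)
+INSERT INTO root.vehicle.d0(timestamp, s0) VALUES (1735660800000, 1.0);
+```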
+ + +IoTDB 在显示时间戳时可以支持 LONG 类型以及 DATETIME-DISPLAY 类型,其中 DATETIME-DISPLAY 类型可以支持用户自定义时间格式。自定义时间格式的语法如表所示: + +
+ +**DATETIME-DISPLAY 自定义时间格式的语法** + + +| Symbol | Meaning | Presentation | Examples | +| :----: | :-------------------------: | :----------: | :--------------------------------: | +| G | era | era | era | +| C | century of era (>=0) | number | 20 | +| Y | year of era (>=0) | year | 1996 | +| | | | | +| x | weekyear | year | 1996 | +| w | week of weekyear | number | 27 | +| e | day of week | number | 2 | +| E | day of week | text | Tuesday; Tue | +| | | | | +| y | year | year | 1996 | +| D | day of year | number | 189 | +| M | month of year | month | July; Jul; 07 | +| d | day of month | number | 10 | +| | | | | +| a | halfday of day | text | PM | +| K | hour of halfday (0~11) | number | 0 | +| h | clockhour of halfday (1~12) | number | 12 | +| | | | | +| H | hour of day (0~23) | number | 0 | +| k | clockhour of day (1~24) | number | 24 | +| m | minute of hour | number | 30 | +| s | second of minute | number | 55 | +| S | fraction of second | millis | 978 | +| | | | | +| z | time zone | text | Pacific Standard Time; PST | +| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | +| | | | | +| ' | escape for text | delimiter | | +| '' | single quote | literal | ' | + +
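+
+下面用一组示意性的对照说明这些符号的组合效果(假设时区为 +08:00,显示的时间点为 2025 年 1 月 1 日 0 时,该日为星期三;具体输出以实际系统为准):
+
+```sql
+-- 自定义格式                  ->  显示效果(示意)
+-- yyyy-MM-dd HH:mm:ss.SSS    ->  2025-01-01 00:00:00.000
+-- yyyy/MM/dd HH:mm:ss.SSSZZ  ->  2025/01/01 00:00:00.000+08:00
+-- yyyy-MM-dd E               ->  2025-01-01 Wed
+```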
+ +### 2.2 相对时间戳 + + 相对时间是指与服务器时间```now()```和```DATETIME```类型时间相差一定时间间隔的时间。 + 形式化定义为: + + ``` + Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ + RelativeTime = (now() | DATETIME) ((+|-) Duration)+ + ``` + +
+ + **The syntax of the duration unit** + + + | Symbol | Meaning | Presentation | Examples | + | :----: | :---------: | :----------------------: | :------: | + | y | year | 1y=365 days | 1y | + | mo | month | 1mo=30 days | 1mo | + | w | week | 1w=7 days | 1w | + | d | day | 1d=1 day | 1d | + | | | | | + | h | hour | 1h=3600 seconds | 1h | + | m | minute | 1m=60 seconds | 1m | + | s | second | 1s=1 second | 1s | + | | | | | + | ms | millisecond | 1ms=1000_000 nanoseconds | 1ms | + | us | microsecond | 1us=1000 nanoseconds | 1us | + | ns | nanosecond | 1ns=1 nanosecond | 1ns | + +
+ + 例子: + + ``` + now() - 1d2h //比服务器时间早 1 天 2 小时的时间 + now() - 1w //比服务器时间早 1 周的时间 + ``` + + > 注意:'+'和'-'的左右两边必须有空格 \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type_timecho.md b/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type_timecho.md new file mode 100644 index 000000000..be817a3f1 --- /dev/null +++ b/src/zh/UserGuide/latest-Table/Background-knowledge/Data-Type_timecho.md @@ -0,0 +1,200 @@ + + +# 数据类型 + +## 1. 基本数据类型 + +IoTDB 支持以下十种数据类型: + +* BOOLEAN(布尔值) +* INT32(整型) +* INT64(长整型) +* FLOAT(单精度浮点数) +* DOUBLE(双精度浮点数) +* TEXT(长字符串) +* STRING(字符串) +* BLOB(大二进制对象) +* OBJECT(大二进制对象) + > V2.0.8 版本起支持 +* TIMESTAMP(时间戳) +* DATE(日期) + +其中: +1. STRING 和 TEXT 类型的区别在于,STRING 类型具有更多的统计信息,能够用于优化值过滤查询。TEXT 类型适合用于存储长字符串。 +2. OBJECT 和 BLOB 类型的区别如下: + + | | **OBJECT** | **BLOB** | + | ---------------------- |-------------------------------------------------------------------------------------------------------------------------| -------------------------------------------- | + | 写放大(越低越好) | 低(写放大系数永远为 1) | 高(写放大系数为 2 + 合并次数) | + | 空间放大(越低越好) | 低(merge & release on write) | 高(merge on read and release on compact) | + | 查询结果 | 默认查询 OBJECT 列时,返回结果如`(Object) XX.XX KB)`。
真实 OBJECT 数据存储路径位于:`${data_dir}/object_data`,可通过 `READ_OBJECT` 函数读取其真实内容 | 直接返回真实的二进制内容 | + + +### 1.1 浮点数精度配置 + +对于 **FLOAT** 与 **DOUBLE** 类型的序列,如果编码方式采用 `RLE`或 `TS_2DIFF`,可以在创建序列时通过 `MAX_POINT_NUMBER` 属性指定浮点数的小数点后位数。 + +例如, +```sql +CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; +``` + +若不指定,系统会按照配置文件 `iotdb-system.properties` 中的 [float_precision](../Reference/System-Config-Manual_timecho.md) 项配置(默认为 2 位)。 + +### 1.2 数据类型兼容性 + +当写入数据的类型与序列注册的数据类型不一致时, +- 如果序列数据类型不兼容写入数据类型,系统会给出错误提示。 +- 如果序列数据类型兼容写入数据类型,系统会进行数据类型的自动转换,将写入的数据类型更正为注册序列的类型。 + +各数据类型的兼容情况如下表所示: + +| 序列数据类型 | 支持的写入数据类型 | +|-----------|------------------------------------| +| BOOLEAN | BOOLEAN | +| INT32 | INT32 | +| INT64 | INT32 INT64 TIMESTAMP | +| FLOAT | INT32 FLOAT | +| DOUBLE | INT32 INT64 FLOAT DOUBLE TIMESTAMP | +| TEXT | TEXT STRING | +| STRING | TEXT STRING | +| BLOB | TEXT STRING BLOB | +| OBJECT | OBJECT | +| TIMESTAMP | INT32 INT64 TIMESTAMP | +| DATE | DATE | + +## 2. 时间戳类型 + +时间戳是一个数据到来的时间点,其中包括绝对时间戳和相对时间戳。 + +### 2.1 绝对时间戳 + +IOTDB 中绝对时间戳分为二种,一种为 LONG 类型,一种为 DATETIME 类型(包含 DATETIME-INPUT, DATETIME-DISPLAY 两个小类)。 + +在用户在输入时间戳时,可以使用 LONG 类型的时间戳或 DATETIME-INPUT 类型的时间戳,其中 DATETIME-INPUT 类型的时间戳支持格式如表所示: + +
+ +**DATETIME-INPUT 类型支持格式** + + +| format | +| :--------------------------- | +| yyyy-MM-dd HH:mm:ss | +| yyyy/MM/dd HH:mm:ss | +| yyyy.MM.dd HH:mm:ss | +| yyyy-MM-dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ssZZ | +| yyyy.MM.dd HH:mm:ssZZ | +| yyyy/MM/dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSS | +| yyyy.MM.dd HH:mm:ss.SSS | +| yyyy-MM-dd HH:mm:ss.SSSZZ | +| yyyy/MM/dd HH:mm:ss.SSSZZ | +| yyyy.MM.dd HH:mm:ss.SSSZZ | +| ISO8601 standard time format | + + +
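+
+为了更直观,下面给出一个示意性的写入示例(沿用上文的序列 `root.vehicle.d0.s0`,并假设系统时间精度为毫秒、时区为 +08:00):两条语句分别使用 DATETIME-INPUT 与 LONG 类型时间戳,写入的是同一时间点。
+
+```sql
+-- 使用 DATETIME-INPUT(ISO8601)格式的时间戳写入
+INSERT INTO root.vehicle.d0(timestamp, s0) VALUES (2025-01-01T00:00:00.000+08:00, 1.0);
+-- 使用等价的 LONG 类型时间戳写入(对应 1735660800000 毫秒)
+INSERT INTO root.vehicle.d0(timestamp, s0) VALUES (1735660800000, 1.0);
+```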
+ + +IoTDB 在显示时间戳时可以支持 LONG 类型以及 DATETIME-DISPLAY 类型,其中 DATETIME-DISPLAY 类型可以支持用户自定义时间格式。自定义时间格式的语法如表所示: + +
+ +**DATETIME-DISPLAY 自定义时间格式的语法** + + +| Symbol | Meaning | Presentation | Examples | +| :----: | :-------------------------: | :----------: | :--------------------------------: | +| G | era | era | era | +| C | century of era (>=0) | number | 20 | +| Y | year of era (>=0) | year | 1996 | +| | | | | +| x | weekyear | year | 1996 | +| w | week of weekyear | number | 27 | +| e | day of week | number | 2 | +| E | day of week | text | Tuesday; Tue | +| | | | | +| y | year | year | 1996 | +| D | day of year | number | 189 | +| M | month of year | month | July; Jul; 07 | +| d | day of month | number | 10 | +| | | | | +| a | halfday of day | text | PM | +| K | hour of halfday (0~11) | number | 0 | +| h | clockhour of halfday (1~12) | number | 12 | +| | | | | +| H | hour of day (0~23) | number | 0 | +| k | clockhour of day (1~24) | number | 24 | +| m | minute of hour | number | 30 | +| s | second of minute | number | 55 | +| S | fraction of second | millis | 978 | +| | | | | +| z | time zone | text | Pacific Standard Time; PST | +| Z | time zone offset/id | zone | -0800; -08:00; America/Los_Angeles | +| | | | | +| ' | escape for text | delimiter | | +| '' | single quote | literal | ' | + +
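+
+下面用一组示意性的对照说明这些符号的组合效果(假设时区为 +08:00,显示的时间点为 2025 年 1 月 1 日 0 时,该日为星期三;具体输出以实际系统为准):
+
+```sql
+-- 自定义格式                  ->  显示效果(示意)
+-- yyyy-MM-dd HH:mm:ss.SSS    ->  2025-01-01 00:00:00.000
+-- yyyy/MM/dd HH:mm:ss.SSSZZ  ->  2025/01/01 00:00:00.000+08:00
+-- yyyy-MM-dd E               ->  2025-01-01 Wed
+```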
+ +### 2.2 相对时间戳 + + 相对时间是指与服务器时间```now()```和```DATETIME```类型时间相差一定时间间隔的时间。 + 形式化定义为: + + ``` + Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ + RelativeTime = (now() | DATETIME) ((+|-) Duration)+ + ``` + +
+ + **The syntax of the duration unit** + + + | Symbol | Meaning | Presentation | Examples | + | :----: | :---------: | :----------------------: | :------: | + | y | year | 1y=365 days | 1y | + | mo | month | 1mo=30 days | 1mo | + | w | week | 1w=7 days | 1w | + | d | day | 1d=1 day | 1d | + | | | | | + | h | hour | 1h=3600 seconds | 1h | + | m | minute | 1m=60 seconds | 1m | + | s | second | 1s=1 second | 1s | + | | | | | + | ms | millisecond | 1ms=1000_000 nanoseconds | 1ms | + | us | microsecond | 1us=1000 nanoseconds | 1us | + | ns | nanosecond | 1ns=1 nanosecond | 1ns | + +
+ + 例子: + + ``` + now() - 1d2h //比服务器时间早 1 天 2 小时的时间 + now() - 1w //比服务器时间早 1 周的时间 + ``` + + > 注意:'+'和'-'的左右两边必须有空格 \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/Basic-Concept/Query-Data_apache.md b/src/zh/UserGuide/latest-Table/Basic-Concept/Query-Data_apache.md index 811f816d5..f43389f74 100644 --- a/src/zh/UserGuide/latest-Table/Basic-Concept/Query-Data_apache.md +++ b/src/zh/UserGuide/latest-Table/Basic-Concept/Query-Data_apache.md @@ -37,7 +37,7 @@ SELECT ⟨select_list⟩ IoTDB 查询语法提供以下子句: -- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause.md) +- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause_apache.md) - FROM 子句:指出查询的数据源,可以是单个表、多个通过 `JOIN` 子句连接的表,或者是一个子查询。详细语法见:[FROM & JOIN 子句](../SQL-Manual/From-Join-Clause.md) - WHERE 子句:用于过滤数据,只选择满足特定条件的数据行。这个子句在逻辑上紧跟在 FROM 子句之后执行。详细语法见:[WHERE 子句](../SQL-Manual/Where-Clause.md) - GROUP BY 子句:当需要对数据进行聚合时使用,指定了用于分组的列。详细语法见:[GROUP BY 子句](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/zh/UserGuide/latest-Table/Basic-Concept/Query-Data_timecho.md b/src/zh/UserGuide/latest-Table/Basic-Concept/Query-Data_timecho.md index 53309d6aa..48c47bc3d 100644 --- a/src/zh/UserGuide/latest-Table/Basic-Concept/Query-Data_timecho.md +++ b/src/zh/UserGuide/latest-Table/Basic-Concept/Query-Data_timecho.md @@ -38,7 +38,7 @@ SELECT ⟨select_list⟩ IoTDB 查询语法提供以下子句: -- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause.md) +- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause_timecho.md) - FROM 子句:指出查询的数据源,可以是单个表、多个通过 `JOIN` 子句连接的表,或者是一个子查询。详细语法见:[FROM & JOIN 子句](../SQL-Manual/From-Join-Clause.md) - WHERE 子句:用于过滤数据,只选择满足特定条件的数据行。这个子句在逻辑上紧跟在 FROM 子句之后执行。详细语法见:[WHERE 子句](../SQL-Manual/Where-Clause.md) - GROUP BY 子句:当需要对数据进行聚合时使用,指定了用于分组的列。详细语法见:[GROUP BY 子句](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md b/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md index f5190f8ed..6e7bb9e8f 100644 --- a/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md +++ b/src/zh/UserGuide/latest-Table/Basic-Concept/Write-Updata-Data_timecho.md @@ -293,7 +293,7 @@ It costs 0.014s 自为了避免单个 Object 过大导致写入请求过大,Object 类型的值支持拆分后按顺序分段写入。SQL 中需要使用 `to_object(isEOF, offset, content)` 函数进行值填充。 -> V2.0.8-beta 版本起支持 +> V2.0.8 版本起支持 **语法:** diff --git a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources_apache.md b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources_apache.md index 5ca9b5462..2766e74a6 100644 --- a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources_apache.md +++ b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources_apache.md @@ -146,7 +146,7 @@ -> 提供灵活的内存配置选项,用户可在datanode-env文件中进行调整,详细信息和配置指南请参见 [datanode-env](../Reference/System-Config-Manual.md#_2-2-datanode-env-sh-bat) +> 提供灵活的内存配置选项,用户可在datanode-env文件中进行调整,详细信息和配置指南请参见 [datanode-env](../Reference/System-Config-Manual_apache.md#_2-2-datanode-env-sh-bat) ## 3. 
存储(磁盘) ### 3.1 存储空间 diff --git a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources_timecho.md b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources_timecho.md index 0934dbcd7..4bf223c79 100644 --- a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources_timecho.md +++ b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/Database-Resources_timecho.md @@ -146,7 +146,7 @@ -> 提供灵活的内存配置选项,用户可在datanode-env文件中进行调整,详细信息和配置指南请参见 [datanode-env](../Reference/System-Config-Manual.md#_2-2-datanode-env-sh-bat) +> 提供灵活的内存配置选项,用户可在datanode-env文件中进行调整,详细信息和配置指南请参见 [datanode-env](../Reference/System-Config-Manual_timecho.md#_2-2-datanode-env-sh-bat) ## 3. 存储(磁盘) ### 3.1 存储空间 diff --git a/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual.md b/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual.md index 672829976..660b55b42 100644 --- a/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual.md +++ b/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual.md @@ -1,3 +1,6 @@ +--- +redirectTo: System-Config-Manual_apache.html +--- - -# 配置参数 - -IoTDB 配置文件位于 IoTDB 安装目录:`conf`文件夹下。 - -- `confignode-env.sh/bat`:环境配置项的配置文件,可以配置 ConfigNode 的内存大小。 -- `datanode-env.sh/bat`:环境配置项的配置文件,可以配置 DataNode 的内存大小。 -- `iotdb-system.properties`:IoTDB 的配置文件。 -- `iotdb-system.properties.template`:IoTDB 的配置文件模版。 - -## 1. 修改配置: - -在 `iotdb-system.properties` 文件中已存在的参数可以直接进行修改。对于那些在 `iotdb-system.properties` 中未列出的参数,可以从 `iotdb-system.properties.template` 配置文件模板中找到相应的参数,然后将其复制到 `iotdb-system.properties` 文件中进行修改。 - -### 1.1 改后生效方式 - -不同的配置参数有不同的生效方式,分为以下三种: - -- 仅允许在第一次启动服务前修改: 在第一次启动 ConfigNode/DataNode 后即禁止修改,修改会导致 ConfigNode/DataNode 无法启动。 -- 重启服务生效: ConfigNode/DataNode 启动后仍可修改,但需要重启 ConfigNode/DataNode 后才生效。 -- 热加载: 可在 ConfigNode/DataNode 运行时修改,修改后通过 Session 或 Cli 发送 `load configuration` 或 `set configuration key1 = 'value1'` 命令(SQL)至 IoTDB 使配置生效。 - -## 2. 
环境配置项 - -### 2.1 confignode-env.sh/bat - -环境配置项主要用于对 ConfigNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。ConfigNode 启动时,此部分配置会被传给 JVM,详细配置项说明如下: - -- MEMORY_SIZE - -| 名字 | MEMORY_SIZE | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB ConfigNode 启动时分配的内存大小 | -| 类型 | String | -| 默认值 | 取决于操作系统和机器配置。默认为机器内存的十分之三,最多会被设置为 16G。 | -| 改后生效方式 | 重启服务生效 | - -- ON_HEAP_MEMORY - -| 名字 | ON_HEAP_MEMORY | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB ConfigNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | -| 类型 | String | -| 默认值 | 取决于MEMORY_SIZE的配置。 | -| 改后生效方式 | 重启服务生效 | - -- OFF_HEAP_MEMORY - -| 名字 | OFF_HEAP_MEMORY | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB ConfigNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | -| 类型 | String | -| 默认值 | 取决于MEMORY_SIZE的配置。 | -| 改后生效方式 | 重启服务生效 | - -### 2.2 datanode-env.sh/bat - -环境配置项主要用于对 DataNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。DataNode/Standalone 启动时,此部分配置会被传给 JVM,详细配置项说明如下: - -- MEMORY_SIZE - -| 名字 | MEMORY_SIZE | -| ------------ | ---------------------------------------------------- | -| 描述 | IoTDB DataNode 启动时分配的内存大小 | -| 类型 | String | -| 默认值 | 取决于操作系统和机器配置。默认为机器内存的二分之一。 | -| 改后生效方式 | 重启服务生效 | - -- ON_HEAP_MEMORY - -| 名字 | ON_HEAP_MEMORY | -| ------------ | ---------------------------------------------------------- | -| 描述 | IoTDB DataNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | -| 类型 | String | -| 默认值 | 取决于MEMORY_SIZE的配置。 | -| 改后生效方式 | 重启服务生效 | - -- OFF_HEAP_MEMORY - -| 名字 | OFF_HEAP_MEMORY | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB DataNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | -| 类型 | String | -| 默认值 | 取决于MEMORY_SIZE的配置 | -| 改后生效方式 | 重启服务生效 | - - -## 3. 
系统配置项(iotdb-system.properties.template) - -### 3.1 集群管理 - -- cluster_name - -| 名字 | cluster_name | -| -------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| 描述 | 集群名称 | -| 类型 | String | -| 默认值 | default_cluster | -| 修改方式 | CLI 中执行语句 `set configuration cluster_name = 'xxx'` (xxx为希望修改成的集群名称) | -| 注意 | 此修改通过网络分发至每个节点。在网络波动或者有节点宕机的情况下,不保证能够在全部节点修改成功。未修改成功的节点重启时无法加入集群,此时需要手动修改该节点的配置文件中的cluster_name项,再重启。正常情况下,不建议通过手动修改配置文件的方式修改集群名称,不建议通过`load configuration`的方式热加载。 | - -### 3.2 SeedConfigNode 配置 - -- cn_seed_config_node - -| 名字 | cn_seed_config_node | -| ------------ | ------------------------------------------------------------ | -| 描述 | 目标 ConfigNode 地址,ConfigNode 通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 cn_target_config_node_list | -| 类型 | String | -| 默认值 | 127.0.0.1:10710 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_seed_config_node - -| 名字 | dn_seed_config_node | -| ------------ | ------------------------------------------------------------ | -| 描述 | ConfigNode 地址,DataNode 启动时通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 dn_target_config_node_list | -| 类型 | String | -| 默认值 | 127.0.0.1:10710 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -### 3.3 Node RPC 配置 - -- cn_internal_address - -| 名字 | cn_internal_address | -| ------------ | ---------------------------- | -| 描述 | ConfigNode 集群内部地址 | -| 类型 | String | -| 默认值 | 127.0.0.1 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- cn_internal_port - -| 名字 | cn_internal_port | -| ------------ | ---------------------------- | -| 描述 | ConfigNode 集群服务监听端口 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 10710 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- cn_consensus_port - -| 名字 | cn_consensus_port | -| ------------ | ----------------------------- | -| 描述 | ConfigNode 的共识协议通信端口 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 10720 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_rpc_address - -| 名字 | dn_rpc_address | -| ------------ |----------------| -| 描述 | 客户端 RPC 服务监听地址 | -| 类型 | String | -| 默认值 | 127.0.0.1 | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_port - -| 名字 | dn_rpc_port | -| ------------ | ----------------------- | -| 描述 | Client RPC 服务监听端口 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 6667 | -| 改后生效方式 | 重启服务生效 | - -- dn_internal_address - -| 名字 | dn_internal_address | -| ------------ | ---------------------------- | -| 描述 | DataNode 内网通信地址 | -| 类型 | string | -| 默认值 | 127.0.0.1 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_internal_port - -| 名字 | dn_internal_port | -| ------------ | ---------------------------- | -| 描述 | DataNode 内网通信端口 | -| 类型 | int | -| 默认值 | 10730 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_mpp_data_exchange_port - -| 名字 | dn_mpp_data_exchange_port | -| ------------ | ---------------------------- | -| 描述 | MPP 数据交换端口 | -| 类型 | int | -| 默认值 | 10740 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_schema_region_consensus_port - -| 名字 | dn_schema_region_consensus_port | -| ------------ | ------------------------------------- | -| 描述 | DataNode 元数据副本的共识协议通信端口 | -| 类型 | int | -| 默认值 | 10750 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_data_region_consensus_port - -| 名字 | dn_data_region_consensus_port | -| ------------ | ----------------------------------- | -| 描述 | DataNode 数据副本的共识协议通信端口 | -| 类型 | int | -| 默认值 | 10760 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- dn_join_cluster_retry_interval_ms - -| 名字 | dn_join_cluster_retry_interval_ms | -| ------------ | --------------------------------- | -| 描述 | DataNode 再次重试加入集群等待时间 | -| 类型 | long | -| 默认值 | 5000 | -| 改后生效方式 | 重启服务生效 | - -### 3.4 副本配置 - -- 
config_node_consensus_protocol_class - -| 名字 | config_node_consensus_protocol_class | -| ------------ | ------------------------------------------------ | -| 描述 | ConfigNode 副本的共识协议,仅支持 RatisConsensus | -| 类型 | String | -| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- schema_replication_factor - -| 名字 | schema_replication_factor | -| ------------ | ---------------------------------- | -| 描述 | Database 的默认元数据副本数 | -| 类型 | int32 | -| 默认值 | 1 | -| 改后生效方式 | 重启服务后对**新的 Database** 生效 | - -- schema_region_consensus_protocol_class - -| 名字 | schema_region_consensus_protocol_class | -| ------------ | ----------------------------------------------------- | -| 描述 | 元数据副本的共识协议,多副本时只能使用 RatisConsensus | -| 类型 | String | -| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- data_replication_factor - -| 名字 | data_replication_factor | -| ------------ | ---------------------------------- | -| 描述 | Database 的默认数据副本数 | -| 类型 | int32 | -| 默认值 | 1 | -| 改后生效方式 | 重启服务后对**新的 Database** 生效 | - -- data_region_consensus_protocol_class - -| 名字 | data_region_consensus_protocol_class | -| ------------ | ------------------------------------------------------------ | -| 描述 | 数据副本的共识协议,多副本时可以使用 IoTConsensus 或 RatisConsensus | -| 类型 | String | -| 默认值 | org.apache.iotdb.consensus.iot.IoTConsensus | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -### 3.5 目录配置 - -- cn_system_dir - -| 名字 | cn_system_dir | -| ------------ | ----------------------------------------------------------- | -| 描述 | ConfigNode 系统数据存储路径 | -| 类型 | String | -| 默认值 | data/confignode/system(Windows:data\\configndoe\\system) | -| 改后生效方式 | 重启服务生效 | - -- cn_consensus_dir - -| 名字 | cn_consensus_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | ConfigNode 共识协议数据存储路径 | -| 类型 | String | -| 默认值 | data/confignode/consensus(Windows:data\\configndoe\\consensus) | -| 改后生效方式 | 重启服务生效 | - -- cn_pipe_receiver_file_dir - -| 名字 | cn_pipe_receiver_file_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | ConfigNode中pipe接收者用于存储文件的目录路径。 | -| 类型 | String | -| 默认值 | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) | -| 改后生效方式 | 重启服务生效 | - -- dn_system_dir - -| 名字 | dn_system_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 元数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/system(Windows:data\\datanode\\system) | -| 改后生效方式 | 重启服务生效 | - -- dn_data_dirs - -| 名字 | dn_data_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/data(Windows:data\\datanode\\data) | -| 改后生效方式 | 重启服务生效 | - -- dn_multi_dir_strategy - -| 名字 | dn_multi_dir_strategy | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 在 data_dirs 中为 TsFile 选择目录时采用的策略。可使用简单类名或类名全称。系统提供以下三种策略:
1. SequenceStrategy:IoTDB 按顺序选择目录,依次遍历 data_dirs 中的所有目录,并不断轮循;
2. MaxDiskUsableSpaceFirstStrategy:IoTDB 优先选择 data_dirs 中对应磁盘空余空间最大的目录;
您可以通过以下方法完成用户自定义策略:
1. 继承 org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy 类并实现自身的 Strategy 方法;
2. 将实现的类的完整类名(包名加类名,UserDefineStrategyPackage)填写到该配置项;
3. 将该类 jar 包添加到工程中。 | -| 类型 | String | -| 默认值 | SequenceStrategy | -| 改后生效方式 | 热加载 | - -- dn_consensus_dir - -| 名字 | dn_consensus_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 共识层日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/consensus(Windows:data\\datanode\\consensus) | -| 改后生效方式 | 重启服务生效 | - -- dn_wal_dirs - -| 名字 | dn_wal_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 写前日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/wal(Windows:data\\datanode\\wal) | -| 改后生效方式 | 重启服务生效 | - -- dn_tracing_dir - -| 名字 | dn_tracing_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB 追踪根目录路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | datanode/tracing(Windows:datanode\\tracing) | -| 改后生效方式 | 重启服务生效 | - -- dn_sync_dir - -| 名字 | dn_sync_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTDB sync 存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | -| 类型 | String | -| 默认值 | data/datanode/sync(Windows:data\\datanode\\sync) | -| 改后生效方式 | 重启服务生效 | - -- sort_tmp_dir - -| 名字 | sort_tmp_dir | -| ------------ | ------------------------------------------------- | -| 描述 | 用于配置排序操作的临时目录。 | -| 类型 | String | -| 默认值 | data/datanode/tmp(Windows:data\\datanode\\tmp) | -| 改后生效方式 | 重启服务生效 | - -- dn_pipe_receiver_file_dirs - -| 名字 | dn_pipe_receiver_file_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | DataNode中pipe接收者用于存储文件的目录路径。 | -| 类型 | String | -| 默认值 | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | -| 改后生效方式 | 重启服务生效 | - -- iot_consensus_v2_receiver_file_dirs - -| 名字 | iot_consensus_v2_receiver_file_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTConsensus V2中接收者用于存储文件的目录路径。 | -| 类型 | String | -| 默认值 | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | -| 改后生效方式 | 重启服务生效 | - -- iot_consensus_v2_deletion_file_dir - -| 名字 | iot_consensus_v2_deletion_file_dir | -| ------------ | ------------------------------------------------------------ | -| 描述 | IoTConsensus V2中删除操作用于存储文件的目录路径。 | -| 类型 | String | -| 默认值 | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | -| 改后生效方式 | 重启服务生效 | - -### 3.6 监控配置 - -- cn_metric_reporter_list - -| 名字 | cn_metric_reporter_list | -| ------------ | -------------------------------------------------- | -| 描述 | confignode中用于配置监控模块的数据需要报告的系统。 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- cn_metric_level - -| 名字 | cn_metric_level | -| ------------ | ------------------------------------------ | -| 描述 | confignode中控制监控模块收集数据的详细程度 | -| 类型 | String | -| 默认值 | IMPORTANT | -| 改后生效方式 | 重启服务生效 | - -- cn_metric_async_collect_period - -| 名字 | cn_metric_async_collect_period | -| ------------ | -------------------------------------------------- | -| 描述 | confignode中某些监控数据异步收集的周期,单位是秒。 | -| 类型 | int | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -- cn_metric_prometheus_reporter_port - -| 名字 | cn_metric_prometheus_reporter_port | -| ------------ | ------------------------------------------------------ | -| 描述 | confignode中Prometheus报告者用于监控数据报告的端口号。 | -| 类型 | int | -| 默认值 | 9091 | -| 改后生效方式 | 重启服务生效 | - -- 
dn_metric_reporter_list - -| 名字 | dn_metric_reporter_list | -| ------------ | ------------------------------------------------ | -| 描述 | DataNode中用于配置监控模块的数据需要报告的系统。 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- dn_metric_level - -| 名字 | dn_metric_level | -| ------------ | ---------------------------------------- | -| 描述 | DataNode中控制监控模块收集数据的详细程度 | -| 类型 | String | -| 默认值 | IMPORTANT | -| 改后生效方式 | 重启服务生效 | - -- dn_metric_async_collect_period - -| 名字 | dn_metric_async_collect_period | -| ------------ | ------------------------------------------------ | -| 描述 | DataNode中某些监控数据异步收集的周期,单位是秒。 | -| 类型 | int | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -- dn_metric_prometheus_reporter_port - -| 名字 | dn_metric_prometheus_reporter_port | -| ------------ | ---------------------------------------------------- | -| 描述 | DataNode中Prometheus报告者用于监控数据报告的端口号。 | -| 类型 | int | -| 默认值 | 9092 | -| 改后生效方式 | 重启服务生效 | - -- dn_metric_internal_reporter_type - -| 名字 | dn_metric_internal_reporter_type | -| ------------ | ------------------------------------------------------------ | -| 描述 | DataNode中监控模块内部报告者的种类,用于内部监控和检查数据是否已经成功写入和刷新。 | -| 类型 | String | -| 默认值 | IOTDB | -| 改后生效方式 | 重启服务生效 | - -### 3.7 SSL 配置 - -- enable_thrift_ssl - -| 名字 | enable_thrift_ssl | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当enable_thrift_ssl配置为true时,将通过dn_rpc_port使用 SSL 加密进行通信 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- enable_https - -| 名字 | enable_https | -| ------------ | ------------------------------ | -| 描述 | REST Service 是否开启 SSL 配置 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- key_store_path - -| 名字 | key_store_path | -| ------------ | -------------- | -| 描述 | ssl证书路径 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- key_store_pwd - -| 名字 | key_store_pwd | -| ------------ | ------------- | -| 描述 | ssl证书密码 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -### 3.8 连接配置 - -- cn_rpc_thrift_compression_enable - -| 名字 | cn_rpc_thrift_compression_enable | -| ------------ | -------------------------------- | -| 描述 | 是否启用 thrift 的压缩机制。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- cn_rpc_max_concurrent_client_num - -| 名字 | cn_rpc_max_concurrent_client_num | -| ------------ |---------------------------------| -| 描述 | 最大连接数。 | -| 类型 | int | -| 默认值 | 3000 | -| 改后生效方式 | 重启服务生效 | - -- cn_connection_timeout_ms - -| 名字 | cn_connection_timeout_ms | -| ------------ | ------------------------ | -| 描述 | 节点连接超时时间 | -| 类型 | int | -| 默认值 | 60000 | -| 改后生效方式 | 重启服务生效 | - -- cn_selector_thread_nums_of_client_manager - -| 名字 | cn_selector_thread_nums_of_client_manager | -| ------------ | ----------------------------------------- | -| 描述 | 客户端异步线程管理的选择器线程数量 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- cn_max_client_count_for_each_node_in_client_manager - -| 名字 | cn_max_client_count_for_each_node_in_client_manager | -| ------------ | --------------------------------------------------- | -| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | -| 类型 | int | -| 默认值 | 300 | -| 改后生效方式 | 重启服务生效 | - -- dn_session_timeout_threshold - -| 名字 | dn_session_timeout_threshold | -| ------------ | ---------------------------- | -| 描述 | 最大的会话空闲时间 | -| 类型 | int | -| 默认值 | 0 | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_thrift_compression_enable - -| 名字 | dn_rpc_thrift_compression_enable | -| ------------ | -------------------------------- | -| 描述 | 是否启用 thrift 的压缩机制 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_advanced_compression_enable - 
-| 名字 | dn_rpc_advanced_compression_enable | -| ------------ | ---------------------------------- | -| 描述 | 是否启用 thrift 的自定制压缩机制 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_selector_thread_count - -| 名字 | rpc_selector_thread_count | -| ------------ | ------------------------- | -| 描述 | rpc 选择器线程数量 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_min_concurrent_client_num - -| 名字 | rpc_min_concurrent_client_num | -| ------------ | ----------------------------- | -| 描述 | 最小连接数 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- dn_rpc_max_concurrent_client_num - -| 名字 | dn_rpc_max_concurrent_client_num | -| ------------ |----------------------------------| -| 描述 | 最大连接数 | -| 类型 | Short Int : [0,65535] | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- dn_thrift_max_frame_size - -| 名字 | dn_thrift_max_frame_size | -| ------------ | ------------------------------------------------------ | -| 描述 | RPC 请求/响应的最大字节数 | -| 类型 | long | -| 默认值 | 536870912 (默认值512MB) | -| 改后生效方式 | 重启服务生效 | - -- dn_thrift_init_buffer_size - -| 名字 | dn_thrift_init_buffer_size | -| ------------ | -------------------------- | -| 描述 | 字节数 | -| 类型 | long | -| 默认值 | 1024 | -| 改后生效方式 | 重启服务生效 | - -- dn_connection_timeout_ms - -| 名字 | dn_connection_timeout_ms | -| ------------ | ------------------------ | -| 描述 | 节点连接超时时间 | -| 类型 | int | -| 默认值 | 60000 | -| 改后生效方式 | 重启服务生效 | - -- dn_selector_thread_count_of_client_manager - -| 名字 | dn_selector_thread_count_of_client_manager | -| ------------ | ------------------------------------------------------------ | -| 描述 | selector thread (TAsyncClientManager) nums for async thread in a clientManagerclientManager中异步线程的选择器线程(TAsyncClientManager)编号 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- dn_max_client_count_for_each_node_in_client_manager - -| 名字 | dn_max_client_count_for_each_node_in_client_manager | -| ------------ | --------------------------------------------------- | -| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | -| 类型 | int | -| 默认值 | 300 | -| 改后生效方式 | 重启服务生效 | - -### 3.9 对象存储管理 - -- remote_tsfile_cache_dirs - -| 名字 | remote_tsfile_cache_dirs | -| ------------ | ------------------------ | -| 描述 | 云端存储在本地的缓存目录 | -| 类型 | String | -| 默认值 | data/datanode/data/cache | -| 改后生效方式 | 重启服务生效 | - -- remote_tsfile_cache_page_size_in_kb - -| 名字 | remote_tsfile_cache_page_size_in_kb | -| ------------ | ----------------------------------- | -| 描述 | 云端存储在本地缓存文件的块大小 | -| 类型 | int | -| 默认值 | 20480 | -| 改后生效方式 | 重启服务生效 | - -- remote_tsfile_cache_max_disk_usage_in_mb - -| 名字 | remote_tsfile_cache_max_disk_usage_in_mb | -| ------------ | ---------------------------------------- | -| 描述 | 云端存储本地缓存的最大磁盘占用大小 | -| 类型 | long | -| 默认值 | 51200 | -| 改后生效方式 | 重启服务生效 | - -- object_storage_type - -| 名字 | object_storage_type | -| ------------ | ------------------- | -| 描述 | 云端存储类型 | -| 类型 | String | -| 默认值 | AWS_S3 | -| 改后生效方式 | 重启服务生效 | - -- object_storage_endpoint - -| 名字 | object_storage_endpoint | -| ------------ | ----------------------- | -| 描述 | 云端存储的 endpoint | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- object_storage_bucket - -| 名字 | object_storage_bucket | -| ------------ | ---------------------- | -| 描述 | 云端存储 bucket 的名称 | -| 类型 | String | -| 默认值 | iotdb_data | -| 改后生效方式 | 重启服务生效 | - -- object_storage_access_key - -| 名字 | object_storage_access_key | -| ------------ | ------------------------- | -| 描述 | 云端存储的验证信息 key | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- object_storage_access_secret - -| 名字 | 
object_storage_access_secret | -| ------------ | ---------------------------- | -| 描述 | 云端存储的验证信息 secret | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -### 3.10 多级管理 - -- dn_default_space_usage_thresholds - -| 名字 | dn_default_space_usage_thresholds | -| ------------ | ------------------------------------------------------------ | -| 描述 | 定义每个层级数据目录的最小剩余空间比例;当剩余空间少于该比例时,数据会被自动迁移至下一个层级;当最后一个层级的剩余存储空间到低于此阈值时,会将系统置为 READ_ONLY | -| 类型 | double | -| 默认值 | 0.85 | -| 改后生效方式 | 热加载 | - -- dn_tier_full_policy - -| 名字 | dn_tier_full_policy | -| ------------ | ------------------------------------------------------------ | -| 描述 | 如何处理最后一层数据,当其已用空间高于其dn_default_space_usage_threshold时。| -| 类型 | String | -| 默认值 | NULL | -| 改后生效方式 | 热加载 | - -- migrate_thread_count - -| 名字 | migrate_thread_count | -| ------------ | ---------------------------------------- | -| 描述 | DataNode数据目录中迁移操作的线程池大小。 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 热加载 | - -- tiered_storage_migrate_speed_limit_bytes_per_sec - -| 名字 | tiered_storage_migrate_speed_limit_bytes_per_sec | -| ------------ | ------------------------------------------------ | -| 描述 | 限制不同存储层级之间的数据迁移速度。 | -| 类型 | int | -| 默认值 | 10485760 | -| 改后生效方式 | 热加载 | - -### 3.11 REST服务配置 - -- enable_rest_service - -| 名字 | enable_rest_service | -| ------------ | ------------------- | -| 描述 | 是否开启Rest服务。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- rest_service_port - -| 名字 | rest_service_port | -| ------------ | ------------------ | -| 描述 | Rest服务监听端口号 | -| 类型 | int32 | -| 默认值 | 18080 | -| 改后生效方式 | 重启服务生效 | - -- enable_swagger - -| 名字 | enable_swagger | -| ------------ | --------------------------------- | -| 描述 | 是否启用swagger来展示rest接口信息 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- rest_query_default_row_size_limit - -| 名字 | rest_query_default_row_size_limit | -| ------------ | --------------------------------- | -| 描述 | 一次查询能返回的结果集最大行数 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- cache_expire_in_seconds - -| 名字 | cache_expire_in_seconds | -| ------------ | -------------------------------- | -| 描述 | 用户登录信息缓存的过期时间(秒) | -| 类型 | int32 | -| 默认值 | 28800 | -| 改后生效方式 | 重启服务生效 | - -- cache_max_num - -| 名字 | cache_max_num | -| ------------ | ------------------------ | -| 描述 | 缓存中存储的最大用户数量 | -| 类型 | int32 | -| 默认值 | 100 | -| 改后生效方式 | 重启服务生效 | - -- cache_init_num - -| 名字 | cache_init_num | -| ------------ | -------------- | -| 描述 | 缓存初始容量 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- client_auth - -| 名字 | client_auth | -| ------------ | ---------------------- | -| 描述 | 是否需要客户端身份验证 | -| 类型 | boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- trust_store_path - -| 名字 | trust_store_path | -| ------------ | ----------------------- | -| 描述 | keyStore 密码(非必填) | -| 类型 | String | -| 默认值 | "" | -| 改后生效方式 | 重启服务生效 | - -- trust_store_pwd - -| 名字 | trust_store_pwd | -| ------------ | ------------------------- | -| 描述 | trustStore 密码(非必填) | -| 类型 | String | -| 默认值 | "" | -| 改后生效方式 | 重启服务生效 | - -- idle_timeout_in_seconds - -| 名字 | idle_timeout_in_seconds | -| ------------ | ----------------------- | -| 描述 | SSL 超时时间,单位为秒 | -| 类型 | int32 | -| 默认值 | 5000 | -| 改后生效方式 | 重启服务生效 | - -### 3.12 负载均衡配置 - -- series_slot_num - -| 名字 | series_slot_num | -| ------------ | ---------------------------- | -| 描述 | 序列分区槽数 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- series_partition_executor_class - -| 名字 | series_partition_executor_class | -| ------------ | ------------------------------------------------------------ | -| 描述 
| 序列分区哈希函数 | -| 类型 | String | -| 默认值 | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- schema_region_group_extension_policy - -| 名字 | schema_region_group_extension_policy | -| ------------ | ------------------------------------ | -| 描述 | SchemaRegionGroup 的扩容策略 | -| 类型 | string | -| 默认值 | AUTO | -| 改后生效方式 | 重启服务生效 | - -- default_schema_region_group_num_per_database - -| 名字 | default_schema_region_group_num_per_database | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当选用 CUSTOM-SchemaRegionGroup 扩容策略时,此参数为每个 Database 拥有的 SchemaRegionGroup 数量;当选用 AUTO-SchemaRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 SchemaRegionGroup 数量 | -| 类型 | int | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_per_data_node - -| 名字 | schema_region_per_data_node | -| ------------ | -------------------------------------------------- | -| 描述 | 期望每个 DataNode 可管理的 SchemaRegion 的最大数量 | -| 类型 | double | -| 默认值 | 1.0 | -| 改后生效方式 | 重启服务生效 | - -- data_region_group_extension_policy - -| 名字 | data_region_group_extension_policy | -| ------------ | ---------------------------------- | -| 描述 | DataRegionGroup 的扩容策略 | -| 类型 | string | -| 默认值 | AUTO | -| 改后生效方式 | 重启服务生效 | - -- default_data_region_group_num_per_database - -| 名字 | default_data_region_group_per_database | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当选用 CUSTOM-DataRegionGroup 扩容策略时,此参数为每个 Database 拥有的 DataRegionGroup 数量;当选用 AUTO-DataRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 DataRegionGroup 数量 | -| 类型 | int | -| 默认值 | 2 | -| 改后生效方式 | 重启服务生效 | - -- data_region_per_data_node - -| 名字 | data_region_per_data_node | -| ------------ | ------------------------------------------------ | -| 描述 | 期望每个 DataNode 可管理的 DataRegion 的最大数量 | -| 类型 | double | -| 默认值 | CPU 核心数的一半 | -| 改后生效方式 | 重启服务生效 | - -- enable_auto_leader_balance_for_ratis_consensus - -| 名字 | enable_auto_leader_balance_for_ratis_consensus | -| ------------ | ---------------------------------------------- | -| 描述 | 是否为 Ratis 共识协议开启自动均衡 leader 策略 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -- enable_auto_leader_balance_for_iot_consensus - -| 名字 | enable_auto_leader_balance_for_iot_consensus | -| ------------ | -------------------------------------------- | -| 描述 | 是否为 IoT 共识协议开启自动均衡 leader 策略 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -### 3.13 集群管理 - -- time_partition_origin - -| 名字 | time_partition_origin | -| ------------ | ------------------------------------------------------------ | -| 描述 | Database 数据时间分区的起始点,即从哪个时间点开始计算时间分区。 | -| 类型 | Long | -| 单位 | 毫秒 | -| 默认值 | 0 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- time_partition_interval - -| 名字 | time_partition_interval | -| ------------ | ------------------------------- | -| 描述 | Database 默认的数据时间分区间隔 | -| 类型 | Long | -| 单位 | 毫秒 | -| 默认值 | 604800000 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- heartbeat_interval_in_ms - -| 名字 | heartbeat_interval_in_ms | -| ------------ | ------------------------ | -| 描述 | 集群节点间的心跳间隔 | -| 类型 | Long | -| 单位 | ms | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- disk_space_warning_threshold - -| 名字 | disk_space_warning_threshold | -| ------------ | ---------------------------- | -| 描述 | DataNode 磁盘剩余阈值 | -| 类型 | double(percentage) | -| 默认值 | 0.05 | -| 改后生效方式 | 重启服务生效 | - -### 3.14 内存控制配置 - -- datanode_memory_proportion - -| 名字 | datanode_memory_proportion | -| ------------ | ---------------------------------------------------- | -| 描述 | 存储引擎、查询引擎、元数据、共识、流处理引擎和空闲内存比例 | -| 类型 | Ratio | -| 默认值 | 
-### 3.14 内存控制配置
-
-- datanode_memory_proportion
-
-| 名字 | datanode_memory_proportion |
-| ------------ | ------------ |
-| 描述 | 存储引擎、查询引擎、元数据、共识、流处理引擎和空闲内存比例 |
-| 类型 | Ratio |
-| 默认值 | 3:3:1:1:1:1 |
-| 改后生效方式 | 重启服务生效 |
-
-- schema_memory_proportion
-
-| 名字 | schema_memory_proportion |
-| ------------ | ------------ |
-| 描述 | Schema 相关的内存如何在 SchemaRegion、SchemaCache 和 PartitionCache 之间分配 |
-| 类型 | Ratio |
-| 默认值 | 5:4:1 |
-| 改后生效方式 | 重启服务生效 |
-
-- storage_engine_memory_proportion
-
-| 名字 | storage_engine_memory_proportion |
-| ------------ | ------------ |
-| 描述 | 写入和合并占存储内存比例 |
-| 类型 | Ratio |
-| 默认值 | 8:2 |
-| 改后生效方式 | 重启服务生效 |
-
-- write_memory_proportion
-
-| 名字 | write_memory_proportion |
-| ------------ | ------------ |
-| 描述 | Memtable 和 TimePartitionInfo 占写入内存比例 |
-| 类型 | Ratio |
-| 默认值 | 19:1 |
-| 改后生效方式 | 重启服务生效 |
-
-- primitive_array_size
-
-| 名字 | primitive_array_size |
-| ------------ | ------------ |
-| 描述 | 数组池中的原始数组大小(每个数组的长度) |
-| 类型 | int32 |
-| 默认值 | 64 |
-| 改后生效方式 | 重启服务生效 |
-
-- chunk_metadata_size_proportion
-
-| 名字 | chunk_metadata_size_proportion |
-| ------------ | ------------ |
-| 描述 | 在数据压缩过程中,用于存储块元数据的内存比例 |
-| 类型 | Double |
-| 默认值 | 0.1 |
-| 改后生效方式 | 重启服务生效 |
-
-- flush_proportion
-
-| 名字 | flush_proportion |
-| ------------ | ------------ |
-| 描述 | 触发刷盘(flush disk)的写入内存占比,默认 0.4;若写入负载极高(比如 batch=1000),可以设置为低于默认值,比如 0.2 |
-| 类型 | Double |
-| 默认值 | 0.4 |
-| 改后生效方式 | 重启服务生效 |
-
-- buffered_arrays_memory_proportion
-
-| 名字 | buffered_arrays_memory_proportion |
-| ------------ | ------------ |
-| 描述 | 为缓冲数组分配的写入内存比例,默认为 0.6 |
-| 类型 | Double |
-| 默认值 | 0.6 |
-| 改后生效方式 | 重启服务生效 |
-
-- reject_proportion
-
-| 名字 | reject_proportion |
-| ------------ | ------------ |
-| 描述 | 拒绝插入的写入内存占比,默认 0.8;若写入负载极高(比如 batch=1000)并且物理内存足够大,可以设置为高于默认值,如 0.9 |
-| 类型 | Double |
-| 默认值 | 0.8 |
-| 改后生效方式 | 重启服务生效 |
-
-- device_path_cache_proportion
-
-| 名字 | device_path_cache_proportion |
-| ------------ | ------------ |
-| 描述 | 在内存中分配给设备路径缓存(DevicePathCache)的比例 |
-| 类型 | Double |
-| 默认值 | 0.05 |
-| 改后生效方式 | 重启服务生效 |
-
-- write_memory_variation_report_proportion
-
-| 名字 | write_memory_variation_report_proportion |
-| ------------ | ------------ |
-| 描述 | 如果 DataRegion 的内存增加超过写入可用内存的一定比例,则向系统报告。默认值为 0.001 |
-| 类型 | Double |
-| 默认值 | 0.001 |
-| 改后生效方式 | 重启服务生效 |
-
-- check_period_when_insert_blocked
-
-| 名字 | check_period_when_insert_blocked |
-| ------------ | ------------ |
-| 描述 | 插入被拒绝后,再次检查系统状态前的等待时间(毫秒),默认为 50。若插入经常被拒绝且读取负载较低,可以适当调大。 |
-| 类型 | int32 |
-| 默认值 | 50 |
-| 改后生效方式 | 重启服务生效 |
-
-- io_task_queue_size_for_flushing
-
-| 名字 | io_task_queue_size_for_flushing |
-| ------------ | ------------ |
-| 描述 | ioTaskQueue 的大小。默认值为 10。 |
-| 类型 | int32 |
-| 默认值 | 10 |
-| 改后生效方式 | 重启服务生效 |
-
-- enable_query_memory_estimation
-
-| 名字 | enable_query_memory_estimation |
-| ------------ | ------------ |
-| 描述 | 开启后会预估每次查询的内存使用量,如果超过可用内存,会拒绝本次查询 |
-| 类型 | bool |
-| 默认值 | true |
-| 改后生效方式 | 热加载 |
-
-### 3.15 元数据引擎配置
-
-- schema_engine_mode
-
-| 名字 | schema_engine_mode |
-| ------------ | ------------ |
-| 描述 | 元数据引擎的运行模式,支持 Memory 和 PBTree;PBTree 模式下支持将内存中暂时不用的序列元数据实时置换到磁盘上,需要使用时再加载进内存;此参数在集群中所有的 DataNode 上务必保持相同。 |
-| 类型 | string |
-| 默认值 | Memory |
-| 改后生效方式 | 
仅允许在第一次启动服务前修改 | - -- partition_cache_size - -| 名字 | partition_cache_size | -| ------------ | ------------------------------ | -| 描述 | 分区信息缓存的最大缓存条目数。 | -| 类型 | Int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- sync_mlog_period_in_ms - -| 名字 | sync_mlog_period_in_ms | -| ------------ | ------------------------------------------------------------ | -| 描述 | mlog定期刷新到磁盘的周期,单位毫秒。如果该参数为0,则表示每次对元数据的更新操作都会被立即写到磁盘上。 | -| 类型 | Int64 | -| 默认值 | 100 | -| 改后生效方式 | 重启服务生效 | - -- tag_attribute_flush_interval - -| 名字 | tag_attribute_flush_interval | -| ------------ | -------------------------------------------------- | -| 描述 | 标签和属性记录的间隔数,达到此记录数量时将强制刷盘 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- tag_attribute_total_size - -| 名字 | tag_attribute_total_size | -| ------------ | ---------------------------------------- | -| 描述 | 每个时间序列标签和属性的最大持久化字节数 | -| 类型 | int32 | -| 默认值 | 700 | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- max_measurement_num_of_internal_request - -| 名字 | max_measurement_num_of_internal_request | -| ------------ | ------------------------------------------------------------ | -| 描述 | 一次注册序列请求中若物理量过多,在系统内部执行时将被拆分为若干个轻量级的子请求,每个子请求中的物理量数目不超过此参数设置的最大值。 | -| 类型 | Int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- datanode_schema_cache_eviction_policy - -| 名字 | datanode_schema_cache_eviction_policy | -| ------------ | ----------------------------------------------------- | -| 描述 | 当 Schema 缓存达到其最大容量时,Schema 缓存的淘汰策略 | -| 类型 | String | -| 默认值 | FIFO | -| 改后生效方式 | 重启服务生效 | - -- cluster_timeseries_limit_threshold - -| 名字 | cluster_timeseries_limit_threshold | -| ------------ | ---------------------------------- | -| 描述 | 集群中可以创建的时间序列的最大数量 | -| 类型 | Int32 | -| 默认值 | -1 | -| 改后生效方式 | 重启服务生效 | - -- cluster_device_limit_threshold - -| 名字 | cluster_device_limit_threshold | -| ------------ | ------------------------------ | -| 描述 | 集群中可以创建的最大设备数量 | -| 类型 | Int32 | -| 默认值 | -1 | -| 改后生效方式 | 重启服务生效 | - -- database_limit_threshold - -| 名字 | database_limit_threshold | -| ------------ | ------------------------------ | -| 描述 | 集群中可以创建的最大数据库数量 | -| 类型 | Int32 | -| 默认值 | -1 | -| 改后生效方式 | 重启服务生效 | - -### 3.16 自动推断数据类型 - -- enable_auto_create_schema - -| 名字 | enable_auto_create_schema | -| ------------ | -------------------------------------- | -| 描述 | 当写入的序列不存在时,是否自动创建序列 | -| 取值 | true or false | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -- default_storage_group_level - -| 名字 | default_storage_group_level | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当写入的数据不存在且自动创建序列时,若需要创建相应的 database,将序列路径的哪一层当做 database。例如,如果我们接到一个新序列 root.sg0.d1.s2, 并且 level=1, 那么 root.sg0 被视为database(因为 root 是 level 0 层) | -| 取值 | int32 | -| 默认值 | 1 | -| 改后生效方式 | 重启服务生效 | - -- boolean_string_infer_type - -| 名字 | boolean_string_infer_type | -| ------------ | ------------------------------------------ | -| 描述 | "true" 或者 "false" 字符串被推断的数据类型 | -| 取值 | BOOLEAN 或者 TEXT | -| 默认值 | BOOLEAN | -| 改后生效方式 | 热加载 | - -- integer_string_infer_type - -| 名字 | integer_string_infer_type | -| ------------ | --------------------------------- | -| 描述 | 整型字符串推断的数据类型 | -| 取值 | INT32, INT64, FLOAT, DOUBLE, TEXT | -| 默认值 | DOUBLE | -| 改后生效方式 | 热加载 | - -- floating_string_infer_type - -| 名字 | floating_string_infer_type | -| ------------ | ----------------------------- | -| 描述 | "6.7"等字符串被推断的数据类型 | -| 取值 | DOUBLE, FLOAT or TEXT | -| 默认值 | DOUBLE | -| 改后生效方式 | 热加载 | - -- nan_string_infer_type - -| 名字 | nan_string_infer_type | -| ------------ | ---------------------------- | -| 描述 | "NaN" 字符串被推断的数据类型 | -| 取值 | DOUBLE, 
FLOAT or TEXT | -| 默认值 | DOUBLE | -| 改后生效方式 | 热加载 | - -- default_boolean_encoding - -| 名字 | default_boolean_encoding | -| ------------ | ------------------------ | -| 描述 | BOOLEAN 类型编码格式 | -| 取值 | PLAIN, RLE | -| 默认值 | RLE | -| 改后生效方式 | 热加载 | - -- default_int32_encoding - -| 名字 | default_int32_encoding | -| ------------ | -------------------------------------- | -| 描述 | int32 类型编码格式 | -| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | -| 默认值 | TS_2DIFF | -| 改后生效方式 | 热加载 | - -- default_int64_encoding - -| 名字 | default_int64_encoding | -| ------------ | -------------------------------------- | -| 描述 | int64 类型编码格式 | -| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | -| 默认值 | TS_2DIFF | -| 改后生效方式 | 热加载 | - -- default_float_encoding - -| 名字 | default_float_encoding | -| ------------ | ----------------------------- | -| 描述 | float 类型编码格式 | -| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | -| 默认值 | GORILLA | -| 改后生效方式 | 热加载 | - -- default_double_encoding - -| 名字 | default_double_encoding | -| ------------ | ----------------------------- | -| 描述 | double 类型编码格式 | -| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | -| 默认值 | GORILLA | -| 改后生效方式 | 热加载 | - -- default_text_encoding - -| 名字 | default_text_encoding | -| ------------ | --------------------- | -| 描述 | text 类型编码格式 | -| 取值 | PLAIN | -| 默认值 | PLAIN | -| 改后生效方式 | 热加载 | - -* boolean_compressor - -| 名字 | boolean_compressor | -| -------------- | ----------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,BOOLEAN 数据类型的压缩方式 (V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* int32_compressor - -| 名字 | int32_compressor | -| -------------- | ------------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,INT32/DATE 数据类型的压缩方式(V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* int64_compressor - -| 名字 | int64_compressor | -| -------------- | ------------------------------------------------------------------------------ | -| 描述 | 启用自动创建模式时,INT64/TIMESTAMP 数据类型的压缩方式(V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* float_compressor - -| 名字 | float_compressor | -| -------------- | -------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,FLOAT 数据类型的压缩方式(V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* double_compressor - -| 名字 | double_compressor | -| -------------- | --------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,DOUBLE 数据类型的压缩方式(V2.0.6 版本开始支持) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -* text_compressor - -| 名字 | text_compressor | -| -------------- | -------------------------------------------------------------------------------- | -| 描述 | 启用自动创建模式时,TEXT/BINARY/BLOB 数据类型的压缩方式(V2.0.6 版本开始支持 ) | -| 类型 | String | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - - - -### 3.17 查询配置 - -- read_consistency_level - -| 名字 | read_consistency_level | -| ------------ | ------------------------------------------------------------ | -| 描述 | 查询一致性等级,取值 “strong” 时从 Leader 副本查询,取值 “weak” 时随机查询一个副本。 | -| 类型 | String | -| 默认值 | strong | -| 改后生效方式 | 重启服务生效 | - -- meta_data_cache_enable - -| 名字 | meta_data_cache_enable | -| ------------ | ------------------------------------------------------------ | -| 描述 | 是否缓存元数据(包括 BloomFilter、Chunk Metadata 和 TimeSeries Metadata。) | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -- chunk_timeseriesmeta_free_memory_proportion - -| 名字 | chunk_timeseriesmeta_free_memory_proportion | -| ------------ | 
------------ |
-| 描述 | 读取内存的分配比例,依次为 BloomFilterCache、ChunkCache、TimeseriesMetadataCache、数据集查询使用的内存和可供查询使用的空闲内存。参数形式为 a : b : c : d : e,其中 a、b、c、d、e 为整数,例如 "1 : 1 : 1 : 1 : 1"、"1 : 100 : 200 : 300 : 400"。 |
-| 类型 | String |
-| 默认值 | 1 : 100 : 200 : 300 : 400 |
-| 改后生效方式 | 重启服务生效 |
-
-- enable_last_cache
-
-| 名字 | enable_last_cache |
-| ------------ | ------------ |
-| 描述 | 是否开启最新点缓存 |
-| 类型 | Boolean |
-| 默认值 | true |
-| 改后生效方式 | 重启服务生效 |
-
-- mpp_data_exchange_core_pool_size
-
-| 名字 | mpp_data_exchange_core_pool_size |
-| ------------ | ------------ |
-| 描述 | MPP 数据交换线程池核心线程数 |
-| 类型 | int32 |
-| 默认值 | 10 |
-| 改后生效方式 | 重启服务生效 |
-
-- mpp_data_exchange_max_pool_size
-
-| 名字 | mpp_data_exchange_max_pool_size |
-| ------------ | ------------ |
-| 描述 | MPP 数据交换线程池最大线程数 |
-| 类型 | int32 |
-| 默认值 | 10 |
-| 改后生效方式 | 重启服务生效 |
-
-- mpp_data_exchange_keep_alive_time_in_ms
-
-| 名字 | mpp_data_exchange_keep_alive_time_in_ms |
-| ------------ | ------------ |
-| 描述 | MPP 数据交换最大等待时间 |
-| 类型 | int32 |
-| 默认值 | 1000 |
-| 改后生效方式 | 重启服务生效 |
-
-- driver_task_execution_time_slice_in_ms
-
-| 名字 | driver_task_execution_time_slice_in_ms |
-| ------------ | ------------ |
-| 描述 | 单个 DriverTask 最长执行时间(ms) |
-| 类型 | int32 |
-| 默认值 | 200 |
-| 改后生效方式 | 重启服务生效 |
-
-- max_tsblock_size_in_bytes
-
-| 名字 | max_tsblock_size_in_bytes |
-| ------------ | ------------ |
-| 描述 | 单个 TsBlock 的最大容量(byte) |
-| 类型 | int32 |
-| 默认值 | 131072 |
-| 改后生效方式 | 重启服务生效 |
-
-- max_tsblock_line_numbers
-
-| 名字 | max_tsblock_line_numbers |
-| ------------ | ------------ |
-| 描述 | 单个 TsBlock 的最大行数 |
-| 类型 | int32 |
-| 默认值 | 1000 |
-| 改后生效方式 | 重启服务生效 |
-
-- slow_query_threshold
-
-| 名字 | slow_query_threshold |
-| ------------ | ------------ |
-| 描述 | 慢查询的时间阈值。单位:毫秒。 |
-| 类型 | long |
-| 默认值 | 10000 |
-| 改后生效方式 | 热加载 |
-
-- query_cost_stat_window
-
-| 名字 | query_cost_stat_window |
-| ------------ | ------------ |
-| 描述 | 查询耗时统计的窗口,单位为分钟。 |
-| 类型 | Int32 |
-| 默认值 | 0 |
-| 改后生效方式 | 热加载 |
-
-- query_timeout_threshold
-
-| 名字 | query_timeout_threshold |
-| ------------ | ------------ |
-| 描述 | 查询的最大执行时间。单位:毫秒。 |
-| 类型 | Int32 |
-| 默认值 | 60000 |
-| 改后生效方式 | 重启服务生效 |
-
-- max_allowed_concurrent_queries
-
-| 名字 | max_allowed_concurrent_queries |
-| ------------ | ------------ |
-| 描述 | 允许的最大并发查询数量。 |
-| 类型 | Int32 |
-| 默认值 | 1000 |
-| 改后生效方式 | 重启服务生效 |
-
-- query_thread_count
-
-| 名字 | query_thread_count |
-| ------------ | ------------ |
-| 描述 | 当 IoTDB 对内存中的数据进行查询时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。 |
-| 类型 | Int32 |
-| 默认值 | 0 |
-| 改后生效方式 | 重启服务生效 |
-
-- degree_of_query_parallelism
-
-| 名字 | degree_of_query_parallelism |
-| ------------ | ------------ |
-| 描述 | 设置单个查询片段实例将创建的 pipeline 驱动程序数量,也就是查询操作的并行度。 |
-| 类型 | Int32 |
-| 默认值 | 0 |
-| 改后生效方式 | 重启服务生效 |
-
-- mode_map_size_threshold
-
-| 名字 | mode_map_size_threshold |
-| ------------ | ------------ |
-| 描述 | 计算 MODE 聚合函数时,计数映射可以增长到的阈值 |
-| 类型 | Int32 |
-| 默认值 | 10000 |
-| 改后生效方式 | 重启服务生效 |
-
-- batch_size
-
-| 名字 | batch_size |
-| ------------ | ------------ |
-| 描述 | 服务器中每次迭代的数据量(数据条目,即不同时间戳的数量)。 |
-| 类型 | Int32 |
-| 默认值 | 100000 |
-| 改后生效方式 | 重启服务生效 |
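针对查询保护的一个配置示意(取值即上文默认值;其中 slow_query_threshold 支持热加载,可按上文"改后生效方式"所述在运行时调整):

```properties
# 执行时间超过 10 秒(10000 毫秒)的查询会被记录为慢查询(热加载)
slow_query_threshold=10000
# 查询最长执行 60 秒(60000 毫秒),超时即终止(重启服务生效)
query_timeout_threshold=60000
```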
-- sort_buffer_size_in_bytes
-
-| 名字 | sort_buffer_size_in_bytes |
-| ------------ | ------------ |
-| 描述 | 设置外部排序操作中使用的内存缓冲区大小 |
-| 类型 | long |
-| 默认值 | 1048576(V2.0.6 之前版本)
0(V2.0.6 及之后版本),当值小于等于 0 时,由系统自动进行计算,计算公式为:`sort_buffer_size_in_bytes = Math.min(32 * 1024 * 1024, 堆内内存 * 查询引擎内存比例 * 查询执行内存比例 / 查询线程数 / 2)` | -| 改后生效方式 | 热加载 | - -- merge_threshold_of_explain_analyze - -| 名字 | merge_threshold_of_explain_analyze | -| ------------ | ------------------------------------------------------------ | -| 描述 | 用于设置在 `EXPLAIN ANALYZE` 语句的结果集中操作符(operator)数量的合并阈值。 | -| 类型 | int | -| 默认值 | 10 | -| 改后生效方式 | 热加载 | - -### 3.18 TTL配置 - -- ttl_check_interval - -| 名字 | ttl_check_interval | -| ------------ | -------------------------------------- | -| 描述 | ttl 检查任务的间隔,单位 ms,默认为 2h | -| 类型 | int | -| 默认值 | 7200000 | -| 改后生效方式 | 重启服务生效 | - -- max_expired_time - -| 名字 | max_expired_time | -| ------------ | ------------------------------------------------------------ | -| 描述 | 如果一个文件中存在设备已经过期超过此时间,那么这个文件将被立即整理。单位 ms,默认为一个月 | -| 类型 | int | -| 默认值 | 2592000000 | -| 改后生效方式 | 重启服务生效 | - -- expired_data_ratio - -| 名字 | expired_data_ratio | -| ------------ | ------------------------------------------------------------ | -| 描述 | 过期设备比例。如果一个文件中过期设备的比率超过这个值,那么这个文件中的过期数据将通过 compaction 清理。 | -| 类型 | float | -| 默认值 | 0.3 | -| 改后生效方式 | 重启服务生效 | - -### 3.19 存储引擎配置 - -- timestamp_precision - -| 名字 | timestamp_precision | -| ------------ | ---------------------------- | -| 描述 | 时间戳精度,支持 ms、us、ns | -| 类型 | String | -| 默认值 | ms | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- timestamp_precision_check_enabled - -| 名字 | timestamp_precision_check_enabled | -| ------------ | --------------------------------- | -| 描述 | 用于控制是否启用时间戳精度检查 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 仅允许在第一次启动服务前修改 | - -- max_waiting_time_when_insert_blocked - -| 名字 | max_waiting_time_when_insert_blocked | -| ------------ | ----------------------------------------------- | -| 描述 | 当插入请求等待超过这个时间,则抛出异常,单位 ms | -| 类型 | Int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- handle_system_error - -| 名字 | handle_system_error | -| ------------ | ------------------------------------ | -| 描述 | 当系统遇到不可恢复的错误时的处理方法 | -| 类型 | String | -| 默认值 | CHANGE_TO_READ_ONLY | -| 改后生效方式 | 重启服务生效 | - -- enable_timed_flush_seq_memtable - -| 名字 | enable_timed_flush_seq_memtable | -| ------------ | ------------------------------- | -| 描述 | 是否开启定时刷盘顺序 memtable | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- seq_memtable_flush_interval_in_ms - -| 名字 | seq_memtable_flush_interval_in_ms | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | -| 类型 | long | -| 默认值 | 600000 | -| 改后生效方式 | 热加载 | - -- seq_memtable_flush_check_interval_in_ms - -| 名字 | seq_memtable_flush_check_interval_in_ms | -| ------------ | ---------------------------------------- | -| 描述 | 检查顺序 memtable 是否需要刷盘的时间间隔 | -| 类型 | long | -| 默认值 | 30000 | -| 改后生效方式 | 热加载 | - -- enable_timed_flush_unseq_memtable - -| 名字 | enable_timed_flush_unseq_memtable | -| ------------ | --------------------------------- | -| 描述 | 是否开启定时刷新乱序 memtable | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- unseq_memtable_flush_interval_in_ms - -| 名字 | unseq_memtable_flush_interval_in_ms | -| ------------ | ------------------------------------------------------------ | -| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | -| 类型 | long | -| 默认值 | 600000 | -| 改后生效方式 | 热加载 | - -- unseq_memtable_flush_check_interval_in_ms - -| 名字 | unseq_memtable_flush_check_interval_in_ms | -| ------------ | ----------------------------------------- | -| 描述 | 检查乱序 memtable 是否需要刷盘的时间间隔 | -| 类型 | long | -| 默认值 | 30000 | -| 改后生效方式 | 热加载 | - -- 
tvlist_sort_algorithm
-
-| 名字 | tvlist_sort_algorithm |
-| ------------ | ------------ |
-| 描述 | memtable 中数据的排序方法 |
-| 类型 | String |
-| 默认值 | TIM |
-| 改后生效方式 | 重启服务生效 |
-
-- avg_series_point_number_threshold
-
-| 名字 | avg_series_point_number_threshold |
-| ------------ | ------------ |
-| 描述 | 内存中平均每个时间序列点数最大值,达到时触发 flush |
-| 类型 | int32 |
-| 默认值 | 100000 |
-| 改后生效方式 | 重启服务生效 |
-
-- flush_thread_count
-
-| 名字 | flush_thread_count |
-| ------------ | ------------ |
-| 描述 | 当 IoTDB 将内存中的数据写入磁盘时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。默认值为 0。 |
-| 类型 | int32 |
-| 默认值 | 0 |
-| 改后生效方式 | 重启服务生效 |
-
-- enable_partial_insert
-
-| 名字 | enable_partial_insert |
-| ------------ | ------------ |
-| 描述 | 在一次 insert 请求中,如果部分测点写入失败,是否继续写入其他测点。 |
-| 类型 | Boolean |
-| 默认值 | true |
-| 改后生效方式 | 重启服务生效 |
-
-- recovery_log_interval_in_ms
-
-| 名字 | recovery_log_interval_in_ms |
-| ------------ | ------------ |
-| 描述 | data region 恢复过程中打印日志信息的间隔 |
-| 类型 | Int32 |
-| 默认值 | 5000 |
-| 改后生效方式 | 重启服务生效 |
-
-- 0.13_data_insert_adapt
-
-| 名字 | 0.13_data_insert_adapt |
-| ------------ | ------------ |
-| 描述 | 如果 0.13 版本客户端进行写入,需要将此配置项设置为 true |
-| 类型 | Boolean |
-| 默认值 | false |
-| 改后生效方式 | 重启服务生效 |
-
-- enable_tsfile_validation
-
-| 名字 | enable_tsfile_validation |
-| ------------ | ------------ |
-| 描述 | Flush、Load 或合并后验证 tsfile 正确性 |
-| 类型 | boolean |
-| 默认值 | false |
-| 改后生效方式 | 热加载 |
-
-- tier_ttl_in_ms
-
-| 名字 | tier_ttl_in_ms |
-| ------------ | ------------ |
-| 描述 | 定义每个层级负责的数据范围,通过 TTL 表示 |
-| 类型 | long |
-| 默认值 | -1 |
-| 改后生效方式 | 重启服务生效 |
-
-* max_object_file_size_in_byte
-
-| 名字 | max\_object\_file\_size\_in\_byte |
-| ------------ | ------------ |
-| 描述 | 单对象文件的最大尺寸限制(V2.0.8-beta 版本起支持) |
-| 类型 | long |
-| 默认值 | 4294967296 |
-| 改后生效方式 | 热加载 |
-
-* restrict_object_limit
-
-| 名字 | restrict\_object\_limit |
-| ------------ | ------------ |
-| 描述 | 是否对 OBJECT 类型相关的表名、列名和设备名称施加限制(V2.0.8-beta 版本起支持)。设置为 false 时无特殊限制;当设置为 true 且表中包含 OBJECT 列时,需遵循以下限制:
1. 命名规范:TAG 列的值、表名和字段名禁止使用 “.” 或 “..”,且不得包含 “./” 或 “.\” 字符,否则元数据创建将失败。若名称包含文件系统不支持的字符,则会在数据写入时报错。
2. 大小写敏感:如果底层文件系统不区分大小写,则设备标识符(如 'd1' 与 'D1')将被视为相同。在此情况下,若创建此类名称相似的设备,其 OBJECT 数据文件可能互相覆盖,导致数据错误。
3. 存储路径:OBJECT 类型数据的实际存储路径格式为:`${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin`。 |
-| 类型 | boolean |
-| 默认值 | false |
-| 改后生效方式 | 仅允许在第一次启动服务前修改 |
-
-### 3.20 合并配置
-
-- enable_seq_space_compaction
-
-| 名字 | enable_seq_space_compaction |
-| ------------ | ------------ |
-| 描述 | 顺序空间内合并,开启顺序文件之间的合并 |
-| 类型 | Boolean |
-| 默认值 | true |
-| 改后生效方式 | 热加载 |
-
-- enable_unseq_space_compaction
-
-| 名字 | enable_unseq_space_compaction |
-| ------------ | ------------ |
-| 描述 | 乱序空间内合并,开启乱序文件之间的合并 |
-| 类型 | Boolean |
-| 默认值 | true |
-| 改后生效方式 | 热加载 |
-
-- enable_cross_space_compaction
-
-| 名字 | enable_cross_space_compaction |
-| ------------ | ------------ |
-| 描述 | 跨空间合并,开启将乱序文件合并到顺序文件中 |
-| 类型 | Boolean |
-| 默认值 | true |
-| 改后生效方式 | 热加载 |
-
-- enable_auto_repair_compaction
-
-| 名字 | enable_auto_repair_compaction |
-| ------------ | ------------ |
-| 描述 | 启用通过合并操作自动修复未排序文件的功能 |
-| 类型 | Boolean |
-| 默认值 | true |
-| 改后生效方式 | 热加载 |
-
-- cross_selector
-
-| 名字 | cross_selector |
-| ------------ | ------------ |
-| 描述 | 跨空间合并任务的选择器 |
-| 类型 | String |
-| 默认值 | rewrite |
-| 改后生效方式 | 重启服务生效 |
-
-- cross_performer
-
-| 名字 | cross_performer |
-| ------------ | ------------ |
-| 描述 | 跨空间合并任务的执行器,可选项:read_point 和 fast |
-| 类型 | String |
-| 默认值 | fast |
-| 改后生效方式 | 热加载 |
-
-- inner_seq_selector
-
-| 名字 | inner_seq_selector |
-| ------------ | ------------ |
-| 描述 | 顺序空间内合并任务的选择器,可选 size_tiered_single_target、size_tiered_multi_target |
-| 类型 | String |
-| 默认值 | size_tiered_multi_target |
-| 改后生效方式 | 热加载 |
-
-- inner_seq_performer
-
-| 名字 | inner_seq_performer |
-| ------------ | ------------ |
-| 描述 | 顺序空间内合并任务的执行器,可选项是 read_chunk 和 fast |
-| 类型 | String |
-| 默认值 | read_chunk |
-| 改后生效方式 | 热加载 |
-
-- inner_unseq_selector
-
-| 名字 | inner_unseq_selector |
-| ------------ | ------------ |
-| 描述 | 乱序空间内合并任务的选择器,可选 size_tiered_single_target、size_tiered_multi_target |
-| 类型 | String |
-| 默认值 | size_tiered_multi_target |
-| 改后生效方式 | 热加载 |
-
-- inner_unseq_performer
-
-| 名字 | inner_unseq_performer |
-| ------------ | ------------ |
-| 描述 | 乱序空间内合并任务的执行器,可选项是 read_point 和 fast |
-| 类型 | String |
-| 默认值 | fast |
-| 改后生效方式 | 热加载 |
-
-- compaction_priority
-
-| 名字 | compaction_priority |
-| ------------ | ------------ |
-| 描述 | 合并时的优先级。INNER_CROSS:优先执行空间内合并,优先减少文件数量;CROSS_INNER:优先执行跨空间合并,优先清理乱序文件;BALANCE:交替执行两种合并类型。 |
-| 类型 | String |
-| 默认值 | INNER_CROSS |
-| 改后生效方式 | 重启服务生效 |
-
-- candidate_compaction_task_queue_size
-
-| 名字 | candidate_compaction_task_queue_size |
-| ------------ | ------------ |
-| 描述 | 待选合并任务队列容量 |
-| 类型 | int32 |
-| 默认值 | 50 |
-| 改后生效方式 | 重启服务生效 |
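下面是一个通过上述开关控制合并行为的配置示意(取值即上文默认值,仅供参考):

```properties
# 三类合并均可在运行时开关(热加载)
enable_seq_space_compaction=true
enable_unseq_space_compaction=true
enable_cross_space_compaction=true
# 合并优先级,可选 INNER_CROSS / CROSS_INNER / BALANCE(重启服务生效)
compaction_priority=INNER_CROSS
```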
-- target_compaction_file_size
-
-| 名字 | target_compaction_file_size |
-| ------------ | ------------ |
-| 描述 | 该参数作用于两个场景:1. 空间内合并的目标文件大小;2. 跨空间合并中待选顺序文件的大小需小于 target_compaction_file_size * 1.5。多数情况下,跨空间合并的目标文件大小不会超过此阈值;即便超出,幅度也不会过大。默认值:2147483648(2GB),单位:byte |
-| 类型 | Long |
-| 默认值 | 2147483648 |
-| 改后生效方式 | 热加载 |
-
-- inner_compaction_total_file_size_threshold
-
-| 名字 | inner_compaction_total_file_size_threshold |
-| ------------ | ------------ |
-| 描述 | 空间内合并的文件总大小阈值,单位:byte |
-| 类型 | Long |
-| 默认值 | 10737418240 |
-| 改后生效方式 | 热加载 |
-
-- inner_compaction_total_file_num_threshold
-
-| 名字 | inner_compaction_total_file_num_threshold |
-| ------------ | ------------ |
-| 描述 | 空间内合并的文件总数阈值 |
-| 类型 | int32 |
-| 默认值 | 100 |
-| 改后生效方式 | 热加载 |
-
-- max_level_gap_in_inner_compaction
-
-| 名字 | max_level_gap_in_inner_compaction |
-| ------------ | ------------ |
-| 描述 | 空间内合并筛选的最大层级差 |
-| 类型 | int32 |
-| 默认值 | 2 |
-| 改后生效方式 | 热加载 |
-
-- target_chunk_size
-
-| 名字 | target_chunk_size |
-| ------------ | ------------ |
-| 描述 | 刷盘与合并操作的目标数据块大小,若内存表中某条时序数据的大小超过该值,数据会被刷盘至多个数据块 |
-| 类型 | Long |
-| 默认值 | 1600000 |
-| 改后生效方式 | 重启服务生效 |
-
-- target_chunk_point_num
-
-| 名字 | target_chunk_point_num |
-| ------------ | ------------ |
-| 描述 | 刷盘与合并操作中单个数据块的目标点数,若内存表中某条时序数据的点数超过该值,数据会被刷盘至多个数据块中 |
-| 类型 | Long |
-| 默认值 | 100000 |
-| 改后生效方式 | 重启服务生效 |
-
-- chunk_size_lower_bound_in_compaction
-
-| 名字 | chunk_size_lower_bound_in_compaction |
-| ------------ | ------------ |
-| 描述 | 若数据块大小低于此阈值,则会被反序列化为数据点,默认值为 128 字节 |
-| 类型 | Long |
-| 默认值 | 128 |
-| 改后生效方式 | 重启服务生效 |
-
-- chunk_point_num_lower_bound_in_compaction
-
-| 名字 | chunk_point_num_lower_bound_in_compaction |
-| ------------ | ------------ |
-| 描述 | 若数据块内的数据点数低于此阈值,则会被反序列化为数据点 |
-| 类型 | Long |
-| 默认值 | 100 |
-| 改后生效方式 | 重启服务生效 |
-
-- inner_compaction_candidate_file_num
-
-| 名字 | inner_compaction_candidate_file_num |
-| ------------ | ------------ |
-| 描述 | 空间内合并待选文件筛选的文件数量要求 |
-| 类型 | int32 |
-| 默认值 | 30 |
-| 改后生效方式 | 热加载 |
-
-- max_cross_compaction_candidate_file_num
-
-| 名字 | max_cross_compaction_candidate_file_num |
-| ------------ | ------------ |
-| 描述 | 跨空间合并待选文件筛选的文件数量上限 |
-| 类型 | int32 |
-| 默认值 | 500 |
-| 改后生效方式 | 热加载 |
-
-- max_cross_compaction_candidate_file_size
-
-| 名字 | max_cross_compaction_candidate_file_size |
-| ------------ | ------------ |
-| 描述 | 跨空间合并待选文件筛选的总大小上限 |
-| 类型 | Long |
-| 默认值 | 5368709120 |
-| 改后生效方式 | 热加载 |
-
-- min_cross_compaction_unseq_file_level
-
-| 名字 | min_cross_compaction_unseq_file_level |
-| ------------ | ------------ |
-| 描述 | 可被选为待选文件的乱序文件的最小空间内合并层级 |
-| 类型 | int32 |
-| 默认值 | 1 |
-| 改后生效方式 | 热加载 |
-
-- compaction_thread_count
-
-| 名字 | compaction_thread_count |
-| ------------ | ------------ |
-| 描述 | 执行合并任务的线程数目 |
-| 类型 | int32 |
-| 默认值 | 10 |
-| 改后生效方式 | 热加载 |
-
-- compaction_max_aligned_series_num_in_one_batch
-
-| 名字 | compaction_max_aligned_series_num_in_one_batch |
-| ------------ | ------------ |
-| 描述 | 对齐序列合并一次执行时处理的值列数量 |
-| 类型 | int32 |
-| 默认值 | 10 |
-| 改后生效方式 | 热加载 |
-
-- compaction_schedule_interval_in_ms
-
-| 名字 | compaction_schedule_interval_in_ms |
-| ------------ | ------------ |
-| 描述 | 合并调度的时间间隔,单位 ms |
-| 类型 | Long |
-| 默认值 | 60000 |
-| 改后生效方式 | 重启服务生效 |
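刷盘与合并目标数据块规格的配置示意(取值即上文默认值,重启服务生效;仅为示例):

```properties
# 单个 Chunk 的目标字节数与目标点数
target_chunk_size=1600000
target_chunk_point_num=100000
```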
-- compaction_write_throughput_mb_per_sec
-
-| 名字 | compaction_write_throughput_mb_per_sec |
-| ------------ | ------------ |
-| 描述 | 合并操作每秒可达到的写入吞吐量上限,小于或等于 0 的取值表示无限制 |
-| 类型 | int32 |
-| 默认值 | 16 |
-| 改后生效方式 | 重启服务生效 |
-
-- compaction_read_throughput_mb_per_sec
-
-| 名字 | compaction_read_throughput_mb_per_sec |
-| ------------ | ------------ |
-| 描述 | 合并每秒读吞吐限制,单位为 megabyte,小于或等于 0 的取值表示无限制 |
-| 类型 | int32 |
-| 默认值 | 0 |
-| 改后生效方式 | 热加载 |
-
-- compaction_read_operation_per_sec
-
-| 名字 | compaction_read_operation_per_sec |
-| ------------ | ------------ |
-| 描述 | 合并每秒读操作数量限制,小于或等于 0 的取值表示无限制 |
-| 类型 | int32 |
-| 默认值 | 0 |
-| 改后生效方式 | 热加载 |
-
-- sub_compaction_thread_count
-
-| 名字 | sub_compaction_thread_count |
-| ------------ | ------------ |
-| 描述 | 每个合并任务的子任务线程数,只对跨空间合并和乱序空间内合并生效 |
-| 类型 | int32 |
-| 默认值 | 4 |
-| 改后生效方式 | 热加载 |
-
-- inner_compaction_task_selection_disk_redundancy
-
-| 名字 | inner_compaction_task_selection_disk_redundancy |
-| ------------ | ------------ |
-| 描述 | 定义了磁盘可用空间的冗余值,仅用于空间内合并 |
-| 类型 | double |
-| 默认值 | 0.05 |
-| 改后生效方式 | 热加载 |
-
-- inner_compaction_task_selection_mods_file_threshold
-
-| 名字 | inner_compaction_task_selection_mods_file_threshold |
-| ------------ | ------------ |
-| 描述 | 定义了 mods 文件大小的阈值,仅用于空间内合并。 |
-| 类型 | long |
-| 默认值 | 131072 |
-| 改后生效方式 | 热加载 |
-
-- compaction_schedule_thread_num
-
-| 名字 | compaction_schedule_thread_num |
-| ------------ | ------------ |
-| 描述 | 选择合并任务的线程数量 |
-| 类型 | int32 |
-| 默认值 | 4 |
-| 改后生效方式 | 热加载 |
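如需降低合并对前台读写的影响,可参考如下限流配置示意(取值仅为演示,0 或负值表示不限制):

```properties
# 合并写入限速 16 MB/s(重启服务生效)
compaction_write_throughput_mb_per_sec=16
# 合并读取限速与每秒读操作数限制(热加载),0 表示不限制
compaction_read_throughput_mb_per_sec=0
compaction_read_operation_per_sec=0
```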
-### 3.21 写前日志配置
-
-- wal_mode
-
-| 名字 | wal_mode |
-| ------------ | ------------ |
-| 描述 | 写前日志的写入模式。DISABLE 模式下会关闭写前日志;SYNC 模式下写入请求会在成功写入磁盘后返回;ASYNC 模式下写入请求返回时可能尚未成功写入磁盘。 |
-| 类型 | String |
-| 默认值 | ASYNC |
-| 改后生效方式 | 重启服务生效 |
-
-- max_wal_nodes_num
-
-| 名字 | max_wal_nodes_num |
-| ------------ | ------------ |
-| 描述 | 写前日志节点的最大数量,默认值 0 表示数量由系统控制。 |
-| 类型 | int32 |
-| 默认值 | 0 |
-| 改后生效方式 | 重启服务生效 |
-
-- wal_async_mode_fsync_delay_in_ms
-
-| 名字 | wal_async_mode_fsync_delay_in_ms |
-| ------------ | ------------ |
-| 描述 | async 模式下写前日志调用 fsync 前的等待时间 |
-| 类型 | int32 |
-| 默认值 | 1000 |
-| 改后生效方式 | 热加载 |
-
-- wal_sync_mode_fsync_delay_in_ms
-
-| 名字 | wal_sync_mode_fsync_delay_in_ms |
-| ------------ | ------------ |
-| 描述 | sync 模式下写前日志调用 fsync 前的等待时间 |
-| 类型 | int32 |
-| 默认值 | 3 |
-| 改后生效方式 | 热加载 |
-
-- wal_buffer_size_in_byte
-
-| 名字 | wal_buffer_size_in_byte |
-| ------------ | ------------ |
-| 描述 | 写前日志的 buffer 大小 |
-| 类型 | int32 |
-| 默认值 | 33554432 |
-| 改后生效方式 | 重启服务生效 |
-
-- wal_buffer_queue_capacity
-
-| 名字 | wal_buffer_queue_capacity |
-| ------------ | ------------ |
-| 描述 | 写前日志阻塞队列大小上限 |
-| 类型 | int32 |
-| 默认值 | 500 |
-| 改后生效方式 | 重启服务生效 |
-
-- wal_file_size_threshold_in_byte
-
-| 名字 | wal_file_size_threshold_in_byte |
-| ------------ | ------------ |
-| 描述 | 写前日志文件封口阈值 |
-| 类型 | int32 |
-| 默认值 | 31457280 |
-| 改后生效方式 | 热加载 |
-
-- wal_min_effective_info_ratio
-
-| 名字 | wal_min_effective_info_ratio |
-| ------------ | ------------ |
-| 描述 | 写前日志最小有效信息比 |
-| 类型 | double |
-| 默认值 | 0.1 |
-| 改后生效方式 | 热加载 |
-
-- wal_memtable_snapshot_threshold_in_byte
-
-| 名字 | wal_memtable_snapshot_threshold_in_byte |
-| ------------ | ------------ |
-| 描述 | 触发写前日志中内存表快照的内存表大小阈值 |
-| 类型 | int64 |
-| 默认值 | 8388608 |
-| 改后生效方式 | 热加载 |
-
-- max_wal_memtable_snapshot_num
-
-| 名字 | max_wal_memtable_snapshot_num |
-| ------------ | ------------ |
-| 描述 | 写前日志中内存表的最大数量上限 |
-| 类型 | int32 |
-| 默认值 | 1 |
-| 改后生效方式 | 热加载 |
-
-- delete_wal_files_period_in_ms
-
-| 名字 | delete_wal_files_period_in_ms |
-| ------------ | ------------ |
-| 描述 | 删除写前日志的检查间隔 |
-| 类型 | int64 |
-| 默认值 | 20000 |
-| 改后生效方式 | 热加载 |
-
-- wal_throttle_threshold_in_byte
-
-| 名字 | wal_throttle_threshold_in_byte |
-| ------------ | ------------ |
-| 描述 | 在 IoTConsensus 中,当 WAL 文件的大小达到一定阈值时,会开始对写入操作进行节流,以控制写入速度。 |
-| 类型 | long |
-| 默认值 | 53687091200 |
-| 改后生效方式 | 热加载 |
-
-- iot_consensus_cache_window_time_in_ms
-
-| 名字 | iot_consensus_cache_window_time_in_ms |
-| ------------ | ------------ |
-| 描述 | 在 IoTConsensus 中,写缓存的最大等待时间。 |
-| 类型 | long |
-| 默认值 | -1 |
-| 改后生效方式 | 热加载 |
-
-- enable_wal_compression
-
-| 名字 | enable_wal_compression |
-| ------------ | ------------ |
-| 描述 | 用于控制是否启用 WAL 的压缩。 |
-| 类型 | boolean |
-| 默认值 | true |
-| 改后生效方式 | 热加载 |
-
-### 3.22 IoT 共识协议配置
-
-当 Region 配置了 IoTConsensus 共识协议之后,下述的配置项才会生效
-
-- data_region_iot_max_log_entries_num_per_batch
-
-| 名字 | data_region_iot_max_log_entries_num_per_batch |
-| ------------ | ------------ |
-| 描述 | IoTConsensus batch 的最大日志条数 |
-| 类型 | int32 |
-| 默认值 | 1024 |
-| 改后生效方式 | 重启服务生效 |
-
-- data_region_iot_max_size_per_batch
-
-| 名字 | data_region_iot_max_size_per_batch |
-| ------------ | ------------ |
-| 描述 | IoTConsensus batch 的最大大小 |
-| 类型 | int32 |
-| 默认值 | 16777216 |
-| 改后生效方式 | 重启服务生效 |
-
-- 
data_region_iot_max_pending_batches_num - -| 名字 | data_region_iot_max_pending_batches_num | -| ------------ | --------------------------------------- | -| 描述 | IoTConsensus batch 的流水线并发阈值 | -| 类型 | int32 | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -- data_region_iot_max_memory_ratio_for_queue - -| 名字 | data_region_iot_max_memory_ratio_for_queue | -| ------------ | ------------------------------------------ | -| 描述 | IoTConsensus 队列内存分配比例 | -| 类型 | double | -| 默认值 | 0.6 | -| 改后生效方式 | 重启服务生效 | - -- region_migration_speed_limit_bytes_per_second - -| 名字 | region_migration_speed_limit_bytes_per_second | -| ------------ | --------------------------------------------- | -| 描述 | 定义了在region迁移过程中,数据传输的最大速率 | -| 类型 | long | -| 默认值 | 33554432 | -| 改后生效方式 | 重启服务生效 | - -### 3.23 TsFile配置 - -- group_size_in_byte - -| 名字 | group_size_in_byte | -| ------------ | ---------------------------------------------- | -| 描述 | 每次将内存中的数据写入到磁盘时的最大写入字节数 | -| 类型 | int32 | -| 默认值 | 134217728 | -| 改后生效方式 | 热加载 | - -- page_size_in_byte - -| 名字 | page_size_in_byte | -| ------------ | ---------------------------------------------------- | -| 描述 | 内存中每个列写出时,写成的单页最大的大小,单位为字节 | -| 类型 | int32 | -| 默认值 | 65536 | -| 改后生效方式 | 热加载 | - -- max_number_of_points_in_page - -| 名字 | max_number_of_points_in_page | -| ------------ | ------------------------------------------------- | -| 描述 | 一个页中最多包含的数据点(时间戳-值的二元组)数量 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 热加载 | - -- pattern_matching_threshold - -| 名字 | pattern_matching_threshold | -| ------------ | ------------------------------ | -| 描述 | 正则表达式匹配时最大的匹配次数 | -| 类型 | int32 | -| 默认值 | 1000000 | -| 改后生效方式 | 热加载 | - -- float_precision - -| 名字 | float_precision | -| ------------ | ------------------------------------------------------------ | -| 描述 | 浮点数精度,为小数点后数字的位数 | -| 类型 | int32 | -| 默认值 | 默认为 2 位。注意:32 位浮点数的十进制精度为 7 位,64 位浮点数的十进制精度为 15 位。如果设置超过机器精度将没有实际意义。 | -| 改后生效方式 | 热加载 | - -- value_encoder - -| 名字 | value_encoder | -| ------------ | ------------------------------------- | -| 描述 | value 列编码方式 | -| 类型 | 枚举 String: “TS_2DIFF”,“PLAIN”,“RLE” | -| 默认值 | PLAIN | -| 改后生效方式 | 热加载 | - -- compressor - -| 名字 | compressor | -| ------------ | ------------------------------------------------------------ | -| 描述 | 数据压缩方法; 对齐序列中时间列的压缩方法 | -| 类型 | 枚举 String : "UNCOMPRESSED", "SNAPPY", "LZ4", "ZSTD", "LZMA2" | -| 默认值 | LZ4 | -| 改后生效方式 | 热加载 | - -- encrypt_flag - -| 名字 | encrypt_flag | -| ------------ | ---------------------------- | -| 描述 | 用于开启或关闭数据加密功能。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- encrypt_type - -| 名字 | encrypt_type | -| ------------ | ------------------------------------- | -| 描述 | 数据加密的方法。 | -| 类型 | String | -| 默认值 | org.apache.tsfile.encrypt.UNENCRYPTED | -| 改后生效方式 | 重启服务生效 | - -- encrypt_key_path - -| 名字 | encrypt_key_path | -| ------------ | ---------------------------- | -| 描述 | 数据加密使用的密钥来源路径。 | -| 类型 | String | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -### 3.24 授权配置 - -- authorizer_provider_class - -| 名字 | authorizer_provider_class | -| ------------ | ------------------------------------------------------------ | -| 描述 | 权限服务的类名 | -| 类型 | String | -| 默认值 | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer | -| 改后生效方式 | 重启服务生效 | -| 其他可选值 | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer | - -- openID_url - -| 名字 | openID_url | -| ------------ | ---------------------------------------------------------- | -| 描述 | openID 服务器地址 (当 OpenIdAuthorizer 被启用时必须设定) | -| 类型 | String(一个 http 地址) | -| 默认值 | 无 | -| 改后生效方式 | 重启服务生效 | - -- 
iotdb_server_encrypt_decrypt_provider
-
-| 名字 | iotdb_server_encrypt_decrypt_provider |
-| ------------ | ------------ |
-| 描述 | 用于用户密码加密的类 |
-| 类型 | String |
-| 默认值 | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt |
-| 改后生效方式 | 仅允许在第一次启动服务前修改 |
-
-- iotdb_server_encrypt_decrypt_provider_parameter
-
-| 名字 | iotdb_server_encrypt_decrypt_provider_parameter |
-| ------------ | ------------ |
-| 描述 | 用于初始化用户密码加密类的参数 |
-| 类型 | String |
-| 默认值 | 无 |
-| 改后生效方式 | 仅允许在第一次启动服务前修改 |
-
-- author_cache_size
-
-| 名字 | author_cache_size |
-| ------------ | ------------ |
-| 描述 | 用户缓存与角色缓存的大小 |
-| 类型 | int32 |
-| 默认值 | 1000 |
-| 改后生效方式 | 重启服务生效 |
-
-- author_cache_expire_time
-
-| 名字 | author_cache_expire_time |
-| ------------ | ------------ |
-| 描述 | 用户缓存与角色缓存的有效期,单位为分钟 |
-| 类型 | int32 |
-| 默认值 | 30 |
-| 改后生效方式 | 重启服务生效 |
-
-### 3.25 UDF配置
-
-- udf_initial_byte_array_length_for_memory_control
-
-| 名字 | udf_initial_byte_array_length_for_memory_control |
-| ------------ | ------------ |
-| 描述 | 用于评估 UDF 查询中文本字段的内存使用情况。建议将此值设置为略大于所有文本记录的平均长度。 |
-| 类型 | int32 |
-| 默认值 | 48 |
-| 改后生效方式 | 重启服务生效 |
-
-- udf_memory_budget_in_mb
-
-| 名字 | udf_memory_budget_in_mb |
-| ------------ | ------------ |
-| 描述 | 单个 UDF 查询可使用的内存(以 MB 为单位),上限为分配给读取的内存的 20%。 |
-| 类型 | Float |
-| 默认值 | 30.0 |
-| 改后生效方式 | 重启服务生效 |
-
-- udf_reader_transformer_collector_memory_proportion
-
-| 名字 | udf_reader_transformer_collector_memory_proportion |
-| ------------ | ------------ |
-| 描述 | UDF 内存分配比例。参数形式为 a : b : c,其中 a、b、c 为整数。 |
-| 类型 | String |
-| 默认值 | 1:1:1 |
-| 改后生效方式 | 重启服务生效 |
-
-- udf_lib_dir
-
-| 名字 | udf_lib_dir |
-| ------------ | ------------ |
-| 描述 | UDF 日志及 jar 文件存储路径 |
-| 类型 | String |
-| 默认值 | ext/udf(Windows:ext\\udf) |
-| 改后生效方式 | 重启服务生效 |
-
-### 3.26 触发器配置
-
-- trigger_lib_dir
-
-| 名字 | trigger_lib_dir |
-| ------------ | ------------ |
-| 描述 | 触发器 JAR 包存放的目录 |
-| 类型 | String |
-| 默认值 | ext/trigger |
-| 改后生效方式 | 重启服务生效 |
-
-- stateful_trigger_retry_num_when_not_found
-
-| 名字 | stateful_trigger_retry_num_when_not_found |
-| ------------ | ------------ |
-| 描述 | 有状态触发器触发时无法找到触发器实例的重试次数 |
-| 类型 | Int32 |
-| 默认值 | 3 |
-| 改后生效方式 | 重启服务生效 |
-
-### 3.27 SELECT-INTO配置
-
-- into_operation_buffer_size_in_byte
-
-| 名字 | into_operation_buffer_size_in_byte |
-| ------------ | ------------ |
-| 描述 | 执行 select-into 语句时,待写入数据占用的最大内存(单位:Byte) |
-| 类型 | long |
-| 默认值 | 104857600 |
-| 改后生效方式 | 热加载 |
-
-- select_into_insert_tablet_plan_row_limit
-
-| 名字 | select_into_insert_tablet_plan_row_limit |
-| ------------ | ------------ |
-| 描述 | 执行 select-into 语句时,一个 insert-tablet-plan 中可以处理的最大行数 |
-| 类型 | int32 |
-| 默认值 | 10000 |
-| 改后生效方式 | 热加载 |
-
-- into_operation_execution_thread_count
-
-| 名字 | into_operation_execution_thread_count |
-| ------------ | ------------ |
-| 描述 | SELECT INTO 中执行写入任务的线程池的线程数 |
-| 类型 | int32 |
-| 默认值 | 2 |
-| 改后生效方式 | 重启服务生效 |
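SELECT INTO 写入缓冲的配置示意(取值即上文默认值;前两项支持热加载):

```properties
# 待写入数据最多占用 104857600 字节(100 MB)内存
into_operation_buffer_size_in_byte=104857600
# 单个 insert-tablet-plan 最多处理 10000 行
select_into_insert_tablet_plan_row_limit=10000
```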
-### 3.28 连续查询配置
-
-- continuous_query_submit_thread_count
-
-| 名字 | continuous_query_submit_thread_count |
-| ------------ | ------------ |
-| 描述 | 执行连续查询任务的线程池的线程数 |
-| 类型 | int32 |
-| 默认值 | 2 |
-| 改后生效方式 | 重启服务生效 |
-
-- continuous_query_min_every_interval_in_ms
-
-| 名字 | continuous_query_min_every_interval_in_ms |
-| ------------ | ------------ |
-| 描述 | 连续查询执行时间间隔的最小值 |
-| 类型 | long (duration) |
-| 默认值 | 1000 |
-| 改后生效方式 | 重启服务生效 |
-
-### 3.29 PIPE配置
-
-- pipe_lib_dir
-
-| 名字 | pipe_lib_dir |
-| ------------ | ------------ |
-| 描述 | 自定义 Pipe 插件的存放目录 |
-| 类型 | string |
-| 默认值 | ext/pipe |
-| 改后生效方式 | 暂不支持修改 |
-
-- pipe_subtask_executor_max_thread_num
-
-| 名字 | pipe_subtask_executor_max_thread_num |
-| ------------ | ------------ |
-| 描述 | pipe 子任务 processor、sink 中各自可以使用的最大线程数。实际值将是 min(pipe_subtask_executor_max_thread_num, max(1, CPU 核心数 / 2))。 |
-| 类型 | int |
-| 默认值 | 5 |
-| 改后生效方式 | 重启服务生效 |
-
-- pipe_sink_timeout_ms
-
-| 名字 | pipe_sink_timeout_ms |
-| ------------ | ------------ |
-| 描述 | thrift 客户端的连接超时时间(以毫秒为单位)。 |
-| 类型 | int |
-| 默认值 | 900000 |
-| 改后生效方式 | 重启服务生效 |
-
-- pipe_sink_selector_number
-
-| 名字 | pipe_sink_selector_number |
-| ------------ | ------------ |
-| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大执行结果处理线程数量。建议将此值设置为小于或等于 pipe_sink_max_client_number。 |
-| 类型 | int |
-| 默认值 | 4 |
-| 改后生效方式 | 重启服务生效 |
-
-- pipe_sink_max_client_number
-
-| 名字 | pipe_sink_max_client_number |
-| ------------ | ------------ |
-| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大客户端数量。 |
-| 类型 | int |
-| 默认值 | 16 |
-| 改后生效方式 | 重启服务生效 |
-
-- pipe_air_gap_receiver_enabled
-
-| 名字 | pipe_air_gap_receiver_enabled |
-| ------------ | ------------ |
-| 描述 | 是否启用通过网闸接收 pipe 数据。接收器只能在 tcp 模式下返回 0 或 1,以指示数据是否成功接收。 |
-| 类型 | Boolean |
-| 默认值 | false |
-| 改后生效方式 | 重启服务生效 |
-
-- pipe_air_gap_receiver_port
-
-| 名字 | pipe_air_gap_receiver_port |
-| ------------ | ------------ |
-| 描述 | 服务器通过网闸接收 pipe 数据的端口。 |
-| 类型 | int |
-| 默认值 | 9780 |
-| 改后生效方式 | 重启服务生效 |
-
-- pipe_all_sinks_rate_limit_bytes_per_second
-
-| 名字 | pipe_all_sinks_rate_limit_bytes_per_second |
-| ------------ | ------------ |
-| 描述 | 所有 pipe sink 每秒可以传输的总字节数。当给定的值小于或等于 0 时,表示没有限制。默认值是 -1,表示没有限制。 |
-| 类型 | double |
-| 默认值 | -1 |
-| 改后生效方式 | 热加载 |
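限制全部 pipe sink 总传输速率的配置示意(热加载;-1 表示不限制,示例取值 10485760 字节/秒即 10 MB/s,仅为演示):

```properties
# 所有 pipe sink 每秒最多传输 10 MB
pipe_all_sinks_rate_limit_bytes_per_second=10485760
```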
-### 3.30 Ratis共识协议配置
-
-当 Region 配置了 RatisConsensus 共识协议之后,下述的配置项才会生效
-
-- config_node_ratis_log_appender_buffer_size_max
-
-| 名字 | config_node_ratis_log_appender_buffer_size_max |
-| ------------ | ------------ |
-| 描述 | confignode 一次同步日志 RPC 最大的传输字节限制 |
-| 类型 | int32 |
-| 默认值 | 16777216 |
-| 改后生效方式 | 重启服务生效 |
-
-- schema_region_ratis_log_appender_buffer_size_max
-
-| 名字 | schema_region_ratis_log_appender_buffer_size_max |
-| ------------ | ------------ |
-| 描述 | schema region 一次同步日志 RPC 最大的传输字节限制 |
-| 类型 | int32 |
-| 默认值 | 16777216 |
-| 改后生效方式 | 重启服务生效 |
-
-- data_region_ratis_log_appender_buffer_size_max
-
-| 名字 | data_region_ratis_log_appender_buffer_size_max |
-| ------------ | ------------ |
-| 描述 | data region 一次同步日志 RPC 最大的传输字节限制 |
-| 类型 | int32 |
-| 默认值 | 16777216 |
-| 改后生效方式 | 重启服务生效 |
-
-- config_node_ratis_snapshot_trigger_threshold
-
-| 名字 | config_node_ratis_snapshot_trigger_threshold |
-| ------------ | ------------ |
-| 描述 | confignode 触发 snapshot 需要的日志条数 |
-| 类型 | int32 |
-| 默认值 | 400,000 |
-| 改后生效方式 | 重启服务生效 |
-
-- schema_region_ratis_snapshot_trigger_threshold
-
-| 名字 | schema_region_ratis_snapshot_trigger_threshold |
-| ------------ | ------------ |
-| 描述 | schema region 触发 snapshot 需要的日志条数 |
-| 类型 | int32 |
-| 默认值 | 400,000 |
-| 改后生效方式 | 重启服务生效 |
-
-- data_region_ratis_snapshot_trigger_threshold
-
-| 名字 | data_region_ratis_snapshot_trigger_threshold |
-| ------------ | ------------ |
-| 描述 | data region 触发 snapshot 需要的日志条数 |
-| 类型 | int32 |
-| 默认值 | 400,000 |
-| 改后生效方式 | 重启服务生效 |
-
-- config_node_ratis_log_unsafe_flush_enable
-
-| 名字 | config_node_ratis_log_unsafe_flush_enable |
-| ------------ | ------------ |
-| 描述 | confignode 是否允许 Raft 日志异步刷盘 |
-| 类型 | boolean |
-| 默认值 | false |
-| 改后生效方式 | 重启服务生效 |
-
-- schema_region_ratis_log_unsafe_flush_enable
-
-| 名字 | schema_region_ratis_log_unsafe_flush_enable |
-| ------------ | ------------ |
-| 描述 | schema region 是否允许 Raft 日志异步刷盘 |
-| 类型 | boolean |
-| 默认值 | false |
-| 改后生效方式 | 重启服务生效 |
-
-- data_region_ratis_log_unsafe_flush_enable
-
-| 名字 | data_region_ratis_log_unsafe_flush_enable |
-| ------------ | ------------ |
-| 描述 | data region 是否允许 Raft 日志异步刷盘 |
-| 类型 | boolean |
-| 默认值 | false |
-| 改后生效方式 | 重启服务生效 |
-
-- config_node_ratis_log_segment_size_max_in_byte
-
-| 名字 | config_node_ratis_log_segment_size_max_in_byte |
-| ------------ | ------------ |
-| 描述 | confignode 一个 RaftLog 日志段文件的大小 |
-| 类型 | int32 |
-| 默认值 | 25165824 |
-| 改后生效方式 | 重启服务生效 |
-
-- schema_region_ratis_log_segment_size_max_in_byte
-
-| 名字 | schema_region_ratis_log_segment_size_max_in_byte |
-| ------------ | ------------ |
-| 描述 | schema region 一个 RaftLog 日志段文件的大小 |
-| 类型 | int32 |
-| 默认值 | 25165824 |
-| 改后生效方式 | 重启服务生效 |
-
-- data_region_ratis_log_segment_size_max_in_byte
-
-| 名字 | data_region_ratis_log_segment_size_max_in_byte |
-| ------------ | ------------ |
-| 描述 | data region 一个 RaftLog 日志段文件的大小 |
-| 类型 | int32 |
-| 默认值 | 25165824 |
-| 改后生效方式 | 重启服务生效 |
-
-- config_node_simple_consensus_log_segment_size_max_in_byte
-
-| 名字 | config_node_simple_consensus_log_segment_size_max_in_byte |
-| ------------ | ------------ |
-| 描述 | Confignode 简单共识协议一个 Log 日志段文件的大小 |
-| 类型 | int32 |
-| 默认值 | 25165824 |
-| 改后生效方式 | 重启服务生效 |
-
-- config_node_ratis_grpc_flow_control_window
-
-| 名字 | config_node_ratis_grpc_flow_control_window |
-| ------------ | ------------ |
-| 描述 | confignode grpc 流式拥塞窗口大小 |
-| 类型 | int32 |
-| 默认值 | 4194304 |
-| 改后生效方式 | 重启服务生效 |
-
-- schema_region_ratis_grpc_flow_control_window
-
-| 名字 | schema_region_ratis_grpc_flow_control_window |
-| ------------ | ------------ |
-| 描述 | schema region grpc 流式拥塞窗口大小 |
-| 类型 | int32 |
-| 默认值 | 4194304 |
-| 改后生效方式 | 重启服务生效 |
-
-- data_region_ratis_grpc_flow_control_window
-
-| 名字 | data_region_ratis_grpc_flow_control_window |
-| ------------ | ------------ |
-| 描述 | data region grpc 流式拥塞窗口大小 |
-| 类型 | int32 |
-| 默认值 | 4194304 |
-| 改后生效方式 | 重启服务生效 |
-
-- config_node_ratis_grpc_leader_outstanding_appends_max
-
-| 名字 | config_node_ratis_grpc_leader_outstanding_appends_max |
-| ------------ | ------------ |
-| 描述 | config node grpc 流水线并发阈值 |
-| 类型 | int32 |
-| 默认值 | 128 |
-| 改后生效方式 | 重启服务生效 |
-
-- 
schema_region_ratis_grpc_leader_outstanding_appends_max - -| 名字 | schema_region_ratis_grpc_leader_outstanding_appends_max | -| ------------ | ------------------------------------------------------- | -| 描述 | schema region grpc 流水线并发阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_grpc_leader_outstanding_appends_max - -| 名字 | data_region_ratis_grpc_leader_outstanding_appends_max | -| ------------ | ----------------------------------------------------- | -| 描述 | data region grpc 流水线并发阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_log_force_sync_num - -| 名字 | config_node_ratis_log_force_sync_num | -| ------------ | ------------------------------------ | -| 描述 | config node fsync 阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_log_force_sync_num - -| 名字 | schema_region_ratis_log_force_sync_num | -| ------------ | -------------------------------------- | -| 描述 | schema region fsync 阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_log_force_sync_num - -| 名字 | data_region_ratis_log_force_sync_num | -| ------------ | ------------------------------------ | -| 描述 | data region fsync 阈值 | -| 类型 | int32 | -| 默认值 | 128 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_rpc_leader_election_timeout_min_ms - -| 名字 | config_node_ratis_rpc_leader_election_timeout_min_ms | -| ------------ | ---------------------------------------------------- | -| 描述 | confignode leader 选举超时最小值 | -| 类型 | int32 | -| 默认值 | 2000ms | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_rpc_leader_election_timeout_min_ms - -| 名字 | schema_region_ratis_rpc_leader_election_timeout_min_ms | -| ------------ | ------------------------------------------------------ | -| 描述 | schema region leader 选举超时最小值 | -| 类型 | int32 | -| 默认值 | 2000ms | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_rpc_leader_election_timeout_min_ms - -| 名字 | data_region_ratis_rpc_leader_election_timeout_min_ms | -| ------------ | ---------------------------------------------------- | -| 描述 | data region leader 选举超时最小值 | -| 类型 | int32 | -| 默认值 | 2000ms | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_rpc_leader_election_timeout_max_ms - -| 名字 | config_node_ratis_rpc_leader_election_timeout_max_ms | -| ------------ | ---------------------------------------------------- | -| 描述 | confignode leader 选举超时最大值 | -| 类型 | int32 | -| 默认值 | 4000ms | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_rpc_leader_election_timeout_max_ms - -| 名字 | schema_region_ratis_rpc_leader_election_timeout_max_ms | -| ------------ | ------------------------------------------------------ | -| 描述 | schema region leader 选举超时最大值 | -| 类型 | int32 | -| 默认值 | 4000ms | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_rpc_leader_election_timeout_max_ms - -| 名字 | data_region_ratis_rpc_leader_election_timeout_max_ms | -| ------------ | ---------------------------------------------------- | -| 描述 | data region leader 选举超时最大值 | -| 类型 | int32 | -| 默认值 | 4000ms | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_request_timeout_ms - -| 名字 | config_node_ratis_request_timeout_ms | -| ------------ | ------------------------------------ | -| 描述 | confignode Raft 客户端重试超时 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_request_timeout_ms - -| 名字 | schema_region_ratis_request_timeout_ms | -| ------------ | -------------------------------------- | -| 描述 | schema region Raft 客户端重试超时 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_request_timeout_ms - -| 名字 | 
data_region_ratis_request_timeout_ms | -| ------------ | ------------------------------------ | -| 描述 | data region Raft 客户端重试超时 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_max_retry_attempts - -| 名字 | config_node_ratis_max_retry_attempts | -| ------------ | ------------------------------------ | -| 描述 | confignode Raft客户端最大重试次数 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_initial_sleep_time_ms - -| 名字 | config_node_ratis_initial_sleep_time_ms | -| ------------ | --------------------------------------- | -| 描述 | confignode Raft客户端初始重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 100ms | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_max_sleep_time_ms - -| 名字 | config_node_ratis_max_sleep_time_ms | -| ------------ | ------------------------------------- | -| 描述 | confignode Raft客户端最大重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 10000 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_max_retry_attempts - -| 名字 | schema_region_ratis_max_retry_attempts | -| ------------ | -------------------------------------- | -| 描述 | schema region Raft客户端最大重试次数 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_initial_sleep_time_ms - -| 名字 | schema_region_ratis_initial_sleep_time_ms | -| ------------ | ----------------------------------------- | -| 描述 | schema region Raft客户端初始重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 100ms | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_max_sleep_time_ms - -| 名字 | schema_region_ratis_max_sleep_time_ms | -| ------------ | ---------------------------------------- | -| 描述 | schema region Raft客户端最大重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_max_retry_attempts - -| 名字 | data_region_ratis_max_retry_attempts | -| ------------ | ------------------------------------ | -| 描述 | data region Raft客户端最大重试次数 | -| 类型 | int32 | -| 默认值 | 10 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_initial_sleep_time_ms - -| 名字 | data_region_ratis_initial_sleep_time_ms | -| ------------ | --------------------------------------- | -| 描述 | data region Raft客户端初始重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 100ms | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_max_sleep_time_ms - -| 名字 | data_region_ratis_max_sleep_time_ms | -| ------------ | -------------------------------------- | -| 描述 | data region Raft客户端最大重试睡眠时长 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- ratis_first_election_timeout_min_ms - -| 名字 | ratis_first_election_timeout_min_ms | -| ------------ | ----------------------------------- | -| 描述 | Ratis协议首次选举最小超时时间 | -| 类型 | int64 | -| 默认值 | 50 (ms) | -| 改后生效方式 | 重启服务生效 | - -- ratis_first_election_timeout_max_ms - -| 名字 | ratis_first_election_timeout_max_ms | -| ------------ | ----------------------------------- | -| 描述 | Ratis协议首次选举最大超时时间 | -| 类型 | int64 | -| 默认值 | 150 (ms) | -| 改后生效方式 | 重启服务生效 | - -- config_node_ratis_preserve_logs_num_when_purge - -| 名字 | config_node_ratis_preserve_logs_num_when_purge | -| ------------ | ---------------------------------------------- | -| 描述 | confignode snapshot后保持一定数量日志不删除 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- schema_region_ratis_preserve_logs_num_when_purge - -| 名字 | schema_region_ratis_preserve_logs_num_when_purge | -| ------------ | ------------------------------------------------ | -| 描述 | schema region snapshot后保持一定数量日志不删除 | -| 类型 | int32 | -| 默认值 | 1000 | -| 改后生效方式 | 重启服务生效 | - -- data_region_ratis_preserve_logs_num_when_purge - -| 名字 | data_region_ratis_preserve_logs_num_when_purge | -| ------------ | 
---------------------------------------------- |
-| 描述 | data region snapshot 后保持一定数量日志不删除 |
-| 类型 | int32 |
-| 默认值 | 1000 |
-| 改后生效方式 | 重启服务生效 |
-
-- config_node_ratis_log_max_size
-
-| 名字 | config_node_ratis_log_max_size |
-| ------------ | ------------ |
-| 描述 | config node 磁盘 Raft Log 最大占用空间 |
-| 类型 | int64 |
-| 默认值 | 2147483648 (2GB) |
-| 改后生效方式 | 重启服务生效 |
-
-- schema_region_ratis_log_max_size
-
-| 名字 | schema_region_ratis_log_max_size |
-| ------------ | ------------ |
-| 描述 | schema region 磁盘 Raft Log 最大占用空间 |
-| 类型 | int64 |
-| 默认值 | 2147483648 (2GB) |
-| 改后生效方式 | 重启服务生效 |
-
-- data_region_ratis_log_max_size
-
-| 名字 | data_region_ratis_log_max_size |
-| ------------ | ------------ |
-| 描述 | data region 磁盘 Raft Log 最大占用空间 |
-| 类型 | int64 |
-| 默认值 | 21474836480 (20GB) |
-| 改后生效方式 | 重启服务生效 |
-
-- config_node_ratis_periodic_snapshot_interval
-
-| 名字 | config_node_ratis_periodic_snapshot_interval |
-| ------------ | ------------ |
-| 描述 | config node 定期 snapshot 的间隔时间 |
-| 类型 | int64 |
-| 默认值 | 86400 (秒) |
-| 改后生效方式 | 重启服务生效 |
-
-- schema_region_ratis_periodic_snapshot_interval
-
-| 名字 | schema_region_ratis_periodic_snapshot_interval |
-| ------------ | ------------ |
-| 描述 | schema region 定期 snapshot 的间隔时间 |
-| 类型 | int64 |
-| 默认值 | 86400 (秒) |
-| 改后生效方式 | 重启服务生效 |
-
-- data_region_ratis_periodic_snapshot_interval
-
-| 名字 | data_region_ratis_periodic_snapshot_interval |
-| ------------ | ------------ |
-| 描述 | data region 定期 snapshot 的间隔时间 |
-| 类型 | int64 |
-| 默认值 | 86400 (秒) |
-| 改后生效方式 | 重启服务生效 |
-
-### 3.31 IoTConsensusV2配置
-
-- iot_consensus_v2_pipeline_size
-
-| 名字 | iot_consensus_v2_pipeline_size |
-| ------------ | ------------ |
-| 描述 | IoTConsensus V2 中连接器(connector)和接收器(receiver)的默认事件缓冲区大小。 |
-| 类型 | int |
-| 默认值 | 5 |
-| 改后生效方式 | 重启服务生效 |
-
-- iot_consensus_v2_mode
-
-| 名字 | iot_consensus_v2_mode |
-| ------------ | ------------ |
-| 描述 | IoTConsensus V2 使用的共识协议模式。 |
-| 类型 | String |
-| 默认值 | batch |
-| 改后生效方式 | 重启服务生效 |
-
-### 3.32 Procedure 配置
-
-- procedure_core_worker_thread_count
-
-| 名字 | procedure_core_worker_thread_count |
-| ------------ | ------------ |
-| 描述 | 工作线程数量 |
-| 类型 | int32 |
-| 默认值 | 4 |
-| 改后生效方式 | 重启服务生效 |
-
-- procedure_completed_clean_interval
-
-| 名字 | procedure_completed_clean_interval |
-| ------------ | ------------ |
-| 描述 | 清理已完成的 procedure 时间间隔 |
-| 类型 | int32 |
-| 默认值 | 30(s) |
-| 改后生效方式 | 重启服务生效 |
-
-- procedure_completed_evict_ttl
-
-| 名字 | procedure_completed_evict_ttl |
-| ------------ | ------------ |
-| 描述 | 已完成的 procedure 的数据保留时间 |
-| 类型 | int32 |
-| 默认值 | 60(s) |
-| 改后生效方式 | 重启服务生效 |
-
-### 3.33 MQTT代理配置
-
-- enable_mqtt_service
-
-| 名字 | enable_mqtt_service |
-| ------------ | ------------ |
-| 描述 | 是否开启 MQTT 服务 |
-| 类型 | Boolean |
-| 默认值 | false |
-| 改后生效方式 | 热加载 |
-
-- mqtt_host
-
-| 名字 | mqtt_host |
-| ------------ | ------------ |
-| 描述 | MQTT 服务绑定的 host。 |
-| 类型 | String |
-| 默认值 | 127.0.0.1 |
-| 改后生效方式 | 热加载 |
-
-- mqtt_port
-
-| 名字 | mqtt_port |
-| ------------ | ------------ |
-| 描述 | MQTT 服务绑定的 port。 |
-| 类型 | int32 |
-| 默认值 | 1883 |
-| 改后生效方式 | 热加载 |
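开启内置 MQTT 服务的配置示意(以上三个参数均支持热加载;host 与 port 为上文默认值):

```properties
# 开启 MQTT 协议接入
enable_mqtt_service=true
mqtt_host=127.0.0.1
mqtt_port=1883
```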
int32 | -| 默认值 | 1 | -| 改后生效方式 | 热加载 | - -- mqtt_payload_formatter - -| 名字 | mqtt_payload_formatter | -| ------------ | ---------------------------- | -| 描述 | MQTT消息有效负载格式化程序。 | -| 类型 | String | -| 默认值 | json | -| 改后生效方式 | 热加载 | - -- mqtt_max_message_size - -| 名字 | mqtt_max_message_size | -| ------------ | ------------------------------------ | -| 描述 | MQTT消息的最大长度(以字节为单位)。 | -| 类型 | int32 | -| 默认值 | 1048576 | -| 改后生效方式 | 热加载 | - -### 3.34 审计日志配置 - -- enable_audit_log - -| 名字 | enable_audit_log | -| ------------ | ------------------------------ | -| 描述 | 用于控制是否启用审计日志功能。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 重启服务生效 | - -- audit_log_storage - -| 名字 | audit_log_storage | -| ------------ | -------------------------- | -| 描述 | 定义了审计日志的输出位置。 | -| 类型 | String | -| 默认值 | IOTDB,LOGGER | -| 改后生效方式 | 重启服务生效 | - -- audit_log_operation - -| 名字 | audit_log_operation | -| ------------ | -------------------------------------- | -| 描述 | 定义了哪些类型的操作需要记录审计日志。 | -| 类型 | String | -| 默认值 | DML,DDL,QUERY | -| 改后生效方式 | 重启服务生效 | - -- enable_audit_log_for_native_insert_api - -| 名字 | enable_audit_log_for_native_insert_api | -| ------------ | -------------------------------------- | -| 描述 | 用于控制本地写入API是否记录审计日志。 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 重启服务生效 | - -### 3.35 白名单配置 -- enable_white_list - -| 名字 | enable_white_list | -| ------------ | ----------------- | -| 描述 | 是否启用白名单。 | -| 类型 | Boolean | -| 默认值 | false | -| 改后生效方式 | 热加载 | - -### 3.36 IoTDB-AI 配置 - -- model_inference_execution_thread_count - -| 名字 | model_inference_execution_thread_count | -| ------------ | -------------------------------------- | -| 描述 | 用于模型推理操作的线程数。 | -| 类型 | int | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - -### 3.37 TsFile 主动监听&加载功能配置 - -- load_clean_up_task_execution_delay_time_seconds - -| 名字 | load_clean_up_task_execution_delay_time_seconds | -| ------------ | ------------------------------------------------------------ | -| 描述 | 在加载TsFile失败后,系统将等待多长时间才会执行清理任务来清除这些未成功加载的TsFile。 | -| 类型 | int | -| 默认值 | 1800 | -| 改后生效方式 | 热加载 | - -- load_write_throughput_bytes_per_second - -| 名字 | load_write_throughput_bytes_per_second | -| ------------ | -------------------------------------- | -| 描述 | 加载TsFile时磁盘写入的每秒最大字节数。 | -| 类型 | int | -| 默认值 | -1 | -| 改后生效方式 | 热加载 | - -- load_active_listening_enable - -| 名字 | load_active_listening_enable | -| ------------ | ------------------------------------------------------------ | -| 描述 | 是否开启 DataNode 主动监听并且加载 tsfile 的功能(默认开启)。 | -| 类型 | Boolean | -| 默认值 | true | -| 改后生效方式 | 热加载 | - -- load_active_listening_dirs - -| 名字 | load_active_listening_dirs | -| ------------ | ------------------------------------------------------------ | -| 描述 | 需要监听的目录(自动包括目录中的子目录),如有多个使用 “,” 隔开。默认的目录为 ext/load/pending(支持热加载)。 | -| 类型 | String | -| 默认值 | ext/load/pending | -| 改后生效方式 | 热加载 | - -- load_active_listening_fail_dir - -| 名字 | load_active_listening_fail_dir | -| ------------ | ---------------------------------------------------------- | -| 描述 | 执行加载 tsfile 文件失败后将文件转存的目录,只能配置一个。 | -| 类型 | String | -| 默认值 | ext/load/failed | -| 改后生效方式 | 热加载 | - -- load_active_listening_max_thread_num - -| 名字 | load_active_listening_max_thread_num | -| ------------ | ------------------------------------------------------------ | -| 描述 | 同时执行加载 tsfile 任务的最大线程数。参数被注释掉时的默认值为 max(1, CPU 核心数 / 2);当用户设置的值不在区间 [1, CPU 核心数 / 2] 内时,会被设置为默认值 max(1, CPU 核心数 / 2)。 | -| 类型 | Long | -| 默认值 | 0 | -| 改后生效方式 | 重启服务生效 | - -- load_active_listening_check_interval_seconds - -| 名字 | load_active_listening_check_interval_seconds | -| ------------ | 
------------------------------------------------------------ | -| 描述 | 主动监听轮询间隔,单位秒。主动监听 tsfile 的功能是通过轮询检查文件夹实现的。该配置指定了两次检查 load_active_listening_dirs 的时间间隔,每次检查完成 load_active_listening_check_interval_seconds 秒后,会执行下一次检查。当用户设置的轮询间隔小于 1 时,会被设置为默认值 5 秒。 | -| 类型 | Long | -| 默认值 | 5 | -| 改后生效方式 | 重启服务生效 | - - -* last_cache_operation_on_load - -|名字| last_cache_operation_on_load | -|:---:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -|描述| 当成功加载一个 TsFile 时,对 LastCache 执行的操作。`UPDATE`:使用 TsFile 中的数据更新 LastCache;`UPDATE_NO_BLOB`:与 UPDATE 类似,但会使 blob 序列的 LastCache 失效;`CLEAN_DEVICE`:使 TsFile 中包含的设备的 LastCache 失效;`CLEAN_ALL`:清空整个 LastCache。 | -|类型| String | -|默认值| UPDATE_NO_BLOB | -|改后生效方式| 重启后生效 | - -* cache_last_values_for_load - -|名字| cache_last_values_for_load | -|:---:|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -|描述| 在加载 TsFile 之前是否缓存最新值(last values)。仅在 `last_cache_operation_on_load=UPDATE_NO_BLOB` 或 `last_cache_operation_on_load=UPDATE` 时生效。当设置为 true 时,即使 `last_cache_operation_on_load=UPDATE`,也会忽略 blob 序列。启用此选项会在加载 TsFile 期间增加内存占用。 | -|类型| Boolean | -|默认值| true | -|改后生效方式| 重启后生效 | - -* cache_last_values_memory_budget_in_byte - -|名字| cache_last_values_memory_budget_in_byte | -|:---:|:----------------------------------------------------------------------------------------------------| -|描述| 当 `cache_last_values_for_load=true` 时,用于缓存最新值的最大内存大小(以字节为单位)。如果超过该值,缓存的值将被丢弃,并以流式方式直接从 TsFile 中读取最新值。 | -|类型| int32 | -|默认值| 4194304 | -|改后生效方式| 重启后生效 | - - -### 3.38 分发重试配置 - -- write_request_remote_dispatch_max_retry_duration_in_ms - -| 名字 | write_request_remote_dispatch_max_retry_duration_in_ms | -| ------------ | ------------------------------------------------------------ | -| 描述 | 在遇到未知错误时,写请求远程分发的最大重试时间,单位是毫秒。 | -| 类型 | Long | -| 默认值 | 60000 | -| 改后生效方式 | 热加载 | - -- enable_retry_for_unknown_error - -| 名字 | enable_retry_for_unknown_error | -| ------------ | -------------------------------- | -| 描述 | 用于控制是否对未知错误进行重试。 | -| 类型 | boolean | -| 默认值 | false | -| 改后生效方式 | 热加载 | \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual_apache.md b/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual_apache.md new file mode 100644 index 000000000..672829976 --- /dev/null +++ b/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual_apache.md @@ -0,0 +1,3364 @@ + + +# 配置参数 + +IoTDB 配置文件位于 IoTDB 安装目录:`conf`文件夹下。 + +- `confignode-env.sh/bat`:环境配置项的配置文件,可以配置 ConfigNode 的内存大小。 +- `datanode-env.sh/bat`:环境配置项的配置文件,可以配置 DataNode 的内存大小。 +- `iotdb-system.properties`:IoTDB 的配置文件。 +- `iotdb-system.properties.template`:IoTDB 的配置文件模板。 + +## 1. 修改配置: + +在 `iotdb-system.properties` 文件中已存在的参数可以直接进行修改。对于那些在 `iotdb-system.properties` 中未列出的参数,可以从 `iotdb-system.properties.template` 配置文件模板中找到相应的参数,然后将其复制到 `iotdb-system.properties` 文件中进行修改。 + +### 1.1 改后生效方式 + +不同的配置参数有不同的生效方式,分为以下三种: + +- 仅允许在第一次启动服务前修改: 在第一次启动 ConfigNode/DataNode 后即禁止修改,修改会导致 ConfigNode/DataNode 无法启动。 +- 重启服务生效: ConfigNode/DataNode 启动后仍可修改,但需要重启 ConfigNode/DataNode 后才生效。 +- 热加载: 可在 ConfigNode/DataNode 运行时修改,修改后通过 Session 或 Cli 发送 `load configuration` 或 `set configuration key1 = 'value1'` 命令(SQL)至 IoTDB 使配置生效。
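+ +下面给出一个热加载的最小示例(以 3.17 节中支持热加载的 slow_query_threshold 参数为例,数值仅作演示),在 Cli 中执行: + +```sql +-- 将慢查询阈值调整为 5000 毫秒,无需重启即可生效 +set configuration slow_query_threshold = '5000' +-- 或在直接修改 iotdb-system.properties 后整体热加载 +load configuration +``` + +## 2. 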
环境配置项 + +### 2.1 confignode-env.sh/bat + +环境配置项主要用于对 ConfigNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。ConfigNode 启动时,此部分配置会被传给 JVM,详细配置项说明如下: + +- MEMORY_SIZE + +| 名字 | MEMORY_SIZE | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 启动时分配的内存大小 | +| 类型 | String | +| 默认值 | 取决于操作系统和机器配置。默认为机器内存的十分之三,最多会被设置为 16G。 | +| 改后生效方式 | 重启服务生效 | + +- ON_HEAP_MEMORY + +| 名字 | ON_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 能使用的堆内内存大小,曾用名:MAX_HEAP_SIZE | +| 类型 | String | +| 默认值 | 取决于 MEMORY_SIZE 的配置。 | +| 改后生效方式 | 重启服务生效 | + +- OFF_HEAP_MEMORY + +| 名字 | OFF_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 能使用的堆外内存大小,曾用名:MAX_DIRECT_MEMORY_SIZE | +| 类型 | String | +| 默认值 | 取决于 MEMORY_SIZE 的配置。 | +| 改后生效方式 | 重启服务生效 | + +### 2.2 datanode-env.sh/bat + +环境配置项主要用于对 DataNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。DataNode/Standalone 启动时,此部分配置会被传给 JVM,详细配置项说明如下: + +- MEMORY_SIZE + +| 名字 | MEMORY_SIZE | +| ------------ | ---------------------------------------------------- | +| 描述 | IoTDB DataNode 启动时分配的内存大小 | +| 类型 | String | +| 默认值 | 取决于操作系统和机器配置。默认为机器内存的二分之一。 | +| 改后生效方式 | 重启服务生效 | + +- ON_HEAP_MEMORY + +| 名字 | ON_HEAP_MEMORY | +| ------------ | ---------------------------------------------------------- | +| 描述 | IoTDB DataNode 能使用的堆内内存大小,曾用名:MAX_HEAP_SIZE | +| 类型 | String | +| 默认值 | 取决于 MEMORY_SIZE 的配置。 | +| 改后生效方式 | 重启服务生效 | + +- OFF_HEAP_MEMORY + +| 名字 | OFF_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB DataNode 能使用的堆外内存大小,曾用名:MAX_DIRECT_MEMORY_SIZE | +| 类型 | String | +| 默认值 | 取决于 MEMORY_SIZE 的配置。 | +| 改后生效方式 | 重启服务生效 |
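+ +一个设置 DataNode 内存的最小示例(8G 仅为演示值,请按实际机器内存调整),编辑 `datanode-env.sh`: + +```bash +# 指定 DataNode 启动时分配的总内存,堆内/堆外内存将据此推算 +MEMORY_SIZE=8G +# 也可分别显式指定堆内与堆外内存(示例值) +# ON_HEAP_MEMORY=6G +# OFF_HEAP_MEMORY=2G +``` + + +## 3. 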
系统配置项(iotdb-system.properties.template) + +### 3.1 集群管理 + +- cluster_name + +| 名字 | cluster_name | +| -------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 集群名称 | +| 类型 | String | +| 默认值 | default_cluster | +| 修改方式 | CLI 中执行语句 `set configuration cluster_name = 'xxx'` (xxx为希望修改成的集群名称) | +| 注意 | 此修改通过网络分发至每个节点。在网络波动或者有节点宕机的情况下,不保证能够在全部节点修改成功。未修改成功的节点重启时无法加入集群,此时需要手动修改该节点的配置文件中的cluster_name项,再重启。正常情况下,不建议通过手动修改配置文件的方式修改集群名称,不建议通过`load configuration`的方式热加载。 | + +### 3.2 SeedConfigNode 配置 + +- cn_seed_config_node + +| 名字 | cn_seed_config_node | +| ------------ | ------------------------------------------------------------ | +| 描述 | 目标 ConfigNode 地址,ConfigNode 通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 cn_target_config_node_list | +| 类型 | String | +| 默认值 | 127.0.0.1:10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_seed_config_node + +| 名字 | dn_seed_config_node | +| ------------ | ------------------------------------------------------------ | +| 描述 | ConfigNode 地址,DataNode 启动时通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 dn_target_config_node_list | +| 类型 | String | +| 默认值 | 127.0.0.1:10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +### 3.3 Node RPC 配置 + +- cn_internal_address + +| 名字 | cn_internal_address | +| ------------ | ---------------------------- | +| 描述 | ConfigNode 集群内部地址 | +| 类型 | String | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- cn_internal_port + +| 名字 | cn_internal_port | +| ------------ | ---------------------------- | +| 描述 | ConfigNode 集群服务监听端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- cn_consensus_port + +| 名字 | cn_consensus_port | +| ------------ | ----------------------------- | +| 描述 | ConfigNode 的共识协议通信端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 10720 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_rpc_address + +| 名字 | dn_rpc_address | +| ------------ |----------------| +| 描述 | 客户端 RPC 服务监听地址 | +| 类型 | String | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_port + +| 名字 | dn_rpc_port | +| ------------ | ----------------------- | +| 描述 | Client RPC 服务监听端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 6667 | +| 改后生效方式 | 重启服务生效 | + +- dn_internal_address + +| 名字 | dn_internal_address | +| ------------ | ---------------------------- | +| 描述 | DataNode 内网通信地址 | +| 类型 | string | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_internal_port + +| 名字 | dn_internal_port | +| ------------ | ---------------------------- | +| 描述 | DataNode 内网通信端口 | +| 类型 | int | +| 默认值 | 10730 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_mpp_data_exchange_port + +| 名字 | dn_mpp_data_exchange_port | +| ------------ | ---------------------------- | +| 描述 | MPP 数据交换端口 | +| 类型 | int | +| 默认值 | 10740 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_schema_region_consensus_port + +| 名字 | dn_schema_region_consensus_port | +| ------------ | ------------------------------------- | +| 描述 | DataNode 元数据副本的共识协议通信端口 | +| 类型 | int | +| 默认值 | 10750 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_data_region_consensus_port + +| 名字 | dn_data_region_consensus_port | +| ------------ | ----------------------------------- | +| 描述 | DataNode 数据副本的共识协议通信端口 | +| 类型 | int | +| 默认值 | 10760 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_join_cluster_retry_interval_ms + +| 名字 | dn_join_cluster_retry_interval_ms | +| ------------ | --------------------------------- | +| 描述 | DataNode 再次重试加入集群等待时间 | +| 类型 | long | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +### 3.4 副本配置 + +- 
config_node_consensus_protocol_class + +| 名字 | config_node_consensus_protocol_class | +| ------------ | ------------------------------------------------ | +| 描述 | ConfigNode 副本的共识协议,仅支持 RatisConsensus | +| 类型 | String | +| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- schema_replication_factor + +| 名字 | schema_replication_factor | +| ------------ | ---------------------------------- | +| 描述 | Database 的默认元数据副本数 | +| 类型 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 重启服务后对**新的 Database** 生效 | + +- schema_region_consensus_protocol_class + +| 名字 | schema_region_consensus_protocol_class | +| ------------ | ----------------------------------------------------- | +| 描述 | 元数据副本的共识协议,多副本时只能使用 RatisConsensus | +| 类型 | String | +| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- data_replication_factor + +| 名字 | data_replication_factor | +| ------------ | ---------------------------------- | +| 描述 | Database 的默认数据副本数 | +| 类型 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 重启服务后对**新的 Database** 生效 | + +- data_region_consensus_protocol_class + +| 名字 | data_region_consensus_protocol_class | +| ------------ | ------------------------------------------------------------ | +| 描述 | 数据副本的共识协议,多副本时可以使用 IoTConsensus 或 RatisConsensus | +| 类型 | String | +| 默认值 | org.apache.iotdb.consensus.iot.IoTConsensus | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +### 3.5 目录配置 + +- cn_system_dir + +| 名字 | cn_system_dir | +| ------------ | ----------------------------------------------------------- | +| 描述 | ConfigNode 系统数据存储路径 | +| 类型 | String | +| 默认值 | data/confignode/system(Windows:data\\confignode\\system) | +| 改后生效方式 | 重启服务生效 | + +- cn_consensus_dir + +| 名字 | cn_consensus_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | ConfigNode 共识协议数据存储路径 | +| 类型 | String | +| 默认值 | data/confignode/consensus(Windows:data\\confignode\\consensus) | +| 改后生效方式 | 重启服务生效 | + +- cn_pipe_receiver_file_dir + +| 名字 | cn_pipe_receiver_file_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | ConfigNode中pipe接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- dn_system_dir + +| 名字 | dn_system_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 元数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/system(Windows:data\\datanode\\system) | +| 改后生效方式 | 重启服务生效 | + +- dn_data_dirs + +| 名字 | dn_data_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/data(Windows:data\\datanode\\data) | +| 改后生效方式 | 重启服务生效 | + +- dn_multi_dir_strategy + +| 名字 | dn_multi_dir_strategy | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 在 data_dirs 中为 TsFile 选择目录时采用的策略。可使用简单类名或类名全称。系统提供以下两种策略:
1. SequenceStrategy:IoTDB 按顺序选择目录,依次遍历 data_dirs 中的所有目录,并不断轮循;
2. MaxDiskUsableSpaceFirstStrategy:IoTDB 优先选择 data_dirs 中对应磁盘空余空间最大的目录;
您可以通过以下方法完成用户自定义策略:
1. 继承 org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy 类并实现自身的 Strategy 方法;
2. 将实现的类的完整类名(包名加类名,UserDefineStrategyPackage)填写到该配置项;
3. 将该类 jar 包添加到工程中。 | +| 类型 | String | +| 默认值 | SequenceStrategy | +| 改后生效方式 | 热加载 | + +- dn_consensus_dir + +| 名字 | dn_consensus_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 共识层日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/consensus(Windows:data\\datanode\\consensus) | +| 改后生效方式 | 重启服务生效 | + +- dn_wal_dirs + +| 名字 | dn_wal_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 写前日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/wal(Windows:data\\datanode\\wal) | +| 改后生效方式 | 重启服务生效 | + +- dn_tracing_dir + +| 名字 | dn_tracing_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 追踪根目录路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | datanode/tracing(Windows:datanode\\tracing) | +| 改后生效方式 | 重启服务生效 | + +- dn_sync_dir + +| 名字 | dn_sync_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB sync 存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/sync(Windows:data\\datanode\\sync) | +| 改后生效方式 | 重启服务生效 | + +- sort_tmp_dir + +| 名字 | sort_tmp_dir | +| ------------ | ------------------------------------------------- | +| 描述 | 用于配置排序操作的临时目录。 | +| 类型 | String | +| 默认值 | data/datanode/tmp(Windows:data\\datanode\\tmp) | +| 改后生效方式 | 重启服务生效 | + +- dn_pipe_receiver_file_dirs + +| 名字 | dn_pipe_receiver_file_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | DataNode中pipe接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- iot_consensus_v2_receiver_file_dirs + +| 名字 | iot_consensus_v2_receiver_file_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTConsensus V2中接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- iot_consensus_v2_deletion_file_dir + +| 名字 | iot_consensus_v2_deletion_file_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTConsensus V2中删除操作用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | +| 改后生效方式 | 重启服务生效 | + +### 3.6 监控配置 + +- cn_metric_reporter_list + +| 名字 | cn_metric_reporter_list | +| ------------ | -------------------------------------------------- | +| 描述 | confignode中用于配置监控模块的数据需要报告的系统。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_level + +| 名字 | cn_metric_level | +| ------------ | ------------------------------------------ | +| 描述 | confignode中控制监控模块收集数据的详细程度 | +| 类型 | String | +| 默认值 | IMPORTANT | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_async_collect_period + +| 名字 | cn_metric_async_collect_period | +| ------------ | -------------------------------------------------- | +| 描述 | confignode中某些监控数据异步收集的周期,单位是秒。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_prometheus_reporter_port + +| 名字 | cn_metric_prometheus_reporter_port | +| ------------ | ------------------------------------------------------ | +| 描述 | confignode中Prometheus报告者用于监控数据报告的端口号。 | +| 类型 | int | +| 默认值 | 9091 | +| 改后生效方式 | 重启服务生效 | + +- 
dn_metric_reporter_list + +| 名字 | dn_metric_reporter_list | +| ------------ | ------------------------------------------------ | +| 描述 | DataNode中用于配置监控模块的数据需要报告的系统。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_level + +| 名字 | dn_metric_level | +| ------------ | ---------------------------------------- | +| 描述 | DataNode中控制监控模块收集数据的详细程度 | +| 类型 | String | +| 默认值 | IMPORTANT | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_async_collect_period + +| 名字 | dn_metric_async_collect_period | +| ------------ | ------------------------------------------------ | +| 描述 | DataNode中某些监控数据异步收集的周期,单位是秒。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_prometheus_reporter_port + +| 名字 | dn_metric_prometheus_reporter_port | +| ------------ | ---------------------------------------------------- | +| 描述 | DataNode中Prometheus报告者用于监控数据报告的端口号。 | +| 类型 | int | +| 默认值 | 9092 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_internal_reporter_type + +| 名字 | dn_metric_internal_reporter_type | +| ------------ | ------------------------------------------------------------ | +| 描述 | DataNode中监控模块内部报告者的种类,用于内部监控和检查数据是否已经成功写入和刷新。 | +| 类型 | String | +| 默认值 | IOTDB | +| 改后生效方式 | 重启服务生效 | + +### 3.7 SSL 配置 + +- enable_thrift_ssl + +| 名字 | enable_thrift_ssl | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当enable_thrift_ssl配置为true时,将通过dn_rpc_port使用 SSL 加密进行通信 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- enable_https + +| 名字 | enable_https | +| ------------ | ------------------------------ | +| 描述 | REST Service 是否开启 SSL 配置 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- key_store_path + +| 名字 | key_store_path | +| ------------ | -------------- | +| 描述 | ssl证书路径 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- key_store_pwd + +| 名字 | key_store_pwd | +| ------------ | ------------- | +| 描述 | ssl证书密码 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +### 3.8 连接配置 + +- cn_rpc_thrift_compression_enable + +| 名字 | cn_rpc_thrift_compression_enable | +| ------------ | -------------------------------- | +| 描述 | 是否启用 thrift 的压缩机制。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- cn_rpc_max_concurrent_client_num + +| 名字 | cn_rpc_max_concurrent_client_num | +| ------------ |---------------------------------| +| 描述 | 最大连接数。 | +| 类型 | int | +| 默认值 | 3000 | +| 改后生效方式 | 重启服务生效 | + +- cn_connection_timeout_ms + +| 名字 | cn_connection_timeout_ms | +| ------------ | ------------------------ | +| 描述 | 节点连接超时时间 | +| 类型 | int | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- cn_selector_thread_nums_of_client_manager + +| 名字 | cn_selector_thread_nums_of_client_manager | +| ------------ | ----------------------------------------- | +| 描述 | 客户端异步线程管理的选择器线程数量 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- cn_max_client_count_for_each_node_in_client_manager + +| 名字 | cn_max_client_count_for_each_node_in_client_manager | +| ------------ | --------------------------------------------------- | +| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | +| 类型 | int | +| 默认值 | 300 | +| 改后生效方式 | 重启服务生效 | + +- dn_session_timeout_threshold + +| 名字 | dn_session_timeout_threshold | +| ------------ | ---------------------------- | +| 描述 | 最大的会话空闲时间 | +| 类型 | int | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_thrift_compression_enable + +| 名字 | dn_rpc_thrift_compression_enable | +| ------------ | -------------------------------- | +| 描述 | 是否启用 thrift 的压缩机制 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_advanced_compression_enable + 
+| 名字 | dn_rpc_advanced_compression_enable | +| ------------ | ---------------------------------- | +| 描述 | 是否启用 thrift 的自定制压缩机制 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_selector_thread_count + +| 名字 | dn_rpc_selector_thread_count | +| ------------ | ---------------------------- | +| 描述 | rpc 选择器线程数量 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_min_concurrent_client_num + +| 名字 | dn_rpc_min_concurrent_client_num | +| ------------ | -------------------------------- | +| 描述 | 最小连接数 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_max_concurrent_client_num + +| 名字 | dn_rpc_max_concurrent_client_num | +| ------------ |----------------------------------| +| 描述 | 最大连接数 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- dn_thrift_max_frame_size + +| 名字 | dn_thrift_max_frame_size | +| ------------ | ------------------------------------------------------ | +| 描述 | RPC 请求/响应的最大字节数 | +| 类型 | long | +| 默认值 | 536870912(512MB) | +| 改后生效方式 | 重启服务生效 | + +- dn_thrift_init_buffer_size + +| 名字 | dn_thrift_init_buffer_size | +| ------------ | -------------------------- | +| 描述 | thrift 初始化缓冲区大小(字节) | +| 类型 | long | +| 默认值 | 1024 | +| 改后生效方式 | 重启服务生效 | + +- dn_connection_timeout_ms + +| 名字 | dn_connection_timeout_ms | +| ------------ | ------------------------ | +| 描述 | 节点连接超时时间 | +| 类型 | int | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- dn_selector_thread_count_of_client_manager + +| 名字 | dn_selector_thread_count_of_client_manager | +| ------------ | ------------------------------------------------------------ | +| 描述 | ClientManager 中用于异步请求的选择器线程(TAsyncClientManager)数量 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- dn_max_client_count_for_each_node_in_client_manager + +| 名字 | dn_max_client_count_for_each_node_in_client_manager | +| ------------ | --------------------------------------------------- | +| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | +| 类型 | int | +| 默认值 | 300 | +| 改后生效方式 | 重启服务生效 | + +### 3.9 对象存储管理 + +- remote_tsfile_cache_dirs + +| 名字 | remote_tsfile_cache_dirs | +| ------------ | ------------------------ | +| 描述 | 云端存储在本地的缓存目录 | +| 类型 | String | +| 默认值 | data/datanode/data/cache | +| 改后生效方式 | 重启服务生效 | + +- remote_tsfile_cache_page_size_in_kb + +| 名字 | remote_tsfile_cache_page_size_in_kb | +| ------------ | ----------------------------------- | +| 描述 | 云端存储在本地缓存文件的块大小 | +| 类型 | int | +| 默认值 | 20480 | +| 改后生效方式 | 重启服务生效 | + +- remote_tsfile_cache_max_disk_usage_in_mb + +| 名字 | remote_tsfile_cache_max_disk_usage_in_mb | +| ------------ | ---------------------------------------- | +| 描述 | 云端存储本地缓存的最大磁盘占用大小 | +| 类型 | long | +| 默认值 | 51200 | +| 改后生效方式 | 重启服务生效 | + +- object_storage_type + +| 名字 | object_storage_type | +| ------------ | ------------------- | +| 描述 | 云端存储类型 | +| 类型 | String | +| 默认值 | AWS_S3 | +| 改后生效方式 | 重启服务生效 | + +- object_storage_endpoint + +| 名字 | object_storage_endpoint | +| ------------ | ----------------------- | +| 描述 | 云端存储的 endpoint | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- object_storage_bucket + +| 名字 | object_storage_bucket | +| ------------ | ---------------------- | +| 描述 | 云端存储 bucket 的名称 | +| 类型 | String | +| 默认值 | iotdb_data | +| 改后生效方式 | 重启服务生效 | + +- object_storage_access_key + +| 名字 | object_storage_access_key | +| ------------ | ------------------------- | +| 描述 | 云端存储的验证信息 key | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 |
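+ +上述对象存储参数的一个最小配置示例(endpoint 为假设的演示值,访问密钥见本节其余参数): + +```properties +object_storage_type=AWS_S3 +object_storage_endpoint=s3.us-east-1.amazonaws.com +object_storage_bucket=iotdb_data +``` + +- object_storage_access_secret + +| 名字 | 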
object_storage_access_secret | +| ------------ | ---------------------------- | +| 描述 | 云端存储的验证信息 secret | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +### 3.10 多级管理 + +- dn_default_space_usage_thresholds + +| 名字 | dn_default_space_usage_thresholds | +| ------------ | ------------------------------------------------------------ | +| 描述 | 定义每个层级数据目录的最小剩余空间比例;当剩余空间少于该比例时,数据会被自动迁移至下一个层级;当最后一个层级的剩余存储空间低于此阈值时,会将系统置为 READ_ONLY | +| 类型 | double | +| 默认值 | 0.85 | +| 改后生效方式 | 热加载 | + +- dn_tier_full_policy + +| 名字 | dn_tier_full_policy | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当最后一个层级的已用空间高于其 dn_default_space_usage_threshold 时,对该层级数据的处理策略。 | +| 类型 | String | +| 默认值 | NULL | +| 改后生效方式 | 热加载 | + +- migrate_thread_count + +| 名字 | migrate_thread_count | +| ------------ | ---------------------------------------- | +| 描述 | DataNode数据目录中迁移操作的线程池大小。 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 热加载 | + +- tiered_storage_migrate_speed_limit_bytes_per_sec + +| 名字 | tiered_storage_migrate_speed_limit_bytes_per_sec | +| ------------ | ------------------------------------------------ | +| 描述 | 限制不同存储层级之间的数据迁移速度。 | +| 类型 | int | +| 默认值 | 10485760 | +| 改后生效方式 | 热加载 | + +### 3.11 REST服务配置 + +- enable_rest_service + +| 名字 | enable_rest_service | +| ------------ | ------------------- | +| 描述 | 是否开启Rest服务。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- rest_service_port + +| 名字 | rest_service_port | +| ------------ | ------------------ | +| 描述 | Rest服务监听端口号 | +| 类型 | int32 | +| 默认值 | 18080 | +| 改后生效方式 | 重启服务生效 | + +- enable_swagger + +| 名字 | enable_swagger | +| ------------ | --------------------------------- | +| 描述 | 是否启用swagger来展示rest接口信息 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- rest_query_default_row_size_limit + +| 名字 | rest_query_default_row_size_limit | +| ------------ | --------------------------------- | +| 描述 | 一次查询能返回的结果集最大行数 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- cache_expire_in_seconds + +| 名字 | cache_expire_in_seconds | +| ------------ | -------------------------------- | +| 描述 | 用户登录信息缓存的过期时间(秒) | +| 类型 | int32 | +| 默认值 | 28800 | +| 改后生效方式 | 重启服务生效 | + +- cache_max_num + +| 名字 | cache_max_num | +| ------------ | ------------------------ | +| 描述 | 缓存中存储的最大用户数量 | +| 类型 | int32 | +| 默认值 | 100 | +| 改后生效方式 | 重启服务生效 | + +- cache_init_num + +| 名字 | cache_init_num | +| ------------ | -------------- | +| 描述 | 缓存初始容量 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- client_auth + +| 名字 | client_auth | +| ------------ | ---------------------- | +| 描述 | 是否需要客户端身份验证 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- trust_store_path + +| 名字 | trust_store_path | +| ------------ | ------------------------- | +| 描述 | trustStore 路径(非必填) | +| 类型 | String | +| 默认值 | "" | +| 改后生效方式 | 重启服务生效 | + +- trust_store_pwd + +| 名字 | trust_store_pwd | +| ------------ | ------------------------- | +| 描述 | trustStore 密码(非必填) | +| 类型 | String | +| 默认值 | "" | +| 改后生效方式 | 重启服务生效 | + +- idle_timeout_in_seconds + +| 名字 | idle_timeout_in_seconds | +| ------------ | ----------------------- | +| 描述 | SSL 超时时间,单位为秒 | +| 类型 | int32 | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +### 3.12 负载均衡配置 + +- series_slot_num + +| 名字 | series_slot_num | +| ------------ | ---------------------------- | +| 描述 | 序列分区槽数 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- series_partition_executor_class + +| 名字 | series_partition_executor_class | +| ------------ | ------------------------------------------------------------ | +| 描述 
| 序列分区哈希函数 | +| 类型 | String | +| 默认值 | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- schema_region_group_extension_policy + +| 名字 | schema_region_group_extension_policy | +| ------------ | ------------------------------------ | +| 描述 | SchemaRegionGroup 的扩容策略 | +| 类型 | string | +| 默认值 | AUTO | +| 改后生效方式 | 重启服务生效 | + +- default_schema_region_group_num_per_database + +| 名字 | default_schema_region_group_num_per_database | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当选用 CUSTOM-SchemaRegionGroup 扩容策略时,此参数为每个 Database 拥有的 SchemaRegionGroup 数量;当选用 AUTO-SchemaRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 SchemaRegionGroup 数量 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_per_data_node + +| 名字 | schema_region_per_data_node | +| ------------ | -------------------------------------------------- | +| 描述 | 期望每个 DataNode 可管理的 SchemaRegion 的最大数量 | +| 类型 | double | +| 默认值 | 1.0 | +| 改后生效方式 | 重启服务生效 | + +- data_region_group_extension_policy + +| 名字 | data_region_group_extension_policy | +| ------------ | ---------------------------------- | +| 描述 | DataRegionGroup 的扩容策略 | +| 类型 | string | +| 默认值 | AUTO | +| 改后生效方式 | 重启服务生效 | + +- default_data_region_group_num_per_database + +| 名字 | default_data_region_group_num_per_database | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当选用 CUSTOM-DataRegionGroup 扩容策略时,此参数为每个 Database 拥有的 DataRegionGroup 数量;当选用 AUTO-DataRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 DataRegionGroup 数量 | +| 类型 | int | +| 默认值 | 2 | +| 改后生效方式 | 重启服务生效 | + +- data_region_per_data_node + +| 名字 | data_region_per_data_node | +| ------------ | ------------------------------------------------ | +| 描述 | 期望每个 DataNode 可管理的 DataRegion 的最大数量 | +| 类型 | double | +| 默认值 | CPU 核心数的一半 | +| 改后生效方式 | 重启服务生效 | + +- enable_auto_leader_balance_for_ratis_consensus + +| 名字 | enable_auto_leader_balance_for_ratis_consensus | +| ------------ | ---------------------------------------------- | +| 描述 | 是否为 Ratis 共识协议开启自动均衡 leader 策略 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- enable_auto_leader_balance_for_iot_consensus + +| 名字 | enable_auto_leader_balance_for_iot_consensus | +| ------------ | -------------------------------------------- | +| 描述 | 是否为 IoT 共识协议开启自动均衡 leader 策略 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +### 3.13 集群管理 + +- time_partition_origin + +| 名字 | time_partition_origin | +| ------------ | ------------------------------------------------------------ | +| 描述 | Database 数据时间分区的起始点,即从哪个时间点开始计算时间分区。 | +| 类型 | Long | +| 单位 | 毫秒 | +| 默认值 | 0 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- time_partition_interval + +| 名字 | time_partition_interval | +| ------------ | ------------------------------- | +| 描述 | Database 默认的数据时间分区间隔 | +| 类型 | Long | +| 单位 | 毫秒 | +| 默认值 | 604800000 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- heartbeat_interval_in_ms + +| 名字 | heartbeat_interval_in_ms | +| ------------ | ------------------------ | +| 描述 | 集群节点间的心跳间隔 | +| 类型 | Long | +| 单位 | ms | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- disk_space_warning_threshold + +| 名字 | disk_space_warning_threshold | +| ------------ | ---------------------------- | +| 描述 | DataNode 磁盘剩余阈值 | +| 类型 | double(percentage) | +| 默认值 | 0.05 | +| 改后生效方式 | 重启服务生效 | + +### 3.14 内存控制配置 + +- datanode_memory_proportion + +| 名字 | datanode_memory_proportion | +| ------------ | ---------------------------------------------------- | +| 描述 | 存储引擎、查询引擎、元数据、共识、流处理引擎和空闲内存比例 | +| 类型 | Ratio | +| 默认值 | 
3:3:1:1:1:1 | +| 改后生效方式 | 重启服务生效 | + +- schema_memory_proportion + +| 名字 | schema_memory_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | Schema 相关的内存如何在 SchemaRegion、SchemaCache 和 PartitionCache 之间分配 | +| 类型 | Ratio | +| 默认值 | 5:4:1 | +| 改后生效方式 | 重启服务生效 | + +- storage_engine_memory_proportion + +| 名字 | storage_engine_memory_proportion | +| ------------ | -------------------------------- | +| 描述 | 写入和合并占存储内存比例 | +| 类型 | Ratio | +| 默认值 | 8:2 | +| 改后生效方式 | 重启服务生效 | + +- write_memory_proportion + +| 名字 | write_memory_proportion | +| ------------ | -------------------------------------------- | +| 描述 | Memtable 和 TimePartitionInfo 占写入内存比例 | +| 类型 | Ratio | +| 默认值 | 19:1 | +| 改后生效方式 | 重启服务生效 | + +- primitive_array_size + +| 名字 | primitive_array_size | +| ------------ | ---------------------------------------- | +| 描述 | 数组池中的原始数组大小(每个数组的长度) | +| 类型 | int32 | +| 默认值 | 64 | +| 改后生效方式 | 重启服务生效 | + +- chunk_metadata_size_proportion + +| 名字 | chunk_metadata_size_proportion | +| ------------ | -------------------------------------------- | +| 描述 | 在数据压缩过程中,用于存储块元数据的内存比例 | +| 类型 | Double | +| 默认值 | 0.1 | +| 改后生效方式 | 重启服务生效 | + +- flush_proportion + +| 名字 | flush_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | 调用flush disk的写入内存比例,默认0.4,若有极高的写入负载力(比如batch=1000),可以设置为低于默认值,比如0.2 | +| 类型 | Double | +| 默认值 | 0.4 | +| 改后生效方式 | 重启服务生效 | + +- buffered_arrays_memory_proportion + +| 名字 | buffered_arrays_memory_proportion | +| ------------ | --------------------------------------- | +| 描述 | 为缓冲数组分配的写入内存比例,默认为0.6 | +| 类型 | Double | +| 默认值 | 0.6 | +| 改后生效方式 | 重启服务生效 | + +- reject_proportion + +| 名字 | reject_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | 拒绝插入的写入内存比例,默认0.8,若有极高的写入负载力(比如batch=1000)并且物理内存足够大,它可以设置为高于默认值,如0.9 | +| 类型 | Double | +| 默认值 | 0.8 | +| 改后生效方式 | 重启服务生效 | + +- device_path_cache_proportion + +| 名字 | device_path_cache_proportion | +| ------------ | --------------------------------------------------- | +| 描述 | 在内存中分配给设备路径缓存(DevicePathCache)的比例 | +| 类型 | Double | +| 默认值 | 0.05 | +| 改后生效方式 | 重启服务生效 | + +- write_memory_variation_report_proportion + +| 名字 | write_memory_variation_report_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | 如果 DataRegion 的内存增加超过写入可用内存的一定比例,则向系统报告。默认值为0.001 | +| 类型 | Double | +| 默认值 | 0.001 | +| 改后生效方式 | 重启服务生效 | + +- check_period_when_insert_blocked + +| 名字 | check_period_when_insert_blocked | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当插入被拒绝时,等待时间(以毫秒为单位)去再次检查系统,默认为50。若插入被拒绝,读取负载低,可以设置大一些。 | +| 类型 | int32 | +| 默认值 | 50 | +| 改后生效方式 | 重启服务生效 | + +- io_task_queue_size_for_flushing + +| 名字 | io_task_queue_size_for_flushing | +| ------------ | -------------------------------- | +| 描述 | ioTaskQueue 的大小。默认值为10。 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- enable_query_memory_estimation + +| 名字 | enable_query_memory_estimation | +| ------------ | ------------------------------------------------------------ | +| 描述 | 开启后会预估每次查询的内存使用量,如果超过可用内存,会拒绝本次查询 | +| 类型 | bool | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +### 3.15 元数据引擎配置 + +- schema_engine_mode + +| 名字 | schema_engine_mode | +| ------------ | ------------------------------------------------------------ | +| 描述 | 元数据引擎的运行模式,支持 Memory 和 PBTree;PBTree 模式下支持将内存中暂时不用的序列元数据实时置换到磁盘上,需要使用时再加载进内存;此参数在集群中所有的 DataNode 上务必保持相同。 | +| 类型 | string | +| 默认值 | Memory | +| 改后生效方式 | 
仅允许在第一次启动服务前修改 | + +- partition_cache_size + +| 名字 | partition_cache_size | +| ------------ | ------------------------------ | +| 描述 | 分区信息缓存的最大缓存条目数。 | +| 类型 | Int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- sync_mlog_period_in_ms + +| 名字 | sync_mlog_period_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | mlog定期刷新到磁盘的周期,单位毫秒。如果该参数为0,则表示每次对元数据的更新操作都会被立即写到磁盘上。 | +| 类型 | Int64 | +| 默认值 | 100 | +| 改后生效方式 | 重启服务生效 | + +- tag_attribute_flush_interval + +| 名字 | tag_attribute_flush_interval | +| ------------ | -------------------------------------------------- | +| 描述 | 标签和属性记录的间隔数,达到此记录数量时将强制刷盘 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- tag_attribute_total_size + +| 名字 | tag_attribute_total_size | +| ------------ | ---------------------------------------- | +| 描述 | 每个时间序列标签和属性的最大持久化字节数 | +| 类型 | int32 | +| 默认值 | 700 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- max_measurement_num_of_internal_request + +| 名字 | max_measurement_num_of_internal_request | +| ------------ | ------------------------------------------------------------ | +| 描述 | 一次注册序列请求中若物理量过多,在系统内部执行时将被拆分为若干个轻量级的子请求,每个子请求中的物理量数目不超过此参数设置的最大值。 | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- datanode_schema_cache_eviction_policy + +| 名字 | datanode_schema_cache_eviction_policy | +| ------------ | ----------------------------------------------------- | +| 描述 | 当 Schema 缓存达到其最大容量时,Schema 缓存的淘汰策略 | +| 类型 | String | +| 默认值 | FIFO | +| 改后生效方式 | 重启服务生效 | + +- cluster_timeseries_limit_threshold + +| 名字 | cluster_timeseries_limit_threshold | +| ------------ | ---------------------------------- | +| 描述 | 集群中可以创建的时间序列的最大数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +- cluster_device_limit_threshold + +| 名字 | cluster_device_limit_threshold | +| ------------ | ------------------------------ | +| 描述 | 集群中可以创建的最大设备数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +- database_limit_threshold + +| 名字 | database_limit_threshold | +| ------------ | ------------------------------ | +| 描述 | 集群中可以创建的最大数据库数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +### 3.16 自动推断数据类型 + +- enable_auto_create_schema + +| 名字 | enable_auto_create_schema | +| ------------ | -------------------------------------- | +| 描述 | 当写入的序列不存在时,是否自动创建序列 | +| 取值 | true or false | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- default_storage_group_level + +| 名字 | default_storage_group_level | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当写入的数据不存在且自动创建序列时,若需要创建相应的 database,将序列路径的哪一层当做 database。例如,如果我们接到一个新序列 root.sg0.d1.s2, 并且 level=1, 那么 root.sg0 被视为database(因为 root 是 level 0 层) | +| 取值 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- boolean_string_infer_type + +| 名字 | boolean_string_infer_type | +| ------------ | ------------------------------------------ | +| 描述 | "true" 或者 "false" 字符串被推断的数据类型 | +| 取值 | BOOLEAN 或者 TEXT | +| 默认值 | BOOLEAN | +| 改后生效方式 | 热加载 | + +- integer_string_infer_type + +| 名字 | integer_string_infer_type | +| ------------ | --------------------------------- | +| 描述 | 整型字符串推断的数据类型 | +| 取值 | INT32, INT64, FLOAT, DOUBLE, TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- floating_string_infer_type + +| 名字 | floating_string_infer_type | +| ------------ | ----------------------------- | +| 描述 | "6.7"等字符串被推断的数据类型 | +| 取值 | DOUBLE, FLOAT or TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- nan_string_infer_type + +| 名字 | nan_string_infer_type | +| ------------ | ---------------------------- | +| 描述 | "NaN" 字符串被推断的数据类型 | +| 取值 | DOUBLE, 
FLOAT or TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- default_boolean_encoding + +| 名字 | default_boolean_encoding | +| ------------ | ------------------------ | +| 描述 | BOOLEAN 类型编码格式 | +| 取值 | PLAIN, RLE | +| 默认值 | RLE | +| 改后生效方式 | 热加载 | + +- default_int32_encoding + +| 名字 | default_int32_encoding | +| ------------ | -------------------------------------- | +| 描述 | int32 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| 默认值 | TS_2DIFF | +| 改后生效方式 | 热加载 | + +- default_int64_encoding + +| 名字 | default_int64_encoding | +| ------------ | -------------------------------------- | +| 描述 | int64 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| 默认值 | TS_2DIFF | +| 改后生效方式 | 热加载 | + +- default_float_encoding + +| 名字 | default_float_encoding | +| ------------ | ----------------------------- | +| 描述 | float 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | +| 默认值 | GORILLA | +| 改后生效方式 | 热加载 | + +- default_double_encoding + +| 名字 | default_double_encoding | +| ------------ | ----------------------------- | +| 描述 | double 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | +| 默认值 | GORILLA | +| 改后生效方式 | 热加载 | + +- default_text_encoding + +| 名字 | default_text_encoding | +| ------------ | --------------------- | +| 描述 | text 类型编码格式 | +| 取值 | PLAIN | +| 默认值 | PLAIN | +| 改后生效方式 | 热加载 | + +* boolean_compressor + +| 名字 | boolean_compressor | +| -------------- | ----------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,BOOLEAN 数据类型的压缩方式 (V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* int32_compressor + +| 名字 | int32_compressor | +| -------------- | ------------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,INT32/DATE 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* int64_compressor + +| 名字 | int64_compressor | +| -------------- | ------------------------------------------------------------------------------ | +| 描述 | 启用自动创建模式时,INT64/TIMESTAMP 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* float_compressor + +| 名字 | float_compressor | +| -------------- | -------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,FLOAT 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* double_compressor + +| 名字 | double_compressor | +| -------------- | --------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,DOUBLE 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* text_compressor + +| 名字 | text_compressor | +| -------------- | -------------------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,TEXT/BINARY/BLOB 数据类型的压缩方式(V2.0.6 版本开始支持 ) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + + + +### 3.17 查询配置 + +- read_consistency_level + +| 名字 | read_consistency_level | +| ------------ | ------------------------------------------------------------ | +| 描述 | 查询一致性等级,取值 “strong” 时从 Leader 副本查询,取值 “weak” 时随机查询一个副本。 | +| 类型 | String | +| 默认值 | strong | +| 改后生效方式 | 重启服务生效 | + +- meta_data_cache_enable + +| 名字 | meta_data_cache_enable | +| ------------ | ------------------------------------------------------------ | +| 描述 | 是否缓存元数据(包括 BloomFilter、Chunk Metadata 和 TimeSeries Metadata。) | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- chunk_timeseriesmeta_free_memory_proportion + +| 名字 | chunk_timeseriesmeta_free_memory_proportion | +| ------------ | 
------------------------------------------------------------ | +| 描述 | 读取内存分配比例,依次用于 BloomFilterCache、ChunkCache、TimeseriesMetadataCache、数据集查询所使用的内存和查询可用的空闲内存。参数形式为 a : b : c : d : e,其中 a、b、c、d、e 为整数,例如 “1 : 1 : 1 : 1 : 1”、“1 : 100 : 200 : 300 : 400”。 | +| 类型 | String | +| 默认值 | 1 : 100 : 200 : 300 : 400 | +| 改后生效方式 | 重启服务生效 | + +- enable_last_cache + +| 名字 | enable_last_cache | +| ------------ | ------------------ | +| 描述 | 是否开启最新点缓存 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_core_pool_size + +| 名字 | mpp_data_exchange_core_pool_size | +| ------------ | -------------------------------- | +| 描述 | MPP 数据交换线程池核心线程数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_max_pool_size + +| 名字 | mpp_data_exchange_max_pool_size | +| ------------ | ------------------------------- | +| 描述 | MPP 数据交换线程池最大线程数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_keep_alive_time_in_ms + +| 名字 | mpp_data_exchange_keep_alive_time_in_ms | +| ------------ | --------------------------------------- | +| 描述 | MPP 数据交换最大等待时间 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- driver_task_execution_time_slice_in_ms + +| 名字 | driver_task_execution_time_slice_in_ms | +| ------------ | -------------------------------------- | +| 描述 | 单个 DriverTask 最长执行时间(ms) | +| 类型 | int32 | +| 默认值 | 200 | +| 改后生效方式 | 重启服务生效 | + +- max_tsblock_size_in_bytes + +| 名字 | max_tsblock_size_in_bytes | +| ------------ | ------------------------------- | +| 描述 | 单个 TsBlock 的最大容量(byte) | +| 类型 | int32 | +| 默认值 | 131072 | +| 改后生效方式 | 重启服务生效 | + +- max_tsblock_line_numbers + +| 名字 | max_tsblock_line_numbers | +| ------------ | ------------------------ | +| 描述 | 单个 TsBlock 的最大行数 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- slow_query_threshold + +| 名字 | slow_query_threshold | +| ------------ | ------------------------------ | +| 描述 | 慢查询的时间阈值。单位:毫秒。 | +| 类型 | long | +| 默认值 | 10000 | +| 改后生效方式 | 热加载 | + +- query_cost_stat_window + +| 名字 | query_cost_stat_window | +| ------------ |--------------------| +| 描述 | 查询耗时统计的窗口,单位为分钟。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 热加载 | + +- query_timeout_threshold + +| 名字 | query_timeout_threshold | +| ------------ | -------------------------------- | +| 描述 | 查询的最大执行时间。单位:毫秒。 | +| 类型 | Int32 | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- max_allowed_concurrent_queries + +| 名字 | max_allowed_concurrent_queries | +| ------------ | ------------------------------ | +| 描述 | 允许的最大并发查询数量。 | +| 类型 | Int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- query_thread_count + +| 名字 | query_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 IoTDB 对内存中的数据进行查询时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- degree_of_query_parallelism + +| 名字 | degree_of_query_parallelism | +| ------------ | ------------------------------------------------------------ | +| 描述 | 设置单个查询片段实例将创建的 pipeline 驱动程序数量,也就是查询操作的并行度。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- mode_map_size_threshold + +| 名字 | mode_map_size_threshold | +| ------------ | ---------------------------------------------- | +| 描述 | 计算 MODE 聚合函数时,计数映射可以增长到的阈值 | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- batch_size + +| 名字 | batch_size | +| ------------ | ---------------------------------------------------------- | +| 描述 | 服务器中每次迭代的数据量(数据条目,即不同时间戳的数量。) | +| 类型 | Int32 | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 |
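+ +基于上述参数,一个控制查询并发与慢查询记录的示例配置(数值仅作演示,请按实际负载调整): + +```properties +# 查询执行线程数,小于等于 0 时取 CPU 核数 +query_thread_count=16 +# 允许的最大并发查询数量 +max_allowed_concurrent_queries=500 +# 慢查询阈值,单位毫秒(支持热加载) +slow_query_threshold=10000 +``` + +- sort_buffer_size_in_bytes + +| 名字 | 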
sort_buffer_size_in_bytes | +| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 设置外部排序操作中使用的内存缓冲区大小 | +| 类型 | long | +| 默认值 | 1048576(V2.0.6 之前版本)
0(V2.0.6 及之后版本),当值小于等于 0 时,由系统自动进行计算,计算公式为:`sort_buffer_size_in_bytes = Math.min(32 * 1024 * 1024, 堆内内存 * 查询引擎内存比例 * 查询执行内存比例 / 查询线程数 / 2)` | +| 改后生效方式 | 热加载 | + +- merge_threshold_of_explain_analyze + +| 名字 | merge_threshold_of_explain_analyze | +| ------------ | ------------------------------------------------------------ | +| 描述 | 用于设置在 `EXPLAIN ANALYZE` 语句的结果集中操作符(operator)数量的合并阈值。 | +| 类型 | int | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +### 3.18 TTL配置 + +- ttl_check_interval + +| 名字 | ttl_check_interval | +| ------------ | -------------------------------------- | +| 描述 | ttl 检查任务的间隔,单位 ms,默认为 2h | +| 类型 | int | +| 默认值 | 7200000 | +| 改后生效方式 | 重启服务生效 | + +- max_expired_time + +| 名字 | max_expired_time | +| ------------ | ------------------------------------------------------------ | +| 描述 | 如果一个文件中存在设备已经过期超过此时间,那么这个文件将被立即整理。单位 ms,默认为一个月 | +| 类型 | int | +| 默认值 | 2592000000 | +| 改后生效方式 | 重启服务生效 | + +- expired_data_ratio + +| 名字 | expired_data_ratio | +| ------------ | ------------------------------------------------------------ | +| 描述 | 过期设备比例。如果一个文件中过期设备的比率超过这个值,那么这个文件中的过期数据将通过 compaction 清理。 | +| 类型 | float | +| 默认值 | 0.3 | +| 改后生效方式 | 重启服务生效 | + +### 3.19 存储引擎配置 + +- timestamp_precision + +| 名字 | timestamp_precision | +| ------------ | ---------------------------- | +| 描述 | 时间戳精度,支持 ms、us、ns | +| 类型 | String | +| 默认值 | ms | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- timestamp_precision_check_enabled + +| 名字 | timestamp_precision_check_enabled | +| ------------ | --------------------------------- | +| 描述 | 用于控制是否启用时间戳精度检查 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- max_waiting_time_when_insert_blocked + +| 名字 | max_waiting_time_when_insert_blocked | +| ------------ | ----------------------------------------------- | +| 描述 | 当插入请求等待超过这个时间,则抛出异常,单位 ms | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- handle_system_error + +| 名字 | handle_system_error | +| ------------ | ------------------------------------ | +| 描述 | 当系统遇到不可恢复的错误时的处理方法 | +| 类型 | String | +| 默认值 | CHANGE_TO_READ_ONLY | +| 改后生效方式 | 重启服务生效 | + +- enable_timed_flush_seq_memtable + +| 名字 | enable_timed_flush_seq_memtable | +| ------------ | ------------------------------- | +| 描述 | 是否开启定时刷盘顺序 memtable | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- seq_memtable_flush_interval_in_ms + +| 名字 | seq_memtable_flush_interval_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | +| 类型 | long | +| 默认值 | 600000 | +| 改后生效方式 | 热加载 | + +- seq_memtable_flush_check_interval_in_ms + +| 名字 | seq_memtable_flush_check_interval_in_ms | +| ------------ | ---------------------------------------- | +| 描述 | 检查顺序 memtable 是否需要刷盘的时间间隔 | +| 类型 | long | +| 默认值 | 30000 | +| 改后生效方式 | 热加载 | + +- enable_timed_flush_unseq_memtable + +| 名字 | enable_timed_flush_unseq_memtable | +| ------------ | --------------------------------- | +| 描述 | 是否开启定时刷新乱序 memtable | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- unseq_memtable_flush_interval_in_ms + +| 名字 | unseq_memtable_flush_interval_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | +| 类型 | long | +| 默认值 | 600000 | +| 改后生效方式 | 热加载 | + +- unseq_memtable_flush_check_interval_in_ms + +| 名字 | unseq_memtable_flush_check_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 检查乱序 memtable 是否需要刷盘的时间间隔 | +| 类型 | long | +| 默认值 | 30000 | +| 改后生效方式 | 热加载 | + +- 
tvlist_sort_algorithm + +| 名字 | tvlist_sort_algorithm | +| ------------ | ------------------------ | +| 描述 | memtable中数据的排序方法 | +| 类型 | String | +| 默认值 | TIM | +| 改后生效方式 | 重启服务生效 | + +- avg_series_point_number_threshold + +| 名字 | avg_series_point_number_threshold | +| ------------ | ------------------------------------------------ | +| 描述 | 内存中平均每个时间序列点数最大值,达到触发 flush | +| 类型 | int32 | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- flush_thread_count + +| 名字 | flush_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 IoTDB 将内存中的数据写入磁盘时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。默认值为 0。 | +| 类型 | int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- enable_partial_insert + +| 名字 | enable_partial_insert | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在一次 insert 请求中,如果部分测点写入失败,是否继续写入其他测点。 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- recovery_log_interval_in_ms + +| 名字 | recovery_log_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | data region的恢复过程中打印日志信息的间隔 | +| 类型 | Int32 | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +- 0.13_data_insert_adapt + +| 名字 | 0.13_data_insert_adapt | +| ------------ | ------------------------------------------------------- | +| 描述 | 如果 0.13 版本客户端进行写入,需要将此配置项设置为 true | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- enable_tsfile_validation + +| 名字 | enable_tsfile_validation | +| ------------ | -------------------------------------- | +| 描述 | Flush, Load 或合并后验证 tsfile 正确性 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 热加载 | + +- tier_ttl_in_ms + +| 名字 | tier_ttl_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 定义每个层级负责的数据范围,通过 TTL 表示 | +| 类型 | long | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +* max_object_file_size_in_byte + +| 名字 | max\_object\_file\_size\_in\_byte | +| -------------- |-----------------------------------| +| 描述 | 单对象文件的最大尺寸限制 (V2.0.8-beta 版本起支持) | +| 类型 | long | +| 默认值 | 4294967296 | +| 改后生效方式 | 热加载 | + +* restrict_object_limit + +| 名字 | restrict\_object\_limit | +|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 对 OBJECT 类型的表名、列名和设备名称没有特殊限制。(V2.0.8-beta 版本起支持)当设置为 true 且表中包含 OBJECT 列时,需遵循以下限制:
1. 命名规范:TAG 列的值、表名和字段名禁止使用 “.” 或 “..”,且不得包含 “./” 或 “.\” 字符,否则元数据创建将失败。若名称包含文件系统不支持的字符,则会在数据写入时报错。
2. 大小写敏感:如果底层文件系统不区分大小写,则设备标识符(如 'd1' 与 'D1')将被视为相同。在此情况下,若创建此类名称相似的设备,其 OBJECT 数据文件可能互相覆盖,导致数据错误。
3. 存储路径:OBJECT 类型数据的实际存储路径格式为:`${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin`。 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + + +### 3.20 合并配置 + +- enable_seq_space_compaction + +| 名字 | enable_seq_space_compaction | +| ------------ | -------------------------------------- | +| 描述 | 顺序空间内合并,开启顺序文件之间的合并 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_unseq_space_compaction + +| 名字 | enable_unseq_space_compaction | +| ------------ | -------------------------------------- | +| 描述 | 乱序空间内合并,开启乱序文件之间的合并 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_cross_space_compaction + +| 名字 | enable_cross_space_compaction | +| ------------ | ------------------------------------------ | +| 描述 | 跨空间合并,开启将乱序文件合并到顺序文件中 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_auto_repair_compaction + +| 名字 | enable_auto_repair_compaction | +| ------------ | ----------------------------- | +| 描述 | 启用通过合并操作自动修复未排序文件的功能 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- cross_selector + +| 名字 | cross_selector | +| ------------ |----------------| +| 描述 | 跨空间合并任务的选择器 | +| 类型 | String | +| 默认值 | rewrite | +| 改后生效方式 | 重启服务生效 | + +- cross_performer + +| 名字 | cross_performer | +| ------------ |-----------------------------------| +| 描述 | 跨空间合并任务的执行器,可选项:read_point 和 fast | +| 类型 | String | +| 默认值 | fast | +| 改后生效方式 | 热加载 | + +- inner_seq_selector + +| 名字 | inner_seq_selector | +| ------------ |------------------------------------------------------------------------| +| 描述 | 顺序空间内合并任务的选择器,可选 size_tiered_single_target,size_tiered_multi_target | +| 类型 | String | +| 默认值 | size_tiered_multi_target | +| 改后生效方式 | 热加载 | + +- inner_seq_performer + +| 名字 | inner_seq_performer | +| ------------ |--------------------------------------| +| 描述 | 顺序空间内合并任务的执行器,可选项是 read_chunk 和 fast | +| 类型 | String | +| 默认值 | read_chunk | +| 改后生效方式 | 热加载 | + +- inner_unseq_selector + +| 名字 | inner_unseq_selector | +| ------------ |-------------------------------------------------------------------------| +| 描述 | 乱序空间内合并任务的选择器,可选 size_tiered_single_target,size_tiered_multi_target | +| 类型 | String | +| 默认值 | size_tiered_multi_target | +| 改后生效方式 | 热加载 | + +- inner_unseq_performer + +| 名字 | inner_unseq_performer | +| ------------ |--------------------------------------| +| 描述 | 乱序空间内合并任务的执行器,可选项是 read_point 和 fast | +| 类型 | String | +| 默认值 | fast | +| 改后生效方式 | 热加载 | + +- compaction_priority + +| 名字 | compaction_priority | +| ------------ |-------------------------------------------------------------------------------------------| +| 描述 | 合并时的优先级。INNER_CROSS:优先执行空间内合并,优先减少文件数量;CROSS_INNER:优先执行跨空间合并,优先清理乱序文件;BALANCE:交替执行两种合并类型。 | +| 类型 | String | +| 默认值 | INNER_CROSS | +| 改后生效方式 | 重启服务生效 | + +- candidate_compaction_task_queue_size + +| 名字 | candidate_compaction_task_queue_size | +| ------------ | ------------------------------------ | +| 描述 | 待选合并任务队列容量 | +| 类型 | int32 | +| 默认值 | 50 | +| 改后生效方式 | 重启服务生效 |
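+ +例如,可通过热加载临时只保留空间内合并、关闭跨空间合并(取值仅为演示,三个开关均支持热加载): + +```properties +enable_seq_space_compaction=true +enable_unseq_space_compaction=true +enable_cross_space_compaction=false +``` + +- target_compaction_file_size + +| 名字 | target_compaction_file_size | +| ------------ |-----------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 该参数作用于两个场景:1. 空间内合并的目标文件大小 2. 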
跨空间合并中待选顺序文件的大小需小于 target_compaction_file_size * 1.5。多数情况下,跨空间合并的目标文件大小不会超过此阈值;即便超出,幅度也不会过大。单位:byte,默认值 2GB |
+| 类型 | Long |
+| 默认值 | 2147483648 |
+| 改后生效方式 | 热加载 |
+
+- inner_compaction_total_file_size_threshold
+
+| 名字 | inner_compaction_total_file_size_threshold |
+| ------------ | ------------------------------------------ |
+| 描述 | 空间内合并的文件总大小阈值,单位:byte |
+| 类型 | Long |
+| 默认值 | 10737418240 |
+| 改后生效方式 | 热加载 |
+
+- inner_compaction_total_file_num_threshold
+
+| 名字 | inner_compaction_total_file_num_threshold |
+| ------------ | ----------------------------------------- |
+| 描述 | 空间内合并的文件总数阈值 |
+| 类型 | int32 |
+| 默认值 | 100 |
+| 改后生效方式 | 热加载 |
+
+- max_level_gap_in_inner_compaction
+
+| 名字 | max_level_gap_in_inner_compaction |
+| ------------ | --------------------------------- |
+| 描述 | 空间内合并筛选的最大层级差 |
+| 类型 | int32 |
+| 默认值 | 2 |
+| 改后生效方式 | 热加载 |
+
+- target_chunk_size
+
+| 名字 | target_chunk_size |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 刷盘与合并操作的目标数据块大小,若内存表中某条时序数据的大小超过该值,数据会被刷盘至多个数据块 |
+| 类型 | Long |
+| 默认值 | 1600000 |
+| 改后生效方式 | 重启服务生效 |
+
+- target_chunk_point_num
+
+| 名字 | target_chunk_point_num |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 刷盘与合并操作中单个数据块的目标点数,若内存表中某条时序数据的点数超过该值,数据会被刷盘至多个数据块中 |
+| 类型 | Long |
+| 默认值 | 100000 |
+| 改后生效方式 | 重启服务生效 |
+
+- chunk_size_lower_bound_in_compaction
+
+| 名字 | chunk_size_lower_bound_in_compaction |
+| ------------ | --------------------------------------------- |
+| 描述 | 若数据块大小低于此阈值,则会被反序列化为数据点,单位:byte |
+| 类型 | Long |
+| 默认值 | 128 |
+| 改后生效方式 | 重启服务生效 |
+
+- chunk_point_num_lower_bound_in_compaction
+
+| 名字 | chunk_point_num_lower_bound_in_compaction |
+| ------------ | ------------------------------------------ |
+| 描述 | 若数据块内的数据点数低于此阈值,则会被反序列化为数据点 |
+| 类型 | Long |
+| 默认值 | 100 |
+| 改后生效方式 | 重启服务生效 |
+
+- inner_compaction_candidate_file_num
+
+| 名字 | inner_compaction_candidate_file_num |
+| ------------ | ----------------------------------- |
+| 描述 | 空间内合并待选文件筛选的文件数量要求 |
+| 类型 | int32 |
+| 默认值 | 30 |
+| 改后生效方式 | 热加载 |
+
+- max_cross_compaction_candidate_file_num
+
+| 名字 | max_cross_compaction_candidate_file_num |
+| ------------ | --------------------------------------- |
+| 描述 | 跨空间合并待选文件筛选的文件数量上限 |
+| 类型 | int32 |
+| 默认值 | 500 |
+| 改后生效方式 | 热加载 |
+
+- max_cross_compaction_candidate_file_size
+
+| 名字 | max_cross_compaction_candidate_file_size |
+| ------------ | ---------------------------------------- |
+| 描述 | 跨空间合并待选文件筛选的总大小上限 |
+| 类型 | Long |
+| 默认值 | 5368709120 |
+| 改后生效方式 | 热加载 |
+
+- min_cross_compaction_unseq_file_level
+
+| 名字 | min_cross_compaction_unseq_file_level |
+| ------------ | ------------------------------------------- |
+| 描述 | 可被选为待选文件的乱序文件的最小空间内合并层级 |
+| 类型 | int32 |
+| 默认值 | 1 |
+| 改后生效方式 | 热加载 |
+
+- compaction_thread_count
+
+| 名字 | compaction_thread_count |
+| ------------ | ----------------------- |
+| 描述 | 执行合并任务的线程数目 |
+| 类型 | int32 |
+| 默认值 | 10 |
+| 改后生效方式 | 热加载 |
+
+- compaction_max_aligned_series_num_in_one_batch
+
+| 名字 | compaction_max_aligned_series_num_in_one_batch |
+| ------------ | ---------------------------------------------- |
+| 描述 | 对齐序列合并一次执行时处理的值列数量 |
+| 类型 | int32 |
+| 默认值 | 10 |
+| 改后生效方式 | 热加载 |
+
+- compaction_schedule_interval_in_ms
+
+| 名字 | compaction_schedule_interval_in_ms |
+| ------------ | ---------------------------------- |
+| 描述 | 合并调度的时间间隔,单位 ms |
+| 类型 | Long |
+| 默认值 | 60000 |
+| 改后生效方式 | 重启服务生效 |
+
+- compaction_write_throughput_mb_per_sec
+
+| 名字 | compaction_write_throughput_mb_per_sec |
+| ------------ | ------------------------------------------------- |
+| 描述 | 合并操作每秒可达到的写入吞吐量上限,小于或等于 0 的取值表示无限制 |
+| 类型 | int32 |
+| 默认值 | 16 |
+| 改后生效方式 | 重启服务生效 |
+
+- compaction_read_throughput_mb_per_sec
+
+| 名字 | compaction_read_throughput_mb_per_sec |
+| ------------ | ---------------------------------------------------- |
+| 描述 | 合并每秒读吞吐限制,单位为 megabyte,小于或等于 0 的取值表示无限制 |
+| 类型 | int32 |
+| 默认值 | 0 |
+| 改后生效方式 | 热加载 |
+
+- compaction_read_operation_per_sec
+
+| 名字 | compaction_read_operation_per_sec |
+| ------------ | ------------------------------------------- |
+| 描述 | 合并每秒读操作数量限制,小于或等于 0 的取值表示无限制 |
+| 类型 | int32 |
+| 默认值 | 0 |
+| 改后生效方式 | 热加载 |
+
+- sub_compaction_thread_count
+
+| 名字 | sub_compaction_thread_count |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 每个合并任务的子任务线程数,只对跨空间合并和乱序空间内合并生效 |
+| 类型 | int32 |
+| 默认值 | 4 |
+| 改后生效方式 | 热加载 |
+
+- inner_compaction_task_selection_disk_redundancy
+
+| 名字 | inner_compaction_task_selection_disk_redundancy |
+| ------------ | ----------------------------------------------- |
+| 描述 | 定义了磁盘可用空间的冗余值,仅用于空间内合并 |
+| 类型 | double |
+| 默认值 | 0.05 |
+| 改后生效方式 | 热加载 |
+
+- inner_compaction_task_selection_mods_file_threshold
+
+| 名字 | inner_compaction_task_selection_mods_file_threshold |
+| ------------ | --------------------------------------------------- |
+| 描述 | 定义了 mods 文件大小的阈值,仅用于空间内合并。 |
+| 类型 | long |
+| 默认值 | 131072 |
+| 改后生效方式 | 热加载 |
+
+- compaction_schedule_thread_num
+
+| 名字 | compaction_schedule_thread_num |
+| ------------ | ------------------------------ |
+| 描述 | 选择合并任务的线程数量 |
+| 类型 | int32 |
+| 默认值 | 4 |
+| 改后生效方式 | 热加载 |
+
+### 3.21 写前日志配置
+
+- wal_mode
+
+| 名字 | wal_mode |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 写前日志的写入模式:
DISABLE 模式下会关闭写前日志;SYNC 模式下写入请求会在成功写入磁盘后返回;ASYNC 模式下写入请求返回时可能尚未成功写入磁盘。 |
+| 类型 | String |
+| 默认值 | ASYNC |
+| 改后生效方式 | 重启服务生效 |
+
+- max_wal_nodes_num
+
+| 名字 | max_wal_nodes_num |
+| ------------ | ----------------------------------------------------- |
+| 描述 | 写前日志节点的最大数量,默认值 0 表示数量由系统控制。 |
+| 类型 | int32 |
+| 默认值 | 0 |
+| 改后生效方式 | 重启服务生效 |
+
+- wal_async_mode_fsync_delay_in_ms
+
+| 名字 | wal_async_mode_fsync_delay_in_ms |
+| ------------ | ------------------------------------------- |
+| 描述 | async 模式下写前日志调用 fsync 前的等待时间 |
+| 类型 | int32 |
+| 默认值 | 1000 |
+| 改后生效方式 | 热加载 |
+
+- wal_sync_mode_fsync_delay_in_ms
+
+| 名字 | wal_sync_mode_fsync_delay_in_ms |
+| ------------ | ------------------------------------------ |
+| 描述 | sync 模式下写前日志调用 fsync 前的等待时间 |
+| 类型 | int32 |
+| 默认值 | 3 |
+| 改后生效方式 | 热加载 |
+
+- wal_buffer_size_in_byte
+
+| 名字 | wal_buffer_size_in_byte |
+| ------------ | ----------------------- |
+| 描述 | 写前日志的 buffer 大小 |
+| 类型 | int32 |
+| 默认值 | 33554432 |
+| 改后生效方式 | 重启服务生效 |
+
+- wal_buffer_queue_capacity
+
+| 名字 | wal_buffer_queue_capacity |
+| ------------ | ------------------------- |
+| 描述 | 写前日志阻塞队列大小上限 |
+| 类型 | int32 |
+| 默认值 | 500 |
+| 改后生效方式 | 重启服务生效 |
+
+- wal_file_size_threshold_in_byte
+
+| 名字 | wal_file_size_threshold_in_byte |
+| ------------ | ------------------------------- |
+| 描述 | 写前日志文件封口阈值 |
+| 类型 | int32 |
+| 默认值 | 31457280 |
+| 改后生效方式 | 热加载 |
+
+- wal_min_effective_info_ratio
+
+| 名字 | wal_min_effective_info_ratio |
+| ------------ | ---------------------------- |
+| 描述 | 写前日志最小有效信息比 |
+| 类型 | double |
+| 默认值 | 0.1 |
+| 改后生效方式 | 热加载 |
+
+- wal_memtable_snapshot_threshold_in_byte
+
+| 名字 | wal_memtable_snapshot_threshold_in_byte |
+| ------------ | ---------------------------------------- |
+| 描述 | 触发写前日志中内存表快照的内存表大小阈值 |
+| 类型 | int64 |
+| 默认值 | 8388608 |
+| 改后生效方式 | 热加载 |
+
+- max_wal_memtable_snapshot_num
+
+| 名字 | max_wal_memtable_snapshot_num |
+| ------------ | ------------------------------ |
+| 描述 | 写前日志中内存表的最大数量上限 |
+| 类型 | int32 |
+| 默认值 | 1 |
+| 改后生效方式 | 热加载 |
+
+- delete_wal_files_period_in_ms
+
+| 名字 | delete_wal_files_period_in_ms |
+| ------------ | ----------------------------- |
+| 描述 | 删除写前日志的检查间隔 |
+| 类型 | int64 |
+| 默认值 | 20000 |
+| 改后生效方式 | 热加载 |
+
+- wal_throttle_threshold_in_byte
+
+| 名字 | wal_throttle_threshold_in_byte |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 在 IoTConsensus 中,当 WAL 文件的大小达到一定阈值时,会开始对写入操作进行节流,以控制写入速度。 |
+| 类型 | long |
+| 默认值 | 53687091200 |
+| 改后生效方式 | 热加载 |
+
+- iot_consensus_cache_window_time_in_ms
+
+| 名字 | iot_consensus_cache_window_time_in_ms |
+| ------------ | ---------------------------------------- |
+| 描述 | 在 IoTConsensus 中,写缓存的最大等待时间。 |
+| 类型 | long |
+| 默认值 | -1 |
+| 改后生效方式 | 热加载 |
+
+- enable_wal_compression
+
+| 名字 | enable_wal_compression |
+| ------------ | ------------------------------------- |
+| 描述 | 用于控制是否启用 WAL 的压缩。 |
+| 类型 | boolean |
+| 默认值 | true |
+| 改后生效方式 | 热加载 |
+
+### 3.22 IoT 共识协议配置
+
+当Region配置了IoTConsensus共识协议之后,下述的配置项才会生效
+
+- data_region_iot_max_log_entries_num_per_batch
+
+| 名字 | data_region_iot_max_log_entries_num_per_batch |
+| ------------ | --------------------------------------------- |
+| 描述 | IoTConsensus batch 的最大日志条数 |
+| 类型 | int32 |
+| 默认值 | 1024 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_iot_max_size_per_batch
+
+| 名字 | data_region_iot_max_size_per_batch |
+| ------------ | ---------------------------------- |
+| 描述 | IoTConsensus batch 的最大大小 |
+| 类型 | int32 |
+| 默认值 | 16777216 |
+| 改后生效方式 | 重启服务生效 |
+
+- 
data_region_iot_max_pending_batches_num

+| 名字 | data_region_iot_max_pending_batches_num |
+| ------------ | --------------------------------------- |
+| 描述 | IoTConsensus batch 的流水线并发阈值 |
+| 类型 | int32 |
+| 默认值 | 5 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_iot_max_memory_ratio_for_queue
+
+| 名字 | data_region_iot_max_memory_ratio_for_queue |
+| ------------ | ------------------------------------------ |
+| 描述 | IoTConsensus 队列内存分配比例 |
+| 类型 | double |
+| 默认值 | 0.6 |
+| 改后生效方式 | 重启服务生效 |
+
+- region_migration_speed_limit_bytes_per_second
+
+| 名字 | region_migration_speed_limit_bytes_per_second |
+| ------------ | --------------------------------------------- |
+| 描述 | 定义了在 region 迁移过程中,数据传输的最大速率 |
+| 类型 | long |
+| 默认值 | 33554432 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.23 TsFile配置
+
+- group_size_in_byte
+
+| 名字 | group_size_in_byte |
+| ------------ | ---------------------------------------------- |
+| 描述 | 每次将内存中的数据写入到磁盘时的最大写入字节数 |
+| 类型 | int32 |
+| 默认值 | 134217728 |
+| 改后生效方式 | 热加载 |
+
+- page_size_in_byte
+
+| 名字 | page_size_in_byte |
+| ------------ | ---------------------------------------------------- |
+| 描述 | 内存中每个列写出时,写成的单页最大的大小,单位为字节 |
+| 类型 | int32 |
+| 默认值 | 65536 |
+| 改后生效方式 | 热加载 |
+
+- max_number_of_points_in_page
+
+| 名字 | max_number_of_points_in_page |
+| ------------ | ------------------------------------------------- |
+| 描述 | 一个页中最多包含的数据点(时间戳-值的二元组)数量 |
+| 类型 | int32 |
+| 默认值 | 10000 |
+| 改后生效方式 | 热加载 |
+
+- pattern_matching_threshold
+
+| 名字 | pattern_matching_threshold |
+| ------------ | ------------------------------ |
+| 描述 | 正则表达式匹配时最大的匹配次数 |
+| 类型 | int32 |
+| 默认值 | 1000000 |
+| 改后生效方式 | 热加载 |
+
+- float_precision
+
+| 名字 | float_precision |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 浮点数精度,为小数点后数字的位数。注意:32 位浮点数的十进制精度为 7 位,64 位浮点数的十进制精度为 15 位,如果设置超过机器精度将没有实际意义。 |
+| 类型 | int32 |
+| 默认值 | 2 |
+| 改后生效方式 | 热加载 |
+
+- value_encoder
+
+| 名字 | value_encoder |
+| ------------ | ------------------------------------- |
+| 描述 | value 列编码方式 |
+| 类型 | 枚举 String:"TS_2DIFF","PLAIN","RLE" |
+| 默认值 | PLAIN |
+| 改后生效方式 | 热加载 |
+
+- compressor
+
+| 名字 | compressor |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 数据压缩方法;对齐序列中时间列的压缩方法 |
+| 类型 | 枚举 String:"UNCOMPRESSED", "SNAPPY", "LZ4", "ZSTD", "LZMA2" |
+| 默认值 | LZ4 |
+| 改后生效方式 | 热加载 |
+
+- encrypt_flag
+
+| 名字 | encrypt_flag |
+| ------------ | ---------------------------- |
+| 描述 | 用于开启或关闭数据加密功能。 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- encrypt_type
+
+| 名字 | encrypt_type |
+| ------------ | ------------------------------------- |
+| 描述 | 数据加密的方法。 |
+| 类型 | String |
+| 默认值 | org.apache.tsfile.encrypt.UNENCRYPTED |
+| 改后生效方式 | 重启服务生效 |
+
+- encrypt_key_path
+
+| 名字 | encrypt_key_path |
+| ------------ | ---------------------------- |
+| 描述 | 数据加密使用的密钥来源路径。 |
+| 类型 | String |
+| 默认值 | 无 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.24 授权配置
+
+- authorizer_provider_class
+
+| 名字 | authorizer_provider_class |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 权限服务的类名 |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer |
+| 改后生效方式 | 重启服务生效 |
+| 其他可选值 | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer |
+
+- openID_url
+
+| 名字 | openID_url |
+| ------------ | --------------------------------------------------------- |
+| 描述 | openID 服务器地址(当 OpenIdAuthorizer 被启用时必须设定) |
+| 类型 | String(一个 http 地址) |
+| 默认值 | 无 |
+| 改后生效方式 | 重启服务生效 |
+
+- 
iotdb_server_encrypt_decrypt_provider

+| 名字 | iotdb_server_encrypt_decrypt_provider |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 用于用户密码加密的类 |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- iotdb_server_encrypt_decrypt_provider_parameter
+
+| 名字 | iotdb_server_encrypt_decrypt_provider_parameter |
+| ------------ | ----------------------------------------------- |
+| 描述 | 用于初始化用户密码加密类的参数 |
+| 类型 | String |
+| 默认值 | 无 |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- author_cache_size
+
+| 名字 | author_cache_size |
+| ------------ | ------------------------ |
+| 描述 | 用户缓存与角色缓存的大小 |
+| 类型 | int32 |
+| 默认值 | 1000 |
+| 改后生效方式 | 重启服务生效 |
+
+- author_cache_expire_time
+
+| 名字 | author_cache_expire_time |
+| ------------ | -------------------------------------- |
+| 描述 | 用户缓存与角色缓存的有效期,单位为分钟 |
+| 类型 | int32 |
+| 默认值 | 30 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.25 UDF配置
+
+- udf_initial_byte_array_length_for_memory_control
+
+| 名字 | udf_initial_byte_array_length_for_memory_control |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 用于评估 UDF 查询中文本字段的内存使用情况。建议将此值设置为略大于所有文本记录的平均长度。 |
+| 类型 | int32 |
+| 默认值 | 48 |
+| 改后生效方式 | 重启服务生效 |
+
+- udf_memory_budget_in_mb
+
+| 名字 | udf_memory_budget_in_mb |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 一个 UDF 查询中可使用的内存上限(单位:MB),不超过为读取分配内存的 20%。 |
+| 类型 | Float |
+| 默认值 | 30.0 |
+| 改后生效方式 | 重启服务生效 |
+
+- udf_reader_transformer_collector_memory_proportion
+
+| 名字 | udf_reader_transformer_collector_memory_proportion |
+| ------------ | --------------------------------------------------------- |
+| 描述 | UDF 内存分配比例。参数形式为 a : b : c,其中 a、b、c 为整数。 |
+| 类型 | String |
+| 默认值 | 1:1:1 |
+| 改后生效方式 | 重启服务生效 |
+
+- udf_lib_dir
+
+| 名字 | udf_lib_dir |
+| ------------ | ---------------------------- |
+| 描述 | UDF 日志及 jar 文件存储路径 |
+| 类型 | String |
+| 默认值 | ext/udf(Windows:ext\\udf) |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.26 触发器配置
+
+- trigger_lib_dir
+
+| 名字 | trigger_lib_dir |
+| ------------ | ----------------------- |
+| 描述 | 触发器 JAR 包存放的目录 |
+| 类型 | String |
+| 默认值 | ext/trigger |
+| 改后生效方式 | 重启服务生效 |
+
+- stateful_trigger_retry_num_when_not_found
+
+| 名字 | stateful_trigger_retry_num_when_not_found |
+| ------------ | ------------------------------------------------ |
+| 描述 | 有状态触发器触发时,无法找到触发器实例的重试次数 |
+| 类型 | Int32 |
+| 默认值 | 3 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.27 SELECT-INTO配置
+
+- into_operation_buffer_size_in_byte
+
+| 名字 | into_operation_buffer_size_in_byte |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 执行 select-into 语句时,待写入数据占用的最大内存(单位:Byte) |
+| 类型 | long |
+| 默认值 | 104857600 |
+| 改后生效方式 | 热加载 |
+
+- select_into_insert_tablet_plan_row_limit
+
+| 名字 | select_into_insert_tablet_plan_row_limit |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 执行 select-into 语句时,一个 insert-tablet-plan 中可以处理的最大行数 |
+| 类型 | int32 |
+| 默认值 | 10000 |
+| 改后生效方式 | 热加载 |
+
+- into_operation_execution_thread_count
+
+| 名字 | into_operation_execution_thread_count |
+| ------------ | ------------------------------------------ |
+| 描述 | SELECT INTO 中执行写入任务的线程池的线程数 |
+| 类型 | int32 |
+| 默认值 | 2 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.28 连续查询配置
+- continuous_query_submit_thread_count
+
+| 名字 | continuous_query_submit_thread_count |
+| ------------ | ------------------------------------ |
+| 描述 | 执行连续查询任务的线程池的线程数 |
+| 类型 | int32 |
+| 默认值 | 2 |
+| 改后生效方式 | 重启服务生效 |
+
+- 
continuous_query_min_every_interval_in_ms

+| 名字 | continuous_query_min_every_interval_in_ms |
+| ------------ | ----------------------------------------- |
+| 描述 | 连续查询执行时间间隔的最小值 |
+| 类型 | long (duration) |
+| 默认值 | 1000 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.29 PIPE配置
+
+- pipe_lib_dir
+
+| 名字 | pipe_lib_dir |
+| ------------ | -------------------------- |
+| 描述 | 自定义 Pipe 插件的存放目录 |
+| 类型 | string |
+| 默认值 | ext/pipe |
+| 改后生效方式 | 暂不支持修改 |
+
+- pipe_subtask_executor_max_thread_num
+
+| 名字 | pipe_subtask_executor_max_thread_num |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | pipe 子任务 processor、sink 中各自可以使用的最大线程数。实际值将是 min(pipe_subtask_executor_max_thread_num, max(1, CPU核心数 / 2))。 |
+| 类型 | int |
+| 默认值 | 5 |
+| 改后生效方式 | 重启服务生效 |
+
+- pipe_sink_timeout_ms
+
+| 名字 | pipe_sink_timeout_ms |
+| ------------ | --------------------------------------------- |
+| 描述 | thrift 客户端的连接超时时间(以毫秒为单位)。 |
+| 类型 | int |
+| 默认值 | 900000 |
+| 改后生效方式 | 重启服务生效 |
+
+- pipe_sink_selector_number
+
+| 名字 | pipe_sink_selector_number |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大执行结果处理线程数量。建议将此值设置为小于或等于 pipe_sink_max_client_number。 |
+| 类型 | int |
+| 默认值 | 4 |
+| 改后生效方式 | 重启服务生效 |
+
+- pipe_sink_max_client_number
+
+| 名字 | pipe_sink_max_client_number |
+| ------------ | ----------------------------------------------------------- |
+| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大客户端数量。 |
+| 类型 | int |
+| 默认值 | 16 |
+| 改后生效方式 | 重启服务生效 |
+
+- pipe_air_gap_receiver_enabled
+
+| 名字 | pipe_air_gap_receiver_enabled |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 是否启用通过网闸接收 pipe 数据。接收器只能在 tcp 模式下返回 0 或 1,以指示数据是否成功接收。 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- pipe_air_gap_receiver_port
+
+| 名字 | pipe_air_gap_receiver_port |
+| ------------ | ------------------------------------ |
+| 描述 | 服务器通过网闸接收 pipe 数据的端口。 |
+| 类型 | int |
+| 默认值 | 9780 |
+| 改后生效方式 | 重启服务生效 |
+
+- pipe_all_sinks_rate_limit_bytes_per_second
+
+| 名字 | pipe_all_sinks_rate_limit_bytes_per_second |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 所有 pipe sink 每秒可以传输的总字节数。当给定的值小于或等于 0 时,表示没有限制。默认值是 -1,表示没有限制。 |
+| 类型 | double |
+| 默认值 | -1 |
+| 改后生效方式 | 热加载 |
+
+### 3.30 Ratis共识协议配置
+
+当Region配置了RatisConsensus共识协议之后,下述的配置项才会生效
+
+- config_node_ratis_log_appender_buffer_size_max
+
+| 名字 | config_node_ratis_log_appender_buffer_size_max |
+| ------------ | ---------------------------------------------- |
+| 描述 | confignode 一次同步日志RPC最大的传输字节限制 |
+| 类型 | int32 |
+| 默认值 | 16777216 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_appender_buffer_size_max
+
+| 名字 | schema_region_ratis_log_appender_buffer_size_max |
+| ------------ | ------------------------------------------------ |
+| 描述 | schema region 一次同步日志RPC最大的传输字节限制 |
+| 类型 | int32 |
+| 默认值 | 16777216 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_appender_buffer_size_max
+
+| 名字 | data_region_ratis_log_appender_buffer_size_max |
+| ------------ | ---------------------------------------------- |
+| 描述 | data region 一次同步日志RPC最大的传输字节限制 |
+| 类型 | int32 |
+| 默认值 | 16777216 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_snapshot_trigger_threshold
+
+| 名字 | config_node_ratis_snapshot_trigger_threshold |
+| ------------ | -------------------------------------------- |
+| 描述 | confignode 触发snapshot需要的日志条数 |
+| 类型 | int32 |
+| 默认值 | 400,000 |
+| 改后生效方式 | 重启服务生效 |
+
+- 
schema_region_ratis_snapshot_trigger_threshold

+| 名字 | schema_region_ratis_snapshot_trigger_threshold |
+| ------------ | ---------------------------------------------- |
+| 描述 | schema region 触发snapshot需要的日志条数 |
+| 类型 | int32 |
+| 默认值 | 400,000 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_snapshot_trigger_threshold
+
+| 名字 | data_region_ratis_snapshot_trigger_threshold |
+| ------------ | -------------------------------------------- |
+| 描述 | data region 触发snapshot需要的日志条数 |
+| 类型 | int32 |
+| 默认值 | 400,000 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_log_unsafe_flush_enable
+
+| 名字 | config_node_ratis_log_unsafe_flush_enable |
+| ------------ | ----------------------------------------- |
+| 描述 | confignode 是否允许Raft日志异步刷盘 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_unsafe_flush_enable
+
+| 名字 | schema_region_ratis_log_unsafe_flush_enable |
+| ------------ | ------------------------------------------- |
+| 描述 | schema region 是否允许Raft日志异步刷盘 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_unsafe_flush_enable
+
+| 名字 | data_region_ratis_log_unsafe_flush_enable |
+| ------------ | ----------------------------------------- |
+| 描述 | data region 是否允许Raft日志异步刷盘 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_log_segment_size_max_in_byte
+
+| 名字 | config_node_ratis_log_segment_size_max_in_byte |
+| ------------ | ---------------------------------------------- |
+| 描述 | confignode 一个RaftLog日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_segment_size_max_in_byte
+
+| 名字 | schema_region_ratis_log_segment_size_max_in_byte |
+| ------------ | ------------------------------------------------ |
+| 描述 | schema region 一个RaftLog日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_segment_size_max_in_byte
+
+| 名字 | data_region_ratis_log_segment_size_max_in_byte |
+| ------------ | ---------------------------------------------- |
+| 描述 | data region 一个RaftLog日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_simple_consensus_log_segment_size_max_in_byte
+
+| 名字 | config_node_simple_consensus_log_segment_size_max_in_byte |
+| ------------ | --------------------------------------------------------- |
+| 描述 | Confignode 简单共识协议一个Log日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_grpc_flow_control_window
+
+| 名字 | config_node_ratis_grpc_flow_control_window |
+| ------------ | ------------------------------------------ |
+| 描述 | confignode grpc 流式拥塞窗口大小 |
+| 类型 | int32 |
+| 默认值 | 4194304 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_grpc_flow_control_window
+
+| 名字 | schema_region_ratis_grpc_flow_control_window |
+| ------------ | -------------------------------------------- |
+| 描述 | schema region grpc 流式拥塞窗口大小 |
+| 类型 | int32 |
+| 默认值 | 4194304 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_grpc_flow_control_window
+
+| 名字 | data_region_ratis_grpc_flow_control_window |
+| ------------ | ------------------------------------------ |
+| 描述 | data region grpc 流式拥塞窗口大小 |
+| 类型 | int32 |
+| 默认值 | 4194304 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_grpc_leader_outstanding_appends_max
+
+| 名字 | config_node_ratis_grpc_leader_outstanding_appends_max |
+| ------------ | ----------------------------------------------------- |
+| 描述 | config node grpc 流水线并发阈值 |
+| 类型 | int32 |
+| 默认值 | 128 |
+| 改后生效方式 | 重启服务生效 |
+
+- 
schema_region_ratis_grpc_leader_outstanding_appends_max + +| 名字 | schema_region_ratis_grpc_leader_outstanding_appends_max | +| ------------ | ------------------------------------------------------- | +| 描述 | schema region grpc 流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_grpc_leader_outstanding_appends_max + +| 名字 | data_region_ratis_grpc_leader_outstanding_appends_max | +| ------------ | ----------------------------------------------------- | +| 描述 | data region grpc 流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_log_force_sync_num + +| 名字 | config_node_ratis_log_force_sync_num | +| ------------ | ------------------------------------ | +| 描述 | config node fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_force_sync_num + +| 名字 | schema_region_ratis_log_force_sync_num | +| ------------ | -------------------------------------- | +| 描述 | schema region fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_force_sync_num + +| 名字 | data_region_ratis_log_force_sync_num | +| ------------ | ------------------------------------ | +| 描述 | data region fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | config_node_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | confignode leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | schema_region_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ------------------------------------------------------ | +| 描述 | schema region leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | data_region_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | data region leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | config_node_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | confignode leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | schema_region_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ------------------------------------------------------ | +| 描述 | schema region leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | data_region_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | data region leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_request_timeout_ms + +| 名字 | config_node_ratis_request_timeout_ms | +| ------------ | ------------------------------------ | +| 描述 | confignode Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_request_timeout_ms + +| 名字 | schema_region_ratis_request_timeout_ms | +| ------------ | -------------------------------------- | +| 描述 | schema region Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_request_timeout_ms + +| 名字 | 
data_region_ratis_request_timeout_ms | +| ------------ | ------------------------------------ | +| 描述 | data region Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_max_retry_attempts + +| 名字 | config_node_ratis_max_retry_attempts | +| ------------ | ------------------------------------ | +| 描述 | confignode Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_initial_sleep_time_ms + +| 名字 | config_node_ratis_initial_sleep_time_ms | +| ------------ | --------------------------------------- | +| 描述 | confignode Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_max_sleep_time_ms + +| 名字 | config_node_ratis_max_sleep_time_ms | +| ------------ | ------------------------------------- | +| 描述 | confignode Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_max_retry_attempts + +| 名字 | schema_region_ratis_max_retry_attempts | +| ------------ | -------------------------------------- | +| 描述 | schema region Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_initial_sleep_time_ms + +| 名字 | schema_region_ratis_initial_sleep_time_ms | +| ------------ | ----------------------------------------- | +| 描述 | schema region Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_max_sleep_time_ms + +| 名字 | schema_region_ratis_max_sleep_time_ms | +| ------------ | ---------------------------------------- | +| 描述 | schema region Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_max_retry_attempts + +| 名字 | data_region_ratis_max_retry_attempts | +| ------------ | ------------------------------------ | +| 描述 | data region Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_initial_sleep_time_ms + +| 名字 | data_region_ratis_initial_sleep_time_ms | +| ------------ | --------------------------------------- | +| 描述 | data region Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_max_sleep_time_ms + +| 名字 | data_region_ratis_max_sleep_time_ms | +| ------------ | -------------------------------------- | +| 描述 | data region Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- ratis_first_election_timeout_min_ms + +| 名字 | ratis_first_election_timeout_min_ms | +| ------------ | ----------------------------------- | +| 描述 | Ratis协议首次选举最小超时时间 | +| 类型 | int64 | +| 默认值 | 50 (ms) | +| 改后生效方式 | 重启服务生效 | + +- ratis_first_election_timeout_max_ms + +| 名字 | ratis_first_election_timeout_max_ms | +| ------------ | ----------------------------------- | +| 描述 | Ratis协议首次选举最大超时时间 | +| 类型 | int64 | +| 默认值 | 150 (ms) | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_preserve_logs_num_when_purge + +| 名字 | config_node_ratis_preserve_logs_num_when_purge | +| ------------ | ---------------------------------------------- | +| 描述 | confignode snapshot后保持一定数量日志不删除 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_preserve_logs_num_when_purge + +| 名字 | schema_region_ratis_preserve_logs_num_when_purge | +| ------------ | ------------------------------------------------ | +| 描述 | schema region snapshot后保持一定数量日志不删除 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_preserve_logs_num_when_purge + +| 名字 | data_region_ratis_preserve_logs_num_when_purge | +| ------------ | 
---------------------------------------------- |
+| 描述 | data region snapshot后保持一定数量日志不删除 |
+| 类型 | int32 |
+| 默认值 | 1000 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_log_max_size
+
+| 名字 | config_node_ratis_log_max_size |
+| ------------ | ----------------------------------- |
+| 描述 | config node磁盘Raft Log最大占用空间 |
+| 类型 | int64 |
+| 默认值 | 2147483648 (2GB) |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_max_size
+
+| 名字 | schema_region_ratis_log_max_size |
+| ------------ | -------------------------------------- |
+| 描述 | schema region 磁盘Raft Log最大占用空间 |
+| 类型 | int64 |
+| 默认值 | 2147483648 (2GB) |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_max_size
+
+| 名字 | data_region_ratis_log_max_size |
+| ------------ | ------------------------------------ |
+| 描述 | data region 磁盘Raft Log最大占用空间 |
+| 类型 | int64 |
+| 默认值 | 21474836480 (20GB) |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_periodic_snapshot_interval
+
+| 名字 | config_node_ratis_periodic_snapshot_interval |
+| ------------ | -------------------------------------------- |
+| 描述 | config node定期snapshot的间隔时间 |
+| 类型 | int64 |
+| 默认值 | 86400 (秒) |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_periodic_snapshot_interval
+
+| 名字 | schema_region_ratis_periodic_snapshot_interval |
+| ------------ | ---------------------------------------------- |
+| 描述 | schema region定期snapshot的间隔时间 |
+| 类型 | int64 |
+| 默认值 | 86400 (秒) |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_periodic_snapshot_interval
+
+| 名字 | data_region_ratis_periodic_snapshot_interval |
+| ------------ | -------------------------------------------- |
+| 描述 | data region定期snapshot的间隔时间 |
+| 类型 | int64 |
+| 默认值 | 86400 (秒) |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.31 IoTConsensusV2配置
+
+- iot_consensus_v2_pipeline_size
+
+| 名字 | iot_consensus_v2_pipeline_size |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | IoTConsensus V2中连接器(connector)和接收器(receiver)的默认事件缓冲区大小。 |
+| 类型 | int |
+| 默认值 | 5 |
+| 改后生效方式 | 重启服务生效 |
+
+- iot_consensus_v2_mode
+
+| 名字 | iot_consensus_v2_mode |
+| ------------ | ----------------------------------- |
+| 描述 | IoTConsensus V2使用的共识协议模式。 |
+| 类型 | String |
+| 默认值 | batch |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.32 Procedure 配置
+
+- procedure_core_worker_thread_count
+
+| 名字 | procedure_core_worker_thread_count |
+| ------------ | ---------------------------------- |
+| 描述 | 工作线程数量 |
+| 类型 | int32 |
+| 默认值 | 4 |
+| 改后生效方式 | 重启服务生效 |
+
+- procedure_completed_clean_interval
+
+| 名字 | procedure_completed_clean_interval |
+| ------------ | ---------------------------------- |
+| 描述 | 清理已完成的 procedure 时间间隔 |
+| 类型 | int32 |
+| 默认值 | 30(s) |
+| 改后生效方式 | 重启服务生效 |
+
+- procedure_completed_evict_ttl
+
+| 名字 | procedure_completed_evict_ttl |
+| ------------ | --------------------------------- |
+| 描述 | 已完成的 procedure 的数据保留时间 |
+| 类型 | int32 |
+| 默认值 | 60(s) |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.33 MQTT代理配置
+
+- enable_mqtt_service
+
+| 名字 | enable_mqtt_service |
+| ------------ | ------------------- |
+| 描述 | 是否开启MQTT服务 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 热加载 |
+
+- mqtt_host
+
+| 名字 | mqtt_host |
+| ------------ | -------------------- |
+| 描述 | MQTT服务绑定的host。 |
+| 类型 | String |
+| 默认值 | 127.0.0.1 |
+| 改后生效方式 | 热加载 |
+
+- mqtt_port
+
+| 名字 | mqtt_port |
+| ------------ | -------------------- |
+| 描述 | MQTT服务绑定的port。 |
+| 类型 | int32 |
+| 默认值 | 1883 |
+| 改后生效方式 | 热加载 |
+
+- mqtt_handler_pool_size
+
+| 名字 | mqtt_handler_pool_size |
+| ------------ | ---------------------------------- |
+| 描述 | 用于处理MQTT消息的处理程序池大小。 |
+| 类型 | 
int32 |
+| 默认值 | 1 |
+| 改后生效方式 | 热加载 |
+
+- mqtt_payload_formatter
+
+| 名字 | mqtt_payload_formatter |
+| ------------ | ---------------------------- |
+| 描述 | MQTT消息有效负载格式化程序。 |
+| 类型 | String |
+| 默认值 | json |
+| 改后生效方式 | 热加载 |
+
+- mqtt_max_message_size
+
+| 名字 | mqtt_max_message_size |
+| ------------ | ------------------------------------ |
+| 描述 | MQTT消息的最大长度(以字节为单位)。 |
+| 类型 | int32 |
+| 默认值 | 1048576 |
+| 改后生效方式 | 热加载 |
+
+### 3.34 审计日志配置
+
+- enable_audit_log
+
+| 名字 | enable_audit_log |
+| ------------ | ------------------------------ |
+| 描述 | 用于控制是否启用审计日志功能。 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- audit_log_storage
+
+| 名字 | audit_log_storage |
+| ------------ | -------------------------- |
+| 描述 | 定义了审计日志的输出位置。 |
+| 类型 | String |
+| 默认值 | IOTDB,LOGGER |
+| 改后生效方式 | 重启服务生效 |
+
+- audit_log_operation
+
+| 名字 | audit_log_operation |
+| ------------ | -------------------------------------- |
+| 描述 | 定义了哪些类型的操作需要记录审计日志。 |
+| 类型 | String |
+| 默认值 | DML,DDL,QUERY |
+| 改后生效方式 | 重启服务生效 |
+
+- enable_audit_log_for_native_insert_api
+
+| 名字 | enable_audit_log_for_native_insert_api |
+| ------------ | -------------------------------------- |
+| 描述 | 用于控制本地写入API是否记录审计日志。 |
+| 类型 | Boolean |
+| 默认值 | true |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.35 白名单配置
+- enable_white_list
+
+| 名字 | enable_white_list |
+| ------------ | ----------------- |
+| 描述 | 是否启用白名单。 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 热加载 |
+
+### 3.36 IoTDB-AI 配置
+
+- model_inference_execution_thread_count
+
+| 名字 | model_inference_execution_thread_count |
+| ------------ | -------------------------------------- |
+| 描述 | 用于模型推理操作的线程数。 |
+| 类型 | int |
+| 默认值 | 5 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.37 TsFile 主动监听&加载功能配置
+
+- load_clean_up_task_execution_delay_time_seconds
+
+| 名字 | load_clean_up_task_execution_delay_time_seconds |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 在加载TsFile失败后,系统将等待多长时间才会执行清理任务来清除这些未成功加载的TsFile。 |
+| 类型 | int |
+| 默认值 | 1800 |
+| 改后生效方式 | 热加载 |
+
+- load_write_throughput_bytes_per_second
+
+| 名字 | load_write_throughput_bytes_per_second |
+| ------------ | -------------------------------------- |
+| 描述 | 加载TsFile时磁盘每秒写入的最大字节数。 |
+| 类型 | int |
+| 默认值 | -1 |
+| 改后生效方式 | 热加载 |
+
+- load_active_listening_enable
+
+| 名字 | load_active_listening_enable |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 是否开启 DataNode 主动监听并且加载 tsfile 的功能(默认开启)。 |
+| 类型 | Boolean |
+| 默认值 | true |
+| 改后生效方式 | 热加载 |
+
+- load_active_listening_dirs
+
+| 名字 | load_active_listening_dirs |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 需要监听的目录(自动包括目录中的子目录),如有多个请使用 "," 隔开,默认的目录为 ext/load/pending(支持热加载)。 |
+| 类型 | String |
+| 默认值 | ext/load/pending |
+| 改后生效方式 | 热加载 |
+
+- load_active_listening_fail_dir
+
+| 名字 | load_active_listening_fail_dir |
+| ------------ | ---------------------------------------------------------- |
+| 描述 | 执行加载 tsfile 文件失败后将文件转存的目录,只能配置一个。 |
+| 类型 | String |
+| 默认值 | ext/load/failed |
+| 改后生效方式 | 热加载 |
+
+- load_active_listening_max_thread_num
+
+| 名字 | load_active_listening_max_thread_num |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 同时执行加载 tsfile 任务的最大线程数。参数被注释掉时的默认值为 max(1, CPU 核心数 / 2);当用户设置的值不在 [1, CPU 核心数 / 2] 区间内时,也会使用该默认值。 |
+| 类型 | Long |
+| 默认值 | 0 |
+| 改后生效方式 | 重启服务生效 |
+
+- load_active_listening_check_interval_seconds
+
+| 名字 | load_active_listening_check_interval_seconds |
+| ------------ | 
------------------------------------------------------------ |
+| 描述 | 主动监听轮询间隔,单位秒。主动监听 tsfile 的功能是通过轮询检查文件夹实现的。该配置指定了两次检查 load_active_listening_dirs 的时间间隔,每次检查完成 load_active_listening_check_interval_seconds 秒后,会执行下一次检查。当用户设置的轮询间隔小于 1 时,会被设置为默认值 5 秒。 |
+| 类型 | Long |
+| 默认值 | 5 |
+| 改后生效方式 | 重启服务生效 |
+
+
+* last_cache_operation_on_load
+
+|名字| last_cache_operation_on_load |
+|:---:|:------------------------------------------------------------|
+|描述| 当成功加载一个 TsFile 时,对 LastCache 执行的操作。`UPDATE`:使用 TsFile 中的数据更新 LastCache;`UPDATE_NO_BLOB`:与 UPDATE 类似,但会使 blob 序列的 LastCache 失效;`CLEAN_DEVICE`:使 TsFile 中包含的设备的 LastCache 失效;`CLEAN_ALL`:清空整个 LastCache。 |
+|类型| String |
+|默认值| UPDATE_NO_BLOB |
+|改后生效方式| 重启后生效 |
+
+* cache_last_values_for_load
+
+|名字| cache_last_values_for_load |
+|:---:|:------------------------------------------------------------|
+|描述| 在加载 TsFile 之前是否缓存最新值(last values)。仅在 `last_cache_operation_on_load=UPDATE_NO_BLOB` 或 `last_cache_operation_on_load=UPDATE` 时生效。当设置为 true 时,即使 `last_cache_operation_on_load=UPDATE`,也会忽略 blob 序列。启用此选项会在加载 TsFile 期间增加内存占用。 |
+|类型| Boolean |
+|默认值| true |
+|改后生效方式| 重启后生效 |
+
+* cache_last_values_memory_budget_in_byte
+
+|名字| cache_last_values_memory_budget_in_byte |
+|:---:|:------------------------------------------------------------|
+|描述| 当 `cache_last_values_for_load=true` 时,用于缓存最新值的最大内存大小(以字节为单位)。如果超过该值,缓存的值将被丢弃,并以流式方式直接从 TsFile 中读取最新值。 |
+|类型| int32 |
+|默认值| 4194304 |
+|改后生效方式| 重启后生效 |
+
+
+### 3.38 分发重试配置
+
+- write_request_remote_dispatch_max_retry_duration_in_ms
+
+| 名字 | write_request_remote_dispatch_max_retry_duration_in_ms |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 在遇到未知错误时,写请求远程分发的最大重试时间,单位是毫秒。 |
+| 类型 | Long |
+| 默认值 | 60000 |
+| 改后生效方式 | 热加载 |
+
+- enable_retry_for_unknown_error
+
+| 名字 | enable_retry_for_unknown_error |
+| ------------ | -------------------------------- |
+| 描述 | 用于控制是否对未知错误进行重试。 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 热加载 |
\ No newline at end of file
diff --git a/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual_timecho.md b/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual_timecho.md
new file mode 100644
index 000000000..b6ce9a099
--- /dev/null
+++ b/src/zh/UserGuide/latest-Table/Reference/System-Config-Manual_timecho.md
@@ -0,0 +1,3364 @@
+
+
+# 配置参数
+
+IoTDB 配置文件位于 IoTDB 安装目录:`conf`文件夹下。
+
+- `confignode-env.sh/bat`:环境配置项的配置文件,可以配置 ConfigNode 的内存大小。
+- `datanode-env.sh/bat`:环境配置项的配置文件,可以配置 DataNode 的内存大小。
+- `iotdb-system.properties`:IoTDB 的配置文件。
+- `iotdb-system.properties.template`:IoTDB 的配置文件模版。
+
+## 1. 修改配置
+
+在 `iotdb-system.properties` 文件中已存在的参数可以直接进行修改。对于那些在 `iotdb-system.properties` 中未列出的参数,可以从 `iotdb-system.properties.template` 配置文件模板中找到相应的参数,然后将其复制到 `iotdb-system.properties` 文件中进行修改。
+
+### 1.1 改后生效方式
+
+不同的配置参数有不同的生效方式,分为以下三种:
+
+- 仅允许在第一次启动服务前修改: 在第一次启动 ConfigNode/DataNode 后即禁止修改,修改会导致 ConfigNode/DataNode 无法启动。
+- 重启服务生效: ConfigNode/DataNode 启动后仍可修改,但需要重启 ConfigNode/DataNode 后才生效。
+- 热加载: 可在 ConfigNode/DataNode 运行时修改,修改后通过 Session 或 Cli 发送 `load configuration` 或 `set configuration key1 = 'value1'` 命令(SQL)至 IoTDB 使配置生效,示例见下方代码块。
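+
+下面给出一个热加载方式的简单示例(此处以 3.20 节的 enable_seq_space_compaction 参数为例,仅作演示,实际修改时请替换为目标参数及取值):
+
+```sql
+-- 方式一:通过 set configuration 直接修改单个热加载配置项并立即生效
+set configuration enable_seq_space_compaction = 'false'
+
+-- 方式二:手动编辑 iotdb-system.properties 后,通知 IoTDB 重新加载配置文件
+load configuration
+```
+
+## 2. 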
环境配置项 + +### 2.1 confignode-env.sh/bat + +环境配置项主要用于对 ConfigNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。ConfigNode 启动时,此部分配置会被传给 JVM,详细配置项说明如下: + +- MEMORY_SIZE + +| 名字 | MEMORY_SIZE | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 启动时分配的内存大小 | +| 类型 | String | +| 默认值 | 取决于操作系统和机器配置。默认为机器内存的十分之三,最多会被设置为 16G。 | +| 改后生效方式 | 重启服务生效 | + +- ON_HEAP_MEMORY + +| 名字 | ON_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置。 | +| 改后生效方式 | 重启服务生效 | + +- OFF_HEAP_MEMORY + +| 名字 | OFF_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB ConfigNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置。 | +| 改后生效方式 | 重启服务生效 | + +### 2.2 datanode-env.sh/bat + +环境配置项主要用于对 DataNode 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。DataNode/Standalone 启动时,此部分配置会被传给 JVM,详细配置项说明如下: + +- MEMORY_SIZE + +| 名字 | MEMORY_SIZE | +| ------------ | ---------------------------------------------------- | +| 描述 | IoTDB DataNode 启动时分配的内存大小 | +| 类型 | String | +| 默认值 | 取决于操作系统和机器配置。默认为机器内存的二分之一。 | +| 改后生效方式 | 重启服务生效 | + +- ON_HEAP_MEMORY + +| 名字 | ON_HEAP_MEMORY | +| ------------ | ---------------------------------------------------------- | +| 描述 | IoTDB DataNode 能使用的堆内内存大小, 曾用名: MAX_HEAP_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置。 | +| 改后生效方式 | 重启服务生效 | + +- OFF_HEAP_MEMORY + +| 名字 | OFF_HEAP_MEMORY | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB DataNode 能使用的堆外内存大小, 曾用名: MAX_DIRECT_MEMORY_SIZE | +| 类型 | String | +| 默认值 | 取决于MEMORY_SIZE的配置 | +| 改后生效方式 | 重启服务生效 | + + +## 3. 
系统配置项(iotdb-system.properties.template) + +### 3.1 集群管理 + +- cluster_name + +| 名字 | cluster_name | +| -------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 集群名称 | +| 类型 | String | +| 默认值 | default_cluster | +| 修改方式 | CLI 中执行语句 `set configuration cluster_name = 'xxx'` (xxx为希望修改成的集群名称) | +| 注意 | 此修改通过网络分发至每个节点。在网络波动或者有节点宕机的情况下,不保证能够在全部节点修改成功。未修改成功的节点重启时无法加入集群,此时需要手动修改该节点的配置文件中的cluster_name项,再重启。正常情况下,不建议通过手动修改配置文件的方式修改集群名称,不建议通过`load configuration`的方式热加载。 | + +### 3.2 SeedConfigNode 配置 + +- cn_seed_config_node + +| 名字 | cn_seed_config_node | +| ------------ | ------------------------------------------------------------ | +| 描述 | 目标 ConfigNode 地址,ConfigNode 通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 cn_target_config_node_list | +| 类型 | String | +| 默认值 | 127.0.0.1:10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_seed_config_node + +| 名字 | dn_seed_config_node | +| ------------ | ------------------------------------------------------------ | +| 描述 | ConfigNode 地址,DataNode 启动时通过此地址加入集群,推荐使用 SeedConfigNode。V1.2.2 及以前曾用名是 dn_target_config_node_list | +| 类型 | String | +| 默认值 | 127.0.0.1:10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +### 3.3 Node RPC 配置 + +- cn_internal_address + +| 名字 | cn_internal_address | +| ------------ | ---------------------------- | +| 描述 | ConfigNode 集群内部地址 | +| 类型 | String | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- cn_internal_port + +| 名字 | cn_internal_port | +| ------------ | ---------------------------- | +| 描述 | ConfigNode 集群服务监听端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 10710 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- cn_consensus_port + +| 名字 | cn_consensus_port | +| ------------ | ----------------------------- | +| 描述 | ConfigNode 的共识协议通信端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 10720 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_rpc_address + +| 名字 | dn_rpc_address | +| ------------ |----------------| +| 描述 | 客户端 RPC 服务监听地址 | +| 类型 | String | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_port + +| 名字 | dn_rpc_port | +| ------------ | ----------------------- | +| 描述 | Client RPC 服务监听端口 | +| 类型 | Short Int : [0,65535] | +| 默认值 | 6667 | +| 改后生效方式 | 重启服务生效 | + +- dn_internal_address + +| 名字 | dn_internal_address | +| ------------ | ---------------------------- | +| 描述 | DataNode 内网通信地址 | +| 类型 | string | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_internal_port + +| 名字 | dn_internal_port | +| ------------ | ---------------------------- | +| 描述 | DataNode 内网通信端口 | +| 类型 | int | +| 默认值 | 10730 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_mpp_data_exchange_port + +| 名字 | dn_mpp_data_exchange_port | +| ------------ | ---------------------------- | +| 描述 | MPP 数据交换端口 | +| 类型 | int | +| 默认值 | 10740 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_schema_region_consensus_port + +| 名字 | dn_schema_region_consensus_port | +| ------------ | ------------------------------------- | +| 描述 | DataNode 元数据副本的共识协议通信端口 | +| 类型 | int | +| 默认值 | 10750 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_data_region_consensus_port + +| 名字 | dn_data_region_consensus_port | +| ------------ | ----------------------------------- | +| 描述 | DataNode 数据副本的共识协议通信端口 | +| 类型 | int | +| 默认值 | 10760 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- dn_join_cluster_retry_interval_ms + +| 名字 | dn_join_cluster_retry_interval_ms | +| ------------ | --------------------------------- | +| 描述 | DataNode 再次重试加入集群等待时间 | +| 类型 | long | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +### 3.4 副本配置 + +- 
config_node_consensus_protocol_class

+| 名字 | config_node_consensus_protocol_class |
+| ------------ | ------------------------------------------------ |
+| 描述 | ConfigNode 副本的共识协议,仅支持 RatisConsensus |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- schema_replication_factor
+
+| 名字 | schema_replication_factor |
+| ------------ | ---------------------------------- |
+| 描述 | Database 的默认元数据副本数 |
+| 类型 | int32 |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务后对**新的 Database** 生效 |
+
+- schema_region_consensus_protocol_class
+
+| 名字 | schema_region_consensus_protocol_class |
+| ------------ | ----------------------------------------------------- |
+| 描述 | 元数据副本的共识协议,多副本时只能使用 RatisConsensus |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.consensus.ratis.RatisConsensus |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- data_replication_factor
+
+| 名字 | data_replication_factor |
+| ------------ | ---------------------------------- |
+| 描述 | Database 的默认数据副本数 |
+| 类型 | int32 |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务后对**新的 Database** 生效 |
+
+- data_region_consensus_protocol_class
+
+| 名字 | data_region_consensus_protocol_class |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 数据副本的共识协议,多副本时可以使用 IoTConsensus 或 RatisConsensus |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.consensus.iot.IoTConsensus |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+### 3.5 目录配置
+
+- cn_system_dir
+
+| 名字 | cn_system_dir |
+| ------------ | ----------------------------------------------------------- |
+| 描述 | ConfigNode 系统数据存储路径 |
+| 类型 | String |
+| 默认值 | data/confignode/system(Windows:data\\confignode\\system) |
+| 改后生效方式 | 重启服务生效 |
+
+- cn_consensus_dir
+
+| 名字 | cn_consensus_dir |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | ConfigNode 共识协议数据存储路径 |
+| 类型 | String |
+| 默认值 | data/confignode/consensus(Windows:data\\confignode\\consensus) |
+| 改后生效方式 | 重启服务生效 |
+
+- cn_pipe_receiver_file_dir
+
+| 名字 | cn_pipe_receiver_file_dir |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | ConfigNode中pipe接收者用于存储文件的目录路径。 |
+| 类型 | String |
+| 默认值 | data/confignode/system/pipe/receiver(Windows:data\\confignode\\system\\pipe\\receiver) |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_system_dir
+
+| 名字 | dn_system_dir |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | IoTDB 元数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 |
+| 类型 | String |
+| 默认值 | data/datanode/system(Windows:data\\datanode\\system) |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_data_dirs
+
+| 名字 | dn_data_dirs |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | IoTDB 数据存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 |
+| 类型 | String |
+| 默认值 | data/datanode/data(Windows:data\\datanode\\data) |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_multi_dir_strategy
+
+| 名字 | dn_multi_dir_strategy |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | IoTDB 在 data_dirs 中为 TsFile 选择目录时采用的策略。可使用简单类名或类名全称。系统提供以下两种策略:
1. SequenceStrategy:IoTDB 按顺序选择目录,依次遍历 data_dirs 中的所有目录,并不断轮循;
2. MaxDiskUsableSpaceFirstStrategy:IoTDB 优先选择 data_dirs 中对应磁盘空余空间最大的目录;
您可以通过以下方法完成用户自定义策略:
1. 继承 org.apache.iotdb.db.storageengine.rescon.disk.strategy.DirectoryStrategy 类并实现自身的 Strategy 方法;
2. 将实现的类的完整类名(包名加类名,UserDefineStrategyPackage)填写到该配置项;
3. 将该类 jar 包添加到工程中。 | +| 类型 | String | +| 默认值 | SequenceStrategy | +| 改后生效方式 | 热加载 | + +- dn_consensus_dir + +| 名字 | dn_consensus_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 共识层日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/consensus(Windows:data\\datanode\\consensus) | +| 改后生效方式 | 重启服务生效 | + +- dn_wal_dirs + +| 名字 | dn_wal_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 写前日志存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/wal(Windows:data\\datanode\\wal) | +| 改后生效方式 | 重启服务生效 | + +- dn_tracing_dir + +| 名字 | dn_tracing_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB 追踪根目录路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | datanode/tracing(Windows:datanode\\tracing) | +| 改后生效方式 | 重启服务生效 | + +- dn_sync_dir + +| 名字 | dn_sync_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTDB sync 存储路径,默认存放在和 sbin 目录同级的 data 目录下。相对路径的起始目录与操作系统相关,建议使用绝对路径。 | +| 类型 | String | +| 默认值 | data/datanode/sync(Windows:data\\datanode\\sync) | +| 改后生效方式 | 重启服务生效 | + +- sort_tmp_dir + +| 名字 | sort_tmp_dir | +| ------------ | ------------------------------------------------- | +| 描述 | 用于配置排序操作的临时目录。 | +| 类型 | String | +| 默认值 | data/datanode/tmp(Windows:data\\datanode\\tmp) | +| 改后生效方式 | 重启服务生效 | + +- dn_pipe_receiver_file_dirs + +| 名字 | dn_pipe_receiver_file_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | DataNode中pipe接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/receiver(Windows:data\\datanode\\system\\pipe\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- iot_consensus_v2_receiver_file_dirs + +| 名字 | iot_consensus_v2_receiver_file_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTConsensus V2中接收者用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/consensus/receiver(Windows:data\\datanode\\system\\pipe\\consensus\\receiver) | +| 改后生效方式 | 重启服务生效 | + +- iot_consensus_v2_deletion_file_dir + +| 名字 | iot_consensus_v2_deletion_file_dir | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTConsensus V2中删除操作用于存储文件的目录路径。 | +| 类型 | String | +| 默认值 | data/datanode/system/pipe/consensus/deletion(Windows:data\\datanode\\system\\pipe\\consensus\\deletion) | +| 改后生效方式 | 重启服务生效 | + +### 3.6 监控配置 + +- cn_metric_reporter_list + +| 名字 | cn_metric_reporter_list | +| ------------ | -------------------------------------------------- | +| 描述 | confignode中用于配置监控模块的数据需要报告的系统。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_level + +| 名字 | cn_metric_level | +| ------------ | ------------------------------------------ | +| 描述 | confignode中控制监控模块收集数据的详细程度 | +| 类型 | String | +| 默认值 | IMPORTANT | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_async_collect_period + +| 名字 | cn_metric_async_collect_period | +| ------------ | -------------------------------------------------- | +| 描述 | confignode中某些监控数据异步收集的周期,单位是秒。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- cn_metric_prometheus_reporter_port + +| 名字 | cn_metric_prometheus_reporter_port | +| ------------ | ------------------------------------------------------ | +| 描述 | confignode中Prometheus报告者用于监控数据报告的端口号。 | +| 类型 | int | +| 默认值 | 9091 | +| 改后生效方式 | 重启服务生效 | + +- 
dn_metric_reporter_list + +| 名字 | dn_metric_reporter_list | +| ------------ | ------------------------------------------------ | +| 描述 | DataNode中用于配置监控模块的数据需要报告的系统。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_level + +| 名字 | dn_metric_level | +| ------------ | ---------------------------------------- | +| 描述 | DataNode中控制监控模块收集数据的详细程度 | +| 类型 | String | +| 默认值 | IMPORTANT | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_async_collect_period + +| 名字 | dn_metric_async_collect_period | +| ------------ | ------------------------------------------------ | +| 描述 | DataNode中某些监控数据异步收集的周期,单位是秒。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_prometheus_reporter_port + +| 名字 | dn_metric_prometheus_reporter_port | +| ------------ | ---------------------------------------------------- | +| 描述 | DataNode中Prometheus报告者用于监控数据报告的端口号。 | +| 类型 | int | +| 默认值 | 9092 | +| 改后生效方式 | 重启服务生效 | + +- dn_metric_internal_reporter_type + +| 名字 | dn_metric_internal_reporter_type | +| ------------ | ------------------------------------------------------------ | +| 描述 | DataNode中监控模块内部报告者的种类,用于内部监控和检查数据是否已经成功写入和刷新。 | +| 类型 | String | +| 默认值 | IOTDB | +| 改后生效方式 | 重启服务生效 | + +### 3.7 SSL 配置 + +- enable_thrift_ssl + +| 名字 | enable_thrift_ssl | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当enable_thrift_ssl配置为true时,将通过dn_rpc_port使用 SSL 加密进行通信 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- enable_https + +| 名字 | enable_https | +| ------------ | ------------------------------ | +| 描述 | REST Service 是否开启 SSL 配置 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- key_store_path + +| 名字 | key_store_path | +| ------------ | -------------- | +| 描述 | ssl证书路径 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- key_store_pwd + +| 名字 | key_store_pwd | +| ------------ | ------------- | +| 描述 | ssl证书密码 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +### 3.8 连接配置 + +- cn_rpc_thrift_compression_enable + +| 名字 | cn_rpc_thrift_compression_enable | +| ------------ | -------------------------------- | +| 描述 | 是否启用 thrift 的压缩机制。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- cn_rpc_max_concurrent_client_num + +| 名字 | cn_rpc_max_concurrent_client_num | +| ------------ |---------------------------------| +| 描述 | 最大连接数。 | +| 类型 | int | +| 默认值 | 3000 | +| 改后生效方式 | 重启服务生效 | + +- cn_connection_timeout_ms + +| 名字 | cn_connection_timeout_ms | +| ------------ | ------------------------ | +| 描述 | 节点连接超时时间 | +| 类型 | int | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- cn_selector_thread_nums_of_client_manager + +| 名字 | cn_selector_thread_nums_of_client_manager | +| ------------ | ----------------------------------------- | +| 描述 | 客户端异步线程管理的选择器线程数量 | +| 类型 | int | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- cn_max_client_count_for_each_node_in_client_manager + +| 名字 | cn_max_client_count_for_each_node_in_client_manager | +| ------------ | --------------------------------------------------- | +| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 | +| 类型 | int | +| 默认值 | 300 | +| 改后生效方式 | 重启服务生效 | + +- dn_session_timeout_threshold + +| 名字 | dn_session_timeout_threshold | +| ------------ | ---------------------------- | +| 描述 | 最大的会话空闲时间 | +| 类型 | int | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_thrift_compression_enable + +| 名字 | dn_rpc_thrift_compression_enable | +| ------------ | -------------------------------- | +| 描述 | 是否启用 thrift 的压缩机制 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- dn_rpc_advanced_compression_enable + 
+| 名字 | dn_rpc_advanced_compression_enable |
+| ------------ | ---------------------------------- |
+| 描述 | 是否启用 thrift 的自定制压缩机制 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_rpc_selector_thread_count
+
+| 名字 | dn_rpc_selector_thread_count |
+| ------------ | ---------------------------- |
+| 描述 | rpc 选择器线程数量 |
+| 类型 | int |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_rpc_min_concurrent_client_num
+
+| 名字 | dn_rpc_min_concurrent_client_num |
+| ------------ | -------------------------------- |
+| 描述 | 最小连接数 |
+| 类型 | Short Int : [0,65535] |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_rpc_max_concurrent_client_num
+
+| 名字 | dn_rpc_max_concurrent_client_num |
+| ------------ | -------------------------------- |
+| 描述 | 最大连接数 |
+| 类型 | Short Int : [0,65535] |
+| 默认值 | 1000 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_thrift_max_frame_size
+
+| 名字 | dn_thrift_max_frame_size |
+| ------------ | ------------------------- |
+| 描述 | RPC 请求/响应的最大字节数 |
+| 类型 | long |
+| 默认值 | 536870912 (默认值512MB) |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_thrift_init_buffer_size
+
+| 名字 | dn_thrift_init_buffer_size |
+| ------------ | ------------------------------- |
+| 描述 | thrift 初始 buffer 大小(字节数) |
+| 类型 | long |
+| 默认值 | 1024 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_connection_timeout_ms
+
+| 名字 | dn_connection_timeout_ms |
+| ------------ | ------------------------ |
+| 描述 | 节点连接超时时间 |
+| 类型 | int |
+| 默认值 | 60000 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_selector_thread_count_of_client_manager
+
+| 名字 | dn_selector_thread_count_of_client_manager |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | ClientManager 中异步线程的选择器线程(TAsyncClientManager)数量 |
+| 类型 | int |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务生效 |
+
+- dn_max_client_count_for_each_node_in_client_manager
+
+| 名字 | dn_max_client_count_for_each_node_in_client_manager |
+| ------------ | --------------------------------------------------- |
+| 描述 | 单 ClientManager 中路由到每个节点的最大 Client 个数 |
+| 类型 | int |
+| 默认值 | 300 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.9 对象存储管理
+
+- remote_tsfile_cache_dirs
+
+| 名字 | remote_tsfile_cache_dirs |
+| ------------ | ------------------------ |
+| 描述 | 云端存储在本地的缓存目录 |
+| 类型 | String |
+| 默认值 | data/datanode/data/cache |
+| 改后生效方式 | 重启服务生效 |
+
+- remote_tsfile_cache_page_size_in_kb
+
+| 名字 | remote_tsfile_cache_page_size_in_kb |
+| ------------ | ----------------------------------- |
+| 描述 | 云端存储在本地缓存文件的块大小 |
+| 类型 | int |
+| 默认值 | 20480 |
+| 改后生效方式 | 重启服务生效 |
+
+- remote_tsfile_cache_max_disk_usage_in_mb
+
+| 名字 | remote_tsfile_cache_max_disk_usage_in_mb |
+| ------------ | ---------------------------------------- |
+| 描述 | 云端存储本地缓存的最大磁盘占用大小 |
+| 类型 | long |
+| 默认值 | 51200 |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_type
+
+| 名字 | object_storage_type |
+| ------------ | ------------------- |
+| 描述 | 云端存储类型 |
+| 类型 | String |
+| 默认值 | AWS_S3 |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_endpoint
+
+| 名字 | object_storage_endpoint |
+| ------------ | ----------------------- |
+| 描述 | 云端存储的 endpoint |
+| 类型 | String |
+| 默认值 | 无 |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_bucket
+
+| 名字 | object_storage_bucket |
+| ------------ | ---------------------- |
+| 描述 | 云端存储 bucket 的名称 |
+| 类型 | String |
+| 默认值 | iotdb_data |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_access_key
+
+| 名字 | object_storage_access_key |
+| ------------ | ------------------------- |
+| 描述 | 云端存储的验证信息 key |
+| 类型 | String |
+| 默认值 | 无 |
+| 改后生效方式 | 重启服务生效 |
+
+- object_storage_access_secret
+
+| 名字 | 
+| 名字 | object_storage_access_secret |
+| ------------ | ---------------------------- |
+| 描述 | 云端存储的验证信息 secret |
+| 类型 | String |
+| 默认值 | 无 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.10 多级管理
+
+- dn_default_space_usage_thresholds
+
+| 名字 | dn_default_space_usage_thresholds |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 定义每个层级数据目录的最小剩余空间比例;当剩余空间少于该比例时,数据会被自动迁移至下一个层级;当最后一个层级的剩余存储空间低于此阈值时,会将系统置为 READ_ONLY |
+| 类型 | double |
+| 默认值 | 0.85 |
+| 改后生效方式 | 热加载 |
+
+- dn_tier_full_policy
+
+| 名字 | dn_tier_full_policy |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 当最后一个层级的已用空间高于 dn_default_space_usage_thresholds 时,对该层级数据的处理策略。 |
+| 类型 | String |
+| 默认值 | NULL |
+| 改后生效方式 | 热加载 |
+
+- migrate_thread_count
+
+| 名字 | migrate_thread_count |
+| ------------ | ---------------------------------------- |
+| 描述 | DataNode 数据目录中迁移操作的线程池大小。 |
+| 类型 | int |
+| 默认值 | 1 |
+| 改后生效方式 | 热加载 |
+
+- tiered_storage_migrate_speed_limit_bytes_per_sec
+
+| 名字 | tiered_storage_migrate_speed_limit_bytes_per_sec |
+| ------------ | ------------------------------------------------ |
+| 描述 | 限制不同存储层级之间的数据迁移速度。 |
+| 类型 | int |
+| 默认值 | 10485760 |
+| 改后生效方式 | 热加载 |
+
+### 3.11 REST服务配置
+
+- enable_rest_service
+
+| 名字 | enable_rest_service |
+| ------------ | ------------------- |
+| 描述 | 是否开启 REST 服务。 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- rest_service_port
+
+| 名字 | rest_service_port |
+| ------------ | ------------------ |
+| 描述 | REST 服务监听端口号 |
+| 类型 | int32 |
+| 默认值 | 18080 |
+| 改后生效方式 | 重启服务生效 |
+
+- enable_swagger
+
+| 名字 | enable_swagger |
+| ------------ | --------------------------------- |
+| 描述 | 是否启用 swagger 来展示 REST 接口信息 |
+| 类型 | Boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- rest_query_default_row_size_limit
+
+| 名字 | rest_query_default_row_size_limit |
+| ------------ | --------------------------------- |
+| 描述 | 一次查询能返回的结果集最大行数 |
+| 类型 | int32 |
+| 默认值 | 10000 |
+| 改后生效方式 | 重启服务生效 |
+
+- cache_expire_in_seconds
+
+| 名字 | cache_expire_in_seconds |
+| ------------ | -------------------------------- |
+| 描述 | 用户登录信息缓存的过期时间(秒) |
+| 类型 | int32 |
+| 默认值 | 28800 |
+| 改后生效方式 | 重启服务生效 |
+
+- cache_max_num
+
+| 名字 | cache_max_num |
+| ------------ | ------------------------ |
+| 描述 | 缓存中存储的最大用户数量 |
+| 类型 | int32 |
+| 默认值 | 100 |
+| 改后生效方式 | 重启服务生效 |
+
+- cache_init_num
+
+| 名字 | cache_init_num |
+| ------------ | -------------- |
+| 描述 | 缓存初始容量 |
+| 类型 | int32 |
+| 默认值 | 10 |
+| 改后生效方式 | 重启服务生效 |
+
+- client_auth
+
+| 名字 | client_auth |
+| ------------ | ---------------------- |
+| 描述 | 是否需要客户端身份验证 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- trust_store_path
+
+| 名字 | trust_store_path |
+| ------------ | ----------------------- |
+| 描述 | trustStore 路径(非必填) |
+| 类型 | String |
+| 默认值 | "" |
+| 改后生效方式 | 重启服务生效 |
+
+- trust_store_pwd
+
+| 名字 | trust_store_pwd |
+| ------------ | ------------------------- |
+| 描述 | trustStore 密码(非必填) |
+| 类型 | String |
+| 默认值 | "" |
+| 改后生效方式 | 重启服务生效 |
+
+- idle_timeout_in_seconds
+
+| 名字 | idle_timeout_in_seconds |
+| ------------ | ----------------------- |
+| 描述 | SSL 超时时间,单位为秒 |
+| 类型 | int32 |
+| 默认值 | 5000 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.12 负载均衡配置
+
+- series_slot_num
+
+| 名字 | series_slot_num |
+| ------------ | ---------------------------- |
+| 描述 | 序列分区槽数 |
+| 类型 | int32 |
+| 默认值 | 10000 |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
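+
+注意:标注为“仅允许在第一次启动服务前修改”的参数(如 series_slot_num),在集群完成首次启动后便无法再更改。以下为一个示意性的 iotdb-system.properties 片段(取值仅为示例):
+
+```properties
+# 示例:在集群第一次启动前设定序列分区槽数,首次启动后不可修改
+series_slot_num=10000
+```
+
+- series_partition_executor_class
+
+| 名字 | series_partition_executor_class |
+| ------------ | ------------------------------------------------------------ |
+| 描述 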
| 序列分区哈希函数 |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- schema_region_group_extension_policy
+
+| 名字 | schema_region_group_extension_policy |
+| ------------ | ------------------------------------ |
+| 描述 | SchemaRegionGroup 的扩容策略 |
+| 类型 | string |
+| 默认值 | AUTO |
+| 改后生效方式 | 重启服务生效 |
+
+- default_schema_region_group_num_per_database
+
+| 名字 | default_schema_region_group_num_per_database |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 当选用 CUSTOM-SchemaRegionGroup 扩容策略时,此参数为每个 Database 拥有的 SchemaRegionGroup 数量;当选用 AUTO-SchemaRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 SchemaRegionGroup 数量 |
+| 类型 | int |
+| 默认值 | 1 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_per_data_node
+
+| 名字 | schema_region_per_data_node |
+| ------------ | -------------------------------------------------- |
+| 描述 | 期望每个 DataNode 可管理的 SchemaRegion 的最大数量 |
+| 类型 | double |
+| 默认值 | 1.0 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_group_extension_policy
+
+| 名字 | data_region_group_extension_policy |
+| ------------ | ---------------------------------- |
+| 描述 | DataRegionGroup 的扩容策略 |
+| 类型 | string |
+| 默认值 | AUTO |
+| 改后生效方式 | 重启服务生效 |
+
+- default_data_region_group_num_per_database
+
+| 名字 | default_data_region_group_num_per_database |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 当选用 CUSTOM-DataRegionGroup 扩容策略时,此参数为每个 Database 拥有的 DataRegionGroup 数量;当选用 AUTO-DataRegionGroup 扩容策略时,此参数为每个 Database 最少拥有的 DataRegionGroup 数量 |
+| 类型 | int |
+| 默认值 | 2 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_per_data_node
+
+| 名字 | data_region_per_data_node |
+| ------------ | ------------------------------------------------ |
+| 描述 | 期望每个 DataNode 可管理的 DataRegion 的最大数量 |
+| 类型 | double |
+| 默认值 | CPU 核心数的一半 |
+| 改后生效方式 | 重启服务生效 |
+
+- enable_auto_leader_balance_for_ratis_consensus
+
+| 名字 | enable_auto_leader_balance_for_ratis_consensus |
+| ------------ | ---------------------------------------------- |
+| 描述 | 是否为 Ratis 共识协议开启自动均衡 leader 策略 |
+| 类型 | Boolean |
+| 默认值 | true |
+| 改后生效方式 | 重启服务生效 |
+
+- enable_auto_leader_balance_for_iot_consensus
+
+| 名字 | enable_auto_leader_balance_for_iot_consensus |
+| ------------ | -------------------------------------------- |
+| 描述 | 是否为 IoT 共识协议开启自动均衡 leader 策略 |
+| 类型 | Boolean |
+| 默认值 | true |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.13 集群管理
+
+- time_partition_origin
+
+| 名字 | time_partition_origin |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | Database 数据时间分区的起始点,即从哪个时间点开始计算时间分区。 |
+| 类型 | Long |
+| 单位 | 毫秒 |
+| 默认值 | 0 |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- time_partition_interval
+
+| 名字 | time_partition_interval |
+| ------------ | ------------------------------- |
+| 描述 | Database 默认的数据时间分区间隔 |
+| 类型 | Long |
+| 单位 | 毫秒 |
+| 默认值 | 604800000 |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- heartbeat_interval_in_ms
+
+| 名字 | heartbeat_interval_in_ms |
+| ------------ | ------------------------ |
+| 描述 | 集群节点间的心跳间隔 |
+| 类型 | Long |
+| 单位 | ms |
+| 默认值 | 1000 |
+| 改后生效方式 | 重启服务生效 |
+
+- disk_space_warning_threshold
+
+| 名字 | disk_space_warning_threshold |
+| ------------ | ---------------------------- |
+| 描述 | DataNode 磁盘剩余阈值 |
+| 类型 | double(percentage) |
+| 默认值 | 0.05 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.14 内存控制配置
+
+- datanode_memory_proportion
+
+| 名字 | datanode_memory_proportion |
+| ------------ | ---------------------------------------------------- |
+| 描述 | 存储引擎、查询引擎、元数据、共识、流处理引擎和空闲内存比例 |
+| 类型 | Ratio |
+| 默认值 | 
3:3:1:1:1:1 | +| 改后生效方式 | 重启服务生效 | + +- schema_memory_proportion + +| 名字 | schema_memory_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | Schema 相关的内存如何在 SchemaRegion、SchemaCache 和 PartitionCache 之间分配 | +| 类型 | Ratio | +| 默认值 | 5:4:1 | +| 改后生效方式 | 重启服务生效 | + +- storage_engine_memory_proportion + +| 名字 | storage_engine_memory_proportion | +| ------------ | -------------------------------- | +| 描述 | 写入和合并占存储内存比例 | +| 类型 | Ratio | +| 默认值 | 8:2 | +| 改后生效方式 | 重启服务生效 | + +- write_memory_proportion + +| 名字 | write_memory_proportion | +| ------------ | -------------------------------------------- | +| 描述 | Memtable 和 TimePartitionInfo 占写入内存比例 | +| 类型 | Ratio | +| 默认值 | 19:1 | +| 改后生效方式 | 重启服务生效 | + +- primitive_array_size + +| 名字 | primitive_array_size | +| ------------ | ---------------------------------------- | +| 描述 | 数组池中的原始数组大小(每个数组的长度) | +| 类型 | int32 | +| 默认值 | 64 | +| 改后生效方式 | 重启服务生效 | + +- chunk_metadata_size_proportion + +| 名字 | chunk_metadata_size_proportion | +| ------------ | -------------------------------------------- | +| 描述 | 在数据压缩过程中,用于存储块元数据的内存比例 | +| 类型 | Double | +| 默认值 | 0.1 | +| 改后生效方式 | 重启服务生效 | + +- flush_proportion + +| 名字 | flush_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | 调用flush disk的写入内存比例,默认0.4,若有极高的写入负载力(比如batch=1000),可以设置为低于默认值,比如0.2 | +| 类型 | Double | +| 默认值 | 0.4 | +| 改后生效方式 | 重启服务生效 | + +- buffered_arrays_memory_proportion + +| 名字 | buffered_arrays_memory_proportion | +| ------------ | --------------------------------------- | +| 描述 | 为缓冲数组分配的写入内存比例,默认为0.6 | +| 类型 | Double | +| 默认值 | 0.6 | +| 改后生效方式 | 重启服务生效 | + +- reject_proportion + +| 名字 | reject_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | 拒绝插入的写入内存比例,默认0.8,若有极高的写入负载力(比如batch=1000)并且物理内存足够大,它可以设置为高于默认值,如0.9 | +| 类型 | Double | +| 默认值 | 0.8 | +| 改后生效方式 | 重启服务生效 | + +- device_path_cache_proportion + +| 名字 | device_path_cache_proportion | +| ------------ | --------------------------------------------------- | +| 描述 | 在内存中分配给设备路径缓存(DevicePathCache)的比例 | +| 类型 | Double | +| 默认值 | 0.05 | +| 改后生效方式 | 重启服务生效 | + +- write_memory_variation_report_proportion + +| 名字 | write_memory_variation_report_proportion | +| ------------ | ------------------------------------------------------------ | +| 描述 | 如果 DataRegion 的内存增加超过写入可用内存的一定比例,则向系统报告。默认值为0.001 | +| 类型 | Double | +| 默认值 | 0.001 | +| 改后生效方式 | 重启服务生效 | + +- check_period_when_insert_blocked + +| 名字 | check_period_when_insert_blocked | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当插入被拒绝时,等待时间(以毫秒为单位)去再次检查系统,默认为50。若插入被拒绝,读取负载低,可以设置大一些。 | +| 类型 | int32 | +| 默认值 | 50 | +| 改后生效方式 | 重启服务生效 | + +- io_task_queue_size_for_flushing + +| 名字 | io_task_queue_size_for_flushing | +| ------------ | -------------------------------- | +| 描述 | ioTaskQueue 的大小。默认值为10。 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- enable_query_memory_estimation + +| 名字 | enable_query_memory_estimation | +| ------------ | ------------------------------------------------------------ | +| 描述 | 开启后会预估每次查询的内存使用量,如果超过可用内存,会拒绝本次查询 | +| 类型 | bool | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +### 3.15 元数据引擎配置 + +- schema_engine_mode + +| 名字 | schema_engine_mode | +| ------------ | ------------------------------------------------------------ | +| 描述 | 元数据引擎的运行模式,支持 Memory 和 PBTree;PBTree 模式下支持将内存中暂时不用的序列元数据实时置换到磁盘上,需要使用时再加载进内存;此参数在集群中所有的 DataNode 上务必保持相同。 | +| 类型 | string | +| 默认值 | Memory | +| 改后生效方式 | 
仅允许在第一次启动服务前修改 | + +- partition_cache_size + +| 名字 | partition_cache_size | +| ------------ | ------------------------------ | +| 描述 | 分区信息缓存的最大缓存条目数。 | +| 类型 | Int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- sync_mlog_period_in_ms + +| 名字 | sync_mlog_period_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | mlog定期刷新到磁盘的周期,单位毫秒。如果该参数为0,则表示每次对元数据的更新操作都会被立即写到磁盘上。 | +| 类型 | Int64 | +| 默认值 | 100 | +| 改后生效方式 | 重启服务生效 | + +- tag_attribute_flush_interval + +| 名字 | tag_attribute_flush_interval | +| ------------ | -------------------------------------------------- | +| 描述 | 标签和属性记录的间隔数,达到此记录数量时将强制刷盘 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- tag_attribute_total_size + +| 名字 | tag_attribute_total_size | +| ------------ | ---------------------------------------- | +| 描述 | 每个时间序列标签和属性的最大持久化字节数 | +| 类型 | int32 | +| 默认值 | 700 | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- max_measurement_num_of_internal_request + +| 名字 | max_measurement_num_of_internal_request | +| ------------ | ------------------------------------------------------------ | +| 描述 | 一次注册序列请求中若物理量过多,在系统内部执行时将被拆分为若干个轻量级的子请求,每个子请求中的物理量数目不超过此参数设置的最大值。 | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- datanode_schema_cache_eviction_policy + +| 名字 | datanode_schema_cache_eviction_policy | +| ------------ | ----------------------------------------------------- | +| 描述 | 当 Schema 缓存达到其最大容量时,Schema 缓存的淘汰策略 | +| 类型 | String | +| 默认值 | FIFO | +| 改后生效方式 | 重启服务生效 | + +- cluster_timeseries_limit_threshold + +| 名字 | cluster_timeseries_limit_threshold | +| ------------ | ---------------------------------- | +| 描述 | 集群中可以创建的时间序列的最大数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +- cluster_device_limit_threshold + +| 名字 | cluster_device_limit_threshold | +| ------------ | ------------------------------ | +| 描述 | 集群中可以创建的最大设备数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +- database_limit_threshold + +| 名字 | database_limit_threshold | +| ------------ | ------------------------------ | +| 描述 | 集群中可以创建的最大数据库数量 | +| 类型 | Int32 | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +### 3.16 自动推断数据类型 + +- enable_auto_create_schema + +| 名字 | enable_auto_create_schema | +| ------------ | -------------------------------------- | +| 描述 | 当写入的序列不存在时,是否自动创建序列 | +| 取值 | true or false | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- default_storage_group_level + +| 名字 | default_storage_group_level | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当写入的数据不存在且自动创建序列时,若需要创建相应的 database,将序列路径的哪一层当做 database。例如,如果我们接到一个新序列 root.sg0.d1.s2, 并且 level=1, 那么 root.sg0 被视为database(因为 root 是 level 0 层) | +| 取值 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 重启服务生效 | + +- boolean_string_infer_type + +| 名字 | boolean_string_infer_type | +| ------------ | ------------------------------------------ | +| 描述 | "true" 或者 "false" 字符串被推断的数据类型 | +| 取值 | BOOLEAN 或者 TEXT | +| 默认值 | BOOLEAN | +| 改后生效方式 | 热加载 | + +- integer_string_infer_type + +| 名字 | integer_string_infer_type | +| ------------ | --------------------------------- | +| 描述 | 整型字符串推断的数据类型 | +| 取值 | INT32, INT64, FLOAT, DOUBLE, TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- floating_string_infer_type + +| 名字 | floating_string_infer_type | +| ------------ | ----------------------------- | +| 描述 | "6.7"等字符串被推断的数据类型 | +| 取值 | DOUBLE, FLOAT or TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- nan_string_infer_type + +| 名字 | nan_string_infer_type | +| ------------ | ---------------------------- | +| 描述 | "NaN" 字符串被推断的数据类型 | +| 取值 | DOUBLE, 
FLOAT or TEXT | +| 默认值 | DOUBLE | +| 改后生效方式 | 热加载 | + +- default_boolean_encoding + +| 名字 | default_boolean_encoding | +| ------------ | ------------------------ | +| 描述 | BOOLEAN 类型编码格式 | +| 取值 | PLAIN, RLE | +| 默认值 | RLE | +| 改后生效方式 | 热加载 | + +- default_int32_encoding + +| 名字 | default_int32_encoding | +| ------------ | -------------------------------------- | +| 描述 | int32 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| 默认值 | TS_2DIFF | +| 改后生效方式 | 热加载 | + +- default_int64_encoding + +| 名字 | default_int64_encoding | +| ------------ | -------------------------------------- | +| 描述 | int64 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, REGULAR, GORILLA | +| 默认值 | TS_2DIFF | +| 改后生效方式 | 热加载 | + +- default_float_encoding + +| 名字 | default_float_encoding | +| ------------ | ----------------------------- | +| 描述 | float 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | +| 默认值 | GORILLA | +| 改后生效方式 | 热加载 | + +- default_double_encoding + +| 名字 | default_double_encoding | +| ------------ | ----------------------------- | +| 描述 | double 类型编码格式 | +| 取值 | PLAIN, RLE, TS_2DIFF, GORILLA | +| 默认值 | GORILLA | +| 改后生效方式 | 热加载 | + +- default_text_encoding + +| 名字 | default_text_encoding | +| ------------ | --------------------- | +| 描述 | text 类型编码格式 | +| 取值 | PLAIN | +| 默认值 | PLAIN | +| 改后生效方式 | 热加载 | + +* boolean_compressor + +| 名字 | boolean_compressor | +| -------------- | ----------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,BOOLEAN 数据类型的压缩方式 (V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* int32_compressor + +| 名字 | int32_compressor | +| -------------- | ------------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,INT32/DATE 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* int64_compressor + +| 名字 | int64_compressor | +| -------------- | ------------------------------------------------------------------------------ | +| 描述 | 启用自动创建模式时,INT64/TIMESTAMP 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* float_compressor + +| 名字 | float_compressor | +| -------------- | -------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,FLOAT 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* double_compressor + +| 名字 | double_compressor | +| -------------- | --------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,DOUBLE 数据类型的压缩方式(V2.0.6 版本开始支持) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +* text_compressor + +| 名字 | text_compressor | +| -------------- | -------------------------------------------------------------------------------- | +| 描述 | 启用自动创建模式时,TEXT/BINARY/BLOB 数据类型的压缩方式(V2.0.6 版本开始支持 ) | +| 类型 | String | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + + + +### 3.17 查询配置 + +- read_consistency_level + +| 名字 | read_consistency_level | +| ------------ | ------------------------------------------------------------ | +| 描述 | 查询一致性等级,取值 “strong” 时从 Leader 副本查询,取值 “weak” 时随机查询一个副本。 | +| 类型 | String | +| 默认值 | strong | +| 改后生效方式 | 重启服务生效 | + +- meta_data_cache_enable + +| 名字 | meta_data_cache_enable | +| ------------ | ------------------------------------------------------------ | +| 描述 | 是否缓存元数据(包括 BloomFilter、Chunk Metadata 和 TimeSeries Metadata。) | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- chunk_timeseriesmeta_free_memory_proportion + +| 名字 | chunk_timeseriesmeta_free_memory_proportion | +| ------------ | 
------------------------------------------------------------ | +| 描述 | 读取内存分配比例,BloomFilterCache、ChunkCache、TimeseriesMetadataCache、数据集查询的内存和可用内存的查询。参数形式为a : b : c : d : e,其中a、b、c、d、e为整数。 例如“1 : 1 : 1 : 1 : 1” ,“1 : 100 : 200 : 300 : 400” 。 | +| 类型 | String | +| 默认值 | 1 : 100 : 200 : 300 : 400 | +| 改后生效方式 | 重启服务生效 | + +- enable_last_cache + +| 名字 | enable_last_cache | +| ------------ | ------------------ | +| 描述 | 是否开启最新点缓存 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_core_pool_size + +| 名字 | mpp_data_exchange_core_pool_size | +| ------------ | -------------------------------- | +| 描述 | MPP 数据交换线程池核心线程数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_max_pool_size + +| 名字 | mpp_data_exchange_max_pool_size | +| ------------ | ------------------------------- | +| 描述 | MPP 数据交换线程池最大线程数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- mpp_data_exchange_keep_alive_time_in_ms + +| 名字 | mpp_data_exchange_keep_alive_time_in_ms | +| ------------ | --------------------------------------- | +| 描述 | MPP 数据交换最大等待时间 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- driver_task_execution_time_slice_in_ms + +| 名字 | driver_task_execution_time_slice_in_ms | +| ------------ | -------------------------------------- | +| 描述 | 单个 DriverTask 最长执行时间(ms) | +| 类型 | int32 | +| 默认值 | 200 | +| 改后生效方式 | 重启服务生效 | + +- max_tsblock_size_in_bytes + +| 名字 | max_tsblock_size_in_bytes | +| ------------ | ------------------------------- | +| 描述 | 单个 TsBlock 的最大容量(byte) | +| 类型 | int32 | +| 默认值 | 131072 | +| 改后生效方式 | 重启服务生效 | + +- max_tsblock_line_numbers + +| 名字 | max_tsblock_line_numbers | +| ------------ | ------------------------ | +| 描述 | 单个 TsBlock 的最大行数 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- slow_query_threshold + +| 名字 | slow_query_threshold | +| ------------ | ------------------------------ | +| 描述 | 慢查询的时间阈值。单位:毫秒。 | +| 类型 | long | +| 默认值 | 10000 | +| 改后生效方式 | 热加载 | + +- query_cost_stat_window + +| 名字 | query_cost_stat_window | +| ------------ |--------------------| +| 描述 | 查询耗时统计的窗口,单位为分钟。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 热加载 | + +- query_timeout_threshold + +| 名字 | query_timeout_threshold | +| ------------ | -------------------------------- | +| 描述 | 查询的最大执行时间。单位:毫秒。 | +| 类型 | Int32 | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- max_allowed_concurrent_queries + +| 名字 | max_allowed_concurrent_queries | +| ------------ | ------------------------------ | +| 描述 | 允许的最大并发查询数量。 | +| 类型 | Int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- query_thread_count + +| 名字 | query_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 IoTDB 对内存中的数据进行查询时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- degree_of_query_parallelism + +| 名字 | degree_of_query_parallelism | +| ------------ | ------------------------------------------------------------ | +| 描述 | 设置单个查询片段实例将创建的 pipeline 驱动程序数量,也就是查询操作的并行度。 | +| 类型 | Int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- mode_map_size_threshold + +| 名字 | mode_map_size_threshold | +| ------------ | ---------------------------------------------- | +| 描述 | 计算 MODE 聚合函数时,计数映射可以增长到的阈值 | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- batch_size + +| 名字 | batch_size | +| ------------ | ---------------------------------------------------------- | +| 描述 | 服务器中每次迭代的数据量(数据条目,即不同时间戳的数量。) | +| 类型 | Int32 | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- sort_buffer_size_in_bytes + +| 名字 | 
sort_buffer_size_in_bytes | +| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 设置外部排序操作中使用的内存缓冲区大小 | +| 类型 | long | +| 默认值 | 1048576(V2.0.6 之前版本)
0(V2.0.6 及之后版本),当值小于等于 0 时,由系统自动进行计算,计算公式为:`sort_buffer_size_in_bytes = Math.min(32 * 1024 * 1024, 堆内内存 * 查询引擎内存比例 * 查询执行内存比例 / 查询线程数 / 2)` | +| 改后生效方式 | 热加载 | + +- merge_threshold_of_explain_analyze + +| 名字 | merge_threshold_of_explain_analyze | +| ------------ | ------------------------------------------------------------ | +| 描述 | 用于设置在 `EXPLAIN ANALYZE` 语句的结果集中操作符(operator)数量的合并阈值。 | +| 类型 | int | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +### 3.18 TTL配置 + +- ttl_check_interval + +| 名字 | ttl_check_interval | +| ------------ | -------------------------------------- | +| 描述 | ttl 检查任务的间隔,单位 ms,默认为 2h | +| 类型 | int | +| 默认值 | 7200000 | +| 改后生效方式 | 重启服务生效 | + +- max_expired_time + +| 名字 | max_expired_time | +| ------------ | ------------------------------------------------------------ | +| 描述 | 如果一个文件中存在设备已经过期超过此时间,那么这个文件将被立即整理。单位 ms,默认为一个月 | +| 类型 | int | +| 默认值 | 2592000000 | +| 改后生效方式 | 重启服务生效 | + +- expired_data_ratio + +| 名字 | expired_data_ratio | +| ------------ | ------------------------------------------------------------ | +| 描述 | 过期设备比例。如果一个文件中过期设备的比率超过这个值,那么这个文件中的过期数据将通过 compaction 清理。 | +| 类型 | float | +| 默认值 | 0.3 | +| 改后生效方式 | 重启服务生效 | + +### 3.19 存储引擎配置 + +- timestamp_precision + +| 名字 | timestamp_precision | +| ------------ | ---------------------------- | +| 描述 | 时间戳精度,支持 ms、us、ns | +| 类型 | String | +| 默认值 | ms | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- timestamp_precision_check_enabled + +| 名字 | timestamp_precision_check_enabled | +| ------------ | --------------------------------- | +| 描述 | 用于控制是否启用时间戳精度检查 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + +- max_waiting_time_when_insert_blocked + +| 名字 | max_waiting_time_when_insert_blocked | +| ------------ | ----------------------------------------------- | +| 描述 | 当插入请求等待超过这个时间,则抛出异常,单位 ms | +| 类型 | Int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- handle_system_error + +| 名字 | handle_system_error | +| ------------ | ------------------------------------ | +| 描述 | 当系统遇到不可恢复的错误时的处理方法 | +| 类型 | String | +| 默认值 | CHANGE_TO_READ_ONLY | +| 改后生效方式 | 重启服务生效 | + +- enable_timed_flush_seq_memtable + +| 名字 | enable_timed_flush_seq_memtable | +| ------------ | ------------------------------- | +| 描述 | 是否开启定时刷盘顺序 memtable | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- seq_memtable_flush_interval_in_ms + +| 名字 | seq_memtable_flush_interval_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | +| 类型 | long | +| 默认值 | 600000 | +| 改后生效方式 | 热加载 | + +- seq_memtable_flush_check_interval_in_ms + +| 名字 | seq_memtable_flush_check_interval_in_ms | +| ------------ | ---------------------------------------- | +| 描述 | 检查顺序 memtable 是否需要刷盘的时间间隔 | +| 类型 | long | +| 默认值 | 30000 | +| 改后生效方式 | 热加载 | + +- enable_timed_flush_unseq_memtable + +| 名字 | enable_timed_flush_unseq_memtable | +| ------------ | --------------------------------- | +| 描述 | 是否开启定时刷新乱序 memtable | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- unseq_memtable_flush_interval_in_ms + +| 名字 | unseq_memtable_flush_interval_in_ms | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 memTable 的创建时间小于当前时间减去该值时,该 memtable 需要被刷盘 | +| 类型 | long | +| 默认值 | 600000 | +| 改后生效方式 | 热加载 | + +- unseq_memtable_flush_check_interval_in_ms + +| 名字 | unseq_memtable_flush_check_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 检查乱序 memtable 是否需要刷盘的时间间隔 | +| 类型 | long | +| 默认值 | 30000 | +| 改后生效方式 | 热加载 | + +- 
tvlist_sort_algorithm + +| 名字 | tvlist_sort_algorithm | +| ------------ | ------------------------ | +| 描述 | memtable中数据的排序方法 | +| 类型 | String | +| 默认值 | TIM | +| 改后生效方式 | 重启服务生效 | + +- avg_series_point_number_threshold + +| 名字 | avg_series_point_number_threshold | +| ------------ | ------------------------------------------------ | +| 描述 | 内存中平均每个时间序列点数最大值,达到触发 flush | +| 类型 | int32 | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- flush_thread_count + +| 名字 | flush_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 当 IoTDB 将内存中的数据写入磁盘时,最多启动多少个线程来执行该操作。如果该值小于等于 0,那么采用机器所安装的 CPU 核的数量。默认值为 0。 | +| 类型 | int32 | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- enable_partial_insert + +| 名字 | enable_partial_insert | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在一次 insert 请求中,如果部分测点写入失败,是否继续写入其他测点。 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +- recovery_log_interval_in_ms + +| 名字 | recovery_log_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | data region的恢复过程中打印日志信息的间隔 | +| 类型 | Int32 | +| 默认值 | 5000 | +| 改后生效方式 | 重启服务生效 | + +- 0.13_data_insert_adapt + +| 名字 | 0.13_data_insert_adapt | +| ------------ | ------------------------------------------------------- | +| 描述 | 如果 0.13 版本客户端进行写入,需要将此配置项设置为 true | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- enable_tsfile_validation + +| 名字 | enable_tsfile_validation | +| ------------ | -------------------------------------- | +| 描述 | Flush, Load 或合并后验证 tsfile 正确性 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 热加载 | + +- tier_ttl_in_ms + +| 名字 | tier_ttl_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 定义每个层级负责的数据范围,通过 TTL 表示 | +| 类型 | long | +| 默认值 | -1 | +| 改后生效方式 | 重启服务生效 | + +* max_object_file_size_in_byte + +| 名字 | max\_object\_file\_size\_in\_byte | +| -------------- |------------------------------| +| 描述 | 单对象文件的最大尺寸限制 (V2.0.8 版本起支持) | +| 类型 | long | +| 默认值 | 4294967296 | +| 改后生效方式 | 热加载 | + +* restrict_object_limit + +| 名字 | restrict\_object\_limit | +|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 对 OBJECT 类型的表名、列名和设备名称没有特殊限制。(V2.0.8 版本起支持)当设置为 true 且表中包含 OBJECT 列时,需遵循以下限制:
1. 命名规范:TAG 列的值、表名和字段名禁止使用 “.” 或 “..”,且不得包含 “./” 或 “.\” 字符,否则元数据创建将失败。若名称包含文件系统不支持的字符,则会在数据写入时报错。
2. 大小写敏感:如果底层文件系统不区分大小写,则设备标识符(如 'd1' 与 'D1')将被视为相同。在此情况下,若创建此类名称相似的设备,其 OBJECT 数据文件可能互相覆盖,导致数据错误。
3. 存储路径:OBJECT 类型数据的实际存储路径格式为:`${dataregionid}/${tablename}/${tag1}/${tag2}/.../${field}/${timestamp}.bin`。 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 仅允许在第一次启动服务前修改 | + + +### 3.20 合并配置 + +- enable_seq_space_compaction + +| 名字 | enable_seq_space_compaction | +| ------------ | -------------------------------------- | +| 描述 | 顺序空间内合并,开启顺序文件之间的合并 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_unseq_space_compaction + +| 名字 | enable_unseq_space_compaction | +| ------------ | -------------------------------------- | +| 描述 | 乱序空间内合并,开启乱序文件之间的合并 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_cross_space_compaction + +| 名字 | enable_cross_space_compaction | +| ------------ | ------------------------------------------ | +| 描述 | 跨空间合并,开启将乱序文件合并到顺序文件中 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- enable_auto_repair_compaction + +| 名字 | enable_auto_repair_compaction | +| ------------ | ----------------------------- | +| 描述 | 启用通过合并操作自动修复未排序文件的功能 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- cross_selector + +| 名字 | cross_selector | +| ------------ |----------------| +| 描述 | 跨空间合并任务的选择器 | +| 类型 | String | +| 默认值 | rewrite | +| 改后生效方式 | 重启服务生效 | + +- cross_performer + +| 名字 | cross_performer | +| ------------ |-----------------------------------| +| 描述 | 跨空间合并任务的执行器,可选项:read_point 和 fast | +| 类型 | String | +| 默认值 | fast | +| 改后生效方式 | 热加载 | + +- inner_seq_selector + +| 名字 | inner_seq_selector | +| ------------ |------------------------------------------------------------------------| +| 描述 | 顺序空间内合并任务的选择器,可选 size_tiered_single_\target,size_tiered_multi_target | +| 类型 | String | +| 默认值 | size_tiered_multi_target | +| 改后生效方式 | 热加载 | + +- inner_seq_performer + +| 名字 | inner_seq_performer | +| ------------ |--------------------------------------| +| 描述 | 顺序空间内合并任务的执行器,可选项是 read_chunk 和 fast | +| 类型 | String | +| 默认值 | read_chunk | +| 改后生效方式 | 热加载 | + +- inner_unseq_selector + +| 名字 | inner_unseq_selector | +| ------------ |-------------------------------------------------------------------------| +| 描述 | 乱序空间内合并任务的选择器,可选 size_tiered_single_\target,size_tiered_multi_target | +| 类型 | String | +| 默认值 | size_tiered_multi_target | +| 改后生效方式 | 热加载 | + +- inner_unseq_performer + +| 名字 | inner_unseq_performer | +| ------------ |--------------------------------------| +| 描述 | 乱序空间内合并任务的执行器,可选项是 read_point 和 fast | +| 类型 | String | +| 默认值 | fast | +| 改后生效方式 | 热加载 | + +- compaction_priority + +| 名字 | compaction_priority | +| ------------ |-------------------------------------------------------------------------------------------| +| 描述 | 合并时的优先级。INNER_CROSS:优先执行空间内合并,优先减少文件数量;CROSS_INNER:优先执行跨空间合并,优先清理乱序文件;BALANCE:交替执行两种合并类型。 | +| 类型 | String | +| 默认值 | INNER_CROSS | +| 改后生效方式 | 重启服务生效 | + +- candidate_compaction_task_queue_size + +| 名字 | candidate_compaction_task_queue_size | +| ------------ | ------------------------------------ | +| 描述 | 待选合并任务队列容量 | +| 类型 | int32 | +| 默认值 | 50 | +| 改后生效方式 | 重启服务生效 | + +- target_compaction_file_size + +| 名字 | target_compaction_file_size | +| ------------ |-----------------------------------------------------------------------------------------------------------------------------------------------| +| 描述 | 该参数作用于两个场景:1. 空间内合并的目标文件大小 2. 
跨空间合并中待选序列文件的大小需小于 target_compaction_file_size * 1.5 多数情况下,跨空间合并的目标文件大小不会超过此阈值,即便超出,幅度也不会过大 。 默认值:2GB ,单位:byte | +| 类型 | Long | +| 默认值 | 2147483648 | +| 改后生效方式 | 热加载 | + +- inner_compaction_total_file_size_threshold + +| 名字 | inner_compaction_total_file_size_threshold | +| ------------ |--------------------------------------------| +| 描述 | 空间内合并的文件总大小阈值,单位:byte | +| 类型 | Long | +| 默认值 | 10737418240 | +| 改后生效方式 | 热加载 | + +- inner_compaction_total_file_num_threshold + +| 名字 | inner_compaction_total_file_num_threshold | +| ------------ | ----------------------------------------- | +| 描述 | 空间内合并的文件总数阈值 | +| 类型 | int32 | +| 默认值 | 100 | +| 改后生效方式 | 热加载 | + +- max_level_gap_in_inner_compaction + +| 名字 | max_level_gap_in_inner_compaction | +| ------------ | -------------------------------------- | +| 描述 | 空间内合并筛选的最大层级差 | +| 类型 | int32 | +| 默认值 | 2 | +| 改后生效方式 | 热加载 | + +- target_chunk_size + +| 名字 | target_chunk_size | +| ------------ |--------------------------------------------------| +| 描述 | 刷盘与合并操作的目标数据块大小, 若内存表中某条时序数据的大小超过该值,数据会被刷盘至多个数据块 | +| 类型 | Long | +| 默认值 | 1600000 | +| 改后生效方式 | 重启服务生效 | + +- target_chunk_point_num + +| 名字 | target_chunk_point_num | +| ------------ |------------------------------------------------------| +| 描述 | 刷盘与合并操作中单个数据块的目标点数, 若内存表中某条时序数据的点数超过该值,数据会被刷盘至多个数据块中 | +| 类型 | Long | +| 默认值 | 100000 | +| 改后生效方式 | 重启服务生效 | + +- chunk_size_lower_bound_in_compaction + +| 名字 | chunk_size_lower_bound_in_compaction | +| ------------ |--------------------------------------| +| 描述 | 若数据块大小低于此阈值,则会被反序列化为数据点,默认值为128字节 | +| 类型 | Long | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- chunk_point_num_lower_bound_in_compaction + +| 名字 | chunk_point_num_lower_bound_in_compaction | +| ------------ |------------------------------------------| +| 描述 | 若数据块内的数据点数低于此阈值,则会被反序列化为数据点 | +| 类型 | Long | +| 默认值 | 100 | +| 改后生效方式 | 重启服务生效 | + +- inner_compaction_candidate_file_num + +| 名字 | inner_compaction_candidate_file_num | +| ------------ | ---------------------------------------- | +| 描述 | 空间内合并待选文件筛选的文件数量要求 | +| 类型 | int32 | +| 默认值 | 30 | +| 改后生效方式 | 热加载 | + +- max_cross_compaction_candidate_file_num + +| 名字 | max_cross_compaction_candidate_file_num | +| ------------ | --------------------------------------- | +| 描述 | 跨空间合并待选文件筛选的文件数量上限 | +| 类型 | int32 | +| 默认值 | 500 | +| 改后生效方式 | 热加载 | + +- max_cross_compaction_candidate_file_size + +| 名字 | max_cross_compaction_candidate_file_size | +| ------------ |------------------------------------------| +| 描述 | 跨空间合并待选文件筛选的总大小上限 | +| 类型 | Long | +| 默认值 | 5368709120 | +| 改后生效方式 | 热加载 | + +- min_cross_compaction_unseq_file_level + +| 名字 | min_cross_compaction_unseq_file_level | +| ------------ |---------------------------------------| +| 描述 | 可被选为待选文件的乱序文件的最小空间内合并层级 | +| 类型 | int32 | +| 默认值 | 1 | +| 改后生效方式 | 热加载 | + +- compaction_thread_count + +| 名字 | compaction_thread_count | +| ------------ | ----------------------- | +| 描述 | 执行合并任务的线程数目 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +- compaction_max_aligned_series_num_in_one_batch + +| 名字 | compaction_max_aligned_series_num_in_one_batch | +| ------------ | ---------------------------------------------- | +| 描述 | 对齐序列合并一次执行时处理的值列数量 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 热加载 | + +- compaction_schedule_interval_in_ms + +| 名字 | compaction_schedule_interval_in_ms | +| ------------ |------------------------------------| +| 描述 | 合并调度的时间间隔,单位 ms | +| 类型 | Long | +| 默认值 | 60000 | +| 改后生效方式 | 重启服务生效 | + +- compaction_write_throughput_mb_per_sec + +| 名字 | compaction_write_throughput_mb_per_sec | +| 
------------ |----------------------------------------| +| 描述 | 合并操作每秒可达到的写入吞吐量上限, 小于或等于 0 的取值表示无限制 | +| 类型 | int32 | +| 默认值 | 16 | +| 改后生效方式 | 重启服务生效 | + +- compaction_read_throughput_mb_per_sec + +| 名字 | compaction_read_throughput_mb_per_sec | +| --------- | ---------------------------------------------------- | +| 描述 | 合并每秒读吞吐限制,单位为 megabyte,小于或等于 0 的取值表示无限制 | +| 类型 | int32 | +| 默认值 | 0 | +| Effective | 热加载 | + +- compaction_read_operation_per_sec + +| 名字 | compaction_read_operation_per_sec | +| --------- | ------------------------------------------- | +| 描述 | 合并每秒读操作数量限制,小于或等于 0 的取值表示无限制 | +| 类型 | int32 | +| 默认值 | 0 | +| Effective | 热加载 | + +- sub_compaction_thread_count + +| 名字 | sub_compaction_thread_count | +| ------------ | ------------------------------------------------------------ | +| 描述 | 每个合并任务的子任务线程数,只对跨空间合并和乱序空间内合并生效 | +| 类型 | int32 | +| 默认值 | 4 | +| 改后生效方式 | 热加载 | + +- inner_compaction_task_selection_disk_redundancy + +| 名字 | inner_compaction_task_selection_disk_redundancy | +| ------------ | ----------------------------------------------- | +| 描述 | 定义了磁盘可用空间的冗余值,仅用于内部压缩 | +| 类型 | double | +| 默认值 | 0.05 | +| 改后生效方式 | 热加载 | + +- inner_compaction_task_selection_mods_file_threshold + +| 名字 | inner_compaction_task_selection_mods_file_threshold | +| ------------ | --------------------------------------------------- | +| 描述 | 定义了mods文件大小的阈值,仅用于内部压缩。 | +| 类型 | long | +| 默认值 | 131072 | +| 改后生效方式 | 热加载 | + +- compaction_schedule_thread_num + +| 名字 | compaction_schedule_thread_num | +| ------------ | ------------------------------ | +| 描述 | 选择合并任务的线程数量 | +| 类型 | int32 | +| 默认值 | 4 | +| 改后生效方式 | 热加载 | + +### 3.21 写前日志配置 + +- wal_mode + +| 名字 | wal_mode | +| ------------ | ------------------------------------------------------------ | +| 描述 | 写前日志的写入模式. 
DISABLE 模式下会关闭写前日志;SYNC 模式下写入请求会在成功写入磁盘后返回;ASYNC 模式下写入请求返回时可能尚未成功写入磁盘。 |
+| 类型 | String |
+| 默认值 | ASYNC |
+| 改后生效方式 | 重启服务生效 |
+
+- max_wal_nodes_num
+
+| 名字 | max_wal_nodes_num |
+| ------------ | ----------------------------------------------------- |
+| 描述 | 写前日志节点的最大数量,默认值 0 表示数量由系统控制。 |
+| 类型 | int32 |
+| 默认值 | 0 |
+| 改后生效方式 | 重启服务生效 |
+
+- wal_async_mode_fsync_delay_in_ms
+
+| 名字 | wal_async_mode_fsync_delay_in_ms |
+| ------------ | ------------------------------------------- |
+| 描述 | async 模式下写前日志调用 fsync 前的等待时间 |
+| 类型 | int32 |
+| 默认值 | 1000 |
+| 改后生效方式 | 热加载 |
+
+- wal_sync_mode_fsync_delay_in_ms
+
+| 名字 | wal_sync_mode_fsync_delay_in_ms |
+| ------------ | ------------------------------------------ |
+| 描述 | sync 模式下写前日志调用 fsync 前的等待时间 |
+| 类型 | int32 |
+| 默认值 | 3 |
+| 改后生效方式 | 热加载 |
+
+- wal_buffer_size_in_byte
+
+| 名字 | wal_buffer_size_in_byte |
+| ------------ | ----------------------- |
+| 描述 | 写前日志的 buffer 大小 |
+| 类型 | int32 |
+| 默认值 | 33554432 |
+| 改后生效方式 | 重启服务生效 |
+
+- wal_buffer_queue_capacity
+
+| 名字 | wal_buffer_queue_capacity |
+| ------------ | ------------------------- |
+| 描述 | 写前日志阻塞队列大小上限 |
+| 类型 | int32 |
+| 默认值 | 500 |
+| 改后生效方式 | 重启服务生效 |
+
+- wal_file_size_threshold_in_byte
+
+| 名字 | wal_file_size_threshold_in_byte |
+| ------------ | ------------------------------- |
+| 描述 | 写前日志文件封口阈值 |
+| 类型 | int32 |
+| 默认值 | 31457280 |
+| 改后生效方式 | 热加载 |
+
+- wal_min_effective_info_ratio
+
+| 名字 | wal_min_effective_info_ratio |
+| ------------ | ---------------------------- |
+| 描述 | 写前日志最小有效信息比 |
+| 类型 | double |
+| 默认值 | 0.1 |
+| 改后生效方式 | 热加载 |
+
+- wal_memtable_snapshot_threshold_in_byte
+
+| 名字 | wal_memtable_snapshot_threshold_in_byte |
+| ------------ | ---------------------------------------- |
+| 描述 | 触发写前日志中内存表快照的内存表大小阈值 |
+| 类型 | int64 |
+| 默认值 | 8388608 |
+| 改后生效方式 | 热加载 |
+
+- max_wal_memtable_snapshot_num
+
+| 名字 | max_wal_memtable_snapshot_num |
+| ------------ | ------------------------------ |
+| 描述 | 写前日志中内存表的最大数量上限 |
+| 类型 | int32 |
+| 默认值 | 1 |
+| 改后生效方式 | 热加载 |
+
+- delete_wal_files_period_in_ms
+
+| 名字 | delete_wal_files_period_in_ms |
+| ------------ | ----------------------------- |
+| 描述 | 删除写前日志的检查间隔 |
+| 类型 | int64 |
+| 默认值 | 20000 |
+| 改后生效方式 | 热加载 |
+
+- wal_throttle_threshold_in_byte
+
+| 名字 | wal_throttle_threshold_in_byte |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 在 IoTConsensus 中,当 WAL 文件的大小达到一定阈值时,会开始对写入操作进行节流,以控制写入速度。 |
+| 类型 | long |
+| 默认值 | 53687091200 |
+| 改后生效方式 | 热加载 |
+
+- iot_consensus_cache_window_time_in_ms
+
+| 名字 | iot_consensus_cache_window_time_in_ms |
+| ------------ | ---------------------------------------- |
+| 描述 | 在 IoTConsensus 中,写缓存的最大等待时间。 |
+| 类型 | long |
+| 默认值 | -1 |
+| 改后生效方式 | 热加载 |
+
+- enable_wal_compression
+
+| 名字 | enable_wal_compression |
+| ------------ | ------------------------------------- |
+| 描述 | 用于控制是否启用 WAL 的压缩。 |
+| 类型 | boolean |
+| 默认值 | true |
+| 改后生效方式 | 热加载 |
+
+### 3.22 IoT 共识协议配置
+
+当 Region 配置了 IoTConsensus 共识协议之后,下述的配置项才会生效
+
+- data_region_iot_max_log_entries_num_per_batch
+
+| 名字 | data_region_iot_max_log_entries_num_per_batch |
+| ------------ | --------------------------------------------- |
+| 描述 | IoTConsensus batch 的最大日志条数 |
+| 类型 | int32 |
+| 默认值 | 1024 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_iot_max_size_per_batch
+
+| 名字 | data_region_iot_max_size_per_batch |
+| ------------ | ---------------------------------- |
+| 描述 | IoTConsensus batch 的最大大小 |
+| 类型 | int32 |
+| 默认值 | 16777216 |
+| 改后生效方式 | 重启服务生效 |
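+
+batch 相关参数决定了 IoTConsensus 一次同步可携带的日志条数与字节数,两者通常需要一起调整。以下是一个示意性的 iotdb-system.properties 片段(取值仅为示例,请结合实际写入负载评估):
+
+```properties
+# 示例:增大单个 batch 的日志条数与字节上限(重启服务后生效)
+data_region_iot_max_log_entries_num_per_batch=2048
+data_region_iot_max_size_per_batch=33554432
+```
+
+- 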
data_region_iot_max_pending_batches_num + +| 名字 | data_region_iot_max_pending_batches_num | +| ------------ | --------------------------------------- | +| 描述 | IoTConsensus batch 的流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- data_region_iot_max_memory_ratio_for_queue + +| 名字 | data_region_iot_max_memory_ratio_for_queue | +| ------------ | ------------------------------------------ | +| 描述 | IoTConsensus 队列内存分配比例 | +| 类型 | double | +| 默认值 | 0.6 | +| 改后生效方式 | 重启服务生效 | + +- region_migration_speed_limit_bytes_per_second + +| 名字 | region_migration_speed_limit_bytes_per_second | +| ------------ | --------------------------------------------- | +| 描述 | 定义了在region迁移过程中,数据传输的最大速率 | +| 类型 | long | +| 默认值 | 33554432 | +| 改后生效方式 | 重启服务生效 | + +### 3.23 TsFile配置 + +- group_size_in_byte + +| 名字 | group_size_in_byte | +| ------------ | ---------------------------------------------- | +| 描述 | 每次将内存中的数据写入到磁盘时的最大写入字节数 | +| 类型 | int32 | +| 默认值 | 134217728 | +| 改后生效方式 | 热加载 | + +- page_size_in_byte + +| 名字 | page_size_in_byte | +| ------------ | ---------------------------------------------------- | +| 描述 | 内存中每个列写出时,写成的单页最大的大小,单位为字节 | +| 类型 | int32 | +| 默认值 | 65536 | +| 改后生效方式 | 热加载 | + +- max_number_of_points_in_page + +| 名字 | max_number_of_points_in_page | +| ------------ | ------------------------------------------------- | +| 描述 | 一个页中最多包含的数据点(时间戳-值的二元组)数量 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 热加载 | + +- pattern_matching_threshold + +| 名字 | pattern_matching_threshold | +| ------------ | ------------------------------ | +| 描述 | 正则表达式匹配时最大的匹配次数 | +| 类型 | int32 | +| 默认值 | 1000000 | +| 改后生效方式 | 热加载 | + +- float_precision + +| 名字 | float_precision | +| ------------ | ------------------------------------------------------------ | +| 描述 | 浮点数精度,为小数点后数字的位数 | +| 类型 | int32 | +| 默认值 | 默认为 2 位。注意:32 位浮点数的十进制精度为 7 位,64 位浮点数的十进制精度为 15 位。如果设置超过机器精度将没有实际意义。 | +| 改后生效方式 | 热加载 | + +- value_encoder + +| 名字 | value_encoder | +| ------------ | ------------------------------------- | +| 描述 | value 列编码方式 | +| 类型 | 枚举 String: “TS_2DIFF”,“PLAIN”,“RLE” | +| 默认值 | PLAIN | +| 改后生效方式 | 热加载 | + +- compressor + +| 名字 | compressor | +| ------------ | ------------------------------------------------------------ | +| 描述 | 数据压缩方法; 对齐序列中时间列的压缩方法 | +| 类型 | 枚举 String : "UNCOMPRESSED", "SNAPPY", "LZ4", "ZSTD", "LZMA2" | +| 默认值 | LZ4 | +| 改后生效方式 | 热加载 | + +- encrypt_flag + +| 名字 | encrypt_flag | +| ------------ | ---------------------------- | +| 描述 | 用于开启或关闭数据加密功能。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- encrypt_type + +| 名字 | encrypt_type | +| ------------ | ------------------------------------- | +| 描述 | 数据加密的方法。 | +| 类型 | String | +| 默认值 | org.apache.tsfile.encrypt.UNENCRYPTED | +| 改后生效方式 | 重启服务生效 | + +- encrypt_key_path + +| 名字 | encrypt_key_path | +| ------------ | ---------------------------- | +| 描述 | 数据加密使用的密钥来源路径。 | +| 类型 | String | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +### 3.24 授权配置 + +- authorizer_provider_class + +| 名字 | authorizer_provider_class | +| ------------ | ------------------------------------------------------------ | +| 描述 | 权限服务的类名 | +| 类型 | String | +| 默认值 | org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer | +| 改后生效方式 | 重启服务生效 | +| 其他可选值 | org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer | + +- openID_url + +| 名字 | openID_url | +| ------------ | ---------------------------------------------------------- | +| 描述 | openID 服务器地址 (当 OpenIdAuthorizer 被启用时必须设定) | +| 类型 | String(一个 http 地址) | +| 默认值 | 无 | +| 改后生效方式 | 重启服务生效 | + +- 
iotdb_server_encrypt_decrypt_provider
+
+| 名字 | iotdb_server_encrypt_decrypt_provider |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 用于用户密码加密的类 |
+| 类型 | String |
+| 默认值 | org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- iotdb_server_encrypt_decrypt_provider_parameter
+
+| 名字 | iotdb_server_encrypt_decrypt_provider_parameter |
+| ------------ | ----------------------------------------------- |
+| 描述 | 用于初始化用户密码加密类的参数 |
+| 类型 | String |
+| 默认值 | 无 |
+| 改后生效方式 | 仅允许在第一次启动服务前修改 |
+
+- author_cache_size
+
+| 名字 | author_cache_size |
+| ------------ | ------------------------ |
+| 描述 | 用户缓存与角色缓存的大小 |
+| 类型 | int32 |
+| 默认值 | 1000 |
+| 改后生效方式 | 重启服务生效 |
+
+- author_cache_expire_time
+
+| 名字 | author_cache_expire_time |
+| ------------ | -------------------------------------- |
+| 描述 | 用户缓存与角色缓存的有效期,单位为分钟 |
+| 类型 | int32 |
+| 默认值 | 30 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.25 UDF配置
+
+- udf_initial_byte_array_length_for_memory_control
+
+| 名字 | udf_initial_byte_array_length_for_memory_control |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 用于评估 UDF 查询中文本字段的内存使用情况。建议将此值设置为略大于文本记录的平均长度。 |
+| 类型 | int32 |
+| 默认值 | 48 |
+| 改后生效方式 | 重启服务生效 |
+
+- udf_memory_budget_in_mb
+
+| 名字 | udf_memory_budget_in_mb |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 单个 UDF 查询可使用的内存(以 MB 为单位),上限为分配给读取的内存的 20%。 |
+| 类型 | Float |
+| 默认值 | 30.0 |
+| 改后生效方式 | 重启服务生效 |
+
+- udf_reader_transformer_collector_memory_proportion
+
+| 名字 | udf_reader_transformer_collector_memory_proportion |
+| ------------ | --------------------------------------------------------- |
+| 描述 | UDF 内存分配比例。参数形式为 a : b : c,其中 a、b、c 为整数。 |
+| 类型 | String |
+| 默认值 | 1:1:1 |
+| 改后生效方式 | 重启服务生效 |
+
+- udf_lib_dir
+
+| 名字 | udf_lib_dir |
+| ------------ | ---------------------------- |
+| 描述 | UDF 日志及 jar 文件存储路径 |
+| 类型 | String |
+| 默认值 | ext/udf(Windows:ext\\udf) |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.26 触发器配置
+
+- trigger_lib_dir
+
+| 名字 | trigger_lib_dir |
+| ------------ | ----------------------- |
+| 描述 | 触发器 JAR 包存放的目录 |
+| 类型 | String |
+| 默认值 | ext/trigger |
+| 改后生效方式 | 重启服务生效 |
+
+- stateful_trigger_retry_num_when_not_found
+
+| 名字 | stateful_trigger_retry_num_when_not_found |
+| ------------ | ---------------------------------------------- |
+| 描述 | 有状态触发器触发无法找到触发器实例时的重试次数 |
+| 类型 | Int32 |
+| 默认值 | 3 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.27 SELECT-INTO配置
+
+- into_operation_buffer_size_in_byte
+
+| 名字 | into_operation_buffer_size_in_byte |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 执行 select-into 语句时,待写入数据占用的最大内存(单位:Byte) |
+| 类型 | long |
+| 默认值 | 104857600 |
+| 改后生效方式 | 热加载 |
+
+- select_into_insert_tablet_plan_row_limit
+
+| 名字 | select_into_insert_tablet_plan_row_limit |
+| ------------ | ------------------------------------------------------------ |
+| 描述 | 执行 select-into 语句时,一个 insert-tablet-plan 中可以处理的最大行数 |
+| 类型 | int32 |
+| 默认值 | 10000 |
+| 改后生效方式 | 热加载 |
+
+- into_operation_execution_thread_count
+
+| 名字 | into_operation_execution_thread_count |
+| ------------ | ------------------------------------------ |
+| 描述 | SELECT INTO 中执行写入任务的线程池的线程数 |
+| 类型 | int32 |
+| 默认值 | 2 |
+| 改后生效方式 | 重启服务生效 |
+
+### 3.28 连续查询配置
+
+- continuous_query_submit_thread_count
+
+| 名字 | continuous_query_submit_thread_count |
+| ------------ | ------------------------------------ |
+| 描述 | 执行连续查询任务的线程池的线程数 |
+| 类型 | int32 |
+| 默认值 | 2 |
+| 改后生效方式 | 重启服务生效 |
+
+- 
continuous_query_min_every_interval_in_ms + +| 名字 | continuous_query_min_every_interval_in_ms | +| ------------ | ----------------------------------------- | +| 描述 | 连续查询执行时间间隔的最小值 | +| 类型 | long (duration) | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +### 3.29 PIPE配置 + +- pipe_lib_dir + +| 名字 | pipe_lib_dir | +| ------------ | -------------------------- | +| 描述 | 自定义 Pipe 插件的存放目录 | +| 类型 | string | +| 默认值 | ext/pipe | +| 改后生效方式 | 暂不支持修改 | + +- pipe_subtask_executor_max_thread_num + +| 名字 | pipe_subtask_executor_max_thread_num | +| ------------ | ------------------------------------------------------------ | +| 描述 | pipe 子任务 processor、sink 中各自可以使用的最大线程数。实际值将是 min(pipe_subtask_executor_max_thread_num, max(1, CPU核心数 / 2))。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- pipe_sink_timeout_ms + +| 名字 | pipe_sink_timeout_ms | +| ------------ | --------------------------------------------- | +| 描述 | thrift 客户端的连接超时时间(以毫秒为单位)。 | +| 类型 | int | +| 默认值 | 900000 | +| 改后生效方式 | 重启服务生效 | + +- pipe_sink_selector_number + +| 名字 | pipe_sink_selector_number | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大执行结果处理线程数量。 建议将此值设置为小于或等于 pipe_sink_max_client_number。 | +| 类型 | int | +| 默认值 | 4 | +| 改后生效方式 | 重启服务生效 | + +- pipe_sink_max_client_number + +| 名字 | pipe_sink_max_client_number | +| ------------ | ----------------------------------------------------------- | +| 描述 | 在 iotdb-thrift-async-sink 插件中可以使用的最大客户端数量。 | +| 类型 | int | +| 默认值 | 16 | +| 改后生效方式 | 重启服务生效 | + +- pipe_air_gap_receiver_enabled + +| 名字 | pipe_air_gap_receiver_enabled | +| ------------ | ------------------------------------------------------------ | +| 描述 | 是否启用通过网闸接收 pipe 数据。接收器只能在 tcp 模式下返回 0 或 1,以指示数据是否成功接收。 \| | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- pipe_air_gap_receiver_port + +| 名字 | pipe_air_gap_receiver_port | +| ------------ | ------------------------------------ | +| 描述 | 服务器通过网闸接收 pipe 数据的端口。 | +| 类型 | int | +| 默认值 | 9780 | +| 改后生效方式 | 重启服务生效 | + +- pipe_all_sinks_rate_limit_bytes_per_second + +| 名字 | pipe_all_sinks_rate_limit_bytes_per_second | +| ------------ | ------------------------------------------------------------ | +| 描述 | 所有 pipe sink 每秒可以传输的总字节数。当给定的值小于或等于 0 时,表示没有限制。默认值是 -1,表示没有限制。 | +| 类型 | double | +| 默认值 | -1 | +| 改后生效方式 | 热加载 | + +### 3.30 Ratis共识协议配置 + +当Region配置了RatisConsensus共识协议之后,下述的配置项才会生效 + +- config_node_ratis_log_appender_buffer_size_max + +| 名字 | config_node_ratis_log_appender_buffer_size_max | +| ------------ | ---------------------------------------------- | +| 描述 | confignode 一次同步日志RPC最大的传输字节限制 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_appender_buffer_size_max + +| 名字 | schema_region_ratis_log_appender_buffer_size_max | +| ------------ | ------------------------------------------------ | +| 描述 | schema region 一次同步日志RPC最大的传输字节限制 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_appender_buffer_size_max + +| 名字 | data_region_ratis_log_appender_buffer_size_max | +| ------------ | ---------------------------------------------- | +| 描述 | data region 一次同步日志RPC最大的传输字节限制 | +| 类型 | int32 | +| 默认值 | 16777216 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_snapshot_trigger_threshold + +| 名字 | config_node_ratis_snapshot_trigger_threshold | +| ------------ | -------------------------------------------- | +| 描述 | confignode 触发snapshot需要的日志条数 | +| 类型 | int32 | +| 默认值 | 400,000 | +| 改后生效方式 | 重启服务生效 | + +- 
schema_region_ratis_snapshot_trigger_threshold
+
+| 名字 | schema_region_ratis_snapshot_trigger_threshold |
+| ------------ | ---------------------------------------------- |
+| 描述 | schema region 触发 snapshot 需要的日志条数 |
+| 类型 | int32 |
+| 默认值 | 400,000 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_snapshot_trigger_threshold
+
+| 名字 | data_region_ratis_snapshot_trigger_threshold |
+| ------------ | -------------------------------------------- |
+| 描述 | data region 触发 snapshot 需要的日志条数 |
+| 类型 | int32 |
+| 默认值 | 400,000 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_log_unsafe_flush_enable
+
+| 名字 | config_node_ratis_log_unsafe_flush_enable |
+| ------------ | ----------------------------------------- |
+| 描述 | confignode 是否允许 Raft 日志异步刷盘 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_unsafe_flush_enable
+
+| 名字 | schema_region_ratis_log_unsafe_flush_enable |
+| ------------ | ------------------------------------------- |
+| 描述 | schema region 是否允许 Raft 日志异步刷盘 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_unsafe_flush_enable
+
+| 名字 | data_region_ratis_log_unsafe_flush_enable |
+| ------------ | ----------------------------------------- |
+| 描述 | data region 是否允许 Raft 日志异步刷盘 |
+| 类型 | boolean |
+| 默认值 | false |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_log_segment_size_max_in_byte
+
+| 名字 | config_node_ratis_log_segment_size_max_in_byte |
+| ------------ | ---------------------------------------------- |
+| 描述 | confignode 一个 RaftLog 日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_log_segment_size_max_in_byte
+
+| 名字 | schema_region_ratis_log_segment_size_max_in_byte |
+| ------------ | ------------------------------------------------ |
+| 描述 | schema region 一个 RaftLog 日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_log_segment_size_max_in_byte
+
+| 名字 | data_region_ratis_log_segment_size_max_in_byte |
+| ------------ | ---------------------------------------------- |
+| 描述 | data region 一个 RaftLog 日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_simple_consensus_log_segment_size_max_in_byte
+
+| 名字 | config_node_simple_consensus_log_segment_size_max_in_byte |
+| ------------ | --------------------------------------------------------- |
+| 描述 | Confignode 简单共识协议一个 Log 日志段文件的大小 |
+| 类型 | int32 |
+| 默认值 | 25165824 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_grpc_flow_control_window
+
+| 名字 | config_node_ratis_grpc_flow_control_window |
+| ------------ | ------------------------------------------ |
+| 描述 | confignode grpc 流式拥塞窗口大小 |
+| 类型 | int32 |
+| 默认值 | 4194304 |
+| 改后生效方式 | 重启服务生效 |
+
+- schema_region_ratis_grpc_flow_control_window
+
+| 名字 | schema_region_ratis_grpc_flow_control_window |
+| ------------ | -------------------------------------------- |
+| 描述 | schema region grpc 流式拥塞窗口大小 |
+| 类型 | int32 |
+| 默认值 | 4194304 |
+| 改后生效方式 | 重启服务生效 |
+
+- data_region_ratis_grpc_flow_control_window
+
+| 名字 | data_region_ratis_grpc_flow_control_window |
+| ------------ | ------------------------------------------ |
+| 描述 | data region grpc 流式拥塞窗口大小 |
+| 类型 | int32 |
+| 默认值 | 4194304 |
+| 改后生效方式 | 重启服务生效 |
+
+- config_node_ratis_grpc_leader_outstanding_appends_max
+
+| 名字 | config_node_ratis_grpc_leader_outstanding_appends_max |
+| ------------ | ----------------------------------------------------- |
+| 描述 | config node grpc 流水线并发阈值 |
+| 类型 | int32 |
+| 默认值 | 128 |
+| 改后生效方式 | 重启服务生效 |
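+
+三类 Region 的 grpc 流控窗口往往需要同步调整,才能在高延迟网络下提升 Raft 日志复制吞吐。以下为一个示意性的 iotdb-system.properties 片段(8MB 仅为示例取值,未经实测,请按带宽与内存情况自行权衡):
+
+```properties
+# 示例:将 grpc 流式拥塞窗口从默认 4194304(4MB)调大到 8388608(8MB),重启服务后生效
+config_node_ratis_grpc_flow_control_window=8388608
+schema_region_ratis_grpc_flow_control_window=8388608
+data_region_ratis_grpc_flow_control_window=8388608
+```
+
+- 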
schema_region_ratis_grpc_leader_outstanding_appends_max + +| 名字 | schema_region_ratis_grpc_leader_outstanding_appends_max | +| ------------ | ------------------------------------------------------- | +| 描述 | schema region grpc 流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_grpc_leader_outstanding_appends_max + +| 名字 | data_region_ratis_grpc_leader_outstanding_appends_max | +| ------------ | ----------------------------------------------------- | +| 描述 | data region grpc 流水线并发阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_log_force_sync_num + +| 名字 | config_node_ratis_log_force_sync_num | +| ------------ | ------------------------------------ | +| 描述 | config node fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_force_sync_num + +| 名字 | schema_region_ratis_log_force_sync_num | +| ------------ | -------------------------------------- | +| 描述 | schema region fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_force_sync_num + +| 名字 | data_region_ratis_log_force_sync_num | +| ------------ | ------------------------------------ | +| 描述 | data region fsync 阈值 | +| 类型 | int32 | +| 默认值 | 128 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | config_node_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | confignode leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | schema_region_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ------------------------------------------------------ | +| 描述 | schema region leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_rpc_leader_election_timeout_min_ms + +| 名字 | data_region_ratis_rpc_leader_election_timeout_min_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | data region leader 选举超时最小值 | +| 类型 | int32 | +| 默认值 | 2000ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | config_node_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | confignode leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | schema_region_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ------------------------------------------------------ | +| 描述 | schema region leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_rpc_leader_election_timeout_max_ms + +| 名字 | data_region_ratis_rpc_leader_election_timeout_max_ms | +| ------------ | ---------------------------------------------------- | +| 描述 | data region leader 选举超时最大值 | +| 类型 | int32 | +| 默认值 | 4000ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_request_timeout_ms + +| 名字 | config_node_ratis_request_timeout_ms | +| ------------ | ------------------------------------ | +| 描述 | confignode Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_request_timeout_ms + +| 名字 | schema_region_ratis_request_timeout_ms | +| ------------ | -------------------------------------- | +| 描述 | schema region Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_request_timeout_ms + +| 名字 | 
data_region_ratis_request_timeout_ms | +| ------------ | ------------------------------------ | +| 描述 | data region Raft 客户端重试超时 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_max_retry_attempts + +| 名字 | config_node_ratis_max_retry_attempts | +| ------------ | ------------------------------------ | +| 描述 | confignode Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_initial_sleep_time_ms + +| 名字 | config_node_ratis_initial_sleep_time_ms | +| ------------ | --------------------------------------- | +| 描述 | confignode Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_max_sleep_time_ms + +| 名字 | config_node_ratis_max_sleep_time_ms | +| ------------ | ------------------------------------- | +| 描述 | confignode Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 10000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_max_retry_attempts + +| 名字 | schema_region_ratis_max_retry_attempts | +| ------------ | -------------------------------------- | +| 描述 | schema region Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_initial_sleep_time_ms + +| 名字 | schema_region_ratis_initial_sleep_time_ms | +| ------------ | ----------------------------------------- | +| 描述 | schema region Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_max_sleep_time_ms + +| 名字 | schema_region_ratis_max_sleep_time_ms | +| ------------ | ---------------------------------------- | +| 描述 | schema region Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_max_retry_attempts + +| 名字 | data_region_ratis_max_retry_attempts | +| ------------ | ------------------------------------ | +| 描述 | data region Raft客户端最大重试次数 | +| 类型 | int32 | +| 默认值 | 10 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_initial_sleep_time_ms + +| 名字 | data_region_ratis_initial_sleep_time_ms | +| ------------ | --------------------------------------- | +| 描述 | data region Raft客户端初始重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 100ms | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_max_sleep_time_ms + +| 名字 | data_region_ratis_max_sleep_time_ms | +| ------------ | -------------------------------------- | +| 描述 | data region Raft客户端最大重试睡眠时长 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- ratis_first_election_timeout_min_ms + +| 名字 | ratis_first_election_timeout_min_ms | +| ------------ | ----------------------------------- | +| 描述 | Ratis协议首次选举最小超时时间 | +| 类型 | int64 | +| 默认值 | 50 (ms) | +| 改后生效方式 | 重启服务生效 | + +- ratis_first_election_timeout_max_ms + +| 名字 | ratis_first_election_timeout_max_ms | +| ------------ | ----------------------------------- | +| 描述 | Ratis协议首次选举最大超时时间 | +| 类型 | int64 | +| 默认值 | 150 (ms) | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_preserve_logs_num_when_purge + +| 名字 | config_node_ratis_preserve_logs_num_when_purge | +| ------------ | ---------------------------------------------- | +| 描述 | confignode snapshot后保持一定数量日志不删除 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_preserve_logs_num_when_purge + +| 名字 | schema_region_ratis_preserve_logs_num_when_purge | +| ------------ | ------------------------------------------------ | +| 描述 | schema region snapshot后保持一定数量日志不删除 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_preserve_logs_num_when_purge + +| 名字 | data_region_ratis_preserve_logs_num_when_purge | +| ------------ | 
---------------------------------------------- | +| 描述 | data region snapshot后保持一定数量日志不删除 | +| 类型 | int32 | +| 默认值 | 1000 | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_log_max_size + +| 名字 | config_node_ratis_log_max_size | +| ------------ | ----------------------------------- | +| 描述 | config node磁盘Raft Log最大占用空间 | +| 类型 | int64 | +| 默认值 | 2147483648 (2GB) | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_log_max_size + +| 名字 | schema_region_ratis_log_max_size | +| ------------ | -------------------------------------- | +| 描述 | schema region 磁盘Raft Log最大占用空间 | +| 类型 | int64 | +| 默认值 | 2147483648 (2GB) | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_log_max_size + +| 名字 | data_region_ratis_log_max_size | +| ------------ | ------------------------------------ | +| 描述 | data region 磁盘Raft Log最大占用空间 | +| 类型 | int64 | +| 默认值 | 21474836480 (20GB) | +| 改后生效方式 | 重启服务生效 | + +- config_node_ratis_periodic_snapshot_interval + +| 名字 | config_node_ratis_periodic_snapshot_interval | +| ------------ | -------------------------------------------- | +| 描述 | config node定期snapshot的间隔时间 | +| 类型 | int64 | +| 默认值 | 86400 (秒) | +| 改后生效方式 | 重启服务生效 | + +- schema_region_ratis_periodic_snapshot_interval + +| 名字 | schema_region_ratis_preserve_logs_num_when_purge | +| ------------ | ------------------------------------------------ | +| 描述 | schema region定期snapshot的间隔时间 | +| 类型 | int64 | +| 默认值 | 86400 (秒) | +| 改后生效方式 | 重启服务生效 | + +- data_region_ratis_periodic_snapshot_interval + +| 名字 | data_region_ratis_preserve_logs_num_when_purge | +| ------------ | ---------------------------------------------- | +| 描述 | data region定期snapshot的间隔时间 | +| 类型 | int64 | +| 默认值 | 86400 (秒) | +| 改后生效方式 | 重启服务生效 | + +### 3.31 IoTConsensusV2配置 + +- iot_consensus_v2_pipeline_size + +| 名字 | iot_consensus_v2_pipeline_size | +| ------------ | ------------------------------------------------------------ | +| 描述 | IoTConsensus V2中连接器(connector)和接收器(receiver)的默认事件缓冲区大小。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +- iot_consensus_v2_mode + +| 名字 | iot_consensus_v2_pipeline_size | +| ------------ | ----------------------------------- | +| 描述 | IoTConsensus V2使用的共识协议模式。 | +| 类型 | String | +| 默认值 | batch | +| 改后生效方式 | 重启服务生效 | + +### 3.32 Procedure 配置 + +- procedure_core_worker_thread_count + +| 名字 | procedure_core_worker_thread_count | +| ------------ | ---------------------------------- | +| 描述 | 工作线程数量 | +| 类型 | int32 | +| 默认值 | 4 | +| 改后生效方式 | 重启服务生效 | + +- procedure_completed_clean_interval + +| 名字 | procedure_completed_clean_interval | +| ------------ | ---------------------------------- | +| 描述 | 清理已完成的 procedure 时间间隔 | +| 类型 | int32 | +| 默认值 | 30(s) | +| 改后生效方式 | 重启服务生效 | + +- procedure_completed_evict_ttl + +| 名字 | procedure_completed_evict_ttl | +| ------------ | --------------------------------- | +| 描述 | 已完成的 procedure 的数据保留时间 | +| 类型 | int32 | +| 默认值 | 60(s) | +| 改后生效方式 | 重启服务生效 | + +### 3.33 MQTT代理配置 + +- enable_mqtt_service + +| 名字 | enable_mqtt_service。 | +| ------------ | --------------------- | +| 描述 | 是否开启MQTT服务 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 热加载 | + +- mqtt_host + +| 名字 | mqtt_host | +| ------------ | -------------------- | +| 描述 | MQTT服务绑定的host。 | +| 类型 | String | +| 默认值 | 127.0.0.1 | +| 改后生效方式 | 热加载 | + +- mqtt_port + +| 名字 | mqtt_port | +| ------------ | -------------------- | +| 描述 | MQTT服务绑定的port。 | +| 类型 | int32 | +| 默认值 | 1883 | +| 改后生效方式 | 热加载 | + +- mqtt_handler_pool_size + +| 名字 | mqtt_handler_pool_size | +| ------------ | ---------------------------------- | +| 描述 | 用于处理MQTT消息的处理程序池大小。 | +| 类型 | 
int32 | +| 默认值 | 1 | +| 改后生效方式 | 热加载 | + +- mqtt_payload_formatter + +| 名字 | mqtt_payload_formatter | +| ------------ | ---------------------------- | +| 描述 | MQTT消息有效负载格式化程序。 | +| 类型 | String | +| 默认值 | json | +| 改后生效方式 | 热加载 | + +- mqtt_max_message_size + +| 名字 | mqtt_max_message_size | +| ------------ | ------------------------------------ | +| 描述 | MQTT消息的最大长度(以字节为单位)。 | +| 类型 | int32 | +| 默认值 | 1048576 | +| 改后生效方式 | 热加载 | + +### 3.34 审计日志配置 + +- enable_audit_log + +| 名字 | enable_audit_log | +| ------------ | ------------------------------ | +| 描述 | 用于控制是否启用审计日志功能。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 重启服务生效 | + +- audit_log_storage + +| 名字 | audit_log_storage | +| ------------ | -------------------------- | +| 描述 | 定义了审计日志的输出位置。 | +| 类型 | String | +| 默认值 | IOTDB,LOGGER | +| 改后生效方式 | 重启服务生效 | + +- audit_log_operation + +| 名字 | audit_log_operation | +| ------------ | -------------------------------------- | +| 描述 | 定义了哪些类型的操作需要记录审计日志。 | +| 类型 | String | +| 默认值 | DML,DDL,QUERY | +| 改后生效方式 | 重启服务生效 | + +- enable_audit_log_for_native_insert_api + +| 名字 | enable_audit_log_for_native_insert_api | +| ------------ | -------------------------------------- | +| 描述 | 用于控制本地写入API是否记录审计日志。 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 重启服务生效 | + +### 3.35 白名单配置 +- enable_white_list + +| 名字 | enable_white_list | +| ------------ | ----------------- | +| 描述 | 是否启用白名单。 | +| 类型 | Boolean | +| 默认值 | false | +| 改后生效方式 | 热加载 | + +### 3.36 IoTDB-AI 配置 + +- model_inference_execution_thread_count + +| 名字 | model_inference_execution_thread_count | +| ------------ | -------------------------------------- | +| 描述 | 用于模型推理操作的线程数。 | +| 类型 | int | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + +### 3.37 TsFile 主动监听&加载功能配置 + +- load_clean_up_task_execution_delay_time_seconds + +| 名字 | load_clean_up_task_execution_delay_time_seconds | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在加载TsFile失败后,系统将等待多长时间才会执行清理任务来清除这些未成功加载的TsFile。 | +| 类型 | int | +| 默认值 | 1800 | +| 改后生效方式 | 热加载 | + +- load_write_throughput_bytes_per_second + +| 名字 | load_write_throughput_bytes_per_second | +| ------------ | -------------------------------------- | +| 描述 | 加载TsFile时磁盘写入的最大字节数每秒。 | +| 类型 | int | +| 默认值 | -1 | +| 改后生效方式 | 热加载 | + +- load_active_listening_enable + +| 名字 | load_active_listening_enable | +| ------------ | ------------------------------------------------------------ | +| 描述 | 是否开启 DataNode 主动监听并且加载 tsfile 的功能(默认开启)。 | +| 类型 | Boolean | +| 默认值 | true | +| 改后生效方式 | 热加载 | + +- load_active_listening_dirs + +| 名字 | load_active_listening_dirs | +| ------------ | ------------------------------------------------------------ | +| 描述 | 需要监听的目录(自动包括目录中的子目录),如有多个使用 “,“ 隔开默认的目录为 ext/load/pending(支持热装载)。 | +| 类型 | String | +| 默认值 | ext/load/pending | +| 改后生效方式 | 热加载 | + +- load_active_listening_fail_dir + +| 名字 | load_active_listening_fail_dir | +| ------------ | ---------------------------------------------------------- | +| 描述 | 执行加载 tsfile 文件失败后将文件转存的目录,只能配置一个。 | +| 类型 | String | +| 默认值 | ext/load/failed | +| 改后生效方式 | 热加载 | + +- load_active_listening_max_thread_num + +| 名字 | load_active_listening_max_thread_num | +| ------------ | ------------------------------------------------------------ | +| 描述 | 同时执行加载 tsfile 任务的最大线程数,参数被注释掉时的默值为 max(1, CPU 核心数 / 2),当用户设置的值不在这个区间[1, CPU核心数 /2]内时,会设置为默认值 (1, CPU 核心数 / 2)。 | +| 类型 | Long | +| 默认值 | 0 | +| 改后生效方式 | 重启服务生效 | + +- load_active_listening_check_interval_seconds + +| 名字 | load_active_listening_check_interval_seconds | +| ------------ | 
------------------------------------------------------------ | +| 描述 | 主动监听轮询间隔,单位秒。主动监听 tsfile 的功能是通过轮询检查文件夹实现的。该配置指定了两次检查 load_active_listening_dirs 的时间间隔,每次检查完成 load_active_listening_check_interval_seconds 秒后,会执行下一次检查。当用户设置的轮询间隔小于 1 时,会被设置为默认值 5 秒。 | +| 类型 | Long | +| 默认值 | 5 | +| 改后生效方式 | 重启服务生效 | + + +* last_cache_operation_on_load + +|名字| last_cache_operation_on_load | +|:---:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|描述| 当成功加载一个 TsFile 时,对 LastCache 执行的操作。`UPDATE`:使用 TsFile 中的数据更新 LastCache;`UPDATE_NO_BLOB`:与 UPDATE 类似,但会使 blob 序列的 LastCache 失效;`CLEAN_DEVICE`:使 TsFile 中包含的设备的 LastCache 失效;`CLEAN_ALL`:清空整个 LastCache。 | +|类型| String | +|默认值| UPDATE_NO_BLOB | +|改后生效方式| 重启后生效 | + +* cache_last_values_for_load + +|名字| cache_last_values_for_load | +|:---:|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|描述| 在加载 TsFile 之前是否缓存最新值(last values)。仅在 `last_cache_operation_on_load=UPDATE_NO_BLOB` 或 `last_cache_operation_on_load=UPDATE` 时生效。当设置为 true 时,即使 `last_cache_operation_on_load=UPDATE`,也会忽略 blob 序列。启用此选项会在加载 TsFile 期间增加内存占用。 | +|类型| Boolean | +|默认值| true | +|改后生效方式| 重启后生效 | + +* cache_last_values_memory_budget_in_byte + +|名字| cache_last_values_memory_budget_in_byte | +|:---:|:----------------------------------------------------------------------------------------------------| +|描述| 当 `cache_last_values_for_load=true` 时,用于缓存最新值的最大内存大小(以字节为单位)。如果超过该值,缓存的值将被丢弃,并以流式方式直接从 TsFile 中读取最新值。 | +|类型| int32 | +|默认值| 4194304 | +|改后生效方式| 重启后生效 | + + +### 3.38 分发重试配置 + +- enable_retry_for_unknown_error + +| 名字 | enable_retry_for_unknown_error | +| ------------ | ------------------------------------------------------------ | +| 描述 | 在遇到未知错误时,写请求远程分发的最大重试时间,单位是毫秒。 | +| 类型 | Long | +| 默认值 | 60000 | +| 改后生效方式 | 热加载 | + +- enable_retry_for_unknown_error + +| 名字 | enable_retry_for_unknown_error | +| ------------ | -------------------------------- | +| 描述 | 用于控制是否对未知错误进行重试。 | +| 类型 | boolean | +| 默认值 | false | +| 改后生效方式 | 热加载 | \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/Reference/System-Tables_timecho.md b/src/zh/UserGuide/latest-Table/Reference/System-Tables_timecho.md index 45b71a510..e13fa2c59 100644 --- a/src/zh/UserGuide/latest-Table/Reference/System-Tables_timecho.md +++ b/src/zh/UserGuide/latest-Table/Reference/System-Tables_timecho.md @@ -369,7 +369,7 @@ IoTDB> select * from information_schema.views ### 2.11 MODELS 表 -> 该系统表从 V 2.0.5 版本开始提供,从V 2.0.8-beta 版本开始不再提供 +> 该系统表从 V 2.0.5 版本开始提供,从V 2.0.8 版本开始不再提供 * 包含数据库内所有的模型信息 * 表结构如下表所示: @@ -589,7 +589,7 @@ IoTDB> select * from information_schema.data_nodes ### 2.18 CONNECTIONS 表 -> 该系统表从 V 2.0.8-beta 版本开始提供 +> 该系统表从 V 2.0.8 版本开始提供 * 包含集群中所有连接。 * 表结构如下表所示: @@ -616,7 +616,7 @@ IoTDB> select * from information_schema.connections; ### 2.19 CURRENT\_QUERIES 表 -> 该系统表从 V 2.0.8-beta 版本开始提供 +> 该系统表从 V 2.0.8 版本开始提供 * 包含所有执行结束时间在 `[now() - query_cost_stat_window, now())` 范围内的所有查询,也包括当前正在执行的查询。其中`query_cost_stat_window `代表查询耗时统计的窗口,默认值为 0 ,可通过配置文件`iotdb-system.properties`进行配置。 * 表结构如下表所示: @@ -647,7 +647,7 @@ IoTDB> select * from information_schema.current_queries; ### 2.20 QUERIES\_COSTS\_HISTOGRAM 表 -> 该系统表从 V 2.0.8-beta 版本开始提供 +> 该系统表从 V 2.0.8 版本开始提供 * 包含过去 `query_cost_stat_window` 时间内的查询耗时的直方图(仅统计已经执行结束的 
SQL),其中`query_cost_stat_window `代表查询耗时统计的窗口,默认值为 0 ,可通过配置文件`iotdb-system.properties`进行配置。 * 表结构如下表所示: diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function.md b/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function.md index 16a9bc3f0..46a8529cd 100644 --- a/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function.md +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function.md @@ -1,3 +1,6 @@ +--- +redirectTo: Basis-Function_apache.html +--- - -# 基础函数 - -## 1. 比较函数和运算符 - -### 1.1 基本比较运算符 - -比较运算符用于比较两个值,并返回比较结果(true或false)。 - -| 运算符 | 描述 | -| ------ | ---------- | -| < | 小于 | -| > | 大于 | -| <= | 小于或等于 | -| >= | 大于或等于 | -| = | 等于 | -| <> | 不等于 | -| != | 不等于 | - -#### 1.1.1 比较规则: - -1. 所有类型都可以与自身进行比较 -2. 数值类型(INT32, INT64, FLOAT, DOUBLE, TIMESTAMP)之间可以相互比较 -3. 字符类型(STRING, TEXT)之间也可以相互比较 -4. 除上述规则外的类型进行比较时,均会报错。 - -### 1.2 BETWEEN 运算符 - -1. `BETWEEN` 操作符用于判断一个值是否在指定的范围内。 -2. `NOT BETWEEN`操作符用于判断一个值是否不在指定范围内。 -3. `BETWEEN` 和 `NOT BETWEEN` 操作符可用于评估任何可排序的类型。 -4. `BETWEEN` 和 `NOT BETWEEN` 的值、最小值和最大值参数必须是同一类型,否则会报错。 - -**语法**: - -```SQL - value BETWEEN min AND max: - value NOT BETWEEN min AND max: -``` - -示例 1 :BETWEEN - -```SQL --- 查询 temperature 在 85.0 和 90.0 之间的记录 -SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0; -``` - -示例 2 :NOT BETWEEN - -```SQL -3-- 查询 humidity 不在 35.0 和 40.0 之间的记录 -SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0; -``` - -### 1.3 IS NULL 运算符 - -1. `IS NULL` 和 `IS NOT NULL` 运算符用于判断一个值是否为 NULL。 -2. 这两个运算符适用于所有数据类型。 - -示例1:查询 temperature 为 NULL 的记录 - -```SQL -SELECT * FROM table1 WHERE temperature IS NULL; -``` - -示例2:查询 humidity 不为 NULL 的记录 - -```SQL -SELECT * FROM table1 WHERE humidity IS NOT NULL; -``` - -### 1.4 IN 运算符 - -1. `IN` 操作符可用于 `WHERE` 子句中,比较一列中的一些值。 -2. 这些值可以由静态数组、标量表达式。 - -**语法:** - -```SQL -... WHERE column [NOT] IN ('value1','value2', expression1) -``` - -示例 1:静态数组:查询 region 为 '北京' 或 '上海' 的记录 - -```SQL -SELECT * FROM table1 WHERE region IN ('北京', '上海'); ---等价于 -SELECT * FROM region WHERE name = '北京' OR name = '上海'; -``` - -示例 2:标量表达式:查询 temperature 在特定值中的记录 - -```SQL -SELECT * FROM table1 WHERE temperature IN (85.0, 90.0); -``` - -示例 3:查询 region 不为 '北京' 或 '上海' 的记录 - -```SQL -SELECT * FROM table1 WHERE region NOT IN ('北京', '上海'); -``` - -### 1.5 GREATEST 和 LEAST - -`Greatest` 函数用于返回参数列表中的最大值,`Least` 函数用于返回参数列表中的最小值,返回数据类型与输入类型相同。 -1. 空值处理:若所有参数均为 NULL,则返回 NULL。 -2. 参数要求:必须提供 至少 2 个参数。 -3. 类型约束:仅支持 相同数据类型 的参数比较。 -4. 支持类型: `BOOLEAN`、`FLOAT`、`DOUBLE`、`INT32`、`INT64`、`STRING`、`TEXT`、`TIMESTAMP`、`DATE` - -**语法:** - -```sql - greatest(value1, value2, ..., valueN) - least(value1, value2, ..., valueN) -``` - -**示例:** - -```sql --- 查询 table2 中 temperature 和 humidity 的最大记录 -SELECT GREATEST(temperature,humidity) FROM table2; - --- 查询 table2 中 temperature 和 humidity 的最小记录 -SELECT LEAST(temperature,humidity) FROM table2; -``` - - -## 2. 聚合函数 - -### 2.1 概述 - -1. 聚合函数是多对一函数。它们对一组值进行聚合计算,得到单个聚合结果。 -2. 
除了 `COUNT()`之外,其他所有聚合函数都忽略空值,并在没有输入行或所有值为空时返回空值。 例如,`SUM()` 返回 null 而不是零,而 `AVG()` 在计数中不包括 null 值。 - -### 2.2 支持的聚合函数 - -| 函数名 | 功能描述 | 允许的输入类型 | 输出类型 | -|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------|------------------| -| COUNT | 计算数据点数。 | 所有类型 | INT64 | -| COUNT_IF | COUNT_IF(exp) 用于统计满足指定布尔表达式的记录行数 | exp 必须是一个布尔类型的表达式,例如 count_if(temperature>20) | INT64 | -| APPROX_COUNT_DISTINCT | APPROX_COUNT_DISTINCT(x[,maxStandardError]) 函数提供 COUNT(DISTINCT x) 的近似值,返回不同输入值的近似个数。 | `x`:待计算列,支持所有类型;
`maxStandardError`:指定该函数应产生的最大标准误差,取值范围[0.0040625, 0.26],未指定值时默认0.023。 | INT64 | -| APPROX_MOST_FREQUENT | APPROX_MOST_FREQUENT(x, k, capacity) 函数用于近似计算数据集中出现频率最高的前 k 个元素。它返回一个JSON 格式的字符串,其中键是该元素的值,值是该元素对应的近似频率。(V 2.0.5.1 及以后版本支持) | `x`:待计算列,支持 IoTDB 现有所有的数据类型;
`k`:返回出现频率最高的 k 个值;
`capacity`: 用于计算的桶的数量,跟内存占用相关:其值越大误差越小,但占用内存更大,反之capacity值越小误差越大,但占用内存更小。 | STRING | -| SUM | 求和。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| AVG | 求平均值。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| MAX | 求最大值。 | 所有类型 | 与输入类型一致 | -| MIN | 求最小值。 | 所有类型 | 与输入类型一致 | -| FIRST | 求时间戳最小且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | -| LAST | 求时间戳最大且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | -| STDDEV | STDDEV_SAMP 的别名,求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| STDDEV_POP | 求总体标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| STDDEV_SAMP | 求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| VARIANCE | VAR_SAMP 的别名,求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| VAR_POP | 求总体方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| VAR_SAMP | 求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | -| EXTREME | 求具有最大绝对值的值。如果正值和负值的最大绝对值相等,则返回正值。 | INT32 INT64 FLOAT DOUBLE | 与输入类型一致 | -| MODE | 求众数。注意: 1.输入序列的不同值个数过多时会有内存异常风险; 2.如果所有元素出现的频次相同,即没有众数,则随机返回一个元素; 3.如果有多个众数,则随机返回一个众数; 4. NULL 值也会被统计频次,所以即使输入序列的值不全为 NULL,最终结果也可能为 NULL。 | 所有类型 | 与输入类型一致 | -| MAX_BY | MAX_BY(x, y) 求二元输入 x 和 y 在 y 最大时对应的 x 的值。MAX_BY(time, x) 返回 x 取最大值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | -| MIN_BY | MIN_BY(x, y) 求二元输入 x 和 y 在 y 最小时对应的 x 的值。MIN_BY(time, x) 返回 x 取最小值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | -| FIRST_BY | FIRST_BY(x, y) 求当 y 为第一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | -| LAST_BY | LAST_BY(x, y) 求当 y 为最后一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | - - -### 2.3 示例 - -#### 2.3.1 示例数据 - -在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 - -#### 2.3.2 Count - -统计的是整张表的行数和 `temperature` 列非 NULL 值的数量。 - -```SQL -IoTDB> select count(*), count(temperature) from table1; -``` - -执行结果如下: - -> 注意:只有COUNT函数可以与*一起使用,否则将抛出错误。 - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 18| 12| -+-----+-----+ -Total line number = 1 -It costs 0.834s -``` - - -#### 2.3.3 Count_if - -统计 `table2` 中 到达时间 `arrival_time` 不是 `null` 的记录行数。 - -```sql -IoTDB> select count_if(arrival_time is not null) from table2; -``` - -执行结果如下: - -```sql -+-----+ -|_col0| -+-----+ -| 4| -+-----+ -Total line number = 1 -It costs 0.047s -``` - -#### 2.3.4 Approx_count_distinct - -查询 `table1` 中 `temperature` 列不同值的个数。 - -```sql -IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1; -IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1; -``` - -执行结果如下: - -```sql -+------+------+ -|origin|approx| -+------+------+ -| 3| 3| -+------+------+ -Total line number = 1 -It costs 0.022s -``` - -#### 2.3.5 Approx_most_frequent - -查询 `table1` 中 `temperature` 列出现频次最高的2个值 - -```sql -IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1; -``` - -执行结果如下: - -```sql -+-------------------+ -| topk| -+-------------------+ -|{"85.0":6,"90.0":5}| -+-------------------+ -Total line number = 1 -It costs 0.064s -``` - - -#### 2.3.6 First - -查询`temperature`列、`humidity`列时间戳最小且不为 NULL 的值。 - -```SQL -IoTDB> select first(temperature), first(humidity) from table1; -``` - -执行结果如下: - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 90.0| 35.1| -+-----+-----+ -Total line number = 1 -It costs 0.170s -``` - -#### 2.3.7 Last - -查询`temperature`列、`humidity`列时间戳最大且不为 NULL 的值。 - -```SQL -IoTDB> select last(temperature), last(humidity) from table1; -``` - -执行结果如下: - -```SQL -+-----+-----+ -|_col0|_col1| -+-----+-----+ -| 90.0| 34.8| -+-----+-----+ -Total line number = 1 -It costs 0.211s 
-``` - -#### 2.3.8 First_by - -查询 `temperature` 列中非 NULL 且时间戳最小的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最小的行的 `humidity` 值。 - -```SQL -IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1; -``` - -执行结果如下: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-26T13:37:00.000+08:00| 35.1| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.269s -``` - -#### 2.3.9 Last_by - -查询`temperature` 列中非 NULL 且时间戳最大的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最大的行的 `humidity` 值。 - -```SQL -IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1; -``` - -执行结果如下: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-30T14:30:00.000+08:00| 34.8| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.070s -``` - -#### 2.3.10 Max_by - -查询`temperature` 列中最大值所在行的 `time` 值,以及`temperature` 列中最大值所在行的 `humidity` 值。 - -```SQL -IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1; -``` - -执行结果如下: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-30T09:30:00.000+08:00| 35.2| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.172s -``` - -#### 2.3.11 Min_by - -查询`temperature` 列中最小值所在行的 `time` 值,以及`temperature` 列中最小值所在行的 `humidity` 值。 - -```SQL -select min_by(time, temperature), min_by(humidity, temperature) from table1; -``` - -执行结果如下: - -```SQL -+-----------------------------+-----+ -| _col0|_col1| -+-----------------------------+-----+ -|2024-11-29T10:00:00.000+08:00| null| -+-----------------------------+-----+ -Total line number = 1 -It costs 0.244s -``` - - -## 3. 逻辑运算符 - -### 3.1 概述 - -逻辑运算符用于组合条件或否定条件,返回布尔结果(`true` 或 `false`)。 - -以下是常用的逻辑运算符及其描述: - -| 运算符 | 描述 | 示例 | -| ------ | ----------------------------- | ------- | -| AND | 仅当两个值都为 true 时为 true | a AND b | -| OR | 任一值为 true 时为 true | a OR b | -| NOT | 当值为 false 时为 true | NOT a | - -### 3.2 NULL 对逻辑运算符的影响 - -#### 3.2.1 AND 运算符 - -- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。 -- 如果 `AND` 运算符的一侧为 `FALSE`,则表达式结果为 `FALSE`。 - -示例: - -```SQL -NULL AND true -- null -NULL AND false -- false -NULL AND NULL -- null -``` - -#### 3.2.2 OR 运算符 - -- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。 -- 如果 `OR` 运算符的一侧为 `TRUE`,则表达式结果为 `TRUE`。 - -示例: - -```SQL -NULL OR NULL -- null -NULL OR false -- null -NULL OR true -- true -``` - -##### 3.2.2.1 真值表 - -以下真值表展示了 `NULL` 在 `AND` 和 `OR` 运算符中的处理方式: - -| a | b | a AND b | a OR b | -| ----- | ----- | ------- | ------ | -| TRUE | TRUE | TRUE | TRUE | -| TRUE | FALSE | FALSE | TRUE | -| TRUE | NULL | NULL | TRUE | -| FALSE | TRUE | FALSE | TRUE | -| FALSE | FALSE | FALSE | FALSE | -| FALSE | NULL | FALSE | NULL | -| NULL | TRUE | NULL | TRUE | -| NULL | FALSE | FALSE | NULL | -| NULL | NULL | NULL | NULL | - -#### 3.2.3 NOT 运算符 - -NULL 的逻辑否定仍然是 NULL - -示例: - -```SQL -NOT NULL -- null -``` - -##### 3.2.3.1真值表 - -以下真值表展示了 `NULL` 在 `NOT` 运算符中的处理方式: - -| a | NOT a | -| ----- | ----- | -| TRUE | FALSE | -| FALSE | TRUE | -| NULL | NULL | - - -## 4. 
日期和时间函数和运算符 - -### 4.1 now() -> Timestamp - -返回当前时间的时间戳。 - -### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp - -`date_bin` 函数是一种用于处理时间数据的函数,作用是将一个时间戳(Timestamp)舍入到指定的时间间隔(interval)的边界上。 - -**语法:** - -```SQL --- 从时间戳为 0 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 -date_bin(interval,source) - --- 从时间戳为 origin 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 -date_bin(interval,source,origin) - --- interval支持的时间单位有: --- 年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 --- source必须为时间戳类型。 -``` - -**参数:** - -| 参数 | 含义 | -| -------- | ------------------------------------------------------------ | -| interval | 时间间隔支持的时间单位有:年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 | -| source | 待计算时间列,也可以是表达式。必须为时间戳类型。 | -| origin | 起始时间戳 | - -#### 4.2.1 语法约定: - -1. 不传入 `origin` 时,起始时间戳从 1970-01-01T00:00:00Z 开始计算(北京时间为 1970-01-01 08:00:00)。 -2. `interval` 为一个非负数,且必须带上时间单位。`interval` 为 0ms 时,不进行计算,直接返回 `source`。 -3. 当传入 `origin` 或 `source` 为负时,表示纪元时间之前的某个时间点,`date_bin` 会正常计算并返回与该时间点相关的时间段。 -4. 如果 `source` 中的值为 `null`,则返回 `null`。 -5. 不支持月份和非月份时间单位混用,例如 `1 MONTH 1 DAY`,这种时间间隔有歧义。 - -> 假设是起始时间是 2000 年 4 月 30 日进行计算,那么在一个时间间隔后,如果是先算 DAY再算MONTH,则会得到 2000 年 6 月 1 日,如果先算 MONTH 再算 DAY 则会得到 2000 年 5 月 31 日,二者得出的时间日期不同。 - -#### 4.2.2 示例 - -##### 示例数据 - -在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 - -示例 1:不指定起始时间戳 - -```SQL -SELECT - time, - date_bin(1h,time) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.683s -``` - -示例 2:指定起始时间戳 - -```SQL -SELECT - time, - date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| 
-|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.056s -``` - -示例 3:`origin` 为负数的情况 - -```SQL -SELECT - time, - date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| -|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.203s -``` - -示例 4:`interval` 为 0 的情况 - -```SQL -SELECT - time, - date_bin(0ms, time) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| time| time_bin| -+-----------------------------+-----------------------------+ -|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| -|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| -|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| -|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| -|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| -|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| -|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| -|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| -|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| -|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| -|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| -|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| -|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| 
-|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00| -|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.107s -``` - -示例 5:`source` 为 null 的情况 - -```SQL -SELECT - arrival_time, - date_bin(1h,arrival_time) as time_bin -FROM - table1; -``` - -结果: - -```Plain -+-----------------------------+-----------------------------+ -| arrival_time| time_bin| -+-----------------------------+-----------------------------+ -| null| null| -|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00| -|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00| -|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00| -|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -| null| null| -|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00| -| null| null| -|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00| -|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00| -| null| null| -|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00| -|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00| -|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00| -|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00| -+-----------------------------+-----------------------------+ -Total line number = 18 -It costs 0.319s -``` - -### 4.3 Extract 函数 - -该函数用于提取日期对应部分的值。(V2.0.6 版本起支持) - -#### 4.3.1 语法定义 - -```SQL -EXTRACT (identifier FROM expression) -``` -* 参数说明 - * **expression**: `TIMESTAMP` 类型或时间常量 - * **identifier** :取值范围及对应的返回值见下表 - - | 取值范围 | 返回值类型 | 返回值范围 | - | -------------------------- | ------------- | ------------- | - | `YEAR` | `INT64` | `/` | - | `QUARTER` | `INT64` | `1-4` | - | `MONTH` | `INT64` | `1-12` | - | `WEEK` | `INT64` | `1-53` | - | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` | - | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` | - | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` | - | `HOUR` | `INT64` | `0-23` | - | `MINUTE` | `INT64` | `0-59` | - | `SECOND` | `INT64` | `0-59` | - | `MS` | `INT64` | `0-999` | - | `US` | `INT64` | `0-999` | - | `NS` | `INT64` | `0-999` | - - -#### 4.3.2 使用示例 - -以[示例数据](../Reference/Sample-Data.md)中的 table1 为源数据,查询某段时间每天前12个小时的温度平均值 - -```SQL -IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time) -+----------+-----+ -| fmtdate|avgtp| -+----------+-----+ -|2024-11-28| 86.0| -|2024-11-29| 85.0| -|2024-11-30| 90.0| -+----------+-----+ -Total line number = 3 -It costs 0.041s -``` - -`Format` 函数介绍:[Format 函数](../SQL-Manual/Basis-Function.md#_7-2-format-函数) - -`Date_bin` 函数介绍:[Date_bin 函数](../SQL-Manual/Basis-Function.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp) - - -## 5. 
数学函数和运算符 - -### 5.1 数学运算符 - -| **运算符** | **描述** | -| ---------- | ------------------------ | -| + | 加法 | -| - | 减法 | -| * | 乘法 | -| / | 除法(整数除法执行截断) | -| % | 模(余数) | -| - | 取反 | - -### 5.2 数学函数 - -| 函数名 | 描述 | 输入 | 输出 | 用法 | -|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------| ---------------------- | ---------- | -| sin | 正弦函数 | double、float、INT64、INT32 | double | sin(x) | -| cos | 余弦函数 | double、float、INT64、INT32 | double | cos(x) | -| tan | 正切函数 | double、float、INT64、INT32 | double | tan(x) | -| asin | 反正弦函数 | double、float、INT64、INT32 | double | asin(x) | -| acos | 反余弦函数 | double、float、INT64、INT32 | double | acos(x) | -| atan | 反正切函数 | double、float、INT64、INT32 | double | atan(x) | -| sinh | 双曲正弦函数 | double、float、INT64、INT32 | double | sinh(x) | -| cosh | 双曲余弦函数 | double、float、INT64、INT32 | double | cosh(x) | -| tanh | 双曲正切函数 | double、float、INT64、INT32 | double | tanh(x) | -| degrees | 将弧度角 x 转换为度 | double、float、INT64、INT32 | double | degrees(x) | -| radians | 将度转换为弧度 | double、float、INT64、INT32 | double | radians(x) | -| abs | 绝对值 | double、float、INT64、INT32 | 返回与输入类型相同的值 | abs(x) | -| sign | 返回 x 的符号函数,即:如果参数为 0,则返回 0,如果参数大于 0,则返回 1,如果参数小于 0,则返回 -1。对于 double/float 类型的参数,函数还会返回:如果参数为 NaN,则返回 NaN,如果参数为 +Infinity,则返回 1.0,如果参数为 -Infinity,则返回 -1.0。 | double、float、INT64、INT32 | 返回与输入类型相同的值 | sign(x) | -| ceil | 返回 x 向上取整到最近的整数。 | double、float、INT64、INT32 | double | ceil(x) | -| floor | 返回 x 向下取整到最近的整数。 | double、float、INT64、INT32 | double | floor(x) | -| exp | 返回欧拉数 e 的 x 次幂。 | double、float、INT64、INT32 | double | exp(x) | -| ln | 返回 x 的自然对数。 | double、float、INT64、INT32 | double | ln(x) | -| log10 | 返回 x 的以 10 为底的对数。 | double、float、INT64、INT32 | double | log10(x) | -| round | 返回 x 四舍五入到最近的整数。 | double、float、INT64、INT32 | double | round(x) | -| round | 返回 x 四舍五入到 d 位小数。 | double、float、INT64、INT32 | double | round(x, d) | -| sqrt | 返回 x 的平方根。 | double、float、INT64、INT32 | double | sqrt(x) | -| e | 自然指数 | | double | e() | -| pi | π | | double | pi() | - - -## 6. 位运算函数 - -> V 2.0.6 版本起支持 - -示例原始数据如下: - -```SQL -IoTDB:database1> select * from bit_table -+-----------------------------+---------+------+-----+ -| time|device_id|length|width| -+-----------------------------+---------+------+-----+ -|2025-10-29T15:59:42.957+08:00| d1| 14| 12| -|2025-10-29T15:58:59.399+08:00| d3| 15| 10| -|2025-10-29T15:59:32.769+08:00| d2| 13| 12| -+-----------------------------+---------+------+-----+ - ---建表语句 -CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD); - ---写入数据 -INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12); -``` - -### 6.1 bit\_count(num, bits) - -`bit_count(num, bits)` 函数用于统计整数 `num`在指定位宽 `bits`下的二进制表示中 1 的个数。 - -#### 6.1.1 语法定义 - -```SQL -bit_count(num, bits) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * **​num:​**任意整型数值(int32 或者 int64) - * **​bits:​**整型数值,取值范围为2\~64 - -注意:如果 bits 位数不够表示 num,会报错(此处是​**有符号补码**​):`Argument exception, the scalar function num must be representable with the bits specified. 
[num] cannot be represented with [bits] bits.` - -* 调用方式 - * 两个具体数值:`bit_count(9, 64)` - * 列与数值:`bit_count(column1, 64)` - * 两列之间:`bit_count(column1, column2)` - -#### 6.1.2 使用示例 - -```SQL --- 两个具体数值 -IoTDB:database1> select distinct bit_count(2,8) from bit_table -+-----+ -|_col0| -+-----+ -| 1| -+-----+ --- 两个具体数值 -IoTDB:database1> select distinct bit_count(-5,8) from bit_table -+-----+ -|_col0| -+-----+ -| 7| -+-----+ ---列与数值 -IoTDB:database1> select length,bit_count(length,8) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 3| -| 15| 4| -| 13| 3| -+------+-----+ ---bits位数不够 -IoTDB:database1> select length,bit_count(length,2) from bit_table -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits. -``` - -### 6.2 bitwise\_and(x, y) - -`bitwise_and(x, y)`函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑与操作,并返回其按位与(bitwise AND)的运算结果。 - -#### 6.2.1 语法定义 - -```SQL -bitwise_and(x, y) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * ​**x, y**​: 必须是 Int32 或 Int64 数据类型的整数值 -* 调用方式 - * 两个具体数值:`bitwise_and(19, 25)` - * 列与数值:`bitwise_and(column1, 25)` - * 两列之间:`bitwise_and(column1, column2)` - -#### 6.2.2 使用示例 - -```SQL ---两个具体数值 -IoTDB:database1> select distinct bitwise_and(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 17| -+-----+ ---列与数值 -IoTDB:database1> select length, bitwise_and(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 8| -| 15| 9| -| 13| 9| -+------+-----+ ---俩列之间 -IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 12| -| 15| 10| 10| -| 13| 12| 12| -+------+-----+-----+ -``` - -### 6.3 bitwise\_not(x) - -`bitwise_not(x)` 函数基于二进制补码表示法,对整数 x 的每一位进行逻辑非操作,并返回其按位取反(bitwise NOT)的运算结果。 - -#### 6.3.1 语法定义 - -```SQL -bitwise_not(x) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * ​**x**​: 必须是 Int32 或 Int64 数据类型的整数值 -* 调用方式 - * 具体数值:`bitwise_not(5)` - * 单列操作:`bitwise_not(column1)` - -#### 6.3.2 使用示例 - -```SQL --- 具体数值 -IoTDB:database1> select distinct bitwise_not(5) from bit_table -+-----+ -|_col0| -+-----+ -| -6| -+-----+ --- 单列 -IoTDB:database1> select length, bitwise_not(length) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| -15| -| 15| -16| -| 13| -14| -+------+-----+ -``` - -### 6.4 bitwise\_or(x, y) - -`bitwise_or(x,y)` 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑或操作,并返回其按位或(bitwise OR)的运算结果。 - -#### 6.4.1 语法定义 - -```SQL -bitwise_or(x, y) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * ​**x, y**​: 必须是 Int32 或 Int64 数据类型的整数值 -* 调用方式 - * 两个具体数值:`bitwise_or(19, 25)` - * 列与数值:`bitwise_or(column1, 25)` - * 两列之间:`bitwise_or(column1, column2)` - -#### 6.4.2 使用示例 - -```SQL --- 两个具体数值 -IoTDB:database1> select distinct bitwise_or(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 27| -+-----+ --- 列与数值 -IoTDB:database1> select length,bitwise_or(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 31| -| 15| 31| -| 13| 29| -+------+-----+ --- 两列之间 -IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 14| -| 15| 10| 15| -| 13| 12| 13| -+------+-----+-----+ -``` - -### 6.5 bitwise\_xor(x, y) - -bitwise\_xor(x,y) 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑异或操作,并返回其按位异或(bitwise XOR)的运算结果。异或规则:相同为0,不同为1。 - -#### 6.5.1 语法定义 - -```SQL -bitwise_xor(x, y) -> INT64 --返回结果类型为 Int64 -``` - -* 参数说明 - * ​**x, y**​: 必须是 Int32 或 
Int64 数据类型的整数值 -* 调用方式 - * 两个具体数值:`bitwise_xor(19, 25)` - * 列与数值:`bitwise_xor(column1, 25)` - * 两列之间:`bitwise_xor(column1, column2)` - -#### 6.5.2 使用示例 - -```SQL --- 两个具体数值 -IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table -+-----+ -|_col0| -+-----+ -| 10| -+-----+ --- 列与数值 -IoTDB:database1> select length,bitwise_xor(length,25) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 23| -| 15| 22| -| 13| 20| -+------+-----+ --- 两列之间 -IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 2| -| 15| 10| 5| -| 13| 12| 1| -+------+-----+-----+ -``` - -### 6.6 bitwise\_left\_shift(value, shift) - -`bitwise_left_shift(value, shift)` 函数返回将整数 `value`的二进制表示左移 `shift`位后的结果。左移操作将二进制位向高位方向移动,右侧空出的位用 0 填充,左侧溢出的位直接丢弃。等价于: `value << shift`。 - -#### 6.6.1 语法定义 - -```SQL -bitwise_left_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 -``` - -* 参数说明 - * ​**value**​: 要左移的整数值,必须是 Int32 或 Int64 数据类型 - * ​**shift**​: 左移的位数,必须是 Int32 或 Int64 数据类型 -* 调用方式 - * 两个具体数值:`bitwise_left_shift(1, 2)` - * 列与数值:`bitwise_left_shift(column1, 2)` - * 两列之间:`bitwise_left_shift(column1, column2)` - -#### 6.6.2 使用示例 - -```SQL ---两个具体数值 -IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table -+-----+ -|_col0| -+-----+ -| 4| -+-----+ --- 列与数值 -IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 56| -| 15| 60| -| 13| 52| -+------+-----+ --- 两列之间 -IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -+------+-----+-----+ -``` - -### 6.7 bitwise\_right\_shift(value, shift) - -`bitwise_right_shift(value, shift)`函数返回将整数 `value`的二进制表示逻辑右移(无符号右移) `shift`位后的结果。逻辑右移操作将二进制位向低位方向移动,左侧空出的高位用 0 填充,右侧溢出的低位直接丢弃。 - -#### 6.7.1 语法定义 - -```SQL -bitwise_right_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 -``` - -* 参数说明 - * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 - * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 -* 调用方式 - * 两个具体数值:`bitwise_right_shift(8, 3)` - * 列与数值:`bitwise_right_shift(column1, 3)` - * 两列之间:`bitwise_right_shift(column1, column2)` - -#### 6.7.2 使用示例 - -```SQL ---两个具体数值 -IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table -+-----+ -|_col0| -+-----+ -| 1| -+-----+ ---列与数值 -IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 1| -| 15| 1| -| 13| 1| -+------+-----+ ---两列之间 -IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -``` - -### 6.8 bitwise\_right\_shift\_arithmetic(value, shift) - -`bitwise_right_shift_arithmetic(value, shift)`函数返回将整数 `value`的二进制表示算术右移 `shift`位后的结果。算术右移操作将二进制位向低位方向移动,右侧溢出的低位直接丢弃,左侧空出的高位用符号位填充(正数补0,负数补1),以保持数值的符号不变。 - -#### 6.8.1 语法定义 - -```SQL -bitwise_right_shift_arithmetic(value, shift) -> [same as value]--返回结果类型与value数据类型相同 -``` - -* 参数说明 - * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 - * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 -* 调用方式: - * 两个具体数值:`bitwise_right_shift_arithmetic(12, 2)` - * 列与数值:`bitwise_right_shift_arithmetic(column1, 64)` - * 两列之间:`bitwise_right_shift_arithmetic(column1, column2)` - -#### 6.8.2 使用示例 - -```SQL ---两个具体数值 -IoTDB:database1> select distinct 
bitwise_right_shift_arithmetic(12,2) from bit_table -+-----+ -|_col0| -+-----+ -| 3| -+-----+ --- 列与数值 -IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table -+------+-----+ -|length|_col1| -+------+-----+ -| 14| 1| -| 15| 1| -| 13| 1| -+------+-----+ ---两列之间 -IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table -+------+-----+-----+ -|length|width|_col2| -+------+-----+-----+ -| 14| 12| 0| -| 15| 10| 0| -| 13| 12| 0| -+------+-----+-----+ -``` - -## 7. 条件表达式 - -### 7.1 CASE 表达式 - -CASE 表达式有两种形式:简单形式、搜索形式 - -#### 7.1.1 简单形式 - -简单形式从左到右搜索每个值表达式,直到找到一个与表达式相等的值: - -```SQL -CASE expression - WHEN value THEN result - [ WHEN ... ] - [ ELSE result ] -END -``` - -如果找到匹配的值,则返回相应的结果。如果没有找到匹配项,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: - -```SQL -SELECT a, - CASE a - WHEN 1 THEN 'one' - WHEN 2 THEN 'two' - ELSE 'many' - END -``` - -#### 7.1.2 搜索形式 - -搜索形式从左到右评估每个布尔条件,直到找到一个为真的条件,并返回相应的结果: - -```SQL -CASE - WHEN condition THEN result - [ WHEN ... ] - [ ELSE result ] -END -``` - -如果没有条件为真,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: - -```SQL -SELECT a, b, - CASE - WHEN a = 1 THEN 'aaa' - WHEN b = 2 THEN 'bbb' - ELSE 'ccc' - END -``` - -### 7.2 COALESCE 函数 - -返回参数列表中的第一个非空值。 - -```SQL -coalesce(value1, value2[, ...]) -``` - -## 8. 转换函数 - -### 8.1 转换函数 - -#### 8.1.1 cast(value AS type) → type - -1. 显式地将一个值转换为指定类型。 -2. 可以用于将字符串(varchar)转换为数值类型,或数值转换为字符串类型,V2.0.8-beta 版本起支持 OBJECT 类型强转成 STRING 类型。 -3. 如果转换失败,将抛出运行时错误。 - -示例: - -```SQL -SELECT * - FROM table1 - WHERE CAST(time AS DATE) - IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE)); -``` - -#### 8.1.2 try_cast(value AS type) → type - -1. 与 `cast()` 类似。 -2. 如果转换失败,则返回 `null`。 - -示例: - -```SQL -SELECT * - FROM table1 - WHERE try_cast(time AS DATE) - IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE)); -``` - -### 8.2 Format 函数 -该函数基于指定的格式字符串与输入参数,生成并返回格式化后的字符串输出。其功能与 Java 语言中的`String.format` 方法及 C 语言中的`printf`函数相类似,支持开发者通过占位符语法构建动态字符串模板,其中预设的格式标识符将被传入的对应参数值精准替换,最终形成符合特定格式要求的完整字符串。 - -#### 8.2.1 语法介绍 - -```SQL -format(pattern,...args) -> String -``` - -**参数定义** - -* `pattern`: 格式字符串,可包含静态文本及一个或多个格式说明符(如 `%s`, `%d` 等),或任意返回类型为 `STRING/TEXT` 的表达式。 -* `args`: 用于替换格式说明符的输入参数。需满足以下条件: - * 参数数量 ≥ 1 - * 若存在多个参数,以逗号`,`分隔(如 `arg1,arg2`) - * 参数总数可多于 `pattern` 中的占位符数量,但不可少于,否则触发异常 - -**返回值** - -* 类型为 `STRING` 的格式化结果字符串 - -#### 8.2.2 使用示例 - -1. 格式化浮点数 - -```SQL -IoTDB:database1> select format('%.5f',humidity) from table1 where humidity = 35.4 -+--------+ -| _col0| -+--------+ -|35.40000| -+--------+ -``` - -2. 格式化整数 - -```SQL -IoTDB:database1> select format('%03d',8) from table1 limit 1 -+-----+ -|_col0| -+-----+ -| 008| -+-----+ -``` - -3. 
格式化日期和时间戳 - -* Locale-specific日期 - -```SQL -IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) from table1 limit 1 -+--------------------+ -| _col0| -+--------------------+ -|星期一, 一月 1, 2024| -+--------------------+ -``` - -* 去除时区信息 - -```SQL -IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 -+-----------------------+ -| _col0| -+-----------------------+ -|2024-01-01 00:00:00.000| -+-----------------------+ -``` - -* 获取秒级时间戳精度 - -```SQL -IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 -+-------------------+ -| _col0| -+-------------------+ -|2024-01-01 00:00:00| -+-------------------+ -``` - -* 日期符号说明如下 - -| **符号** | **​ 描述** | -| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| 'H' | 24 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 00 - 23。 | -| 'I' | 12 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 01 - 12。 | -| 'k' | 24 小时制的小时数,i.e. 0 - 23。 | -| 'l' | 12 小时制的小时数,i.e. 1 - 12。 | -| 'M' | 小时内的分钟,格式为两位数,必要时加上前导零,i.e. 00 - 59。 | -| 'S' | 分钟内的秒数,格式为两位数,必要时加上前导零,i.e. 00 - 60(“60 ”是支持闰秒所需的特殊值)。 | -| 'L' | 秒内毫秒,格式为三位数,必要时加前导零,i.e. 000 - 999。 | -| 'N' | 秒内的纳秒,格式为九位数,必要时加前导零,i.e. 000000000 - 999999999。 | -| 'p' | 当地特定的[上午或下午](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getAmPmStrings())标记,小写,如 “am ”或 “pm”。使用转换前缀 “T ”会强制输出为大写。 | -| 'z' | 从格林尼治标准时间偏移的[RFC 822](http://www.ietf.org/rfc/rfc0822.txt)式数字时区,例如 -0800。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是 Java 虚拟机此实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。 | -| 'Z' | 表示时区缩写的字符串。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是此 Java 虚拟机实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。Formatter 的时区将取代参数的时区(如果有)。 | -| 's' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的秒数,i.e. Long.MIN\_VALUE/1000 至 Long.MAX\_VALUE/1000。 | -| 'Q' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的毫秒数,i.e. 
Long.MIN\_VALUE 至 Long.MAX\_VALUE。 | - -* 用于格式化常见的日期/时间组成的转换字符说明如下 - -| **符号** | **描述** | -| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 'B' | 特定于区域设置[的完整月份名称](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getMonths()),例如 “January”、“February”。 | -| 'b' | 当地特定月份的缩写名称,如"1 月"、"2 月"。 | -| 'h' | 与"b "相同。 | -| 'A' | 一周中某一天在当地的全称,如"星期日"、"星期一"。 | -| 'a' | 当地特有的星期简短名称,例如"星期日"、"星期一 | -| 'C' | 四位数年份除以100,格式为两位数,必要时加上前导零,即00 - 99 | -| 'Y' | 年份,格式为至少四位数,必要时加上前导零,例如0092相当于公历92年。 | -| 'y' | 年份的最后两位数,格式为必要的前导零,即00 - 99。 | -| 'j' | 年号,格式为三位数,必要时加前导零,例如公历为001 - 366。 | -| 'm' | 月份,格式为两位数,必要时加前导零,即01 - 13。 | -| 'd' | 月日,格式为两位数,必要时加前导零,即01 - 31 | -| 'e' | 月日,格式为两位数,即1 - 31。 | - -4. 格式化字符串 - -```SQL -IoTDB:database1> SELECT format('The measurement status is :%s',status) FROM table2 limit 1 -+-------------------------------+ -| _col0| -+-------------------------------+ -|The measurement status is :true| -+-------------------------------+ -``` - -5. 格式化百分号 - -```SQL -IoTDB:database1> SELECT format('%s%%', 99.9) from table1 limit 1 -+-----+ -|_col0| -+-----+ -|99.9%| -+-----+ -``` - -#### 8.2.3 **格式转换失败场景说明** - -1. 类型不匹配错误 - -* 时间戳类型冲突 若格式说明符中包含时间相关标记(如 `%Y-%m-%d`),但参数提供: - * 非 `DATE`/`TIMESTAMP` 类型值 - * 或涉及日期细粒度单位(如 `%H` 小时、`%M` 分钟)时,参数仅支持 `TIMESTAMP` 类型,否则将抛出类型异常 - -```SQL --- 示例1 -IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float) - --- 示例2 -IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float) -``` - -* 浮点数类型冲突 若使用 `%f` 等浮点格式说明符,但参数提供非数值类型(如字符串、布尔值),将触发类型转换错误 - -```SQL -IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean) -``` - -2. 参数数量不匹配错误 - -* 实际提供的参数数量 必须等于或大于 格式字符串中格式说明符的数量 -* 若参数数量少于格式说明符数量,将抛出 `ArgumentCountMismatch` 异常 - -```SQL -IoTDB:database1> select format('%.5f %03d', humidity) from table1 where humidity = 35.4 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d') -``` - -3. 无效调用错误 - -* 当函数参数满足以下任一条件时,视为非法调用: - * 参数总数 小于 2(必须包含格式字符串及至少一个参数) - * 格式字符串(`pattern`)类型非 `STRING/TEXT` - -```SQL --- 示例1 -IoTDB:database1> select format('%s') from table1 limit 1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. - ---示例2 -IoTDB:database1> select format(123, humidity) from table1 limit 1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type. -``` - - - -## 9. 
字符串函数和操作符 - -### 9.1 字符串操作符 - -#### 9.1.1 || 操作符 - -`||` 操作符用于字符串连接,功能与 `concat` 函数相同。 - -#### 9.1.2 LIKE 语句 - -`LIKE` 语句用于模式匹配,具体用法在[模式匹配:LIKE](#1-like-运算符) 中有详细文档。 - -### 9.2 字符串函数 - -| 函数名 | 描述 | 输入 | 输出 | 用法 | -| ----------- |---------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| ------------------------------------------------------------ | ------------------------------------------------------------ | -| length | 返回字符串的字符长度,而不是字符数组的长度。 | 支持一个参数,类型可以是字符串或文本。**string**:要计算长度的字符串。 | INT32 | length(string) | -| upper | 将字符串中的字母转换为大写。 | 支持一个参数,类型可以是字符串或文本。**string**:要计算长度的字符串。 | String | upper(string) | -| lower | 将字符串中的字母转换为小写。 | 支持一个参数,类型可以是字符串或文本。**string**:要计算长度的字符串。 | String | lower(string) | -| trim | 从源字符串中删除指定的开头和/或结尾字符。 | 支持三个参数**specification(可选)**:指定从哪边去掉字符,可以是:`BOTH`:两边都去掉(默认)。`LEADING`:只去掉开头的字符。`TRAILING`:只去掉结尾的字符。**trimcharacter(可选)**:要去掉的字符,如果没指定,默认去掉空格。**string**:要处理的字符串。 | String | trim([ [ specification ] [ trimcharacter ] FROM ] string) 示例:`trim('!' FROM '!foo!');` —— `'foo'` | -| strpos | 返回子字符串在字符串中第一次出现的起始位置。位置从 1 开始计数。如果未找到,返回 0。注意:起始位置是基于字符而不是字节数组确定的。 | 仅支持两个参数,类型可以是字符串或文本。**sourceStr**:要搜索的字符串。**subStr**:要找的子字符串。 | INT32 | strpos(sourceStr, subStr) | -| starts_with | 测试子字符串是否是字符串的前缀。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串,类型可以是字符串或文本。**prefix**:前缀子字符串,类型可以是字符串或文本。 | Boolean | starts_with(sourceStr, prefix) | -| ends_with | 测试字符串是否以指定的后缀结束。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串。**suffix**:后缀子字符串。 | Boolean | ends_with(sourceStr, suffix) | -| concat | 返回字符串 `string1`、`string2`、...、`stringN` 的连接结果。功能与连接操作符 `\|\|` 相同。 | 至少两个参数,所有参数类型必须是字符串或文本。 | String | concat(str1, str2, ...) 或 str1 \|\| str2 ... | -| strcmp | 比较两个字符串的字母序。 | 支持两个参数,两个参数类型必须是字符串或文本。**string1**:第一个要比较的字符串。**string2**:第二个要比较的字符串。 | 返回一个整数值INT32如果 `str1 < str2`,返回 `-1`如果 `str1 = str2`,返回 `0`如果 `str1 > str2`,返回 `1`如果 `str1` 或 `str2` 为 `NULL`,返回 `NULL` | strcmp(str1, str2) | -| replace | 从字符串中删除所有 `search` 的实例。 | 支持两个参数,可以是字符串或文本类型。**string**:原始字符串,要从中删除内容的字符串。**search**:要删除的子字符串。 | String | replace(string, string) | -| replace | 将字符串中所有 `search` 的实例替换为 `replace`。 | 支持三个参数,可以是字符串或文本类型。**string**:原始字符串,要从中替换内容的字符串。**search**:要替换掉的子字符串。**replace**:用来替换的新字符串。 | String | replace(string, string, string) | -| substring | 从指定位置提取字符到字符串末尾。需要注意的是,起始位置是基于字符而不是字节数组确定的。`start_index` 从 1 开始计数,长度从 `start_index` 位置计算。 | 支持两个参数**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。 | String:返回一个字符串,从 `start_index` 位置开始到字符串末尾的所有字符。**注意事项**:`start_index` 从 1 开始,即数组的第 0 个位置是 1参数为 null时,返回 `null`start_index 大于字符串长度时,结果报错。 | substring(string from start_index)或 substring(string, start_index) | -| substring | 从一个字符串中提取从指定位置开始、指定长度的子字符串注意:起始位置和长度是基于字符而不是字节数组确定的。`start_index` 从 1 开始计数,长度从 `start_index` 位置计算。 | 支持三个参数**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。**length**:要提取的子字符串的长度。 | String:返回一个字符串,从 `start_index` 位置开始,提取 `length` 个字符。**注意事项**:参数为 null时,返回 `null`如果 `start_index` 大于字符串的长度,结果报错。如果 `length` 小于 0,结果报错。极端情况,`start_index + length` 超过 `int.MAX` 并变成负数,将导致异常结果。 | substring(string from start_index for length) 或 substring(string, start_index, length) | - -## 10. 模式匹配函数 - -### 10.1 LIKE 运算符 - -#### 10.1.1 用途 - -`LIKE` 运算符用于将值与模式进行比较。它通常用于 `WHERE` 子句中,用于匹配字符串中的特定模式。 - -#### 10.1.2 语法 - -```SQL -... 
column [NOT] LIKE 'pattern' ESCAPE 'character'; -``` - -#### 10.1.3 匹配规则 - -- 匹配字符是区分大小写的。 -- 模式支持两个匹配符号: - - `_`:匹配任意单个字符。 - - `%`:匹配0个或多个字符。 - -#### 10.1.4 注意事项 - -- `LIKE` 模式匹配总是覆盖整个字符串。如果需要匹配字符串中的任意位置,模式必须以 `%` 开头和结尾。 -- 如果需要匹配 `%` 或 `_` 作为普通字符,必须使用转义字符。 - -#### 10.1.5 示例 - -示例 1:匹配以特定字符开头的字符串 - -- **说明**:查找所有以字母 `E` 开头的名称,例如 `Europe`。 - -```SQL -SELECT * FROM table1 WHERE continent LIKE 'E%'; -``` - -示例 2:排除特定模式 - -- **说明**:查找所有不以字母 `E` 开头的名称。 - -```SQL -SELECT * FROM table1 WHERE continent NOT LIKE 'E%'; -``` - -示例 3:匹配特定长度的字符串 - -- **说明**:查找所有以 `A` 开头、以 `a` 结尾且中间有两个字符的名称,例如 `Asia`。 - -```SQL -SELECT * FROM table1 WHERE continent LIKE 'A__a'; -``` - -示例 4:转义特殊字符 - -- **说明**:查找所有以 `South_` 开头的名称。这里使用了转义字符 `\` 来转义 `_` 等特殊字符,例如`South_America`。 - -```SQL -SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\'; -``` - -示例 5:匹配转义字符本身 - -- **说明**:如果需要匹配转义字符本身,可以使用双转义字符 `\\`。 - -```SQL -SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\'; -``` - -### 10.2 regexp_like 函数 - -#### 10.2.1 用途 - -`regexp_like` 函数用于评估正则表达式模式,并确定该模式是否包含在字符串中。 - -#### 10.2.2 语法 - -```SQL -regexp_like(string, pattern); -``` - -#### 10.2.3 注意事项 - -- `regexp_like` 的模式只需包含在字符串中,而不需要匹配整个字符串。 -- 如果需要匹配整个字符串,可以使用正则表达式的锚点 `^` 和 `$`。 -- `^` 表示“字符串的开头”,`$` 表示“字符串的结尾”。 -- 正则表达式采用 Java 定义的正则语法,但存在以下需要注意的例外情况: - - **多行模式** - 1. 启用方式:`(?m)`。 - 2. 只识别`\n`作为行终止符。 - 3. 不支持`(?d)`标志,且禁止使用。 - - **不区分大小写匹配** - 1. 启用方式:`(?i)`。 - 2. 基于Unicode规则,不支持上下文相关和本地化匹配。 - 3. 不支持`(?u)`标志,且禁止使用。 - - **字符类** - 1. 在字符类(如`[A-Z123]`)中,`\Q`和`\E`不被支持,被视为普通字面量。 - - **Unicode字符类(**`\p{prop}`**)** - 1. **名称下划线**:名称中的所有下划线必须删除(如`OldItalic`而非`Old_Italic`)。 - 2. **文字(Scripts)**:直接指定,无需`Is`、`script=`或`sc=`前缀(如`\p{Hiragana}`)。 - 3. **区块(Blocks)**:必须使用`In`前缀,不支持`block=`或`blk=`前缀(如`\p{InMongolian}`)。 - 4. **类别(Categories)**:直接指定,无需`Is`、`general_category=`或`gc=`前缀(如`\p{L}`)。 - 5. **二元属性(Binary Properties)**:直接指定,无需`Is`(如`\p{NoncharacterCodePoint}`)。 - -#### 10.2.4 示例 - -示例 1:匹配包含特定模式的字符串 - -```SQL -SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true -``` - -- **说明**:检查字符串 `'1a 2b 14m'` 是否包含模式 `\d+b`。 - - `\d+` 表示“一个或多个数字”。 - - `b` 表示字母 `b`。 - - 在 `'1a 2b 14m'` 中,`2b` 符合这个模式,所以返回 `true`。 - -示例 2:匹配整个字符串 - -```SQL -SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false -``` - -- **说明**:检查字符串 `'1a 2b 14m'` 是否完全匹配模式 `^\\d+b$`。 - - `\d+` 表示“一个或多个数字”。 - - `b` 表示字母 `b`。 - - `'1a 2b 14m'` 并不符合这个模式,因为它不是从数字开始,也不是以 `b` 结束,所以返回 `false`。 - -## 11. 
时序分窗函数 - -原始示例数据如下: - -```SQL -IoTDB> SELECT * FROM bid; -+-----------------------------+--------+-----+ -| time|stock_id|price| -+-----------------------------+--------+-----+ -|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -|2021-01-01T09:15:00.000+08:00| TESL|195.0| -+-----------------------------+--------+-----+ - --- 创建语句 -CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD); --- 插入数据 -INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0); -``` - -### 11.1 HOP - -#### 11.1.1 功能描述 - -HOP 函数用于按时间分段分窗分析,识别每一行数据所属的时间窗口。该函数通过指定固定窗口大小(size)和窗口滑动步长(SLIDE),将数据按时间戳分配到所有与其时间戳重叠的窗口中。若窗口之间存在重叠(步长 < 窗口大小),数据会自动复制到多个窗口。 - -#### 11.1.2 函数定义 - -```SQL -HOP(data, timecol, size, slide[, origin]) -``` - -#### 11.1.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| --------- | ---------- | --------------------------------- | -------------------- | -| DATA | 表参数 | ROW SEMANTICPASS THROUGH | 输入表 | -| TIMECOL | 标量参数 | 字符串类型默认值:time | 时间列 | -| SIZE | 标量参数 | 长整数类型 | 窗口大小 | -| SLIDE | 标量参数 | 长整数类型 | 窗口滑动步长 | -| ORIGIN | 标量参数 | 时间戳类型默认值:Unix 纪元时间 | 第一个窗口起始时间 | - -#### 11.1.4 返回结果 - -HOP 函数的返回结果列包含: - -* window\_start: 窗口开始时间(闭区间) -* window\_end: 窗口结束时间(开区间) -* 映射列:DATA 参数的所有输入列 - -#### 11.1.5 使用示例 - -```SQL -IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id; 
-+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 201.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00| TESL| 195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.2 SESSION - -#### 11.2.1 功能描述 - -SESSION 函数用于按会话间隔对数据进行分窗。系统逐行检查与前一行的时间间隔,小于阈值(GAP)则归入当前窗口,超过则归入下一个窗口。 - -#### 11.2.2 函数定义 - -```SQL -SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap) -``` -#### 11.2.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| --------- | ---------- | -------------------------- | ---------------------------------------- | -| DATA | 表参数 | SET SEMANTICPASS THROUGH | 输入表通过 pkeys、okeys 指定分区和排序 | -| TIMECOL | 标量参数 | 字符串类型默认值:'time' | 时间列名 -| -| GAP | 标量参数 | 长整数类型 | 会话间隔阈值 | - -#### 11.2.4 返回结果 - -SESSION 函数的返回结果列包含: - -* window\_start: 会话窗口内的第一条数据的时间 -* window\_end: 会话窗口内的最后一条数据的时间 -* 映射列:DATA 参数的所有输入列 - -#### 11.2.5 使用示例 - -```SQL -IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY SESSION -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL| 201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL| 195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.3 VARIATION - -#### 11.3.1 功能描述 - -VARIATION 
函数用于按数据差值分窗,将第一条数据作为首个窗口的基准值,每个数据点会与基准值进行差值运算,如果差值小于给定的阈值(delta)则加入当前窗口;如果超过阈值,则分为下一个窗口,将该值作为下一个窗口的基准值。 - -#### 11.3.2 函数定义 - -```sql -VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta) -``` - -#### 11.3.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| -------- | ---------- | -------------------------- | ---------------------------------------- | -| DATA | 表参数 | SET SEMANTICPASS THROUGH | 输入表通过 pkeys、okeys 指定分区和排序 | -| COL | 标量参数 | 字符串类型 | 标识对哪一列计算差值 | -| DELTA | 标量参数 | 浮点数类型 | 差值阈值 | - -#### 11.3.4 返回结果 - -VARIATION 函数的返回结果列包含: - -* window\_index: 窗口编号 -* 映射列:DATA 参数的所有输入列 - -#### 11.3.5 使用示例 - -```sql -IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0); -+------------+-----------------------------+--------+-----+ -|window_index| time|stock_id|price| -+------------+-----------------------------+--------+-----+ -| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| -| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| -| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| -| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -| 1|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY VARIATION -IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id; -+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.5| -+-----------------------------+-----------------------------+--------+-----+ -``` - -### 11.4 CAPACITY - -#### 11.4.1 功能描述 - -CAPACITY 函数用于按数据点数(行数)分窗,每个窗口最多有 SIZE 行数据。 - -#### 11.4.2 函数定义 - -```sql -CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size) -``` - -#### 11.4.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| -------- | ---------- | -------------------------- | ---------------------------------------- | -| DATA | 表参数 | SET SEMANTICPASS THROUGH | 输入表通过 pkeys、okeys 指定分区和排序 | -| SIZE | 标量参数 | 长整数类型 | 窗口大小 | - -#### 11.4.4 返回结果 - -CAPACITY 函数的返回结果列包含: - -* window\_index: 窗口编号 -* 映射列:DATA 参数的所有输入列 - -#### 11.4.5 使用示例 - -```sql -IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2); -+------------+-----------------------------+--------+-----+ -|window_index| time|stock_id|price| -+------------+-----------------------------+--------+-----+ -| 0|2021-01-01T09:06:00.000+08:00| TESL|200.0| -| 0|2021-01-01T09:07:00.000+08:00| TESL|202.0| -| 1|2021-01-01T09:15:00.000+08:00| TESL|195.0| -| 0|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -| 0|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -| 1|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY COUNT -IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id; 
-+-----------------------------+-----------------------------+--------+-----+ -| start_time| end_time|stock_id| avg| -+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|201.0| -|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|101.5| -|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+--------+-----+ -``` - -### 11.5 TUMBLE - -#### 11.5.1 功能描述 - -TUMBLE 函数用于通过时间属性字段为每行数据分配一个窗口,滚动窗口的大小固定且不重复。 - -#### 11.5.2 函数定义 - -```sql -TUMBLE(data, timecol, size[, origin]) -``` -#### 11.5.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| --------- | ---------- | --------------------------------- | -------------------- | -| DATA | 表参数 | ROW SEMANTICPASS THROUGH | 输入表 | -| TIMECOL | 标量参数 | 字符串类型默认值:time | 时间列 | -| SIZE | 标量参数 | 长整数类型 | 窗口大小,需为正数 | -| ORIGIN | 标量参数 | 时间戳类型默认值:Unix 纪元时间 | 第一个窗口起始时间 | - -#### 11.5.4 返回结果 - -TUBMLE 函数的返回结果列包含: - -* window\_start: 窗口开始时间(闭区间) -* window\_end: 窗口结束时间(开区间) -* 映射列:DATA 参数的所有输入列 - -#### 11.5.5 使用示例 - -```SQL -IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` - -### 11.6 CUMULATE - -#### 11.6.1 功能描述 - -Cumulate 函数用于从初始的窗口开始,创建相同窗口开始但窗口结束步长不同的窗口,直到达到最大的窗口大小。每个窗口包含其区间内的元素。例如:1小时步长,24小时大小的累计窗口,每天可以获得如下这些窗口:`[00:00, 01:00)`,`[00:00, 02:00)`,`[00:00, 03:00)`, …, `[00:00, 24:00)` - -#### 11.6.2 函数定义 - -```sql -CUMULATE(data, timecol, size, step[, origin]) -``` - -#### 11.6.3 参数说明 - -| 参数名 | 参数类型 | 参数属性 | 描述 | -| --------- | ---------- | --------------------------------- | -------------------------------------------- | -| DATA | 表参数 | ROW 
SEMANTICPASS THROUGH | 输入表 | -| TIMECOL | 标量参数 | 字符串类型默认值:time | 时间列 | -| SIZE | 标量参数 | 长整数类型 | 窗口大小,SIZE必须是STEP的整数倍,需为正数 | -| STEP | 标量参数 | 长整数类型 | 窗口步长,需为正数 | -| ORIGIN | 标量参数 | 时间戳类型默认值:Unix 纪元时间 | 第一个窗口起始时间 | - -> 注意:size 如果不是 step 的整数倍,则会报错`Cumulative table function requires size must be an integral multiple of step` - -#### 11.6.4 返回结果 - -CUMULATE函数的返回结果列包含: - -* window\_start: 窗口开始时间(闭区间) -* window\_end: 窗口结束时间(开区间) -* 映射列:DATA 参数的所有输入列 - -#### 11.6.5 使用示例 - -```sql -IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m); -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -| window_start| window_end| time|stock_id|price| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00| TESL|200.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| TESL|202.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00| TESL|195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00| AAPL|100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00| AAPL|103.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00| AAPL|102.0| -+-----------------------------+-----------------------------+-----------------------------+--------+-----+ - --- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME -IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id; -+-----------------------------+-----------------------------+--------+------------------+ -| window_start| window_end|stock_id| avg| -+-----------------------------+-----------------------------+--------+------------------+ -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| TESL| 201.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| TESL| 201.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00| TESL| 195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00| TESL| 195.0| -|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00| TESL| 195.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00| AAPL| 100.0| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00| AAPL| 101.5| -|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00| AAPL|101.66666666666667| -+-----------------------------+-----------------------------+--------+------------------+ -``` diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function_apache.md 
b/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function_apache.md
new file mode 100644
index 000000000..a3e47d977
--- /dev/null
+++ b/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function_apache.md
@@ -0,0 +1,2019 @@
+
+
+# 基础函数
+
+## 1. 比较函数和运算符
+
+### 1.1 基本比较运算符
+
+比较运算符用于比较两个值,并返回比较结果(true 或 false)。
+
+| 运算符 | 描述 |
+| ------ | ---------- |
+| < | 小于 |
+| > | 大于 |
+| <= | 小于或等于 |
+| >= | 大于或等于 |
+| = | 等于 |
+| <> | 不等于 |
+| != | 不等于 |
+
+#### 1.1.1 比较规则:
+
+1. 所有类型都可以与自身进行比较
+2. 数值类型(INT32, INT64, FLOAT, DOUBLE, TIMESTAMP)之间可以相互比较
+3. 字符类型(STRING, TEXT)之间也可以相互比较
+4. 除上述规则外的类型进行比较时,均会报错。
+
+### 1.2 BETWEEN 运算符
+
+1. `BETWEEN` 操作符用于判断一个值是否在指定的范围内。
+2. `NOT BETWEEN` 操作符用于判断一个值是否不在指定范围内。
+3. `BETWEEN` 和 `NOT BETWEEN` 操作符可用于评估任何可排序的类型。
+4. `BETWEEN` 和 `NOT BETWEEN` 的值、最小值和最大值参数必须是同一类型,否则会报错。
+
+**语法**:
+
+```SQL
+ value BETWEEN min AND max
+ value NOT BETWEEN min AND max
+```
+
+示例 1:BETWEEN
+
+```SQL
+-- 查询 temperature 在 85.0 和 90.0 之间的记录
+SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0;
+```
+
+示例 2:NOT BETWEEN
+
+```SQL
+-- 查询 humidity 不在 35.0 和 40.0 之间的记录
+SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0;
+```
+
+### 1.3 IS NULL 运算符
+
+1. `IS NULL` 和 `IS NOT NULL` 运算符用于判断一个值是否为 NULL。
+2. 这两个运算符适用于所有数据类型。
+
+示例1:查询 temperature 为 NULL 的记录
+
+```SQL
+SELECT * FROM table1 WHERE temperature IS NULL;
+```
+
+示例2:查询 humidity 不为 NULL 的记录
+
+```SQL
+SELECT * FROM table1 WHERE humidity IS NOT NULL;
+```
+
+### 1.4 IN 运算符
+
+1. `IN` 操作符可用于 `WHERE` 子句中,将一列的值与给定列表中的若干值进行比较。
+2. 这些值可以是静态数组中列出的常量,也可以是标量表达式。
+
+**语法:**
+
+```SQL
+... WHERE column [NOT] IN ('value1','value2', expression1)
+```
+
+示例 1:静态数组:查询 region 为 '北京' 或 '上海' 的记录
+
+```SQL
+SELECT * FROM table1 WHERE region IN ('北京', '上海');
+--等价于
+SELECT * FROM table1 WHERE region = '北京' OR region = '上海';
+```
+
+示例 2:标量表达式:查询 temperature 在特定值中的记录
+
+```SQL
+SELECT * FROM table1 WHERE temperature IN (85.0, 90.0);
+```
+
+示例 3:查询 region 不为 '北京' 或 '上海' 的记录
+
+```SQL
+SELECT * FROM table1 WHERE region NOT IN ('北京', '上海');
+```
+
+### 1.5 GREATEST 和 LEAST
+
+`Greatest` 函数用于返回参数列表中的最大值,`Least` 函数用于返回参数列表中的最小值,返回数据类型与输入类型相同。
+1. 空值处理:若所有参数均为 NULL,则返回 NULL。
+2. 参数要求:必须提供至少 2 个参数。
+3. 类型约束:仅支持相同数据类型的参数比较。
+4. 支持类型:`BOOLEAN`、`FLOAT`、`DOUBLE`、`INT32`、`INT64`、`STRING`、`TEXT`、`TIMESTAMP`、`DATE`
+
+**语法:**
+
+```sql
+ greatest(value1, value2, ..., valueN)
+ least(value1, value2, ..., valueN)
+```
+
+**示例:**
+
+```sql
+-- 查询 table2 中 temperature 和 humidity 的最大记录
+SELECT GREATEST(temperature,humidity) FROM table2;
+
+-- 查询 table2 中 temperature 和 humidity 的最小记录
+SELECT LEAST(temperature,humidity) FROM table2;
+```
+
+
+## 2. 聚合函数
+
+### 2.1 概述
+
+1. 聚合函数是多对一函数。它们对一组值进行聚合计算,得到单个聚合结果。
+2. 除了 `COUNT()` 之外,其他所有聚合函数都忽略空值,并在没有输入行或所有值为空时返回空值。例如,`SUM()` 返回 null 而不是零,而 `AVG()` 在计数中不包括 null 值。
+
+### 2.2 支持的聚合函数
+
+| 函数名 | 功能描述 | 允许的输入类型 | 输出类型 |
+|-----------------------|------------------------------------------------------------|------------------------------------------|------------------|
+| COUNT | 计算数据点数。 | 所有类型 | INT64 |
+| COUNT_IF | COUNT_IF(exp) 用于统计满足指定布尔表达式的记录行数 | exp 必须是一个布尔类型的表达式,例如 count_if(temperature>20) | INT64 |
+| APPROX_COUNT_DISTINCT | APPROX_COUNT_DISTINCT(x[,maxStandardError]) 函数提供 COUNT(DISTINCT x) 的近似值,返回不同输入值的近似个数。 | `x`:待计算列,支持所有类型;
`maxStandardError`:指定该函数应产生的最大标准误差,取值范围[0.0040625, 0.26],未指定值时默认0.023。 | INT64 | +| APPROX_MOST_FREQUENT | APPROX_MOST_FREQUENT(x, k, capacity) 函数用于近似计算数据集中出现频率最高的前 k 个元素。它返回一个JSON 格式的字符串,其中键是该元素的值,值是该元素对应的近似频率。(V 2.0.5.1 及以后版本支持) | `x`:待计算列,支持 IoTDB 现有所有的数据类型;
`k`:返回出现频率最高的 k 个值;
`capacity`: 用于计算的桶的数量,跟内存占用相关:其值越大误差越小,但占用内存更大,反之capacity值越小误差越大,但占用内存更小。 | STRING | +| SUM | 求和。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| AVG | 求平均值。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| MAX | 求最大值。 | 所有类型 | 与输入类型一致 | +| MIN | 求最小值。 | 所有类型 | 与输入类型一致 | +| FIRST | 求时间戳最小且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | +| LAST | 求时间戳最大且不为 NULL 的值。 | 所有类型 | 与输入类型一致 | +| STDDEV | STDDEV_SAMP 的别名,求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_POP | 求总体标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| STDDEV_SAMP | 求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VARIANCE | VAR_SAMP 的别名,求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_POP | 求总体方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| VAR_SAMP | 求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE | +| EXTREME | 求具有最大绝对值的值。如果正值和负值的最大绝对值相等,则返回正值。 | INT32 INT64 FLOAT DOUBLE | 与输入类型一致 | +| MODE | 求众数。注意: 1.输入序列的不同值个数过多时会有内存异常风险; 2.如果所有元素出现的频次相同,即没有众数,则随机返回一个元素; 3.如果有多个众数,则随机返回一个众数; 4. NULL 值也会被统计频次,所以即使输入序列的值不全为 NULL,最终结果也可能为 NULL。 | 所有类型 | 与输入类型一致 | +| MAX_BY | MAX_BY(x, y) 求二元输入 x 和 y 在 y 最大时对应的 x 的值。MAX_BY(time, x) 返回 x 取最大值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | +| MIN_BY | MIN_BY(x, y) 求二元输入 x 和 y 在 y 最小时对应的 x 的值。MIN_BY(time, x) 返回 x 取最小值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | +| FIRST_BY | FIRST_BY(x, y) 求当 y 为第一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | +| LAST_BY | LAST_BY(x, y) 求当 y 为最后一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 | + + +### 2.3 示例 + +#### 2.3.1 示例数据 + +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 + +#### 2.3.2 Count + +统计的是整张表的行数和 `temperature` 列非 NULL 值的数量。 + +```SQL +IoTDB> select count(*), count(temperature) from table1; +``` + +执行结果如下: + +> 注意:只有COUNT函数可以与*一起使用,否则将抛出错误。 + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 18| 12| ++-----+-----+ +Total line number = 1 +It costs 0.834s +``` + + +#### 2.3.3 Count_if + +统计 `table2` 中 到达时间 `arrival_time` 不是 `null` 的记录行数。 + +```sql +IoTDB> select count_if(arrival_time is not null) from table2; +``` + +执行结果如下: + +```sql ++-----+ +|_col0| ++-----+ +| 4| ++-----+ +Total line number = 1 +It costs 0.047s +``` + +#### 2.3.4 Approx_count_distinct + +查询 `table1` 中 `temperature` 列不同值的个数。 + +```sql +IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1; +IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1; +``` + +执行结果如下: + +```sql ++------+------+ +|origin|approx| ++------+------+ +| 3| 3| ++------+------+ +Total line number = 1 +It costs 0.022s +``` + +#### 2.3.5 Approx_most_frequent + +查询 `table1` 中 `temperature` 列出现频次最高的2个值 + +```sql +IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1; +``` + +执行结果如下: + +```sql ++-------------------+ +| topk| ++-------------------+ +|{"85.0":6,"90.0":5}| ++-------------------+ +Total line number = 1 +It costs 0.064s +``` + + +#### 2.3.6 First + +查询`temperature`列、`humidity`列时间戳最小且不为 NULL 的值。 + +```SQL +IoTDB> select first(temperature), first(humidity) from table1; +``` + +执行结果如下: + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 90.0| 35.1| ++-----+-----+ +Total line number = 1 +It costs 0.170s +``` + +#### 2.3.7 Last + +查询`temperature`列、`humidity`列时间戳最大且不为 NULL 的值。 + +```SQL +IoTDB> select last(temperature), last(humidity) from table1; +``` + +执行结果如下: + +```SQL ++-----+-----+ +|_col0|_col1| ++-----+-----+ +| 90.0| 34.8| ++-----+-----+ +Total line number = 1 +It costs 0.211s 
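+```
+
+> 注:AVG、SUM、STDDEV、VARIANCE 等统计类聚合函数的调用方式与上述函数一致。下面给出一个示意查询(基于同一示例数据;具体数值取决于实际数据,此处不附输出):
+
+```SQL
+SELECT avg(temperature), stddev(temperature), var_pop(temperature) FROM table1;
+```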
+
+#### 2.3.8 First_by
+
+查询 `temperature` 列非 NULL 且时间戳最小的那一行对应的 `time` 值和 `humidity` 值。
+
+```SQL
+IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-26T13:37:00.000+08:00| 35.1|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.269s
+```
+
+#### 2.3.9 Last_by
+
+查询 `temperature` 列非 NULL 且时间戳最大的那一行对应的 `time` 值和 `humidity` 值。
+
+```SQL
+IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-30T14:30:00.000+08:00| 34.8|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.070s
+```
+
+#### 2.3.10 Max_by
+
+查询 `temperature` 列最大值所在行对应的 `time` 值和 `humidity` 值。
+
+```SQL
+IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-30T09:30:00.000+08:00| 35.2|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.172s
+```
+
+#### 2.3.11 Min_by
+
+查询 `temperature` 列最小值所在行对应的 `time` 值和 `humidity` 值。
+
+```SQL
+IoTDB> select min_by(time, temperature), min_by(humidity, temperature) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-29T10:00:00.000+08:00| null|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.244s
+```
+
+
+## 3. 逻辑运算符
+
+### 3.1 概述
+
+逻辑运算符用于组合条件或否定条件,返回布尔结果(`true` 或 `false`)。
+
+以下是常用的逻辑运算符及其描述:
+
+| 运算符 | 描述 | 示例 |
+| ------ | ----------------------------- | ------- |
+| AND | 仅当两个值都为 true 时为 true | a AND b |
+| OR | 任一值为 true 时为 true | a OR b |
+| NOT | 当值为 false 时为 true | NOT a |
+
+### 3.2 NULL 对逻辑运算符的影响
+
+#### 3.2.1 AND 运算符
+
+- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。
+- 如果 `AND` 运算符的一侧为 `FALSE`,则表达式结果为 `FALSE`。
+
+示例:
+
+```SQL
+NULL AND true -- null
+NULL AND false -- false
+NULL AND NULL -- null
+```
+
+#### 3.2.2 OR 运算符
+
+- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。
+- 如果 `OR` 运算符的一侧为 `TRUE`,则表达式结果为 `TRUE`。
+
+示例:
+
+```SQL
+NULL OR NULL -- null
+NULL OR false -- null
+NULL OR true -- true
+```
+
+##### 3.2.2.1 真值表
+
+以下真值表展示了 `NULL` 在 `AND` 和 `OR` 运算符中的处理方式:
+
+| a | b | a AND b | a OR b |
+| ----- | ----- | ------- | ------ |
+| TRUE | TRUE | TRUE | TRUE |
+| TRUE | FALSE | FALSE | TRUE |
+| TRUE | NULL | NULL | TRUE |
+| FALSE | TRUE | FALSE | TRUE |
+| FALSE | FALSE | FALSE | FALSE |
+| FALSE | NULL | FALSE | NULL |
+| NULL | TRUE | NULL | TRUE |
+| NULL | FALSE | FALSE | NULL |
+| NULL | NULL | NULL | NULL |
+
+#### 3.2.3 NOT 运算符
+
+NULL 的逻辑否定仍然是 NULL。
+
+示例:
+
+```SQL
+NOT NULL -- null
+```
+
+##### 3.2.3.1 真值表
+
+以下真值表展示了 `NULL` 在 `NOT` 运算符中的处理方式:
+
+| a | NOT a |
+| ----- | ----- |
+| TRUE | FALSE |
+| FALSE | TRUE |
+| NULL | NULL |
+
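+结合以上真值表,下面给出一个示意查询(基于示例数据 table1;temperature 为 NULL 的行既不满足 `temperature > 89.0`,也不满足 `NOT (temperature > 89.0)`,因为 NULL 参与比较和 NOT 运算后仍为 NULL):
+
+```SQL
+-- 仅返回 temperature 非 NULL 且大于 89.0 的行
+SELECT time, temperature FROM table1 WHERE temperature > 89.0;
+
+-- NOT 不会"找回"NULL 行;若需要包含它们,必须显式使用 IS NULL
+SELECT time, temperature FROM table1 WHERE NOT (temperature > 89.0) OR temperature IS NULL;
+```
+
+
+## 4. 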
日期和时间函数和运算符 + +### 4.1 now() -> Timestamp + +返回当前时间的时间戳。 + +### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp + +`date_bin` 函数是一种用于处理时间数据的函数,作用是将一个时间戳(Timestamp)舍入到指定的时间间隔(interval)的边界上。 + +**语法:** + +```SQL +-- 从时间戳为 0 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 +date_bin(interval,source) + +-- 从时间戳为 origin 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点 +date_bin(interval,source,origin) + +-- interval支持的时间单位有: +-- 年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 +-- source必须为时间戳类型。 +``` + +**参数:** + +| 参数 | 含义 | +| -------- | ------------------------------------------------------------ | +| interval | 时间间隔支持的时间单位有:年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 | +| source | 待计算时间列,也可以是表达式。必须为时间戳类型。 | +| origin | 起始时间戳 | + +#### 4.2.1 语法约定: + +1. 不传入 `origin` 时,起始时间戳从 1970-01-01T00:00:00Z 开始计算(北京时间为 1970-01-01 08:00:00)。 +2. `interval` 为一个非负数,且必须带上时间单位。`interval` 为 0ms 时,不进行计算,直接返回 `source`。 +3. 当传入 `origin` 或 `source` 为负时,表示纪元时间之前的某个时间点,`date_bin` 会正常计算并返回与该时间点相关的时间段。 +4. 如果 `source` 中的值为 `null`,则返回 `null`。 +5. 不支持月份和非月份时间单位混用,例如 `1 MONTH 1 DAY`,这种时间间隔有歧义。 + +> 假设是起始时间是 2000 年 4 月 30 日进行计算,那么在一个时间间隔后,如果是先算 DAY再算MONTH,则会得到 2000 年 6 月 1 日,如果先算 MONTH 再算 DAY 则会得到 2000 年 5 月 31 日,二者得出的时间日期不同。 + +#### 4.2.2 示例 + +##### 示例数据 + +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 + +示例 1:不指定起始时间戳 + +```SQL +SELECT + time, + date_bin(1h,time) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.683s +``` + +示例 2:指定起始时间戳 + +```SQL +SELECT + time, + date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00| 
+|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.056s +``` + +示例 3:`origin` 为负数的情况 + +```SQL +SELECT + time, + date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.203s +``` + +示例 4:`interval` 为 0 的情况 + +```SQL +SELECT + time, + date_bin(0ms, time) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| 
+|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00|
+|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00|
+|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00|
+|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00|
++-----------------------------+-----------------------------+
+Total line number = 18
+It costs 0.107s
+```
+
+示例 5:`source` 为 null 的情况
+
+```SQL
+SELECT
+    arrival_time,
+    date_bin(1h,arrival_time) as time_bin
+FROM
+    table1;
+```
+
+结果:
+
+```Plain
++-----------------------------+-----------------------------+
+|                 arrival_time|                     time_bin|
++-----------------------------+-----------------------------+
+|                         null|                         null|
+|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00|
+|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00|
+|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00|
+|                         null|                         null|
+|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00|
+|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00|
+|                         null|                         null|
+|                         null|                         null|
+|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00|
+|                         null|                         null|
+|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00|
+|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00|
+|                         null|                         null|
+|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00|
+|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00|
+|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00|
+|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00|
++-----------------------------+-----------------------------+
+Total line number = 18
+It costs 0.319s
+```
+
+### 4.3 Extract 函数
+
+该函数用于从日期/时间值中提取指定组成部分的数值(V2.0.6 版本起支持)。
+
+#### 4.3.1 语法定义
+
+```SQL
+EXTRACT (identifier FROM expression)
+```
+* 参数说明
+  * **expression**:`TIMESTAMP` 类型或时间常量
+  * **identifier**:取值范围及对应的返回值见下表
+
+  | 取值范围 | 返回值类型 | 返回值范围 |
+  | -------------------------- | ------------- | ------------- |
+  | `YEAR` | `INT64` | `/` |
+  | `QUARTER` | `INT64` | `1-4` |
+  | `MONTH` | `INT64` | `1-12` |
+  | `WEEK` | `INT64` | `1-53` |
+  | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` |
+  | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` |
+  | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` |
+  | `HOUR` | `INT64` | `0-23` |
+  | `MINUTE` | `INT64` | `0-59` |
+  | `SECOND` | `INT64` | `0-59` |
+  | `MS` | `INT64` | `0-999` |
+  | `US` | `INT64` | `0-999` |
+  | `NS` | `INT64` | `0-999` |
+
+
+#### 4.3.2 使用示例
+
+以[示例数据](../Reference/Sample-Data.md)中的 table1 为源数据,查询某段时间内每天前 12 个小时的温度平均值
+
+```SQL
+IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time)
++----------+-----+
+|   fmtdate|avgtp|
++----------+-----+
+|2024-11-28| 86.0|
+|2024-11-29| 85.0|
+|2024-11-30| 90.0|
++----------+-----+
+Total line number = 3
+It costs 0.041s
+```
+
+`Format` 函数介绍:[Format 函数](../SQL-Manual/Basis-Function_apache.md#_8-2-format-函数)
+
+`Date_bin` 函数介绍:[Date_bin 函数](../SQL-Manual/Basis-Function_apache.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp)
+
+
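+作为对比,下面是一个仅使用 EXTRACT 的最小示意查询(同样基于示例数据 table1,返回各行时间戳对应的小时数,输出从略):
+
+```SQL
+SELECT time, extract(hour FROM time) AS hour_of_day FROM table1;
+```
+
+
+## 5. 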
数学函数和运算符 + +### 5.1 数学运算符 + +| **运算符** | **描述** | +| ---------- | ------------------------ | +| + | 加法 | +| - | 减法 | +| * | 乘法 | +| / | 除法(整数除法执行截断) | +| % | 模(余数) | +| - | 取反 | + +### 5.2 数学函数 + +| 函数名 | 描述 | 输入 | 输出 | 用法 | +|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------| ---------------------- | ---------- | +| sin | 正弦函数 | double、float、INT64、INT32 | double | sin(x) | +| cos | 余弦函数 | double、float、INT64、INT32 | double | cos(x) | +| tan | 正切函数 | double、float、INT64、INT32 | double | tan(x) | +| asin | 反正弦函数 | double、float、INT64、INT32 | double | asin(x) | +| acos | 反余弦函数 | double、float、INT64、INT32 | double | acos(x) | +| atan | 反正切函数 | double、float、INT64、INT32 | double | atan(x) | +| sinh | 双曲正弦函数 | double、float、INT64、INT32 | double | sinh(x) | +| cosh | 双曲余弦函数 | double、float、INT64、INT32 | double | cosh(x) | +| tanh | 双曲正切函数 | double、float、INT64、INT32 | double | tanh(x) | +| degrees | 将弧度角 x 转换为度 | double、float、INT64、INT32 | double | degrees(x) | +| radians | 将度转换为弧度 | double、float、INT64、INT32 | double | radians(x) | +| abs | 绝对值 | double、float、INT64、INT32 | 返回与输入类型相同的值 | abs(x) | +| sign | 返回 x 的符号函数,即:如果参数为 0,则返回 0,如果参数大于 0,则返回 1,如果参数小于 0,则返回 -1。对于 double/float 类型的参数,函数还会返回:如果参数为 NaN,则返回 NaN,如果参数为 +Infinity,则返回 1.0,如果参数为 -Infinity,则返回 -1.0。 | double、float、INT64、INT32 | 返回与输入类型相同的值 | sign(x) | +| ceil | 返回 x 向上取整到最近的整数。 | double、float、INT64、INT32 | double | ceil(x) | +| floor | 返回 x 向下取整到最近的整数。 | double、float、INT64、INT32 | double | floor(x) | +| exp | 返回欧拉数 e 的 x 次幂。 | double、float、INT64、INT32 | double | exp(x) | +| ln | 返回 x 的自然对数。 | double、float、INT64、INT32 | double | ln(x) | +| log10 | 返回 x 的以 10 为底的对数。 | double、float、INT64、INT32 | double | log10(x) | +| round | 返回 x 四舍五入到最近的整数。 | double、float、INT64、INT32 | double | round(x) | +| round | 返回 x 四舍五入到 d 位小数。 | double、float、INT64、INT32 | double | round(x, d) | +| sqrt | 返回 x 的平方根。 | double、float、INT64、INT32 | double | sqrt(x) | +| e | 自然指数 | | double | e() | +| pi | π | | double | pi() | + + +## 6. 位运算函数 + +> V 2.0.6 版本起支持 + +示例原始数据如下: + +```SQL +IoTDB:database1> select * from bit_table ++-----------------------------+---------+------+-----+ +| time|device_id|length|width| ++-----------------------------+---------+------+-----+ +|2025-10-29T15:59:42.957+08:00| d1| 14| 12| +|2025-10-29T15:58:59.399+08:00| d3| 15| 10| +|2025-10-29T15:59:32.769+08:00| d2| 13| 12| ++-----------------------------+---------+------+-----+ + +--建表语句 +CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD); + +--写入数据 +INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12); +``` + +### 6.1 bit\_count(num, bits) + +`bit_count(num, bits)` 函数用于统计整数 `num`在指定位宽 `bits`下的二进制表示中 1 的个数。 + +#### 6.1.1 语法定义 + +```SQL +bit_count(num, bits) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * **​num:​**任意整型数值(int32 或者 int64) + * **​bits:​**整型数值,取值范围为2\~64 + +注意:如果 bits 位数不够表示 num,会报错(此处是​**有符号补码**​):`Argument exception, the scalar function num must be representable with the bits specified. 
[num] cannot be represented with [bits] bits.` + +* 调用方式 + * 两个具体数值:`bit_count(9, 64)` + * 列与数值:`bit_count(column1, 64)` + * 两列之间:`bit_count(column1, column2)` + +#### 6.1.2 使用示例 + +```SQL +-- 两个具体数值 +IoTDB:database1> select distinct bit_count(2,8) from bit_table ++-----+ +|_col0| ++-----+ +| 1| ++-----+ +-- 两个具体数值 +IoTDB:database1> select distinct bit_count(-5,8) from bit_table ++-----+ +|_col0| ++-----+ +| 7| ++-----+ +--列与数值 +IoTDB:database1> select length,bit_count(length,8) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 3| +| 15| 4| +| 13| 3| ++------+-----+ +--bits位数不够 +IoTDB:database1> select length,bit_count(length,2) from bit_table +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits. +``` + +### 6.2 bitwise\_and(x, y) + +`bitwise_and(x, y)`函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑与操作,并返回其按位与(bitwise AND)的运算结果。 + +#### 6.2.1 语法定义 + +```SQL +bitwise_and(x, y) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x, y**​: 必须是 Int32 或 Int64 数据类型的整数值 +* 调用方式 + * 两个具体数值:`bitwise_and(19, 25)` + * 列与数值:`bitwise_and(column1, 25)` + * 两列之间:`bitwise_and(column1, column2)` + +#### 6.2.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct bitwise_and(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 17| ++-----+ +--列与数值 +IoTDB:database1> select length, bitwise_and(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 8| +| 15| 9| +| 13| 9| ++------+-----+ +--俩列之间 +IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 12| +| 15| 10| 10| +| 13| 12| 12| ++------+-----+-----+ +``` + +### 6.3 bitwise\_not(x) + +`bitwise_not(x)` 函数基于二进制补码表示法,对整数 x 的每一位进行逻辑非操作,并返回其按位取反(bitwise NOT)的运算结果。 + +#### 6.3.1 语法定义 + +```SQL +bitwise_not(x) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x**​: 必须是 Int32 或 Int64 数据类型的整数值 +* 调用方式 + * 具体数值:`bitwise_not(5)` + * 单列操作:`bitwise_not(column1)` + +#### 6.3.2 使用示例 + +```SQL +-- 具体数值 +IoTDB:database1> select distinct bitwise_not(5) from bit_table ++-----+ +|_col0| ++-----+ +| -6| ++-----+ +-- 单列 +IoTDB:database1> select length, bitwise_not(length) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| -15| +| 15| -16| +| 13| -14| ++------+-----+ +``` + +### 6.4 bitwise\_or(x, y) + +`bitwise_or(x,y)` 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑或操作,并返回其按位或(bitwise OR)的运算结果。 + +#### 6.4.1 语法定义 + +```SQL +bitwise_or(x, y) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x, y**​: 必须是 Int32 或 Int64 数据类型的整数值 +* 调用方式 + * 两个具体数值:`bitwise_or(19, 25)` + * 列与数值:`bitwise_or(column1, 25)` + * 两列之间:`bitwise_or(column1, column2)` + +#### 6.4.2 使用示例 + +```SQL +-- 两个具体数值 +IoTDB:database1> select distinct bitwise_or(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 27| ++-----+ +-- 列与数值 +IoTDB:database1> select length,bitwise_or(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 31| +| 15| 31| +| 13| 29| ++------+-----+ +-- 两列之间 +IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 14| +| 15| 10| 15| +| 13| 12| 13| ++------+-----+-----+ +``` + +### 6.5 bitwise\_xor(x, y) + +bitwise\_xor(x,y) 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑异或操作,并返回其按位异或(bitwise XOR)的运算结果。异或规则:相同为0,不同为1。 + +#### 6.5.1 语法定义 + +```SQL +bitwise_xor(x, y) -> INT64 --返回结果类型为 Int64 +``` + +* 参数说明 + * ​**x, y**​: 必须是 Int32 或 
Int64 数据类型的整数值 +* 调用方式 + * 两个具体数值:`bitwise_xor(19, 25)` + * 列与数值:`bitwise_xor(column1, 25)` + * 两列之间:`bitwise_xor(column1, column2)` + +#### 6.5.2 使用示例 + +```SQL +-- 两个具体数值 +IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table ++-----+ +|_col0| ++-----+ +| 10| ++-----+ +-- 列与数值 +IoTDB:database1> select length,bitwise_xor(length,25) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 23| +| 15| 22| +| 13| 20| ++------+-----+ +-- 两列之间 +IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 2| +| 15| 10| 5| +| 13| 12| 1| ++------+-----+-----+ +``` + +### 6.6 bitwise\_left\_shift(value, shift) + +`bitwise_left_shift(value, shift)` 函数返回将整数 `value`的二进制表示左移 `shift`位后的结果。左移操作将二进制位向高位方向移动,右侧空出的位用 0 填充,左侧溢出的位直接丢弃。等价于: `value << shift`。 + +#### 6.6.1 语法定义 + +```SQL +bitwise_left_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 +``` + +* 参数说明 + * ​**value**​: 要左移的整数值,必须是 Int32 或 Int64 数据类型 + * ​**shift**​: 左移的位数,必须是 Int32 或 Int64 数据类型 +* 调用方式 + * 两个具体数值:`bitwise_left_shift(1, 2)` + * 列与数值:`bitwise_left_shift(column1, 2)` + * 两列之间:`bitwise_left_shift(column1, column2)` + +#### 6.6.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table ++-----+ +|_col0| ++-----+ +| 4| ++-----+ +-- 列与数值 +IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 56| +| 15| 60| +| 13| 52| ++------+-----+ +-- 两列之间 +IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| ++------+-----+-----+ +``` + +### 6.7 bitwise\_right\_shift(value, shift) + +`bitwise_right_shift(value, shift)`函数返回将整数 `value`的二进制表示逻辑右移(无符号右移) `shift`位后的结果。逻辑右移操作将二进制位向低位方向移动,左侧空出的高位用 0 填充,右侧溢出的低位直接丢弃。 + +#### 6.7.1 语法定义 + +```SQL +bitwise_right_shift(value, shift) -> [same as value] --返回结果类型与value数据类型相同 +``` + +* 参数说明 + * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 + * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 +* 调用方式 + * 两个具体数值:`bitwise_right_shift(8, 3)` + * 列与数值:`bitwise_right_shift(column1, 3)` + * 两列之间:`bitwise_right_shift(column1, column2)` + +#### 6.7.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table ++-----+ +|_col0| ++-----+ +| 1| ++-----+ +--列与数值 +IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--两列之间 +IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| +``` + +### 6.8 bitwise\_right\_shift\_arithmetic(value, shift) + +`bitwise_right_shift_arithmetic(value, shift)`函数返回将整数 `value`的二进制表示算术右移 `shift`位后的结果。算术右移操作将二进制位向低位方向移动,右侧溢出的低位直接丢弃,左侧空出的高位用符号位填充(正数补0,负数补1),以保持数值的符号不变。 + +#### 6.8.1 语法定义 + +```SQL +bitwise_right_shift_arithmetic(value, shift) -> [same as value]--返回结果类型与value数据类型相同 +``` + +* 参数说明 + * ​**value**​: 要右移的整数值,必须是 Int32 或 Int64 数据类型 + * ​**shift**​: 右移的位数,必须是 Int32 或 Int64 数据类型 +* 调用方式: + * 两个具体数值:`bitwise_right_shift_arithmetic(12, 2)` + * 列与数值:`bitwise_right_shift_arithmetic(column1, 64)` + * 两列之间:`bitwise_right_shift_arithmetic(column1, column2)` + +#### 6.8.2 使用示例 + +```SQL +--两个具体数值 +IoTDB:database1> select distinct 
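bitwise_right_shift_arithmetic(-8,1) from bit_table
++-----+
+|_col0|
++-----+
+|   -4|
++-----+
+-- 补充示意:-8 算术右移 1 位时,高位补符号位,结果为 -4;
+-- 若改用逻辑右移 bitwise_right_shift(-8,1),高位将补 0,得到一个很大的正数。
+IoTDB:database1> select distinct 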
bitwise_right_shift_arithmetic(12,2) from bit_table ++-----+ +|_col0| ++-----+ +| 3| ++-----+ +-- 列与数值 +IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table ++------+-----+ +|length|_col1| ++------+-----+ +| 14| 1| +| 15| 1| +| 13| 1| ++------+-----+ +--两列之间 +IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table ++------+-----+-----+ +|length|width|_col2| ++------+-----+-----+ +| 14| 12| 0| +| 15| 10| 0| +| 13| 12| 0| ++------+-----+-----+ +``` + +## 7. 条件表达式 + +### 7.1 CASE 表达式 + +CASE 表达式有两种形式:简单形式、搜索形式 + +#### 7.1.1 简单形式 + +简单形式从左到右搜索每个值表达式,直到找到一个与表达式相等的值: + +```SQL +CASE expression + WHEN value THEN result + [ WHEN ... ] + [ ELSE result ] +END +``` + +如果找到匹配的值,则返回相应的结果。如果没有找到匹配项,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: + +```SQL +SELECT a, + CASE a + WHEN 1 THEN 'one' + WHEN 2 THEN 'two' + ELSE 'many' + END +``` + +#### 7.1.2 搜索形式 + +搜索形式从左到右评估每个布尔条件,直到找到一个为真的条件,并返回相应的结果: + +```SQL +CASE + WHEN condition THEN result + [ WHEN ... ] + [ ELSE result ] +END +``` + +如果没有条件为真,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如: + +```SQL +SELECT a, b, + CASE + WHEN a = 1 THEN 'aaa' + WHEN b = 2 THEN 'bbb' + ELSE 'ccc' + END +``` + +### 7.2 COALESCE 函数 + +返回参数列表中的第一个非空值。 + +```SQL +coalesce(value1, value2[, ...]) +``` + +## 8. 转换函数 + +### 8.1 转换函数 + +#### 8.1.1 cast(value AS type) → type + +1. 显式地将一个值转换为指定类型。 +2. 可以用于将字符串(varchar)转换为数值类型,或数值转换为字符串类型,V2.0.8-beta 版本起支持 OBJECT 类型强转成 STRING 类型。 +3. 如果转换失败,将抛出运行时错误。 + +示例: + +```SQL +SELECT * + FROM table1 + WHERE CAST(time AS DATE) + IN (CAST('2024-11-27' AS DATE), CAST('2024-11-28' AS DATE)); +``` + +#### 8.1.2 try_cast(value AS type) → type + +1. 与 `cast()` 类似。 +2. 如果转换失败,则返回 `null`。 + +示例: + +```SQL +SELECT * + FROM table1 + WHERE try_cast(time AS DATE) + IN (try_cast('2024-11-27' AS DATE), try_cast('2024-11-28' AS DATE)); +``` + +### 8.2 Format 函数 +该函数基于指定的格式字符串与输入参数,生成并返回格式化后的字符串输出。其功能与 Java 语言中的`String.format` 方法及 C 语言中的`printf`函数相类似,支持开发者通过占位符语法构建动态字符串模板,其中预设的格式标识符将被传入的对应参数值精准替换,最终形成符合特定格式要求的完整字符串。 + +#### 8.2.1 语法介绍 + +```SQL +format(pattern,...args) -> String +``` + +**参数定义** + +* `pattern`: 格式字符串,可包含静态文本及一个或多个格式说明符(如 `%s`, `%d` 等),或任意返回类型为 `STRING/TEXT` 的表达式。 +* `args`: 用于替换格式说明符的输入参数。需满足以下条件: + * 参数数量 ≥ 1 + * 若存在多个参数,以逗号`,`分隔(如 `arg1,arg2`) + * 参数总数可多于 `pattern` 中的占位符数量,但不可少于,否则触发异常 + +**返回值** + +* 类型为 `STRING` 的格式化结果字符串 + +#### 8.2.2 使用示例 + +1. 格式化浮点数 + +```SQL +IoTDB:database1> select format('%.5f',humidity) from table1 where humidity = 35.4 ++--------+ +| _col0| ++--------+ +|35.40000| ++--------+ +``` + +2. 格式化整数 + +```SQL +IoTDB:database1> select format('%03d',8) from table1 limit 1 ++-----+ +|_col0| ++-----+ +| 008| ++-----+ +``` + +3. 
格式化日期和时间戳 + +* Locale-specific日期 + +```SQL +IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) from table1 limit 1 ++--------------------+ +| _col0| ++--------------------+ +|星期一, 一月 1, 2024| ++--------------------+ +``` + +* 去除时区信息 + +```SQL +IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 ++-----------------------+ +| _col0| ++-----------------------+ +|2024-01-01 00:00:00.000| ++-----------------------+ +``` + +* 获取秒级时间戳精度 + +```SQL +IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 ++-------------------+ +| _col0| ++-------------------+ +|2024-01-01 00:00:00| ++-------------------+ +``` + +* 日期符号说明如下 + +| **符号** | **​ 描述** | +| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 'H' | 24 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 00 - 23。 | +| 'I' | 12 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 01 - 12。 | +| 'k' | 24 小时制的小时数,i.e. 0 - 23。 | +| 'l' | 12 小时制的小时数,i.e. 1 - 12。 | +| 'M' | 小时内的分钟,格式为两位数,必要时加上前导零,i.e. 00 - 59。 | +| 'S' | 分钟内的秒数,格式为两位数,必要时加上前导零,i.e. 00 - 60(“60 ”是支持闰秒所需的特殊值)。 | +| 'L' | 秒内毫秒,格式为三位数,必要时加前导零,i.e. 000 - 999。 | +| 'N' | 秒内的纳秒,格式为九位数,必要时加前导零,i.e. 000000000 - 999999999。 | +| 'p' | 当地特定的[上午或下午](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getAmPmStrings())标记,小写,如 “am ”或 “pm”。使用转换前缀 “T ”会强制输出为大写。 | +| 'z' | 从格林尼治标准时间偏移的[RFC 822](http://www.ietf.org/rfc/rfc0822.txt)式数字时区,例如 -0800。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是 Java 虚拟机此实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。 | +| 'Z' | 表示时区缩写的字符串。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是此 Java 虚拟机实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。Formatter 的时区将取代参数的时区(如果有)。 | +| 's' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的秒数,i.e. Long.MIN\_VALUE/1000 至 Long.MAX\_VALUE/1000。 | +| 'Q' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的毫秒数,i.e. 
Long.MIN\_VALUE 至 Long.MAX\_VALUE。 |
+
+* 用于格式化常见的日期/时间组成的转换字符说明如下
+
+| **符号** | **描述** |
+| ---------------- | ------------------------------------------------------------ |
+| 'B' | 特定于区域设置[的完整月份名称](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getMonths()),例如 “January”、“February”。 |
+| 'b' | 当地特定月份的缩写名称,如"1 月"、"2 月"。 |
+| 'h' | 与 'b' 相同。 |
+| 'A' | 一周中某一天在当地的全称,如"星期日"、"星期一"。 |
+| 'a' | 当地特定的星期缩写名称,如"周日"、"周一"。 |
+| 'C' | 四位数年份除以 100,格式为两位数,必要时加上前导零,即 00 - 99。 |
+| 'Y' | 年份,格式为至少四位数,必要时加上前导零,例如 0092 相当于公历 92 年。 |
+| 'y' | 年份的最后两位数,必要时加上前导零,即 00 - 99。 |
+| 'j' | 一年中的第几天,格式为三位数,必要时加前导零,公历为 001 - 366。 |
+| 'm' | 月份,格式为两位数,必要时加前导零,即 01 - 13。 |
+| 'd' | 月份中的第几天,格式为两位数,必要时加前导零,即 01 - 31。 |
+| 'e' | 月份中的第几天,格式为一到两位数,即 1 - 31。 |
+
+4. 格式化字符串
+
+```SQL
+IoTDB:database1> SELECT format('The measurement status is :%s',status) FROM table2 limit 1
++-------------------------------+
+|                          _col0|
++-------------------------------+
+|The measurement status is :true|
++-------------------------------+
+```
+
+5. 格式化百分号
+
+```SQL
+IoTDB:database1> SELECT format('%s%%', 99.9) from table1 limit 1
++-----+
+|_col0|
++-----+
+|99.9%|
++-----+
+```
+
+#### 8.2.3 **格式转换失败场景说明**
+
+1. 类型不匹配错误
+
+* 时间戳类型冲突:若格式说明符中包含时间相关标记(如 `%tY-%tm-%td`),但参数提供:
+  * 非 `DATE`/`TIMESTAMP` 类型值
+  * 或涉及日期细粒度单位(如 `%tH` 小时、`%tM` 分钟)时,参数仅支持 `TIMESTAMP` 类型,否则将抛出类型异常
+
+```SQL
+-- 示例1
+IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float)
+
+-- 示例2
+IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float)
+```
+
+* 浮点数类型冲突:若使用 `%f` 等浮点格式说明符,但参数提供非数值类型(如字符串、布尔值),将触发类型转换错误
+
+```SQL
+IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean)
+```
+
+2. 参数数量不匹配错误
+
+* 实际提供的参数数量必须等于或大于格式字符串中格式说明符的数量
+* 若参数数量少于格式说明符数量,将抛出参数缺失异常(如下例中的 `MissingFormatArgument`)
+
+```SQL
+IoTDB:database1> select format('%.5f %03d', humidity) from table1 where humidity = 35.4
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d')
+```
+
+3. 无效调用错误
+
+* 当函数参数满足以下任一条件时,视为非法调用:
+  * 参数总数小于 2(必须包含格式字符串及至少一个参数)
+  * 格式字符串(`pattern`)类型非 `STRING/TEXT`
+
+```SQL
+-- 示例1
+IoTDB:database1> select format('%s') from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+
+-- 示例2
+IoTDB:database1> select format(123, humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+```
+
+
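+为避免上述错误,下面给出一个类型与数量均匹配的多参数调用示意(基于示例数据 table1;输出取决于实际数据,此处从略):
+
+```SQL
+SELECT format('温度:%s,湿度:%s%%', temperature, humidity) FROM table1 LIMIT 1;
+```
+
+
+## 9. 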
+
+## 9. 字符串函数和操作符
+
+### 9.1 字符串操作符
+
+#### 9.1.1 || 操作符
+
+`||` 操作符用于字符串连接,功能与 `concat` 函数相同。
+
+#### 9.1.2 LIKE 语句
+
+`LIKE` 语句用于模式匹配,具体用法在[模式匹配:LIKE](#_10-1-like-运算符) 中有详细文档。
+
+### 9.2 字符串函数
+
+| 函数名 | 描述 | 输入 | 输出 | 用法 |
+| ------ | ---- | ---- | ---- | ---- |
+| length | 返回字符串的字符长度,而不是字符数组的长度。 | 支持一个参数,类型可以是字符串或文本。**string**:要计算长度的字符串。 | INT32 | length(string) |
+| upper | 将字符串中的字母转换为大写。 | 支持一个参数,类型可以是字符串或文本。**string**:要转换的字符串。 | String | upper(string) |
+| lower | 将字符串中的字母转换为小写。 | 支持一个参数,类型可以是字符串或文本。**string**:要转换的字符串。 | String | lower(string) |
+| trim | 从源字符串中删除指定的开头和/或结尾字符。 | 支持三个参数。**specification(可选)**:指定从哪边去掉字符,可以是 `BOTH`(两边都去掉,默认)、`LEADING`(只去掉开头的字符)、`TRAILING`(只去掉结尾的字符)。**trimcharacter(可选)**:要去掉的字符,如果没指定,默认去掉空格。**string**:要处理的字符串。 | String | trim([ [ specification ] [ trimcharacter ] FROM ] string) 示例:`trim('!' FROM '!foo!')` 结果为 `'foo'` |
+| strpos | 返回子字符串在字符串中第一次出现的起始位置。位置从 1 开始计数。如果未找到,返回 0。注意:起始位置是基于字符而不是字节数组确定的。 | 仅支持两个参数,类型可以是字符串或文本。**sourceStr**:要搜索的字符串。**subStr**:要找的子字符串。 | INT32 | strpos(sourceStr, subStr) |
+| starts_with | 测试子字符串是否是字符串的前缀。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串。**prefix**:前缀子字符串。 | Boolean | starts_with(sourceStr, prefix) |
+| ends_with | 测试字符串是否以指定的后缀结束。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串。**suffix**:后缀子字符串。 | Boolean | ends_with(sourceStr, suffix) |
+| concat | 返回字符串 `string1`、`string2`、...、`stringN` 的连接结果。功能与连接操作符 `\|\|` 相同。 | 至少两个参数,所有参数类型必须是字符串或文本。 | String | concat(str1, str2, ...) 或 str1 \|\| str2 ... |
+| strcmp | 比较两个字符串的字母序。 | 支持两个参数,两个参数类型必须是字符串或文本。**string1**:第一个要比较的字符串。**string2**:第二个要比较的字符串。 | INT32。若 `str1 < str2` 返回 `-1`;若 `str1 = str2` 返回 `0`;若 `str1 > str2` 返回 `1`;若 `str1` 或 `str2` 为 `NULL` 返回 `NULL`。 | strcmp(str1, str2) |
+| replace | 从字符串中删除所有 `search` 的实例。 | 支持两个参数,可以是字符串或文本类型。**string**:原始字符串,要从中删除内容的字符串。**search**:要删除的子字符串。 | String | replace(string, string) |
+| replace | 将字符串中所有 `search` 的实例替换为 `replace`。 | 支持三个参数,可以是字符串或文本类型。**string**:原始字符串,要从中替换内容的字符串。**search**:要替换掉的子字符串。**replace**:用来替换的新字符串。 | String | replace(string, string, string) |
+| substring | 从指定位置提取字符到字符串末尾。注意:起始位置是基于字符而不是字节数组确定的,`start_index` 从 1 开始计数。 | 支持两个参数。**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。 | String:返回从 `start_index` 位置开始到字符串末尾的所有字符。**注意事项**:`start_index` 从 1 开始,即首个字符的位置是 1;参数为 null 时返回 `null`;`start_index` 大于字符串长度时报错。 | substring(string from start_index) 或 substring(string, start_index) |
+| substring | 从一个字符串中提取从指定位置开始、指定长度的子字符串。注意:起始位置和长度是基于字符而不是字节数组确定的,`start_index` 从 1 开始计数,长度从 `start_index` 位置计算。 | 支持三个参数。**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。**length**:要提取的子字符串的长度。 | String:返回从 `start_index` 位置开始、长度为 `length` 的子字符串。**注意事项**:参数为 null 时返回 `null`;`start_index` 大于字符串长度时报错;`length` 小于 0 时报错;极端情况下,若 `start_index + length` 超过 `int.MAX` 并变成负数,将导致异常结果。 | substring(string from start_index for length) 或 substring(string, start_index, length) |
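+
+下面补充一个组合使用上述字符串函数的示意查询(非源文档固定示例,假设使用前文示例中出现过的 table1 的 region 列,实际返回值取决于数据):
+
+```SQL
+-- 示意:length/upper/concat/substring 的组合用法
+SELECT region,
+       length(region)          AS len,
+       upper(region)           AS up,
+       concat(region, '_test') AS joined, -- 等价于 region || '_test'
+       substring(region, 1, 2) AS sub
+FROM table1 limit 1
+```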
+
+## 10. 模式匹配函数
+
+### 10.1 LIKE 运算符
+
+#### 10.1.1 用途
+
+`LIKE` 运算符用于将值与模式进行比较。它通常用于 `WHERE` 子句中,用于匹配字符串中的特定模式。
+
+#### 10.1.2 语法
+
+```SQL
+... column [NOT] LIKE 'pattern' [ESCAPE 'character'];
+```
+
+#### 10.1.3 匹配规则
+
+- 匹配字符是区分大小写的。
+- 模式支持两个匹配符号:
+  - `_`:匹配任意单个字符。
+  - `%`:匹配 0 个或多个字符。
+
+#### 10.1.4 注意事项
+
+- `LIKE` 模式匹配总是覆盖整个字符串。如果需要匹配字符串中的任意位置,模式必须以 `%` 开头和结尾。
+- 如果需要匹配 `%` 或 `_` 作为普通字符,必须使用转义字符。
+
+#### 10.1.5 示例
+
+示例 1:匹配以特定字符开头的字符串
+
+- **说明**:查找所有以字母 `E` 开头的名称,例如 `Europe`。
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'E%';
+```
+
+示例 2:排除特定模式
+
+- **说明**:查找所有不以字母 `E` 开头的名称。
+
+```SQL
+SELECT * FROM table1 WHERE continent NOT LIKE 'E%';
+```
+
+示例 3:匹配特定长度的字符串
+
+- **说明**:查找所有以 `A` 开头、以 `a` 结尾且中间有两个字符的名称,例如 `Asia`。
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'A__a';
+```
+
+示例 4:转义特殊字符
+
+- **说明**:查找所有以 `South_` 开头的名称。这里使用了转义字符 `\` 来转义 `_` 等特殊字符,例如 `South_America`。
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\';
+```
+
+示例 5:匹配转义字符本身
+
+- **说明**:如果需要匹配转义字符本身,可以使用双转义字符 `\\`。
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\';
+```
+
+### 10.2 regexp_like 函数
+
+#### 10.2.1 用途
+
+`regexp_like` 函数用于评估正则表达式模式,并确定该模式是否包含在字符串中。
+
+#### 10.2.2 语法
+
+```SQL
+regexp_like(string, pattern);
+```
+
+#### 10.2.3 注意事项
+
+- `regexp_like` 的模式只需包含在字符串中,而不需要匹配整个字符串。
+- 如果需要匹配整个字符串,可以使用正则表达式的锚点 `^` 和 `$`。
+- `^` 表示“字符串的开头”,`$` 表示“字符串的结尾”。
+- 正则表达式采用 Java 定义的正则语法,但存在以下需要注意的例外情况:
+  - **多行模式**
+    1. 启用方式:`(?m)`。
+    2. 只识别 `\n` 作为行终止符。
+    3. 不支持 `(?d)` 标志,且禁止使用。
+  - **不区分大小写匹配**
+    1. 启用方式:`(?i)`。
+    2. 基于 Unicode 规则,不支持上下文相关和本地化匹配。
+    3. 不支持 `(?u)` 标志,且禁止使用。
+  - **字符类**
+    1. 在字符类(如 `[A-Z123]`)中,`\Q` 和 `\E` 不被支持,被视为普通字面量。
+  - **Unicode 字符类(**`\p{prop}`**)**
+    1. **名称下划线**:名称中的所有下划线必须删除(如 `OldItalic` 而非 `Old_Italic`)。
+    2. **文字(Scripts)**:直接指定,无需 `Is`、`script=` 或 `sc=` 前缀(如 `\p{Hiragana}`)。
+    3. **区块(Blocks)**:必须使用 `In` 前缀,不支持 `block=` 或 `blk=` 前缀(如 `\p{InMongolian}`)。
+    4. **类别(Categories)**:直接指定,无需 `Is`、`general_category=` 或 `gc=` 前缀(如 `\p{L}`)。
+    5. **二元属性(Binary Properties)**:直接指定,无需 `Is`(如 `\p{NoncharacterCodePoint}`)。
+
+#### 10.2.4 示例
+
+示例 1:匹配包含特定模式的字符串
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true
+```
+
+- **说明**:检查字符串 `'1a 2b 14m'` 是否包含模式 `\d+b`。
+  - `\d+` 表示“一个或多个数字”。
+  - `b` 表示字母 `b`。
+  - 在 `'1a 2b 14m'` 中,`2b` 符合这个模式,所以返回 `true`。
+
+示例 2:匹配整个字符串
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false
+```
+
+- **说明**:检查字符串 `'1a 2b 14m'` 是否完全匹配模式 `^\\d+b$`。
+  - `\d+` 表示“一个或多个数字”。
+  - `b` 表示字母 `b`。
+  - `'1a 2b 14m'` 并不符合这个模式,因为它不是从数字开始,也不是以 `b` 结束,所以返回 `false`。
+
+## 11. 时序分窗函数
+
+原始示例数据如下:
+
+```SQL
+IoTDB> SELECT * FROM bid;
++-----------------------------+--------+-----+
+|                         time|stock_id|price|
++-----------------------------+--------+-----+
+|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
++-----------------------------+--------+-----+
+
+-- 创建语句
+CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD);
+-- 插入数据
+INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0);
+```
+
+### 11.1 HOP
+
+#### 11.1.1 功能描述
+
+HOP 函数用于按时间分段进行分窗分析,识别每一行数据所属的时间窗口。该函数通过指定固定窗口大小(SIZE)和窗口滑动步长(SLIDE),将数据按时间戳分配到所有与其时间戳重叠的窗口中。若窗口之间存在重叠(步长小于窗口大小),数据会自动复制到多个窗口。
+
+#### 11.1.2 函数定义
+
+```SQL
+HOP(data, timecol, size, slide[, origin])
+```
+
+#### 11.1.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| ------ | -------- | -------- | ---- |
+| DATA | 表参数 | ROW SEMANTIC、PASS THROUGH | 输入表 |
+| TIMECOL | 标量参数 | 字符串类型,默认值:'time' | 时间列 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小 |
+| SLIDE | 标量参数 | 长整数类型 | 窗口滑动步长 |
+| ORIGIN | 标量参数 | 时间戳类型,默认值:Unix 纪元时间 | 第一个窗口起始时间 |
+
+#### 11.1.4 返回结果
+
+HOP 函数的返回结果列包含:
+
+* window\_start: 窗口开始时间(闭区间)
+* window\_end: 窗口结束时间(开区间)
+* 映射列:DATA 参数的所有输入列
+
+#### 11.1.5 使用示例
+
+```SQL
+IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
+
+### 11.2 SESSION
+
+#### 11.2.1 功能描述
+
+SESSION 函数用于按会话间隔对数据进行分窗。系统逐行检查与前一行的时间间隔,小于阈值(GAP)则归入当前窗口,超过则归入下一个窗口。
+
+#### 11.2.2 函数定义
+
+```SQL
+SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap)
+```
+
+#### 11.2.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| ------ | -------- | -------- | ---- |
+| DATA | 表参数 | SET SEMANTIC、PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 |
+| TIMECOL | 标量参数 | 字符串类型,默认值:'time' | 时间列名 |
+| GAP | 标量参数 | 长整数类型 | 会话间隔阈值 |
+
+#### 11.2.4 返回结果
+
+SESSION 函数的返回结果列包含:
+
+* window\_start: 会话窗口内的第一条数据的时间
+* window\_end: 会话窗口内的最后一条数据的时间
+* 映射列:DATA 参数的所有输入列
+
+#### 11.2.5 使用示例
+
+```SQL
+IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY SESSION
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
+
+### 11.3 VARIATION
+
+#### 11.3.1 功能描述
+
+VARIATION 函数用于按数据差值分窗,将第一条数据作为首个窗口的基准值,每个数据点会与基准值进行差值运算:如果差值小于给定的阈值(delta)则加入当前窗口;如果超过阈值,则划分到下一个窗口,并将该值作为下一个窗口的基准值。
+
+#### 11.3.2 函数定义
+
+```sql
+VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta)
+```
+
+#### 11.3.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| ------ | -------- | -------- | ---- |
+| DATA | 表参数 | SET SEMANTIC、PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 |
+| COL | 标量参数 | 字符串类型 | 标识对哪一列计算差值 |
+| DELTA | 标量参数 | 浮点数类型 | 差值阈值 |
+
+#### 11.3.4 返回结果
+
+VARIATION 函数的返回结果列包含:
+
+* window\_index: 窗口编号
+* 映射列:DATA 参数的所有输入列
+
+#### 11.3.5 使用示例
+
+```sql
+IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0);
++------------+-----------------------------+--------+-----+
+|window_index|                         time|stock_id|price|
++------------+-----------------------------+--------+-----+
+|           0|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|           0|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|           1|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|           0|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|           1|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|           1|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY VARIATION
+IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id;
++-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|stock_id|  avg|
++-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|201.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.5|
++-----------------------------+-----------------------------+--------+-----+
+```
+
+### 11.4 CAPACITY
+
+#### 11.4.1 功能描述
+
+CAPACITY 函数用于按数据点数(行数)分窗,每个窗口最多有 SIZE 行数据。
+
+#### 11.4.2 函数定义
+
+```sql
+CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size)
+```
+
+#### 11.4.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| ------ | -------- | -------- | ---- |
+| DATA | 表参数 | SET SEMANTIC、PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小 |
+
+#### 11.4.4 返回结果
+
+CAPACITY 函数的返回结果列包含:
+
+* window\_index: 窗口编号
+* 映射列:DATA 参数的所有输入列
+
+#### 11.4.5 使用示例
+
+```sql
+IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2);
++------------+-----------------------------+--------+-----+
+|window_index|                         time|stock_id|price|
++------------+-----------------------------+--------+-----+
+|           0|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|           0|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|           1|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|           0|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|           0|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|           1|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY COUNT
+IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id;
++-----------------------------+-----------------------------+--------+-----+
+|                   start_time|                     end_time|stock_id|  avg|
++-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|201.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|101.5|
+|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+--------+-----+
+```
+
+### 11.5 TUMBLE
+
+#### 11.5.1 功能描述
+
+TUMBLE 函数用于通过时间属性字段为每行数据分配一个窗口,滚动窗口的大小固定且互不重叠。
+
+#### 11.5.2 函数定义
+
+```sql
+TUMBLE(data, timecol, size[, origin])
+```
+
+#### 11.5.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| ------ | -------- | -------- | ---- |
+| DATA | 表参数 | ROW SEMANTIC、PASS THROUGH | 输入表 |
+| TIMECOL | 标量参数 | 字符串类型,默认值:'time' | 时间列 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小,需为正数 |
+| ORIGIN | 标量参数 | 时间戳类型,默认值:Unix 纪元时间 | 第一个窗口起始时间 |
+
+#### 11.5.4 返回结果
+
+TUMBLE 函数的返回结果列包含:
+
+* window\_start: 窗口开始时间(闭区间)
+* window\_end: 窗口结束时间(开区间)
+* 映射列:DATA 参数的所有输入列
+
+#### 11.5.5 使用示例
+
+```SQL
+IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
+
+### 11.6 CUMULATE
+
+#### 11.6.1 功能描述
+
+CUMULATE 函数用于从初始的窗口开始,创建窗口开始相同但窗口结束步长不同的窗口,直到达到最大的窗口大小。每个窗口包含其区间内的元素。例如:1 小时步长、24 小时大小的累计窗口,每天可以获得如下这些窗口:`[00:00, 01:00)`,`[00:00, 02:00)`,`[00:00, 03:00)`, …, `[00:00, 24:00)`
+
+#### 11.6.2 函数定义
+
+```sql
+CUMULATE(data, timecol, size, step[, origin])
+```
+
+#### 11.6.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| ------ | -------- | -------- | ---- |
+| DATA | 表参数 | ROW SEMANTIC、PASS THROUGH | 输入表 |
+| TIMECOL | 标量参数 | 字符串类型,默认值:'time' | 时间列 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小,SIZE 必须是 STEP 的整数倍,需为正数 |
+| STEP | 标量参数 | 长整数类型 | 窗口步长,需为正数 |
+| ORIGIN | 标量参数 | 时间戳类型,默认值:Unix 纪元时间 | 第一个窗口起始时间 |
+
+> 注意:size 如果不是 step 的整数倍,则会报错 `Cumulative table function requires size must be an integral multiple of step`
+
+#### 11.6.4 返回结果
+
+CUMULATE 函数的返回结果列包含:
+
+* window\_start: 窗口开始时间(闭区间)
+* window\_end: 窗口结束时间(开区间)
+* 映射列:DATA 参数的所有输入列
+
+#### 11.6.5 使用示例
+
+```sql
+IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|    AAPL|             100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|    AAPL|             101.5|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function_timecho.md
b/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function_timecho.md
new file mode 100644
index 000000000..210eb2613
--- /dev/null
+++ b/src/zh/UserGuide/latest-Table/SQL-Manual/Basis-Function_timecho.md
@@ -0,0 +1,2019 @@
+
+
+# 基础函数
+
+## 1. 比较函数和运算符
+
+### 1.1 基本比较运算符
+
+比较运算符用于比较两个值,并返回比较结果(true 或 false)。
+
+| 运算符 | 描述 |
+| ------ | ---------- |
+| < | 小于 |
+| > | 大于 |
+| <= | 小于或等于 |
+| >= | 大于或等于 |
+| = | 等于 |
+| <> | 不等于 |
+| != | 不等于 |
+
+#### 1.1.1 比较规则:
+
+1. 所有类型都可以与自身进行比较
+2. 数值类型(INT32, INT64, FLOAT, DOUBLE, TIMESTAMP)之间可以相互比较
+3. 字符类型(STRING, TEXT)之间也可以相互比较
+4. 除上述规则外的类型进行比较时,均会报错。
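+
+以下补充一个示意查询(非源文档固定示例,基于后文示例中使用的 table1):整型字面量 85 可以直接与数值类型的 temperature 列比较,对应规则 2 中“数值类型之间可以相互比较”。
+
+```SQL
+-- 示意:数值类型之间可以相互比较
+SELECT * FROM table1 WHERE temperature > 85;
+```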
+
+### 1.2 BETWEEN 运算符
+
+1. `BETWEEN` 操作符用于判断一个值是否在指定的范围内。
+2. `NOT BETWEEN` 操作符用于判断一个值是否不在指定范围内。
+3. `BETWEEN` 和 `NOT BETWEEN` 操作符可用于评估任何可排序的类型。
+4. `BETWEEN` 和 `NOT BETWEEN` 的值、最小值和最大值参数必须是同一类型,否则会报错。
+
+**语法**:
+
+```SQL
+value BETWEEN min AND max
+value NOT BETWEEN min AND max
+```
+
+示例 1 :BETWEEN
+
+```SQL
+-- 查询 temperature 在 85.0 和 90.0 之间的记录
+SELECT * FROM table1 WHERE temperature BETWEEN 85.0 AND 90.0;
+```
+
+示例 2 :NOT BETWEEN
+
+```SQL
+-- 查询 humidity 不在 35.0 和 40.0 之间的记录
+SELECT * FROM table1 WHERE humidity NOT BETWEEN 35.0 AND 40.0;
+```
+
+### 1.3 IS NULL 运算符
+
+1. `IS NULL` 和 `IS NOT NULL` 运算符用于判断一个值是否为 NULL。
+2. 这两个运算符适用于所有数据类型。
+
+示例1:查询 temperature 为 NULL 的记录
+
+```SQL
+SELECT * FROM table1 WHERE temperature IS NULL;
+```
+
+示例2:查询 humidity 不为 NULL 的记录
+
+```SQL
+SELECT * FROM table1 WHERE humidity IS NOT NULL;
+```
+
+### 1.4 IN 运算符
+
+1. `IN` 操作符可用于 `WHERE` 子句中,比较一列中的一些值。
+2. 这些值可以是静态数组或标量表达式。
+
+**语法:**
+
+```SQL
+... WHERE column [NOT] IN ('value1','value2', expression1)
+```
+
+示例 1:静态数组:查询 region 为 '北京' 或 '上海' 的记录
+
+```SQL
+SELECT * FROM table1 WHERE region IN ('北京', '上海');
+-- 等价于
+SELECT * FROM table1 WHERE region = '北京' OR region = '上海';
+```
+
+示例 2:标量表达式:查询 temperature 在特定值中的记录
+
+```SQL
+SELECT * FROM table1 WHERE temperature IN (85.0, 90.0);
+```
+
+示例 3:查询 region 不为 '北京' 或 '上海' 的记录
+
+```SQL
+SELECT * FROM table1 WHERE region NOT IN ('北京', '上海');
+```
+
+### 1.5 GREATEST 和 LEAST
+
+`Greatest` 函数用于返回参数列表中的最大值,`Least` 函数用于返回参数列表中的最小值,返回数据类型与输入类型相同。
+1. 空值处理:若所有参数均为 NULL,则返回 NULL。
+2. 参数要求:必须提供至少 2 个参数。
+3. 类型约束:仅支持相同数据类型的参数比较。
+4. 支持类型: `BOOLEAN`、`FLOAT`、`DOUBLE`、`INT32`、`INT64`、`STRING`、`TEXT`、`TIMESTAMP`、`DATE`
+
+**语法:**
+
+```sql
+ greatest(value1, value2, ..., valueN)
+ least(value1, value2, ..., valueN)
+```
+
+**示例:**
+
+```sql
+-- 查询 table2 中 temperature 和 humidity 的最大记录
+SELECT GREATEST(temperature,humidity) FROM table2;
+
+-- 查询 table2 中 temperature 和 humidity 的最小记录
+SELECT LEAST(temperature,humidity) FROM table2;
+```
+
+
+## 2. 聚合函数
+
+### 2.1 概述
+
+1. 聚合函数是多对一函数。它们对一组值进行聚合计算,得到单个聚合结果。
+2. 除了 `COUNT()` 之外,其他所有聚合函数都忽略空值,并在没有输入行或所有值为空时返回空值。例如,`SUM()` 返回 null 而不是零,而 `AVG()` 在计数中不包括 null 值。
+
+### 2.2 支持的聚合函数
+
+| 函数名 | 功能描述 | 允许的输入类型 | 输出类型 |
+| ------ | -------- | -------------- | -------- |
+| COUNT | 计算数据点数。 | 所有类型 | INT64 |
+| COUNT_IF | COUNT_IF(exp) 用于统计满足指定布尔表达式的记录行数 | exp 必须是一个布尔类型的表达式,例如 count_if(temperature>20) | INT64 |
+| APPROX_COUNT_DISTINCT | APPROX_COUNT_DISTINCT(x[,maxStandardError]) 函数提供 COUNT(DISTINCT x) 的近似值,返回不同输入值的近似个数。 | `x`:待计算列,支持所有类型;<br>`maxStandardError`:指定该函数应产生的最大标准误差,取值范围 [0.0040625, 0.26],未指定值时默认 0.023。 | INT64 |
+| APPROX_MOST_FREQUENT | APPROX_MOST_FREQUENT(x, k, capacity) 函数用于近似计算数据集中出现频率最高的前 k 个元素。它返回一个 JSON 格式的字符串,其中键是该元素的值,值是该元素对应的近似频率。(V 2.0.5.1 及以后版本支持) | `x`:待计算列,支持 IoTDB 现有所有的数据类型;<br>`k`:返回出现频率最高的 k 个值;<br>`capacity`:用于计算的桶的数量,跟内存占用相关:其值越大误差越小,但占用内存更大;反之 capacity 值越小误差越大,但占用内存更小。 | STRING |
+| SUM | 求和。 | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| AVG | 求平均值。 | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| MAX | 求最大值。 | 所有类型 | 与输入类型一致 |
+| MIN | 求最小值。 | 所有类型 | 与输入类型一致 |
+| FIRST | 求时间戳最小且不为 NULL 的值。 | 所有类型 | 与输入类型一致 |
+| LAST | 求时间戳最大且不为 NULL 的值。 | 所有类型 | 与输入类型一致 |
+| STDDEV | STDDEV_SAMP 的别名,求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| STDDEV_POP | 求总体标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| STDDEV_SAMP | 求样本标准差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| VARIANCE | VAR_SAMP 的别名,求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| VAR_POP | 求总体方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| VAR_SAMP | 求样本方差。 | INT32 INT64 FLOAT DOUBLE | DOUBLE |
+| EXTREME | 求具有最大绝对值的值。如果正值和负值的最大绝对值相等,则返回正值。 | INT32 INT64 FLOAT DOUBLE | 与输入类型一致 |
+| MODE | 求众数。注意:1. 输入序列的不同值个数过多时会有内存异常风险;2. 如果所有元素出现的频次相同,即没有众数,则随机返回一个元素;3. 如果有多个众数,则随机返回一个众数;4. NULL 值也会被统计频次,所以即使输入序列的值不全为 NULL,最终结果也可能为 NULL。 | 所有类型 | 与输入类型一致 |
+| MAX_BY | MAX_BY(x, y) 求二元输入 x 和 y 在 y 最大时对应的 x 的值。MAX_BY(time, x) 返回 x 取最大值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 |
+| MIN_BY | MIN_BY(x, y) 求二元输入 x 和 y 在 y 最小时对应的 x 的值。MIN_BY(time, x) 返回 x 取最小值时对应的时间戳。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 |
+| FIRST_BY | FIRST_BY(x, y) 求当 y 为第一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 |
+| LAST_BY | LAST_BY(x, y) 求当 y 为最后一个不为 NULL 的值时,同一行里对应的 x 值。 | x 和 y 可以是任意类型 | 与第一个输入 x 的数据类型一致 |
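+
+作为快速参考,下面补充一个把常用数值聚合函数组合在一条查询中的示意(非源文档固定示例,基于下节示例数据 table1 的 temperature 列;逐函数的详细示例见下节):
+
+```SQL
+-- 示意:常用数值聚合的组合用法
+SELECT count(temperature)  AS cnt,
+       avg(temperature)    AS mean,
+       max(temperature)    AS mx,
+       min(temperature)    AS mn,
+       stddev(temperature) AS sd
+FROM table1;
+```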
+
+### 2.3 示例
+
+#### 2.3.1 示例数据
+
+在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的 SQL 语句,下载并在 IoTDB CLI 中执行这些语句,即可将数据导入 IoTDB。您可以使用这些数据来测试和执行示例中的 SQL 语句,并获得相应的结果。
+
+#### 2.3.2 Count
+
+统计的是整张表的行数和 `temperature` 列非 NULL 值的数量。
+
+```SQL
+IoTDB> select count(*), count(temperature) from table1;
+```
+
+执行结果如下:
+
+> 注意:只有 COUNT 函数可以与 `*` 一起使用,否则将抛出错误。
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+|   18|   12|
++-----+-----+
+Total line number = 1
+It costs 0.834s
+```
+
+
+#### 2.3.3 Count_if
+
+统计 `table2` 中到达时间 `arrival_time` 不是 `null` 的记录行数。
+
+```sql
+IoTDB> select count_if(arrival_time is not null) from table2;
+```
+
+执行结果如下:
+
+```sql
++-----+
+|_col0|
++-----+
+|    4|
++-----+
+Total line number = 1
+It costs 0.047s
+```
+
+#### 2.3.4 Approx_count_distinct
+
+查询 `table1` 中 `temperature` 列不同值的个数。
+
+```sql
+IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature) as approx FROM table1;
+IoTDB> SELECT COUNT(DISTINCT temperature) as origin, APPROX_COUNT_DISTINCT(temperature,0.006) as approx FROM table1;
+```
+
+两条语句的执行结果相同,如下:
+
+```sql
++------+------+
+|origin|approx|
++------+------+
+|     3|     3|
++------+------+
+Total line number = 1
+It costs 0.022s
+```
+
+#### 2.3.5 Approx_most_frequent
+
+查询 `table1` 中 `temperature` 列出现频次最高的 2 个值
+
+```sql
+IoTDB> select approx_most_frequent(temperature,2,100) as topk from table1;
+```
+
+执行结果如下:
+
+```sql
++-------------------+
+|               topk|
++-------------------+
+|{"85.0":6,"90.0":5}|
++-------------------+
+Total line number = 1
+It costs 0.064s
+```
+
+
+#### 2.3.6 First
+
+查询 `temperature` 列、`humidity` 列时间戳最小且不为 NULL 的值。
+
+```SQL
+IoTDB> select first(temperature), first(humidity) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+| 90.0| 35.1|
++-----+-----+
+Total line number = 1
+It costs 0.170s
+```
+
+#### 2.3.7 Last
+
+查询 `temperature` 列、`humidity` 列时间戳最大且不为 NULL 的值。
+
+```SQL
+IoTDB> select last(temperature), last(humidity) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----+-----+
+|_col0|_col1|
++-----+-----+
+| 90.0| 34.8|
++-----+-----+
+Total line number = 1
+It costs 0.211s
+```
+
+#### 2.3.8 First_by
+
+查询 `temperature` 列中非 NULL 且时间戳最小的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最小的行的 `humidity` 值。
+
+```SQL
+IoTDB> select first_by(time, temperature), first_by(humidity, temperature) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-26T13:37:00.000+08:00| 35.1|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.269s
+```
+
+#### 2.3.9 Last_by
+
+查询 `temperature` 列中非 NULL 且时间戳最大的行的 `time` 值,以及 `temperature` 列中非 NULL 且时间戳最大的行的 `humidity` 值。
+
+```SQL
+IoTDB> select last_by(time, temperature), last_by(humidity, temperature) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-30T14:30:00.000+08:00| 34.8|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.070s
+```
+
+#### 2.3.10 Max_by
+
+查询 `temperature` 列中最大值所在行的 `time` 值,以及 `temperature` 列中最大值所在行的 `humidity` 值。
+
+```SQL
+IoTDB> select max_by(time, temperature), max_by(humidity, temperature) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-30T09:30:00.000+08:00| 35.2|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.172s
+```
+
+#### 2.3.11 Min_by
+
+查询 `temperature` 列中最小值所在行的 `time` 值,以及 `temperature` 列中最小值所在行的 `humidity` 值。
+
+```SQL
+select min_by(time, temperature), min_by(humidity, temperature) from table1;
+```
+
+执行结果如下:
+
+```SQL
++-----------------------------+-----+
+|                        _col0|_col1|
++-----------------------------+-----+
+|2024-11-29T10:00:00.000+08:00| null|
++-----------------------------+-----+
+Total line number = 1
+It costs 0.244s
+```
+
+
+## 3. 逻辑运算符
+
+### 3.1 概述
+
+逻辑运算符用于组合条件或否定条件,返回布尔结果(`true` 或 `false`)。
+
+以下是常用的逻辑运算符及其描述:
+
+| 运算符 | 描述 | 示例 |
+| ------ | ----------------------------- | ------- |
+| AND | 仅当两个值都为 true 时为 true | a AND b |
+| OR | 任一值为 true 时为 true | a OR b |
+| NOT | 当值为 false 时为 true | NOT a |
+
+### 3.2 NULL 对逻辑运算符的影响
+
+#### 3.2.1 AND 运算符
+
+- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。
+- 如果 `AND` 运算符的一侧为 `FALSE`,则表达式结果为 `FALSE`。
+
+示例:
+
+```SQL
+NULL AND true -- null
+NULL AND false -- false
+NULL AND NULL -- null
+```
+
+#### 3.2.2 OR 运算符
+
+- 如果表达式的一侧或两侧为 `NULL`,结果可能为 `NULL`。
+- 如果 `OR` 运算符的一侧为 `TRUE`,则表达式结果为 `TRUE`。
+
+示例:
+
+```SQL
+NULL OR NULL -- null
+NULL OR false -- null
+NULL OR true -- true
+```
+
+##### 3.2.2.1 真值表
+
+以下真值表展示了 `NULL` 在 `AND` 和 `OR` 运算符中的处理方式:
+
+| a | b | a AND b | a OR b |
+| ----- | ----- | ------- | ------ |
+| TRUE | TRUE | TRUE | TRUE |
+| TRUE | FALSE | FALSE | TRUE |
+| TRUE | NULL | NULL | TRUE |
+| FALSE | TRUE | FALSE | TRUE |
+| FALSE | FALSE | FALSE | FALSE |
+| FALSE | NULL | FALSE | NULL |
+| NULL | TRUE | NULL | TRUE |
+| NULL | FALSE | FALSE | NULL |
+| NULL | NULL | NULL | NULL |
+
+#### 3.2.3 NOT 运算符
+
+NULL 的逻辑否定仍然是 NULL
+
+示例:
+
+```SQL
+NOT NULL -- null
+```
+
+##### 3.2.3.1 真值表
+
+以下真值表展示了 `NULL` 在 `NOT` 运算符中的处理方式:
+
+| a | NOT a |
+| ----- | ----- |
+| TRUE | FALSE |
+| FALSE | TRUE |
+| NULL | NULL |
+
+
+## 4. 日期和时间函数和运算符
+
+### 4.1 now() -> Timestamp
+
+返回当前时间的时间戳。
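+
+`now()` 常与 time 列组合出现在过滤条件中。下面是一个写法示意(非源文档固定示例,基于示例数据 table1,并结合前文的 `IS NOT NULL` 与逻辑运算符):
+
+```SQL
+-- 示意:查询当前时刻之前、且 humidity 非空的记录
+SELECT * FROM table1 WHERE time <= now() AND humidity IS NOT NULL;
+```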
+
+### 4.2 date_bin(interval, Timestamp[, Timestamp]) -> Timestamp
+
+`date_bin` 函数是一种用于处理时间数据的函数,作用是将一个时间戳(Timestamp)舍入到指定的时间间隔(interval)的边界上。
+
+**语法:**
+
+```SQL
+-- 从时间戳为 0 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点
+date_bin(interval,source)
+
+-- 从时间戳为 origin 开始计算时间间隔,返回最接近指定时间戳的时间间隔起始点
+date_bin(interval,source,origin)
+
+-- interval 支持的时间单位有:
+-- 年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。
+-- source 必须为时间戳类型。
+```
+
+**参数:**
+
+| 参数 | 含义 |
+| -------- | ------------------------------------------------------------ |
+| interval | 时间间隔,支持的时间单位有:年y、月mo、周week、日d、小时h、分钟M、秒s、毫秒ms、微秒µs、纳秒ns。 |
+| source | 待计算时间列,也可以是表达式。必须为时间戳类型。 |
+| origin | 起始时间戳 |
+
+#### 4.2.1 语法约定:
+
+1. 不传入 `origin` 时,起始时间戳从 1970-01-01T00:00:00Z 开始计算(北京时间为 1970-01-01 08:00:00)。
+2. `interval` 为一个非负数,且必须带上时间单位。`interval` 为 0ms 时,不进行计算,直接返回 `source`。
+3. 当传入 `origin` 或 `source` 为负时,表示纪元时间之前的某个时间点,`date_bin` 会正常计算并返回与该时间点相关的时间段。
+4. 如果 `source` 中的值为 `null`,则返回 `null`。
+5. 不支持月份和非月份时间单位混用,例如 `1 MONTH 1 DAY`,这种时间间隔有歧义。
+
+> 假设起始时间为 2000 年 4 月 30 日,那么经过一个时间间隔后,如果先算 DAY 再算 MONTH,会得到 2000 年 6 月 1 日;如果先算 MONTH 再算 DAY,则会得到 2000 年 5 月 31 日,二者得出的时间日期不同。
+
+#### 4.2.2 示例
+
+##### 示例数据
+
+在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的 SQL 语句,下载并在 IoTDB CLI 中执行这些语句,即可将数据导入 IoTDB。您可以使用这些数据来测试和执行示例中的 SQL 语句,并获得相应的结果。
+
+示例 1:不指定起始时间戳
+
+```SQL
+SELECT
+    time,
+    date_bin(1h,time) as time_bin
+FROM
+    table1;
+```
+
+结果:
+
+```Plain
++-----------------------------+-----------------------------+
+|                         time|                     time_bin|
++-----------------------------+-----------------------------+
+|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00|
+|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00|
+|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00|
+|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00|
+|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00|
+|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00|
+|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00|
+|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00|
+|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00|
+|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00|
+|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00|
+|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00|
+|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00|
+|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00|
+|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00|
+|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00|
+|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00|
+|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00|
++-----------------------------+-----------------------------+
+Total line number = 18
+It costs 0.683s
+```
+
+示例 2:指定起始时间戳
+
+```SQL
+SELECT
+    time,
+    date_bin(1h, time, 2024-11-29T18:30:00.000) as time_bin
+FROM
+    table1;
+```
+
+结果:
+
+```Plain
++-----------------------------+-----------------------------+
+|                         time|                     time_bin|
++-----------------------------+-----------------------------+
+|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00|
+|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00|
+|2024-11-29T10:00:00.000+08:00|2024-11-29T09:30:00.000+08:00|
+|2024-11-27T16:38:00.000+08:00|2024-11-27T16:30:00.000+08:00|
+|2024-11-27T16:39:00.000+08:00|2024-11-27T16:30:00.000+08:00|
+|2024-11-27T16:40:00.000+08:00|2024-11-27T16:30:00.000+08:00|
+|2024-11-27T16:41:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:30:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T10:30:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T07:30:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T08:30:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T09:30:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T10:30:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:30:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:30:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.056s +``` + +示例 3:`origin` 为负数的情况 + +```SQL +SELECT + time, + date_bin(1h, time, 1969-12-31 00:00:00.000) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:00:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:00:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:00:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:00:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| +|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00| +|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00| +|2024-11-26T13:37:00.000+08:00|2024-11-26T13:00:00.000+08:00| +|2024-11-26T13:38:00.000+08:00|2024-11-26T13:00:00.000+08:00| ++-----------------------------+-----------------------------+ +Total line number = 18 +It costs 0.203s +``` + +示例 4:`interval` 为 0 的情况 + +```SQL +SELECT + time, + date_bin(0ms, time) as time_bin +FROM + table1; +``` + +结果: + +```Plain ++-----------------------------+-----------------------------+ +| time| time_bin| ++-----------------------------+-----------------------------+ +|2024-11-30T09:30:00.000+08:00|2024-11-30T09:30:00.000+08:00| +|2024-11-30T14:30:00.000+08:00|2024-11-30T14:30:00.000+08:00| +|2024-11-29T10:00:00.000+08:00|2024-11-29T10:00:00.000+08:00| +|2024-11-27T16:38:00.000+08:00|2024-11-27T16:38:00.000+08:00| +|2024-11-27T16:39:00.000+08:00|2024-11-27T16:39:00.000+08:00| +|2024-11-27T16:40:00.000+08:00|2024-11-27T16:40:00.000+08:00| +|2024-11-27T16:41:00.000+08:00|2024-11-27T16:41:00.000+08:00| +|2024-11-27T16:42:00.000+08:00|2024-11-27T16:42:00.000+08:00| +|2024-11-27T16:43:00.000+08:00|2024-11-27T16:43:00.000+08:00| +|2024-11-27T16:44:00.000+08:00|2024-11-27T16:44:00.000+08:00| +|2024-11-29T11:00:00.000+08:00|2024-11-29T11:00:00.000+08:00| +|2024-11-29T18:30:00.000+08:00|2024-11-29T18:30:00.000+08:00| +|2024-11-28T08:00:00.000+08:00|2024-11-28T08:00:00.000+08:00| +|2024-11-28T09:00:00.000+08:00|2024-11-28T09:00:00.000+08:00| 
+|2024-11-28T10:00:00.000+08:00|2024-11-28T10:00:00.000+08:00|
+|2024-11-28T11:00:00.000+08:00|2024-11-28T11:00:00.000+08:00|
+|2024-11-26T13:37:00.000+08:00|2024-11-26T13:37:00.000+08:00|
+|2024-11-26T13:38:00.000+08:00|2024-11-26T13:38:00.000+08:00|
++-----------------------------+-----------------------------+
+Total line number = 18
+It costs 0.107s
+```
+
+示例 5:`source` 为 null 的情况
+
+```SQL
+SELECT
+    arrival_time,
+    date_bin(1h,arrival_time) as time_bin
+FROM
+    table1;
+```
+
+结果:
+
+```Plain
++-----------------------------+-----------------------------+
+|                 arrival_time|                     time_bin|
++-----------------------------+-----------------------------+
+|                         null|                         null|
+|2024-11-30T14:30:17.000+08:00|2024-11-30T14:00:00.000+08:00|
+|2024-11-29T10:00:13.000+08:00|2024-11-29T10:00:00.000+08:00|
+|2024-11-27T16:37:01.000+08:00|2024-11-27T16:00:00.000+08:00|
+|                         null|                         null|
+|2024-11-27T16:37:03.000+08:00|2024-11-27T16:00:00.000+08:00|
+|2024-11-27T16:37:04.000+08:00|2024-11-27T16:00:00.000+08:00|
+|                         null|                         null|
+|                         null|                         null|
+|2024-11-27T16:37:08.000+08:00|2024-11-27T16:00:00.000+08:00|
+|                         null|                         null|
+|2024-11-29T18:30:15.000+08:00|2024-11-29T18:00:00.000+08:00|
+|2024-11-28T08:00:09.000+08:00|2024-11-28T08:00:00.000+08:00|
+|                         null|                         null|
+|2024-11-28T10:00:11.000+08:00|2024-11-28T10:00:00.000+08:00|
+|2024-11-28T11:00:12.000+08:00|2024-11-28T11:00:00.000+08:00|
+|2024-11-26T13:37:34.000+08:00|2024-11-26T13:00:00.000+08:00|
+|2024-11-26T13:38:25.000+08:00|2024-11-26T13:00:00.000+08:00|
++-----------------------------+-----------------------------+
+Total line number = 18
+It costs 0.319s
+```
+
+### 4.3 Extract 函数
+
+该函数用于提取日期对应部分的值。(V2.0.6 版本起支持)
+
+#### 4.3.1 语法定义
+
+```SQL
+EXTRACT (identifier FROM expression)
+```
+* 参数说明
+  * **expression**:`TIMESTAMP` 类型或时间常量
+  * **identifier**:取值范围及对应的返回值见下表
+
+    | 取值范围 | 返回值类型 | 返回值范围 |
+    | -------------------------- | ------------- | ------------- |
+    | `YEAR` | `INT64` | `/` |
+    | `QUARTER` | `INT64` | `1-4` |
+    | `MONTH` | `INT64` | `1-12` |
+    | `WEEK` | `INT64` | `1-53` |
+    | `DAY_OF_MONTH (DAY)` | `INT64` | `1-31` |
+    | `DAY_OF_WEEK (DOW)` | `INT64` | `1-7` |
+    | `DAY_OF_YEAR (DOY)` | `INT64` | `1-366` |
+    | `HOUR` | `INT64` | `0-23` |
+    | `MINUTE` | `INT64` | `0-59` |
+    | `SECOND` | `INT64` | `0-59` |
+    | `MS` | `INT64` | `0-999` |
+    | `US` | `INT64` | `0-999` |
+    | `NS` | `INT64` | `0-999` |
+
+
+#### 4.3.2 使用示例
+
+以[示例数据](../Reference/Sample-Data.md)中的 table1 为源数据,查询某段时间内每天 0 点至 12 点的温度平均值
+
+```SQL
+IoTDB:database1> select format('%1$tY-%1$tm-%1$td',date_bin(1d,time)) as fmtdate,avg(temperature) as avgtp from table1 where time >= 2024-11-26T00:00:00 and time <= 2024-11-30T23:59:59 and extract(hour from time) <= 12 group by date_bin(1d,time) order by date_bin(1d,time)
++----------+-----+
+|   fmtdate|avgtp|
++----------+-----+
+|2024-11-28| 86.0|
+|2024-11-29| 85.0|
+|2024-11-30| 90.0|
++----------+-----+
+Total line number = 3
+It costs 0.041s
+```
+
+`Format` 函数介绍:[Format 函数](../SQL-Manual/Basis-Function_timecho.md#_8-2-format-函数)
+
+`Date_bin` 函数介绍:[Date_bin 函数](../SQL-Manual/Basis-Function_timecho.md#_4-2-date-bin-interval-timestamp-timestamp-timestamp)
+
+
+## 5. 数学函数和运算符
+
+### 5.1 数学运算符
+
+| **运算符** | **描述** |
+| ---------- | ------------------------ |
+| + | 加法 |
+| - | 减法 |
+| * | 乘法 |
+| / | 除法(整数除法执行截断) |
+| % | 模(余数) |
+| - | 取反(一元运算符) |
+
+### 5.2 数学函数
+
+| 函数名 | 描述 | 输入 | 输出 | 用法 |
+| ------ | ---- | ---- | ---- | ---- |
+| sin | 正弦函数 | double、float、INT64、INT32 | double | sin(x) |
+| cos | 余弦函数 | double、float、INT64、INT32 | double | cos(x) |
+| tan | 正切函数 | double、float、INT64、INT32 | double | tan(x) |
+| asin | 反正弦函数 | double、float、INT64、INT32 | double | asin(x) |
+| acos | 反余弦函数 | double、float、INT64、INT32 | double | acos(x) |
+| atan | 反正切函数 | double、float、INT64、INT32 | double | atan(x) |
+| sinh | 双曲正弦函数 | double、float、INT64、INT32 | double | sinh(x) |
+| cosh | 双曲余弦函数 | double、float、INT64、INT32 | double | cosh(x) |
+| tanh | 双曲正切函数 | double、float、INT64、INT32 | double | tanh(x) |
+| degrees | 将弧度角 x 转换为度 | double、float、INT64、INT32 | double | degrees(x) |
+| radians | 将度转换为弧度 | double、float、INT64、INT32 | double | radians(x) |
+| abs | 绝对值 | double、float、INT64、INT32 | 返回与输入类型相同的值 | abs(x) |
+| sign | 返回 x 的符号函数,即:如果参数为 0,则返回 0;如果参数大于 0,则返回 1;如果参数小于 0,则返回 -1。对于 double/float 类型的参数,函数还会返回:如果参数为 NaN,则返回 NaN;如果参数为 +Infinity,则返回 1.0;如果参数为 -Infinity,则返回 -1.0。 | double、float、INT64、INT32 | 返回与输入类型相同的值 | sign(x) |
+| ceil | 返回 x 向上取整到最近的整数。 | double、float、INT64、INT32 | double | ceil(x) |
+| floor | 返回 x 向下取整到最近的整数。 | double、float、INT64、INT32 | double | floor(x) |
+| exp | 返回欧拉数 e 的 x 次幂。 | double、float、INT64、INT32 | double | exp(x) |
+| ln | 返回 x 的自然对数。 | double、float、INT64、INT32 | double | ln(x) |
+| log10 | 返回 x 的以 10 为底的对数。 | double、float、INT64、INT32 | double | log10(x) |
+| round | 返回 x 四舍五入到最近的整数。 | double、float、INT64、INT32 | double | round(x) |
+| round | 返回 x 四舍五入到 d 位小数。 | double、float、INT64、INT32 | double | round(x, d) |
+| sqrt | 返回 x 的平方根。 | double、float、INT64、INT32 | double | sqrt(x) |
+| e | 自然常数 e | | double | e() |
+| pi | π | | double | pi() |
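+
+下面补充一个数学函数的示意查询(非源文档固定示例,以示例数据 table1 的 temperature 数值列为例,任意数值列均可):
+
+```SQL
+-- 示意:round/floor/abs/sqrt 的基本用法
+SELECT temperature,
+       round(temperature)    AS rounded,
+       floor(temperature)    AS floored,
+       abs(temperature - 90) AS diff,
+       sqrt(temperature)     AS root
+FROM table1 limit 1
+```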
+
+
+## 6. 位运算函数
+
+> V 2.0.6 版本起支持
+
+示例原始数据如下:
+
+```SQL
+IoTDB:database1> select * from bit_table
++-----------------------------+---------+------+-----+
+|                         time|device_id|length|width|
++-----------------------------+---------+------+-----+
+|2025-10-29T15:59:42.957+08:00|       d1|    14|   12|
+|2025-10-29T15:58:59.399+08:00|       d3|    15|   10|
+|2025-10-29T15:59:32.769+08:00|       d2|    13|   12|
++-----------------------------+---------+------+-----+
+
+-- 建表语句
+CREATE TABLE bit_table(time TIMESTAMP TIME, device_id STRING TAG, length INT32 FIELD, width INT32 FIELD);
+
+-- 写入数据
+INSERT INTO bit_table values(2025-10-29 15:59:42.957, 'd1', 14, 12),(2025-10-29 15:58:59.399, 'd3', 15, 10),(2025-10-29 15:59:32.769, 'd2', 13, 12);
+```
+
+### 6.1 bit\_count(num, bits)
+
+`bit_count(num, bits)` 函数用于统计整数 `num` 在指定位宽 `bits` 下的二进制表示中 1 的个数。
+
+#### 6.1.1 语法定义
+
+```SQL
+bit_count(num, bits) -> INT64 -- 返回结果类型为 Int64
+```
+
+* 参数说明
+  * **num**:任意整型数值(int32 或者 int64)
+  * **bits**:整型数值,取值范围为 2\~64
+
+注意:如果 bits 位数不够表示 num,会报错(此处是**有符号补码**):`Argument exception, the scalar function num must be representable with the bits specified. [num] cannot be represented with [bits] bits.`
+
+* 调用方式
+  * 两个具体数值:`bit_count(9, 64)`
+  * 列与数值:`bit_count(column1, 64)`
+  * 两列之间:`bit_count(column1, column2)`
+
+#### 6.1.2 使用示例
+
+```SQL
+-- 两个具体数值
+IoTDB:database1> select distinct bit_count(2,8) from bit_table
++-----+
+|_col0|
++-----+
+|    1|
++-----+
+-- 两个具体数值
+IoTDB:database1> select distinct bit_count(-5,8) from bit_table
++-----+
+|_col0|
++-----+
+|    7|
++-----+
+-- 列与数值
+IoTDB:database1> select length,bit_count(length,8) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    3|
+|    15|    4|
+|    13|    3|
++------+-----+
+-- bits 位数不够
+IoTDB:database1> select length,bit_count(length,2) from bit_table
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Argument exception, the scalar function num must be representable with the bits specified. 13 cannot be represented with 2 bits.
+```
+
+### 6.2 bitwise\_and(x, y)
+
+`bitwise_and(x, y)` 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑与操作,并返回其按位与(bitwise AND)的运算结果。
+
+#### 6.2.1 语法定义
+
+```SQL
+bitwise_and(x, y) -> INT64 -- 返回结果类型为 Int64
+```
+
+* 参数说明
+  * **x, y**:必须是 Int32 或 Int64 数据类型的整数值
+* 调用方式
+  * 两个具体数值:`bitwise_and(19, 25)`
+  * 列与数值:`bitwise_and(column1, 25)`
+  * 两列之间:`bitwise_and(column1, column2)`
+
+#### 6.2.2 使用示例
+
+```SQL
+-- 两个具体数值
+IoTDB:database1> select distinct bitwise_and(19,25) from bit_table
++-----+
+|_col0|
++-----+
+|   17|
++-----+
+-- 列与数值
+IoTDB:database1> select length, bitwise_and(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    8|
+|    15|    9|
+|    13|    9|
++------+-----+
+-- 两列之间
+IoTDB:database1> select length, width, bitwise_and(length, width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|   12|
+|    15|   10|   10|
+|    13|   12|   12|
++------+-----+-----+
+```
+
+### 6.3 bitwise\_not(x)
+
+`bitwise_not(x)` 函数基于二进制补码表示法,对整数 x 的每一位进行逻辑非操作,并返回其按位取反(bitwise NOT)的运算结果。
+
+#### 6.3.1 语法定义
+
+```SQL
+bitwise_not(x) -> INT64 -- 返回结果类型为 Int64
+```
+
+* 参数说明
+  * **x**:必须是 Int32 或 Int64 数据类型的整数值
+* 调用方式
+  * 具体数值:`bitwise_not(5)`
+  * 单列操作:`bitwise_not(column1)`
+
+#### 6.3.2 使用示例
+
+```SQL
+-- 具体数值
+IoTDB:database1> select distinct bitwise_not(5) from bit_table
++-----+
+|_col0|
++-----+
+|   -6|
++-----+
+-- 单列
+IoTDB:database1> select length, bitwise_not(length) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|  -15|
+|    15|  -16|
+|    13|  -14|
++------+-----+
+```
+
+### 6.4 bitwise\_or(x, y)
+
+`bitwise_or(x,y)` 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑或操作,并返回其按位或(bitwise OR)的运算结果。
+
+#### 6.4.1 语法定义
+
+```SQL
+bitwise_or(x, y) -> INT64 -- 返回结果类型为 Int64
+```
+
+* 参数说明
+  * **x, y**:必须是 Int32 或 Int64 数据类型的整数值
+* 调用方式
+  * 两个具体数值:`bitwise_or(19, 25)`
+  * 列与数值:`bitwise_or(column1, 25)`
+  * 两列之间:`bitwise_or(column1, column2)`
+
+#### 6.4.2 使用示例
+
+```SQL
+-- 两个具体数值
+IoTDB:database1> select distinct bitwise_or(19,25) from bit_table
++-----+
+|_col0|
++-----+
+|   27|
++-----+
+-- 列与数值
+IoTDB:database1> select length,bitwise_or(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|   31|
+|    15|   31|
+|    13|   29|
++------+-----+
+-- 两列之间
+IoTDB:database1> select length, width, bitwise_or(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|   14|
+|    15|   10|   15|
+|    13|   12|   13|
++------+-----+-----+
+```
+
+### 6.5 bitwise\_xor(x, y)
+
+bitwise\_xor(x,y) 函数基于二进制补码表示法,对两个整数 x 和 y 的每一位进行逻辑异或操作,并返回其按位异或(bitwise XOR)的运算结果。异或规则:相同为 0,不同为 1。
+
+#### 6.5.1 语法定义
+
+```SQL
+bitwise_xor(x, y) -> INT64 -- 返回结果类型为 Int64
+```
+
+* 参数说明
+  * **x, y**:必须是 Int32 或 Int64 数据类型的整数值
+* 调用方式
+  * 两个具体数值:`bitwise_xor(19, 25)`
+  * 列与数值:`bitwise_xor(column1, 25)`
+  * 两列之间:`bitwise_xor(column1, column2)`
+
+#### 6.5.2 使用示例
+
+```SQL
+-- 两个具体数值
+IoTDB:database1> select distinct bitwise_xor(19,25) from bit_table
++-----+
+|_col0|
++-----+
+|   10|
++-----+
+-- 列与数值
+IoTDB:database1> select length,bitwise_xor(length,25) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|   23|
+|    15|   22|
+|    13|   20|
++------+-----+
+-- 两列之间
+IoTDB:database1> select length, width, bitwise_xor(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    2|
+|    15|   10|    5|
+|    13|   12|    1|
++------+-----+-----+
+```
+
+### 6.6 bitwise\_left\_shift(value, shift)
+
+`bitwise_left_shift(value, shift)` 函数返回将整数 `value` 的二进制表示左移 `shift` 位后的结果。左移操作将二进制位向高位方向移动,右侧空出的位用 0 填充,左侧溢出的位直接丢弃。等价于:`value << shift`。
+
+#### 6.6.1 语法定义
+
+```SQL
+bitwise_left_shift(value, shift) -> [same as value] -- 返回结果类型与 value 数据类型相同
+```
+
+* 参数说明
+  * **value**:要左移的整数值,必须是 Int32 或 Int64 数据类型
+  * **shift**:左移的位数,必须是 Int32 或 Int64 数据类型
+* 调用方式
+  * 两个具体数值:`bitwise_left_shift(1, 2)`
+  * 列与数值:`bitwise_left_shift(column1, 2)`
+  * 两列之间:`bitwise_left_shift(column1, column2)`
+
+#### 6.6.2 使用示例
+
+```SQL
+-- 两个具体数值
+IoTDB:database1> select distinct bitwise_left_shift(1,2) from bit_table
++-----+
+|_col0|
++-----+
+|    4|
++-----+
+-- 列与数值
+IoTDB:database1> select length, bitwise_left_shift(length,2) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|   56|
+|    15|   60|
+|    13|   52|
++------+-----+
+-- 两列之间
+IoTDB:database1> select length, width, bitwise_left_shift(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    0|
+|    15|   10|    0|
+|    13|   12|    0|
++------+-----+-----+
+```
+
+### 6.7 bitwise\_right\_shift(value, shift)
+
+`bitwise_right_shift(value, shift)` 函数返回将整数 `value` 的二进制表示逻辑右移(无符号右移)`shift` 位后的结果。逻辑右移操作将二进制位向低位方向移动,左侧空出的高位用 0 填充,右侧溢出的低位直接丢弃。
+
+#### 6.7.1 语法定义
+
+```SQL
+bitwise_right_shift(value, shift) -> [same as value] -- 返回结果类型与 value 数据类型相同
+```
+
+* 参数说明
+  * **value**:要右移的整数值,必须是 Int32 或 Int64 数据类型
+  * **shift**:右移的位数,必须是 Int32 或 Int64 数据类型
+* 调用方式
+  * 两个具体数值:`bitwise_right_shift(8, 3)`
+  * 列与数值:`bitwise_right_shift(column1, 3)`
+  * 两列之间:`bitwise_right_shift(column1, column2)`
+
+#### 6.7.2 使用示例
+
+```SQL
+-- 两个具体数值
+IoTDB:database1> select distinct bitwise_right_shift(8,3) from bit_table
++-----+
+|_col0|
++-----+
+|    1|
++-----+
+-- 列与数值
+IoTDB:database1> select length, bitwise_right_shift(length,3) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    1|
+|    15|    1|
+|    13|    1|
++------+-----+
+-- 两列之间
+IoTDB:database1> select length, width, bitwise_right_shift(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    0|
+|    15|   10|    0|
+|    13|   12|    0|
++------+-----+-----+
+```
+
+### 6.8 bitwise\_right\_shift\_arithmetic(value, shift)
+
+`bitwise_right_shift_arithmetic(value, shift)` 函数返回将整数 `value` 的二进制表示算术右移 `shift` 位后的结果。算术右移操作将二进制位向低位方向移动,右侧溢出的低位直接丢弃,左侧空出的高位用符号位填充(正数补 0,负数补 1),以保持数值的符号不变。
+
+#### 6.8.1 语法定义
+
+```SQL
+bitwise_right_shift_arithmetic(value, shift) -> [same as value] -- 返回结果类型与 value 数据类型相同
+```
+
+* 参数说明
+  * **value**:要右移的整数值,必须是 Int32 或 Int64 数据类型
+  * **shift**:右移的位数,必须是 Int32 或 Int64 数据类型
+* 调用方式:
+  * 两个具体数值:`bitwise_right_shift_arithmetic(12, 2)`
+  * 列与数值:`bitwise_right_shift_arithmetic(column1, 64)`
+  * 两列之间:`bitwise_right_shift_arithmetic(column1, column2)`
+
+#### 6.8.2 使用示例
+
+```SQL
+-- 两个具体数值
+IoTDB:database1> select distinct bitwise_right_shift_arithmetic(12,2) from bit_table
++-----+
+|_col0|
++-----+
+|    3|
++-----+
+-- 列与数值
+IoTDB:database1> select length, bitwise_right_shift_arithmetic(length,3) from bit_table
++------+-----+
+|length|_col1|
++------+-----+
+|    14|    1|
+|    15|    1|
+|    13|    1|
++------+-----+
+-- 两列之间
+IoTDB:database1> select length, width, bitwise_right_shift_arithmetic(length,width) from bit_table
++------+-----+-----+
+|length|width|_col2|
++------+-----+-----+
+|    14|   12|    0|
+|    15|   10|    0|
+|    13|   12|    0|
++------+-----+-----+
+```
+
+## 7. 条件表达式
+
+### 7.1 CASE 表达式
+
+CASE 表达式有两种形式:简单形式、搜索形式
+
+#### 7.1.1 简单形式
+
+简单形式从左到右搜索每个值表达式,直到找到一个与表达式相等的值:
+
+```SQL
+CASE expression
+    WHEN value THEN result
+    [ WHEN ... ]
+    [ ELSE result ]
+END
+```
+
+如果找到匹配的值,则返回相应的结果。如果没有找到匹配项,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如:
+
+```SQL
+SELECT a,
+    CASE a
+        WHEN 1 THEN 'one'
+        WHEN 2 THEN 'two'
+        ELSE 'many'
+    END
+```
+
+#### 7.1.2 搜索形式
+
+搜索形式从左到右评估每个布尔条件,直到找到一个为真的条件,并返回相应的结果:
+
+```SQL
+CASE
+    WHEN condition THEN result
+    [ WHEN ... ]
+    [ ELSE result ]
+END
+```
+
+如果没有条件为真,则返回 ELSE 子句中的结果(如果存在),否则返回 null。例如:
+
+```SQL
+SELECT a, b,
+    CASE
+        WHEN a = 1 THEN 'aaa'
+        WHEN b = 2 THEN 'bbb'
+        ELSE 'ccc'
+    END
+```
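+
+补充一个可直接运行的搜索形式示意(非源文档固定示例,假设使用前文示例数据 table1 的 temperature 列):
+
+```SQL
+-- 示意:按温度区间分级
+SELECT time, temperature,
+    CASE
+        WHEN temperature >= 90 THEN 'high'
+        WHEN temperature >= 85 THEN 'medium'
+        ELSE 'low'
+    END AS level
+FROM table1;
+```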
+
+### 7.2 COALESCE 函数
+
+返回参数列表中的第一个非空值;若所有参数均为 NULL,则返回 NULL。
+
+```SQL
+coalesce(value1, value2[, ...])
+```
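+
+一个典型用途是为 NULL 值提供回退值,写法示意如下(非源文档固定示例,假设使用示例数据 table1 的两个数值列):
+
+```SQL
+-- 示意:temperature 为 NULL 时回退到 humidity
+SELECT coalesce(temperature, humidity) FROM table1;
+```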
格式化日期和时间戳 + +* Locale-specific日期 + +```SQL +IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', 2024-01-01) from table1 limit 1 ++--------------------+ +| _col0| ++--------------------+ +|星期一, 一月 1, 2024| ++--------------------+ +``` + +* 去除时区信息 + +```SQL +IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 ++-----------------------+ +| _col0| ++-----------------------+ +|2024-01-01 00:00:00.000| ++-----------------------+ +``` + +* 获取秒级时间戳精度 + +```SQL +IoTDB:database1> SELECT format('%1$tF %1$tT', 2024-01-01T00:00:00.000+08:00) from table1 limit 1 ++-------------------+ +| _col0| ++-------------------+ +|2024-01-01 00:00:00| ++-------------------+ +``` + +* 日期符号说明如下 + +| **符号** | **​ 描述** | +| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 'H' | 24 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 00 - 23。 | +| 'I' | 12 小时制的小时数,格式为两位数,必要时加上前导零,i.e. 01 - 12。 | +| 'k' | 24 小时制的小时数,i.e. 0 - 23。 | +| 'l' | 12 小时制的小时数,i.e. 1 - 12。 | +| 'M' | 小时内的分钟,格式为两位数,必要时加上前导零,i.e. 00 - 59。 | +| 'S' | 分钟内的秒数,格式为两位数,必要时加上前导零,i.e. 00 - 60(“60 ”是支持闰秒所需的特殊值)。 | +| 'L' | 秒内毫秒,格式为三位数,必要时加前导零,i.e. 000 - 999。 | +| 'N' | 秒内的纳秒,格式为九位数,必要时加前导零,i.e. 000000000 - 999999999。 | +| 'p' | 当地特定的[上午或下午](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getAmPmStrings())标记,小写,如 “am ”或 “pm”。使用转换前缀 “T ”会强制输出为大写。 | +| 'z' | 从格林尼治标准时间偏移的[RFC 822](http://www.ietf.org/rfc/rfc0822.txt)式数字时区,例如 -0800。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是 Java 虚拟机此实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。 | +| 'Z' | 表示时区缩写的字符串。该值将根据夏令时的需要进行调整。对于 long、[Long](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/lang/Long.html)和[Date](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/Date.html),使用的时区是此 Java 虚拟机实例的[默认时区](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/util/TimeZone.html#getDefault())。Formatter 的时区将取代参数的时区(如果有)。 | +| 's' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的秒数,i.e. Long.MIN\_VALUE/1000 至 Long.MAX\_VALUE/1000。 | +| 'Q' | 自 1970 年 1 月 1 日 00:00:00 UTC 开始的纪元起的毫秒数,i.e. 
Long.MIN\_VALUE 至 Long.MAX\_VALUE。 |
+
+* 用于格式化常见的日期/时间组成的转换字符说明如下
+
+| **符号** | **描述** |
+| -------- | -------- |
+| 'B' | 特定于区域设置[的完整月份名称](https://docs.oracle.com/en/java/javase/23/docs/api/java.base/java/text/DateFormatSymbols.html#getMonths()),例如 “January”、“February”。 |
+| 'b' | 当地特定月份的缩写名称,如“1 月”、“2 月”。 |
+| 'h' | 与 'b' 相同。 |
+| 'A' | 一周中某一天在当地的全称,如“星期日”、“星期一”。 |
+| 'a' | 当地特有的星期简短名称,例如“星期日”、“星期一”。 |
+| 'C' | 四位数年份除以 100,格式为两位数,必要时加上前导零,即 00 - 99。 |
+| 'Y' | 年份,格式为至少四位数,必要时加上前导零,例如 0092 相当于公历 92 年。 |
+| 'y' | 年份的最后两位数,格式为两位数,必要时加上前导零,即 00 - 99。 |
+| 'j' | 一年中的第几天,格式为三位数,必要时加前导零,例如公历为 001 - 366。 |
+| 'm' | 月份,格式为两位数,必要时加前导零,即 01 - 13。 |
+| 'd' | 月日,格式为两位数,必要时加前导零,即 01 - 31。 |
+| 'e' | 月日,格式为两位数,即 1 - 31。 |
+
+4. 格式化字符串
+
+```SQL
+IoTDB:database1> SELECT format('The measurement status is :%s',status) FROM table2 limit 1
++-------------------------------+
+|                          _col0|
++-------------------------------+
+|The measurement status is :true|
++-------------------------------+
+```
+
+5. 格式化百分号
+
+```SQL
+IoTDB:database1> SELECT format('%s%%', 99.9) from table1 limit 1
++-----+
+|_col0|
++-----+
+|99.9%|
++-----+
+```
+
+#### 8.2.3 格式转换失败场景说明
+
+1. 类型不匹配错误
+
+* 时间戳类型冲突:若格式说明符中包含时间相关标记(如 `%Y-%m-%d`),但参数提供:
+  * 非 `DATE`/`TIMESTAMP` 类型值
+  * 或涉及日期细粒度单位(如 `%H` 小时、`%M` 分钟)时,参数仅支持 `TIMESTAMP` 类型,否则将抛出类型异常
+
+```SQL
+-- 示例1
+IoTDB:database1> SELECT format('%1$tA, %1$tB %1$te, %1$tY', humidity) from table2 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tA, %1$tB %1$te, %1$tY (IllegalFormatConversion: A != java.lang.Float)
+
+-- 示例2
+IoTDB:database1> SELECT format('%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL', humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL (IllegalFormatConversion: Y != java.lang.Float)
+```
+
+* 浮点数类型冲突:若使用 `%f` 等浮点格式说明符,但参数提供非数值类型(如字符串、布尔值),将触发类型转换错误
+
+```SQL
+IoTDB:database1> select format('%.5f',status) from table1 where humidity = 35.4
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f (IllegalFormatConversion: f != java.lang.Boolean)
+```
+
+2. 参数数量不匹配错误
+
+* 实际提供的参数数量必须等于或大于格式字符串中格式说明符的数量
+* 若参数数量少于格式说明符数量,将抛出参数缺失异常(如下例中的 `MissingFormatArgument`)
+
+```SQL
+IoTDB:database1> select format('%.5f %03d', humidity) from table1 where humidity = 35.4
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Invalid format string: %.5f %03d (MissingFormatArgument: Format specifier '%03d')
+```
+
+3. 无效调用错误
+
+* 当函数参数满足以下任一条件时,视为非法调用:
+  * 参数总数小于 2(必须包含格式字符串及至少一个参数)
+  * 格式字符串(`pattern`)类型非 `STRING/TEXT`
+
+```SQL
+-- 示例1
+IoTDB:database1> select format('%s') from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+
+--示例2
+IoTDB:database1> select format(123, humidity) from table1 limit 1
+Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Scalar function format must have at least two arguments, and first argument pattern must be TEXT or STRING type.
+```
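+
+针对上述参数数量不匹配的场景,补充一个修正写法的示意(沿用上文假设的 table1 表):为每个格式说明符提供对应参数后即可正常执行。
+
+```SQL
+-- 为 %03d 补足第二个参数,预期返回 '35.40000 008'
+IoTDB:database1> select format('%.5f %03d', humidity, 8) from table1 where humidity = 35.4
+```
+
+## 9. 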
字符串函数和操作符
+
+### 9.1 字符串操作符
+
+#### 9.1.1 || 操作符
+
+`||` 操作符用于字符串连接,功能与 `concat` 函数相同。
+
+#### 9.1.2 LIKE 运算符
+
+`LIKE` 运算符用于模式匹配,具体用法在[模式匹配:LIKE](#1-like-运算符) 中有详细文档。
+
+### 9.2 字符串函数
+
+| 函数名 | 描述 | 输入 | 输出 | 用法 |
+| ----------- | ---------- | ---------- | ---------- | ---------- |
+| length | 返回字符串的字符长度,而不是字符数组的长度。 | 支持一个参数,类型可以是字符串或文本。**string**:要计算长度的字符串。 | INT32 | length(string) |
+| upper | 将字符串中的字母转换为大写。 | 支持一个参数,类型可以是字符串或文本。**string**:要转换为大写的字符串。 | String | upper(string) |
+| lower | 将字符串中的字母转换为小写。 | 支持一个参数,类型可以是字符串或文本。**string**:要转换为小写的字符串。 | String | lower(string) |
+| trim | 从源字符串中删除指定的开头和/或结尾字符。 | 支持三个参数。**specification(可选)**:指定从哪边去掉字符,可以是 `BOTH`(两边都去掉,默认)、`LEADING`(只去掉开头的字符)、`TRAILING`(只去掉结尾的字符)。**trimcharacter(可选)**:要去掉的字符,如果没指定,默认去掉空格。**string**:要处理的字符串。 | String | trim([ [ specification ] [ trimcharacter ] FROM ] string) 示例:`trim('!' FROM '!foo!')` 返回 `'foo'` |
+| strpos | 返回子字符串在字符串中第一次出现的起始位置,位置从 1 开始计数;如果未找到,返回 0。注意:起始位置是基于字符而不是字节数组确定的。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要搜索的字符串。**subStr**:要找的子字符串。 | INT32 | strpos(sourceStr, subStr) |
+| starts_with | 测试子字符串是否是字符串的前缀。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串。**prefix**:前缀子字符串。 | Boolean | starts_with(sourceStr, prefix) |
+| ends_with | 测试字符串是否以指定的后缀结束。 | 支持两个参数,类型可以是字符串或文本。**sourceStr**:要检查的字符串。**suffix**:后缀子字符串。 | Boolean | ends_with(sourceStr, suffix) |
+| concat | 返回字符串 `string1`、`string2`、...、`stringN` 的连接结果。功能与连接操作符 `\|\|` 相同。 | 至少两个参数,所有参数类型必须是字符串或文本。 | String | concat(str1, str2, ...) 或 str1 \|\| str2 ... |
+| strcmp | 比较两个字符串的字母序。 | 支持两个参数,两个参数类型必须是字符串或文本。**string1**:第一个要比较的字符串。**string2**:第二个要比较的字符串。 | INT32:如果 `str1 < str2`,返回 `-1`;如果 `str1 = str2`,返回 `0`;如果 `str1 > str2`,返回 `1`;如果 `str1` 或 `str2` 为 `NULL`,返回 `NULL` | strcmp(str1, str2) |
+| replace | 从字符串中删除所有 `search` 的实例。 | 支持两个参数,可以是字符串或文本类型。**string**:原始字符串,要从中删除内容的字符串。**search**:要删除的子字符串。 | String | replace(string, string) |
+| replace | 将字符串中所有 `search` 的实例替换为 `replace`。 | 支持三个参数,可以是字符串或文本类型。**string**:原始字符串,要从中替换内容的字符串。**search**:要替换掉的子字符串。**replace**:用来替换的新字符串。 | String | replace(string, string, string) |
+| substring | 从指定位置提取字符到字符串末尾。注意:起始位置是基于字符而不是字节数组确定的,`start_index` 从 1 开始计数。 | 支持两个参数。**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。 | String:返回从 `start_index` 位置开始到字符串末尾的所有字符。**注意事项**:`start_index` 从 1 开始,即第一个字符的位置是 1;参数为 null 时,返回 `null`;`start_index` 大于字符串长度时,报错。 | substring(string from start_index) 或 substring(string, start_index) |
+| substring | 从一个字符串中提取从指定位置开始、指定长度的子字符串。注意:起始位置和长度是基于字符而不是字节数组确定的,`start_index` 从 1 开始计数,长度从 `start_index` 位置计算。 | 支持三个参数。**string**:要提取子字符串的源字符串,可以是字符串或文本类型。**start_index**:从哪个索引开始提取子字符串,索引从 1 开始计数。**length**:要提取的子字符串的长度。 | String:返回从 `start_index` 位置开始、长度为 `length` 的子字符串。**注意事项**:参数为 null 时,返回 `null`;如果 `start_index` 大于字符串的长度,报错;如果 `length` 小于 0,报错;极端情况下,`start_index + length` 超过 `int.MAX` 变为负数时,将导致异常结果。 | substring(string from start_index for length) 或 substring(string, start_index, length) |
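+
+下面给出几个字符串函数的示意调用(沿用上文假设的 table1 表,参数均为常量,预期结果写在注释中):
+
+```SQL
+-- 预期依次返回:'IOTDB'、5、'series'
+IoTDB:database1> select upper('iotdb'), strpos('timeseries', 'series'), substring('timeseries', 5) from table1 limit 1
+```
+
+## 10. 模式匹配函数
+
+### 10.1 LIKE 运算符
+
+#### 10.1.1 用途
+
+`LIKE` 运算符用于将值与模式进行比较。它通常用于 `WHERE` 子句中,用于匹配字符串中的特定模式。
+
+#### 10.1.2 语法
+
+```SQL
+... 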
column [NOT] LIKE 'pattern' ESCAPE 'character';
+```
+
+#### 10.1.3 匹配规则
+
+- 匹配是区分大小写的。
+- 模式支持两个匹配符号:
+  - `_`:匹配任意单个字符。
+  - `%`:匹配 0 个或多个字符。
+
+#### 10.1.4 注意事项
+
+- `LIKE` 模式匹配总是覆盖整个字符串。如果需要匹配字符串中的任意位置,模式必须以 `%` 开头和结尾。
+- 如果需要匹配 `%` 或 `_` 作为普通字符,必须使用转义字符。
+
+#### 10.1.5 示例
+
+示例 1:匹配以特定字符开头的字符串
+
+- **说明**:查找所有以字母 `E` 开头的名称,例如 `Europe`。
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'E%';
+```
+
+示例 2:排除特定模式
+
+- **说明**:查找所有不以字母 `E` 开头的名称。
+
+```SQL
+SELECT * FROM table1 WHERE continent NOT LIKE 'E%';
+```
+
+示例 3:匹配特定长度的字符串
+
+- **说明**:查找所有以 `A` 开头、以 `a` 结尾且中间有两个字符的名称,例如 `Asia`。
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'A__a';
+```
+
+示例 4:转义特殊字符
+
+- **说明**:查找所有以 `South_` 开头的名称。这里使用了转义字符 `\` 来转义 `_` 等特殊字符,例如 `South_America`。
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\_%' ESCAPE '\';
+```
+
+示例 5:匹配转义字符本身
+
+- **说明**:如果需要匹配转义字符本身,可以使用双转义字符 `\\`。
+
+```SQL
+SELECT * FROM table1 WHERE continent LIKE 'South\\%' ESCAPE '\';
+```
+
+### 10.2 regexp_like 函数
+
+#### 10.2.1 用途
+
+`regexp_like` 函数用于评估正则表达式模式,并确定该模式是否包含在字符串中。
+
+#### 10.2.2 语法
+
+```SQL
+regexp_like(string, pattern);
+```
+
+#### 10.2.3 注意事项
+
+- `regexp_like` 的模式只需包含在字符串中,而不需要匹配整个字符串。
+- 如果需要匹配整个字符串,可以使用正则表达式的锚点 `^` 和 `$`。
+- `^` 表示“字符串的开头”,`$` 表示“字符串的结尾”。
+- 正则表达式采用 Java 定义的正则语法,但存在以下需要注意的例外情况:
+  - **多行模式**
+    1. 启用方式:`(?m)`。
+    2. 只识别 `\n` 作为行终止符。
+    3. 不支持 `(?d)` 标志,且禁止使用。
+  - **不区分大小写匹配**
+    1. 启用方式:`(?i)`。
+    2. 基于 Unicode 规则,不支持上下文相关和本地化匹配。
+    3. 不支持 `(?u)` 标志,且禁止使用。
+  - **字符类**
+    1. 在字符类(如 `[A-Z123]`)中,`\Q` 和 `\E` 不被支持,被视为普通字面量。
+  - **Unicode 字符类(`\p{prop}`)**
+    1. **名称下划线**:名称中的所有下划线必须删除(如 `OldItalic` 而非 `Old_Italic`)。
+    2. **文字(Scripts)**:直接指定,无需 `Is`、`script=` 或 `sc=` 前缀(如 `\p{Hiragana}`)。
+    3. **区块(Blocks)**:必须使用 `In` 前缀,不支持 `block=` 或 `blk=` 前缀(如 `\p{InMongolian}`)。
+    4. **类别(Categories)**:直接指定,无需 `Is`、`general_category=` 或 `gc=` 前缀(如 `\p{L}`)。
+    5. **二元属性(Binary Properties)**:直接指定,无需 `Is`(如 `\p{NoncharacterCodePoint}`)。
+
+#### 10.2.4 示例
+
+示例 1:匹配包含特定模式的字符串
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '\\d+b'); -- true
+```
+
+- **说明**:检查字符串 `'1a 2b 14m'` 是否包含模式 `\d+b`。
+  - `\d+` 表示“一个或多个数字”。
+  - `b` 表示字母 `b`。
+  - 在 `'1a 2b 14m'` 中,`2b` 符合这个模式,所以返回 `true`。
+
+示例 2:匹配整个字符串
+
+```SQL
+SELECT regexp_like('1a 2b 14m', '^\\d+b$'); -- false
+```
+
+- **说明**:检查字符串 `'1a 2b 14m'` 是否完全匹配模式 `^\\d+b$`。
+  - `\d+` 表示“一个或多个数字”。
+  - `b` 表示字母 `b`。
+  - `'1a 2b 14m'` 作为整体并不符合该模式:开头的数字 `1` 后面跟的是 `a` 而不是 `b`,且整个字符串也不以 `b` 结尾,所以返回 `false`。
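+
+与 `LIKE` 类似,`regexp_like` 也常用于 `WHERE` 子句中做行过滤。下面是一个示意(沿用上文 LIKE 示例中假设的 table1 表及其 continent 列):
+
+```SQL
+-- 过滤出 continent 以 'A' 开头、以 'a' 结尾的行(等价于 LIKE 'A%a')
+SELECT * FROM table1 WHERE regexp_like(continent, '^A.*a$');
+```
+
+## 11. 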
时序分窗函数
+
+原始示例数据如下:
+
+```SQL
+IoTDB> SELECT * FROM bid;
++-----------------------------+--------+-----+
+|                         time|stock_id|price|
++-----------------------------+--------+-----+
+|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
++-----------------------------+--------+-----+
+
+-- 创建语句
+CREATE TABLE bid(time TIMESTAMP TIME, stock_id STRING TAG, price FLOAT FIELD);
+-- 插入数据
+INSERT INTO bid(time, stock_id, price) VALUES('2021-01-01T09:05:00','AAPL',100.0),('2021-01-01T09:06:00','TESL',200.0),('2021-01-01T09:07:00','AAPL',103.0),('2021-01-01T09:07:00','TESL',202.0),('2021-01-01T09:09:00','AAPL',102.0),('2021-01-01T09:15:00','TESL',195.0);
+```
+
+### 11.1 HOP
+
+#### 11.1.1 功能描述
+
+HOP 函数用于按时间分段分窗分析,识别每一行数据所属的时间窗口。该函数通过指定固定的窗口大小(SIZE)和窗口滑动步长(SLIDE),将数据按时间戳分配到所有与其时间戳重叠的窗口中。若窗口之间存在重叠(步长小于窗口大小),数据会自动复制到多个窗口。
+
+#### 11.1.2 函数定义
+
+```SQL
+HOP(data, timecol, size, slide[, origin])
+```
+
+#### 11.1.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| --------- | ---------- | --------------------------------- | -------------------- |
+| DATA | 表参数 | ROW SEMANTIC、PASS THROUGH | 输入表 |
+| TIMECOL | 标量参数 | 字符串类型,默认值:time | 时间列 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小 |
+| SLIDE | 标量参数 | 长整数类型 | 窗口滑动步长 |
+| ORIGIN | 标量参数 | 时间戳类型,默认值:Unix 纪元时间 | 第一个窗口起始时间 |
+
+#### 11.1.4 返回结果
+
+HOP 函数的返回结果列包含:
+
+* window\_start: 窗口开始时间(闭区间)
+* window\_end: 窗口结束时间(开区间)
+* 映射列:DATA 参数的所有输入列
+
+#### 11.1.5 使用示例
+
+```SQL
+IoTDB> SELECT * FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM HOP(DATA => bid,TIMECOL => 'time',SLIDE => 5m,SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:25:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:15:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
+
+### 11.2 SESSION
+
+#### 11.2.1 功能描述
+
+SESSION 函数用于按会话间隔对数据进行分窗。系统逐行检查与前一行的时间间隔,小于阈值(GAP)则归入当前窗口,超过则归入下一个窗口。
+
+#### 11.2.2 函数定义
+
+```SQL
+SESSION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], timecol, gap)
+```
+
+#### 11.2.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| --------- | ---------- | -------------------------- | ---------------------------------------- |
+| DATA | 表参数 | SET SEMANTIC、PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 |
+| TIMECOL | 标量参数 | 字符串类型,默认值:'time' | 时间列名 |
+| GAP | 标量参数 | 长整数类型 | 会话间隔阈值 |
+
+#### 11.2.4 返回结果
+
+SESSION 函数的返回结果列包含:
+
+* window\_start: 会话窗口内的第一条数据的时间
+* window\_end: 会话窗口内的最后一条数据的时间
+* 映射列:DATA 参数的所有输入列
+
+#### 11.2.5 使用示例
+
+```SQL
+IoTDB> SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY SESSION
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time,TIMECOL => 'time',GAP => 2m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
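+
+由于 TIMECOL 的默认值即为 'time',上面的查询也可以省略该参数,写法示意如下(假设省略后行为与显式指定一致,以实际版本为准):
+
+```SQL
+SELECT * FROM SESSION(DATA => bid PARTITION BY stock_id ORDER BY time, GAP => 2m);
+```
+
+### 11.3 VARIATION
+
+#### 11.3.1 功能描述
+
+VARIATION 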
函数用于按数据差值分窗:将第一条数据作为首个窗口的基准值,每个数据点与基准值做差值运算,如果差值小于给定的阈值(DELTA)则加入当前窗口;如果超过阈值,则开启下一个窗口,并将该值作为新窗口的基准值。
+
+#### 11.3.2 函数定义
+
+```sql
+VARIATION(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], col, delta)
+```
+
+#### 11.3.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| -------- | ---------- | -------------------------- | ---------------------------------------- |
+| DATA | 表参数 | SET SEMANTIC、PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 |
+| COL | 标量参数 | 字符串类型 | 标识对哪一列计算差值 |
+| DELTA | 标量参数 | 浮点数类型 | 差值阈值 |
+
+#### 11.3.4 返回结果
+
+VARIATION 函数的返回结果列包含:
+
+* window\_index: 窗口编号
+* 映射列:DATA 参数的所有输入列
+
+#### 11.3.5 使用示例
+
+```sql
+IoTDB> SELECT * FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price',DELTA => 2.0);
++------------+-----------------------------+--------+-----+
+|window_index|                         time|stock_id|price|
++------------+-----------------------------+--------+-----+
+|           0|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|           0|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|           1|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|           0|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|           1|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|           1|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY VARIATION
+IoTDB> SELECT first(time) as window_start, last(time) as window_end, stock_id, avg(price) as avg FROM VARIATION(DATA => bid PARTITION BY stock_id ORDER BY time,COL => 'price', DELTA => 2.0) GROUP BY window_index, stock_id;
++-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|stock_id|  avg|
++-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|201.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:07:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.5|
++-----------------------------+-----------------------------+--------+-----+
+```
+
+### 11.4 CAPACITY
+
+#### 11.4.1 功能描述
+
+CAPACITY 函数用于按数据点数(行数)分窗,每个窗口最多有 SIZE 行数据。
+
+#### 11.4.2 函数定义
+
+```sql
+CAPACITY(data [PARTITION BY(pkeys, ...)] [ORDER BY(okeys, ...)], size)
+```
+
+#### 11.4.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| -------- | ---------- | -------------------------- | ---------------------------------------- |
+| DATA | 表参数 | SET SEMANTIC、PASS THROUGH | 输入表,通过 pkeys、okeys 指定分区和排序 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小 |
+
+#### 11.4.4 返回结果
+
+CAPACITY 函数的返回结果列包含:
+
+* window\_index: 窗口编号
+* 映射列:DATA 参数的所有输入列
+
+#### 11.4.5 使用示例
+
+```sql
+IoTDB> SELECT * FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2);
++------------+-----------------------------+--------+-----+
+|window_index|                         time|stock_id|price|
++------------+-----------------------------+--------+-----+
+|           0|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|           0|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|           1|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|           0|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|           0|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|           1|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY COUNT
+IoTDB> SELECT first(time) as start_time, last(time) as end_time, stock_id, avg(price) as avg FROM CAPACITY(DATA => bid PARTITION BY stock_id ORDER BY time, SIZE => 2) GROUP BY window_index, stock_id;
++-----------------------------+-----------------------------+--------+-----+
+|                   start_time|                     end_time|stock_id|  avg|
++-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:06:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|201.0|
+|2021-01-01T09:15:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:05:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|101.5|
+|2021-01-01T09:09:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+--------+-----+
+```
+
+### 11.5 TUMBLE
+
+#### 11.5.1 功能描述
+
+TUMBLE 函数用于通过时间属性字段为每行数据分配一个窗口,滚动窗口的大小固定且互不重叠。
+
+#### 11.5.2 函数定义
+
+```sql
+TUMBLE(data, timecol, size[, origin])
+```
+
+#### 11.5.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| --------- | ---------- | --------------------------------- | -------------------- |
+| DATA | 表参数 | ROW SEMANTIC、PASS THROUGH | 输入表 |
+| TIMECOL | 标量参数 | 字符串类型,默认值:time | 时间列 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小,需为正数 |
+| ORIGIN | 标量参数 | 时间戳类型,默认值:Unix 纪元时间 | 第一个窗口起始时间 |
+
+#### 11.5.4 返回结果
+
+TUMBLE 函数的返回结果列包含:
+
+* window\_start: 窗口开始时间(闭区间)
+* window\_end: 窗口结束时间(开区间)
+* 映射列:DATA 参数的所有输入列
+
+#### 11.5.5 使用示例
+
+```SQL
+IoTDB> SELECT * FROM TUMBLE( DATA => bid, TIMECOL => 'time', SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM TUMBLE(DATA => bid, TIMECOL => 'time', SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
+
+### 11.6 CUMULATE
+
+#### 11.6.1 功能描述
+
+CUMULATE 函数用于从初始的窗口开始,创建窗口起点相同、但窗口终点按步长递增的一组窗口,直到达到最大的窗口大小。每个窗口包含其区间内的元素。例如:1 小时步长、24 小时大小的累计窗口,每天可以获得如下这些窗口:`[00:00, 01:00)`,`[00:00, 02:00)`,`[00:00, 03:00)`, …, `[00:00, 24:00)`
+
+#### 11.6.2 函数定义
+
+```sql
+CUMULATE(data, timecol, size, step[, origin])
+```
+
+#### 11.6.3 参数说明
+
+| 参数名 | 参数类型 | 参数属性 | 描述 |
+| --------- | ---------- | --------------------------------- | -------------------------------------------- |
+| DATA | 表参数 | ROW 
SEMANTIC、PASS THROUGH | 输入表 |
+| TIMECOL | 标量参数 | 字符串类型,默认值:time | 时间列 |
+| SIZE | 标量参数 | 长整数类型 | 窗口大小,SIZE 必须是 STEP 的整数倍,需为正数 |
+| STEP | 标量参数 | 长整数类型 | 窗口步长,需为正数 |
+| ORIGIN | 标量参数 | 时间戳类型,默认值:Unix 纪元时间 | 第一个窗口起始时间 |
+
+> 注意:SIZE 如果不是 STEP 的整数倍,则会报错 `Cumulative table function requires size must be an integral multiple of step`
+
+#### 11.6.4 返回结果
+
+CUMULATE 函数的返回结果列包含:
+
+* window\_start: 窗口开始时间(闭区间)
+* window\_end: 窗口结束时间(开区间)
+* 映射列:DATA 参数的所有输入列
+
+#### 11.6.5 使用示例
+
+```sql
+IoTDB> SELECT * FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m,SIZE => 10m);
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|                 window_start|                   window_end|                         time|stock_id|price|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:06:00.000+08:00|    TESL|200.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    TESL|202.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|2021-01-01T09:15:00.000+08:00|    TESL|195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:05:00.000+08:00|    AAPL|100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:07:00.000+08:00|    AAPL|103.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|2021-01-01T09:09:00.000+08:00|    AAPL|102.0|
++-----------------------------+-----------------------------+-----------------------------+--------+-----+
+
+-- 结合 GROUP BY 语句,等效于树模型的 GROUP BY TIME
+IoTDB> SELECT window_start, window_end, stock_id, avg(price) as avg FROM CUMULATE(DATA => bid,TIMECOL => 'time',STEP => 2m, SIZE => 10m) GROUP BY window_start, window_end, stock_id;
++-----------------------------+-----------------------------+--------+------------------+
+|                 window_start|                   window_end|stock_id|               avg|
++-----------------------------+-----------------------------+--------+------------------+
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    TESL|             201.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:16:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:18:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:10:00.000+08:00|2021-01-01T09:20:00.000+08:00|    TESL|             195.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:06:00.000+08:00|    AAPL|             100.0|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:08:00.000+08:00|    AAPL|             101.5|
+|2021-01-01T09:00:00.000+08:00|2021-01-01T09:10:00.000+08:00|    AAPL|101.66666666666667|
++-----------------------------+-----------------------------+--------+------------------+
+```
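+
+补充一个会触发上述整数倍限制的反例示意(仍以 bid 表为例):
+
+```sql
+-- SIZE(10m)不是 STEP(3m)的整数倍,预期报错:
+-- Cumulative table function requires size must be an integral multiple of step
+IoTDB> SELECT * FROM CUMULATE(DATA => bid, TIMECOL => 'time', STEP => 3m, SIZE => 10m);
+```
diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/Featured-Functions_timecho.md 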
b/src/zh/UserGuide/latest-Table/SQL-Manual/Featured-Functions_timecho.md
index d9dd63cac..308c527a4 100644
--- a/src/zh/UserGuide/latest-Table/SQL-Manual/Featured-Functions_timecho.md
+++ b/src/zh/UserGuide/latest-Table/SQL-Manual/Featured-Functions_timecho.md
@@ -822,7 +822,7 @@ IoTDB> SELECT *,count(flow) OVER(PARTITION BY device ORDER BY flow RANGE BETWEEN
 ## 5. Object 类型读取函数
 
 描述:用于读取 OBJECT 对象的二进制内容。返回 BLOB 类型(对象的二进制内容)。
 
-> V2.0.8-beta 版本起支持
+> V2.0.8 版本起支持
 
 语法:
diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/Fill-Clause.md b/src/zh/UserGuide/latest-Table/SQL-Manual/Fill-Clause.md
index 7b05473a5..1d89c3fc7 100644
--- a/src/zh/UserGuide/latest-Table/SQL-Manual/Fill-Clause.md
+++ b/src/zh/UserGuide/latest-Table/SQL-Manual/Fill-Clause.md
@@ -64,7 +64,7 @@ intervalField
 
 IoTDB 支持以下三种空值填充方式:
 
-1. **`PREVIOUS`填充**:使用该列前一个非空值进行填充,V2.0.8-beta 版本起仅该方式支持支持 OBJECT 类型。
+1. **`PREVIOUS`填充**:使用该列前一个非空值进行填充,V2.0.8 版本起仅该方式支持 OBJECT 类型。
 2. **`LINEAR`填充**:使用该列前一个非空值和下一个非空值的线性插值进行填充。
 3. **`Constant`填充**:使用指定的常量值进行填充。
diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements.md b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements.md
index 00efaff14..35fee992c 100644
--- a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements.md
+++ b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements.md
@@ -1,3 +1,6 @@
+---
+redirectTo: SQL-Maintenance-Statements_apache.html
+---
-
-# 运维语句
-
-## 1. 状态查看
-
-### 1.1 查看当前的树/表模型
-
-**语法:**
-
-```SQL
-showCurrentSqlDialectStatement
-    : SHOW CURRENT_SQL_DIALECT
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW CURRENT_SQL_DIALECT
-+-----------------+
-|CurrentSqlDialect|
-+-----------------+
-| TABLE|
-+-----------------+
-```
-
-### 1.2 查看登录的用户名
-
-**语法:**
-
-```SQL
-showCurrentUserStatement
-    : SHOW CURRENT_USER
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW CURRENT_USER
-+-----------+
-|CurrentUser|
-+-----------+
-| root|
-+-----------+
-```
-
-### 1.3 查看连接的数据库名
-
-**语法:**
-
-```SQL
-showCurrentDatabaseStatement
-    : SHOW CURRENT_DATABASE
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW CURRENT_DATABASE;
-+---------------+
-|CurrentDatabase|
-+---------------+
-| null|
-+---------------+
-
-IoTDB> USE test;
-
-IoTDB> SHOW CURRENT_DATABASE;
-+---------------+
-|CurrentDatabase|
-+---------------+
-| test|
-+---------------+
-```
-
-### 1.4 查看集群版本
-
-**语法:**
-
-```SQL
-showVersionStatement
-    : SHOW VERSION
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW VERSION
-+-------+---------+
-|Version|BuildInfo|
-+-------+---------+
-|2.0.1.2| 1ca4008|
-+-------+---------+
-```
-
-### 1.5 查看集群关键参数
-
-**语法:**
-
-```SQL
-showVariablesStatement
-    : SHOW VARIABLES
-    ;
-```
-
-**示例:**
-
-```SQL
-IoTDB> SHOW VARIABLES
-+----------------------------------+-----------------------------------------------------------------+
-| Variable| Value|
-+----------------------------------+-----------------------------------------------------------------+
-| ClusterName| defaultCluster|
-| DataReplicationFactor| 1|
-| SchemaReplicationFactor| 1|
-| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus|
-|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus|
-| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus|
-| TimePartitionOrigin| 0|
-| TimePartitionInterval| 604800000|
-| ReadConsistencyLevel| strong|
-| SchemaRegionPerDataNode| 1|
-| DataRegionPerDataNode| 0|
-| SeriesSlotNum| 1000|
-| 
SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| -| DiskSpaceWarningThreshold| 0.05| -| TimestampPrecision| ms| -+----------------------------------+-----------------------------------------------------------------+ -``` - -### 1.6 查看集群ID - -**语法:** - -```SQL -showClusterIdStatement - : SHOW (CLUSTERID | CLUSTER_ID) - ; -``` - -**示例:** - -```SQL -IoTDB> SHOW CLUSTER_ID -+------------------------------------+ -| ClusterId| -+------------------------------------+ -|40163007-9ec1-4455-aa36-8055d740fcda| -``` - -### 1.7 查看服务器的时间 - -查看客户端直连的 DataNode 进程所在的服务器的时间 - -**语法:** - -```SQL -showCurrentTimestampStatement - : SHOW CURRENT_TIMESTAMP - ; -``` - -**示例:** - -```SQL -IoTDB> SHOW CURRENT_TIMESTAMP -+-----------------------------+ -| CurrentTimestamp| -+-----------------------------+ -|2025-02-17T11:11:52.987+08:00| -+-----------------------------+ -``` - -### 1.8 查看分区信息 - -**含义**:返回当前集群的分区信息。 - -#### 语法: - -```SQL -showRegionsStatement - : SHOW REGIONS - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW REGIONS -``` - -执行结果如下: - -```SQL -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | -| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| -| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| -+--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -``` - -### 1.9 查看可用节点 - -**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 - -> V2.0.8-beta 起支持该功能 - -#### 语法: - -```SQL -showAvailableUrlsStatement - : SHOW AVAILABLE URLS - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW AVAILABLE URLS -``` - -执行结果如下: - -```SQL -+----------+-------+ -|RpcAddress|RpcPort| -+----------+-------+ -| 0.0.0.0| 6667| -+----------+-------+ -``` - - -## 2. 状态设置 - -### 2.1 设置连接的树/表模型 - -**语法:** - -```SQL -SET SQL_DIALECT EQ (TABLE | TREE) -``` - -**示例:** - -```SQL -IoTDB> SET SQL_DIALECT=TABLE -IoTDB> SHOW CURRENT_SQL_DIALECT -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TABLE| -+-----------------+ -``` - -### 2.2 更新配置项 - -**语法:** - -```SQL -setConfigurationStatement - : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? - ; - -propertyAssignments - : property (',' property)* - ; - -property - : identifier EQ propertyValue - ; - -propertyValue - : DEFAULT - | expression - ; -``` - -**示例:** - -```SQL -IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; -``` - -### 2.3 读取手动修改的配置文件 - -**语法:** - -```SQL -loadConfigurationStatement - : LOAD CONFIGURATION localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> LOAD CONFIGURATION ON LOCAL; -``` - -### 2.4 设置系统的状态 - -**语法:** - -```SQL -setSystemStatusStatement - : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? 
- ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> SET SYSTEM TO READONLY ON CLUSTER; -``` - -## 3. 数据管理 - -### 3.1 将内存表中的数据刷到磁盘 - -**语法:** - -```SQL -flushStatement - : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? - ; - -booleanValue - : TRUE | FALSE - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> FLUSH test_db TRUE ON LOCAL; -``` - -### 3.2 清除 DataNode 上的缓存 - -**语法:** - -```SQL -clearCacheStatement - : CLEAR clearCacheOptions? CACHE localOrClusterMode? - ; - -clearCacheOptions - : ATTRIBUTE - | QUERY - | ALL - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> CLEAR ALL CACHE ON LOCAL; -``` - -## 4. 数据修复 - -### 4.1 启动后台扫描并修复 tsfile 任务 - -**语法:** - -```SQL -startRepairDataStatement - : START REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> START REPAIR DATA ON CLUSTER; -``` - -### 4.2 暂停后台修复 tsfile 任务 - -**语法:** - -```SQL -stopRepairDataStatement - : STOP REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**示例:** - -```SQL -IoTDB> STOP REPAIR DATA ON CLUSTER; -``` - -## 5. 查询相关 - -### 5.1 查看正在执行的查询 - -**语法:** - -```SQL -showQueriesStatement - : SHOW (QUERIES | QUERY PROCESSLIST) - (WHERE where=booleanExpression)? - (ORDER BY sortItem (',' sortItem)*)? - limitOffsetClause - ; -``` - -**示例:** - -```SQL -IoTDB> SHOW QUERIES WHERE elapsed_time > 30 -+-----------------------+-----------------------------+-----------+------------+------------+----+ -| query_id| start_time|datanode_id|elapsed_time| statement|user| -+-----------------------+-----------------------------+-----------+------------+------------+----+ -|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00| 1| 32.283|show queries|root| -+-----------------------+-----------------------------+-----------+------------+------------+----+ -``` - -### 5.2 主动终止查询 - -**语法:** - -```SQL -killQueryStatement - : KILL (QUERY queryId=string | ALL QUERIES) - ; -``` - -**示例:** - -```SQL -IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query -IoTDB> KILL ALL QUERIES; -- 终止所有query -``` - -### 5.3 查询性能分析 - -#### 5.3.1 查看执行计划 - -**语法:** - -```SQL -EXPLAIN -``` - -更多详细语法说明请参考:[EXPLAIN 语句](../User-Manual/Query-Performance-Analysis.md#_1-explain-语句) - -**示例:** - -```SQL -IoTDB> explain select * from t1 -+-----------------------------------------------------------------------------------------------+ -| distribution plan| -+-----------------------------------------------------------------------------------------------+ -| ┌─────────────────────────────────────────────┐ | -| │OutputNode-4 │ | -| │OutputColumns-[time, device_id, type, speed] │ | -| │OutputSymbols: [time, device_id, type, speed]│ | -| └─────────────────────────────────────────────┘ | -| │ | -| │ | -| ┌─────────────────────────────────────────────┐ | -| │Collect-21 │ | -| │OutputSymbols: [time, device_id, type, speed]│ | -| └─────────────────────────────────────────────┘ | -| ┌───────────────────────┴───────────────────────┐ | -| │ │ | -|┌─────────────────────────────────────────────┐ ┌───────────┐ | -|│TableScan-19 │ │Exchange-28│ | -|│QualifiedTableName: test.t1 │ └───────────┘ | -|│OutputSymbols: [time, device_id, type, speed]│ │ | -|│DeviceNumber: 1 │ │ | -|│ScanOrder: ASC │ ┌─────────────────────────────────────────────┐| -|│PushDownOffset: 0 │ │TableScan-20 │| -|│PushDownLimit: 0 │ │QualifiedTableName: test.t1 │| 
-|│PushDownLimitToEachDevice: false │ │OutputSymbols: [time, device_id, type, speed]│| -|│RegionId: 2 │ │DeviceNumber: 1 │| -|└─────────────────────────────────────────────┘ │ScanOrder: ASC │| -| │PushDownOffset: 0 │| -| │PushDownLimit: 0 │| -| │PushDownLimitToEachDevice: false │| -| │RegionId: 1 │| -| └─────────────────────────────────────────────┘| -+-----------------------------------------------------------------------------------------------+ -``` - -#### 5.3.2 查询性能分析 - -**语法:** - -```SQL -EXPLAIN ANALYZE [VERBOSE] -``` - -更多详细语法说明请参考:[EXPLAIN ANALYZE 语句](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-语句) - -**示例:** - -```SQL -IoTDB> explain analyze verbose select * from t1 -+-----------------------------------------------------------------------------------------------+ -| Explain Analyze| -+-----------------------------------------------------------------------------------------------+ -|Analyze Cost: 38.860 ms | -|Fetch Partition Cost: 9.888 ms | -|Fetch Schema Cost: 54.046 ms | -|Logical Plan Cost: 10.102 ms | -|Logical Optimization Cost: 17.396 ms | -|Distribution Plan Cost: 2.508 ms | -|Dispatch Cost: 22.126 ms | -|Fragment Instances Count: 2 | -| | -|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| -| Total Wall Time: 18 ms | -| Cost of initDataQuerySource: 6.153 ms | -| Seq File(unclosed): 1, Seq File(closed): 0 | -| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | -| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | -| Query Statistics: | -| loadBloomFilterFromCacheCount: 0 | -| loadBloomFilterFromDiskCount: 0 | -| loadBloomFilterActualIOSize: 0 | -| loadBloomFilterTime: 0.000 | -| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | -| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | -| loadTimeSeriesMetadataFromCacheCount: 0 | -| loadTimeSeriesMetadataFromDiskCount: 0 | -| loadTimeSeriesMetadataActualIOSize: 0 | -| constructAlignedChunkReadersMemCount: 1 | -| constructAlignedChunkReadersMemTime: 0.294 | -| loadChunkFromCacheCount: 0 | -| loadChunkFromDiskCount: 0 | -| loadChunkActualIOSize: 0 | -| pageReadersDecodeAlignedMemCount: 1 | -| pageReadersDecodeAlignedMemTime: 0.047 | -| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | -| CPU Time: 5.523 ms | -| output: 2 rows | -| HasNext() Called Count: 6 | -| Next() Called Count: 5 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 31]: CollectNode(CollectOperator) | -| CPU Time: 5.512 ms | -| output: 2 rows | -| HasNext() Called Count: 6 | -| Next() Called Count: 5 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 29]: TableScanNode(TableScanOperator) | -| CPU Time: 5.439 ms | -| output: 1 rows | -| HasNext() Called Count: 3 -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| DeviceNumber: 1 | -| CurrentDeviceIndex: 0 | -| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | -| CPU Time: 0.053 ms | -| output: 1 rows | -| HasNext() Called Count: 2 | -| Next() Called Count: 1 | -| Estimated Memory Size: : 131072 | -| | -|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| -| Total Wall Time: 13 ms | -| Cost of initDataQuerySource: 5.725 ms | -| Seq File(unclosed): 1, Seq File(closed): 0 | -| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | -| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | -| Query Statistics: | -| loadBloomFilterFromCacheCount: 0 | -| loadBloomFilterFromDiskCount: 0 | -| loadBloomFilterActualIOSize: 0 | -| loadBloomFilterTime: 0.000 | -| 
loadTimeSeriesMetadataAlignedMemSeqCount: 1 | -| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | -| loadTimeSeriesMetadataFromCacheCount: 0 | -| loadTimeSeriesMetadataFromDiskCount: 0 | -| loadTimeSeriesMetadataActualIOSize: 0 | -| constructAlignedChunkReadersMemCount: 1 | -| constructAlignedChunkReadersMemTime: 0.001 | -| loadChunkFromCacheCount: 0 | -| loadChunkFromDiskCount: 0 | -| loadChunkActualIOSize: 0 | -| pageReadersDecodeAlignedMemCount: 1 | -| pageReadersDecodeAlignedMemTime: 0.007 | -| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | -| CPU Time: 0.270 ms | -| output: 1 rows | -| HasNext() Called Count: 3 | -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| [PlanNodeId 30]: TableScanNode(TableScanOperator) | -| CPU Time: 0.250 ms | -| output: 1 rows | -| HasNext() Called Count: 3 | -| Next() Called Count: 2 | -| Estimated Memory Size: : 327680 | -| DeviceNumber: 1 | -| CurrentDeviceIndex: 0 | -+-----------------------------------------------------------------------------------------------+ -``` diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_apache.md b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_apache.md new file mode 100644 index 000000000..00efaff14 --- /dev/null +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_apache.md @@ -0,0 +1,651 @@ + + +# 运维语句 + +## 1. 状态查看 + +### 1.1 查看当前的树/表模型 + +**语法:** + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 1.2 查看登录的用户名 + +**语法:** + +```SQL +showCurrentUserStatement + : SHOW CURRENT_USER + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_USER ++-----------+ +|CurrentUser| ++-----------+ +| root| ++-----------+ +``` + +### 1.3 查看连接的数据库名 + +**语法:** + +```SQL +showCurrentDatabaseStatement + : SHOW CURRENT_DATABASE + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| null| ++---------------+ + +IoTDB> USE test; + +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| test| ++---------------+ +``` + +### 1.4 查看集群版本 + +**语法:** + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW VERSION ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.5 查看集群关键参数 + +**语法:** + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW VARIABLES ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| 
TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.6 查看集群ID + +**语法:** + +```SQL +showClusterIdStatement + : SHOW (CLUSTERID | CLUSTER_ID) + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CLUSTER_ID ++------------------------------------+ +| ClusterId| ++------------------------------------+ +|40163007-9ec1-4455-aa36-8055d740fcda| +``` + +### 1.7 查看服务器的时间 + +查看客户端直连的 DataNode 进程所在的服务器的时间 + +**语法:** + +```SQL +showCurrentTimestampStatement + : SHOW CURRENT_TIMESTAMP + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_TIMESTAMP ++-----------------------------+ +| CurrentTimestamp| ++-----------------------------+ +|2025-02-17T11:11:52.987+08:00| ++-----------------------------+ +``` + +### 1.8 查看分区信息 + +**含义**:返回当前集群的分区信息。 + +#### 语法: + +```SQL +showRegionsStatement + : SHOW REGIONS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW REGIONS +``` + +执行结果如下: + +```SQL ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +| 6|SchemaRegion|Running|tcollector| 670| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.194| | +| 7| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB| +| 8| DataRegion|Running|tcollector| 335| 335| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB| ++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +``` + +### 1.9 查看可用节点 + +**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 + +> V2.0.8-beta 起支持该功能 + +#### 语法: + +```SQL +showAvailableUrlsStatement + : SHOW AVAILABLE URLS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW AVAILABLE URLS +``` + +执行结果如下: + +```SQL ++----------+-------+ +|RpcAddress|RpcPort| ++----------+-------+ +| 0.0.0.0| 6667| ++----------+-------+ +``` + + +## 2. 状态设置 + +### 2.1 设置连接的树/表模型 + +**语法:** + +```SQL +SET SQL_DIALECT EQ (TABLE | TREE) +``` + +**示例:** + +```SQL +IoTDB> SET SQL_DIALECT=TABLE +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 2.2 更新配置项 + +**语法:** + +```SQL +setConfigurationStatement + : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? + ; + +propertyAssignments + : property (',' property)* + ; + +property + : identifier EQ propertyValue + ; + +propertyValue + : DEFAULT + | expression + ; +``` + +**示例:** + +```SQL +IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1; +``` + +### 2.3 读取手动修改的配置文件 + +**语法:** + +```SQL +loadConfigurationStatement + : LOAD CONFIGURATION localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> LOAD CONFIGURATION ON LOCAL; +``` + +### 2.4 设置系统的状态 + +**语法:** + +```SQL +setSystemStatusStatement + : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> SET SYSTEM TO READONLY ON CLUSTER; +``` + +## 3. 
数据管理 + +### 3.1 将内存表中的数据刷到磁盘 + +**语法:** + +```SQL +flushStatement + : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? + ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> FLUSH test_db TRUE ON LOCAL; +``` + +### 3.2 清除 DataNode 上的缓存 + +**语法:** + +```SQL +clearCacheStatement + : CLEAR clearCacheOptions? CACHE localOrClusterMode? + ; + +clearCacheOptions + : ATTRIBUTE + | QUERY + | ALL + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> CLEAR ALL CACHE ON LOCAL; +``` + +## 4. 数据修复 + +### 4.1 启动后台扫描并修复 tsfile 任务 + +**语法:** + +```SQL +startRepairDataStatement + : START REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> START REPAIR DATA ON CLUSTER; +``` + +### 4.2 暂停后台修复 tsfile 任务 + +**语法:** + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. 查询相关 + +### 5.1 查看正在执行的查询 + +**语法:** + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)? + limitOffsetClause + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW QUERIES WHERE elapsed_time > 30 ++-----------------------+-----------------------------+-----------+------------+------------+----+ +| query_id| start_time|datanode_id|elapsed_time| statement|user| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00| 1| 32.283|show queries|root| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +``` + +### 5.2 主动终止查询 + +**语法:** + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**示例:** + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query +IoTDB> KILL ALL QUERIES; -- 终止所有query +``` + +### 5.3 查询性能分析 + +#### 5.3.1 查看执行计划 + +**语法:** + +```SQL +EXPLAIN +``` + +更多详细语法说明请参考:[EXPLAIN 语句](../User-Manual/Query-Performance-Analysis.md#_1-explain-语句) + +**示例:** + +```SQL +IoTDB> explain select * from t1 ++-----------------------------------------------------------------------------------------------+ +| distribution plan| ++-----------------------------------------------------------------------------------------------+ +| ┌─────────────────────────────────────────────┐ | +| │OutputNode-4 │ | +| │OutputColumns-[time, device_id, type, speed] │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| │ | +| │ | +| ┌─────────────────────────────────────────────┐ | +| │Collect-21 │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| ┌───────────────────────┴───────────────────────┐ | +| │ │ | +|┌─────────────────────────────────────────────┐ ┌───────────┐ | +|│TableScan-19 │ │Exchange-28│ | +|│QualifiedTableName: test.t1 │ └───────────┘ | +|│OutputSymbols: [time, device_id, type, speed]│ │ | +|│DeviceNumber: 1 │ │ | +|│ScanOrder: ASC │ ┌─────────────────────────────────────────────┐| +|│PushDownOffset: 0 │ │TableScan-20 │| +|│PushDownLimit: 0 │ │QualifiedTableName: test.t1 │| +|│PushDownLimitToEachDevice: false │ │OutputSymbols: [time, device_id, type, speed]│| +|│RegionId: 2 │ │DeviceNumber: 1 │| 
+|└─────────────────────────────────────────────┘ │ScanOrder: ASC │| +| │PushDownOffset: 0 │| +| │PushDownLimit: 0 │| +| │PushDownLimitToEachDevice: false │| +| │RegionId: 1 │| +| └─────────────────────────────────────────────┘| ++-----------------------------------------------------------------------------------------------+ +``` + +#### 5.3.2 查询性能分析 + +**语法:** + +```SQL +EXPLAIN ANALYZE [VERBOSE] +``` + +更多详细语法说明请参考:[EXPLAIN ANALYZE 语句](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-语句) + +**示例:** + +```SQL +IoTDB> explain analyze verbose select * from t1 ++-----------------------------------------------------------------------------------------------+ +| Explain Analyze| ++-----------------------------------------------------------------------------------------------+ +|Analyze Cost: 38.860 ms | +|Fetch Partition Cost: 9.888 ms | +|Fetch Schema Cost: 54.046 ms | +|Logical Plan Cost: 10.102 ms | +|Logical Optimization Cost: 17.396 ms | +|Distribution Plan Cost: 2.508 ms | +|Dispatch Cost: 22.126 ms | +|Fragment Instances Count: 2 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| +| Total Wall Time: 18 ms | +| Cost of initDataQuerySource: 6.153 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.294 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.047 | +| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 5.523 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 31]: CollectNode(CollectOperator) | +| CPU Time: 5.512 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 29]: TableScanNode(TableScanOperator) | +| CPU Time: 5.439 ms | +| output: 1 rows | +| HasNext() Called Count: 3 +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | +| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | +| CPU Time: 0.053 ms | +| output: 1 rows | +| HasNext() Called Count: 2 | +| Next() Called Count: 1 | +| Estimated Memory Size: : 131072 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| +| Total Wall Time: 13 ms | +| Cost of initDataQuerySource: 5.725 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | +| loadTimeSeriesMetadataFromCacheCount: 0 | 
+| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.001 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.007 | +| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 0.270 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 30]: TableScanNode(TableScanOperator) | +| CPU Time: 0.250 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | ++-----------------------------------------------------------------------------------------------+ +``` diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md new file mode 100644 index 000000000..4793ddd1f --- /dev/null +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Maintenance-Statements_timecho.md @@ -0,0 +1,651 @@ + + +# 运维语句 + +## 1. 状态查看 + +### 1.1 查看当前的树/表模型 + +**语法:** + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TABLE| ++-----------------+ +``` + +### 1.2 查看登录的用户名 + +**语法:** + +```SQL +showCurrentUserStatement + : SHOW CURRENT_USER + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_USER ++-----------+ +|CurrentUser| ++-----------+ +| root| ++-----------+ +``` + +### 1.3 查看连接的数据库名 + +**语法:** + +```SQL +showCurrentDatabaseStatement + : SHOW CURRENT_DATABASE + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| null| ++---------------+ + +IoTDB> USE test; + +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| test| ++---------------+ +``` + +### 1.4 查看集群版本 + +**语法:** + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW VERSION ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.5 查看集群关键参数 + +**语法:** + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW VARIABLES ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.6 
查看集群ID
+
+**语法:**
+
+```SQL
+showClusterIdStatement
+    : SHOW (CLUSTERID | CLUSTER_ID)
+    ;
+```
+
+**示例:**
+
+```SQL
+IoTDB> SHOW CLUSTER_ID
++------------------------------------+
+|                           ClusterId|
++------------------------------------+
+|40163007-9ec1-4455-aa36-8055d740fcda|
++------------------------------------+
+```
+
+### 1.7 查看服务器的时间
+
+查看客户端直连的 DataNode 进程所在服务器的当前时间。
+
+**语法:**
+
+```SQL
+showCurrentTimestampStatement
+    : SHOW CURRENT_TIMESTAMP
+    ;
+```
+
+**示例:**
+
+```SQL
+IoTDB> SHOW CURRENT_TIMESTAMP
++-----------------------------+
+|             CurrentTimestamp|
++-----------------------------+
+|2025-02-17T11:11:52.987+08:00|
++-----------------------------+
+```
+
+### 1.8 查看分区信息
+
+**含义**:返回当前集群的分区信息。
+
+**语法:**
+
+```SQL
+showRegionsStatement
+    : SHOW REGIONS
+    ;
+```
+
+**示例:**
+
+```SQL
+IoTDB> SHOW REGIONS
+```
+
+执行结果如下:
+
+```SQL
++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|RegionId|        Type| Status|  Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress|  Role|             CreateTime|TsFileSize|
++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+|       6|SchemaRegion|Running|tcollector|          670|          0|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.194|          |
+|       7|  DataRegion|Running|tcollector|          335|        335|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.196| 169.85 KB|
+|       8|  DataRegion|Running|tcollector|          335|        335|         1|   0.0.0.0|   6667|      127.0.0.1|Leader|2025-08-01T17:37:01.198| 161.63 KB|
++--------+------------+-------+----------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+
+```
+
+### 1.9 查看可用节点
+
+**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里的"可用"指处于非 REMOVING 状态的 DataNode 节点。
+
+> V2.0.8 起支持该功能
+
+**语法:**
+
+```SQL
+showAvailableUrlsStatement
+    : SHOW AVAILABLE URLS
+    ;
+```
+
+**示例:**
+
+```SQL
+IoTDB> SHOW AVAILABLE URLS
+```
+
+执行结果如下:
+
+```SQL
++----------+-------+
+|RpcAddress|RpcPort|
++----------+-------+
+|   0.0.0.0|   6667|
++----------+-------+
+```
+
+
+## 2. 状态设置
+
+### 2.1 设置连接的树/表模型
+
+**语法:**
+
+```SQL
+SET SQL_DIALECT EQ (TABLE | TREE)
+```
+
+**示例:**
+
+```SQL
+IoTDB> SET SQL_DIALECT=TABLE
+IoTDB> SHOW CURRENT_SQL_DIALECT
++-----------------+
+|CurrentSqlDialect|
++-----------------+
+|            TABLE|
++-----------------+
+```
+
+### 2.2 更新配置项
+
+**语法:**
+
+```SQL
+setConfigurationStatement
+    : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)?
+    ;
+
+propertyAssignments
+    : property (',' property)*
+    ;
+
+property
+    : identifier EQ propertyValue
+    ;
+
+propertyValue
+    : DEFAULT
+    | expression
+    ;
+```
+
+其中,`ON` 后的整数为目标节点的 NodeId;省略时,配置更新将对集群内所有节点生效。
+
+**示例:**
+
+```SQL
+IoTDB> SET CONFIGURATION disk_space_warning_threshold='0.05',heartbeat_interval_in_ms='1000' ON 1;
+```
+
+### 2.3 读取手动修改的配置文件
+
+**语法:**
+
+```SQL
+loadConfigurationStatement
+    : LOAD CONFIGURATION localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**示例:**
+
+```SQL
+IoTDB> LOAD CONFIGURATION ON LOCAL;
+```
+
+### 2.4 设置系统的状态
+
+**语法:**
+
+```SQL
+setSystemStatusStatement
+    : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode?
+    ;
+
+localOrClusterMode
+    : (ON (LOCAL | CLUSTER))
+    ;
+```
+
+**示例:**
+
+```SQL
+IoTDB> SET SYSTEM TO READONLY ON CLUSTER;
+```
+
+## 3. 数据管理
+
+### 3.1 将内存表中的数据刷到磁盘
+
+**语法:**
+
+```SQL
+flushStatement
+    : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode?
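+      // 补充注释(语法文件中无此说明,各参数语义以所用版本的官方文档为准):
+      // identifier:要刷写的数据库名,可指定多个,省略时刷写全部数据库;
+      // booleanValue:通常 TRUE 表示仅刷写顺序(seq)空间,FALSE 表示仅刷写乱序(unseq)空间;
+      // localOrClusterMode:ON LOCAL 仅在当前节点执行,ON CLUSTER 在整个集群执行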
+ ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> FLUSH test_db TRUE ON LOCAL; +``` + +### 3.2 清除 DataNode 上的缓存 + +**语法:** + +```SQL +clearCacheStatement + : CLEAR clearCacheOptions? CACHE localOrClusterMode? + ; + +clearCacheOptions + : ATTRIBUTE + | QUERY + | ALL + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> CLEAR ALL CACHE ON LOCAL; +``` + +## 4. 数据修复 + +### 4.1 启动后台扫描并修复 tsfile 任务 + +**语法:** + +```SQL +startRepairDataStatement + : START REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> START REPAIR DATA ON CLUSTER; +``` + +### 4.2 暂停后台修复 tsfile 任务 + +**语法:** + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**示例:** + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. 查询相关 + +### 5.1 查看正在执行的查询 + +**语法:** + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)? + limitOffsetClause + ; +``` + +**示例:** + +```SQL +IoTDB> SHOW QUERIES WHERE elapsed_time > 30 ++-----------------------+-----------------------------+-----------+------------+------------+----+ +| query_id| start_time|datanode_id|elapsed_time| statement|user| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +|20250108_101015_00000_1|2025-01-08T18:10:15.935+08:00| 1| 32.283|show queries|root| ++-----------------------+-----------------------------+-----------+------------+------------+----+ +``` + +### 5.2 主动终止查询 + +**语法:** + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**示例:** + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query +IoTDB> KILL ALL QUERIES; -- 终止所有query +``` + +### 5.3 查询性能分析 + +#### 5.3.1 查看执行计划 + +**语法:** + +```SQL +EXPLAIN +``` + +更多详细语法说明请参考:[EXPLAIN 语句](../User-Manual/Query-Performance-Analysis.md#_1-explain-语句) + +**示例:** + +```SQL +IoTDB> explain select * from t1 ++-----------------------------------------------------------------------------------------------+ +| distribution plan| ++-----------------------------------------------------------------------------------------------+ +| ┌─────────────────────────────────────────────┐ | +| │OutputNode-4 │ | +| │OutputColumns-[time, device_id, type, speed] │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| │ | +| │ | +| ┌─────────────────────────────────────────────┐ | +| │Collect-21 │ | +| │OutputSymbols: [time, device_id, type, speed]│ | +| └─────────────────────────────────────────────┘ | +| ┌───────────────────────┴───────────────────────┐ | +| │ │ | +|┌─────────────────────────────────────────────┐ ┌───────────┐ | +|│TableScan-19 │ │Exchange-28│ | +|│QualifiedTableName: test.t1 │ └───────────┘ | +|│OutputSymbols: [time, device_id, type, speed]│ │ | +|│DeviceNumber: 1 │ │ | +|│ScanOrder: ASC │ ┌─────────────────────────────────────────────┐| +|│PushDownOffset: 0 │ │TableScan-20 │| +|│PushDownLimit: 0 │ │QualifiedTableName: test.t1 │| +|│PushDownLimitToEachDevice: false │ │OutputSymbols: [time, device_id, type, speed]│| +|│RegionId: 2 │ │DeviceNumber: 1 │| +|└─────────────────────────────────────────────┘ │ScanOrder: ASC │| +| │PushDownOffset: 0 │| +| │PushDownLimit: 0 │| +| │PushDownLimitToEachDevice: false │| +| 
│RegionId: 1 │| +| └─────────────────────────────────────────────┘| ++-----------------------------------------------------------------------------------------------+ +``` + +#### 5.3.2 查询性能分析 + +**语法:** + +```SQL +EXPLAIN ANALYZE [VERBOSE] +``` + +更多详细语法说明请参考:[EXPLAIN ANALYZE 语句](../User-Manual/Query-Performance-Analysis.md#_2-explain-analyze-语句) + +**示例:** + +```SQL +IoTDB> explain analyze verbose select * from t1 ++-----------------------------------------------------------------------------------------------+ +| Explain Analyze| ++-----------------------------------------------------------------------------------------------+ +|Analyze Cost: 38.860 ms | +|Fetch Partition Cost: 9.888 ms | +|Fetch Schema Cost: 54.046 ms | +|Logical Plan Cost: 10.102 ms | +|Logical Optimization Cost: 17.396 ms | +|Distribution Plan Cost: 2.508 ms | +|Dispatch Cost: 22.126 ms | +|Fragment Instances Count: 2 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.2.0][IP: 0.0.0.0][DataRegion: 2][State: FINISHED]| +| Total Wall Time: 18 ms | +| Cost of initDataQuerySource: 6.153 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.164 ms, blocked queued time: 0.342 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.246 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| constructAlignedChunkReadersMemTime: 0.294 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.047 | +| [PlanNodeId 43]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 5.523 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 31]: CollectNode(CollectOperator) | +| CPU Time: 5.512 ms | +| output: 2 rows | +| HasNext() Called Count: 6 | +| Next() Called Count: 5 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 29]: TableScanNode(TableScanOperator) | +| CPU Time: 5.439 ms | +| output: 1 rows | +| HasNext() Called Count: 3 +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | +| [PlanNodeId 40]: ExchangeNode(ExchangeOperator) | +| CPU Time: 0.053 ms | +| output: 1 rows | +| HasNext() Called Count: 2 | +| Next() Called Count: 1 | +| Estimated Memory Size: : 131072 | +| | +|FRAGMENT-INSTANCE[Id: 20241127_090849_00009_1.3.0][IP: 0.0.0.0][DataRegion: 1][State: FINISHED]| +| Total Wall Time: 13 ms | +| Cost of initDataQuerySource: 5.725 ms | +| Seq File(unclosed): 1, Seq File(closed): 0 | +| UnSeq File(unclosed): 0, UnSeq File(closed): 0 | +| ready queued time: 0.118 ms, blocked queued time: 5.844 ms | +| Query Statistics: | +| loadBloomFilterFromCacheCount: 0 | +| loadBloomFilterFromDiskCount: 0 | +| loadBloomFilterActualIOSize: 0 | +| loadBloomFilterTime: 0.000 | +| loadTimeSeriesMetadataAlignedMemSeqCount: 1 | +| loadTimeSeriesMetadataAlignedMemSeqTime: 0.004 | +| loadTimeSeriesMetadataFromCacheCount: 0 | +| loadTimeSeriesMetadataFromDiskCount: 0 | +| loadTimeSeriesMetadataActualIOSize: 0 | +| constructAlignedChunkReadersMemCount: 1 | +| 
constructAlignedChunkReadersMemTime: 0.001 | +| loadChunkFromCacheCount: 0 | +| loadChunkFromDiskCount: 0 | +| loadChunkActualIOSize: 0 | +| pageReadersDecodeAlignedMemCount: 1 | +| pageReadersDecodeAlignedMemTime: 0.007 | +| [PlanNodeId 42]: IdentitySinkNode(IdentitySinkOperator) | +| CPU Time: 0.270 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| [PlanNodeId 30]: TableScanNode(TableScanOperator) | +| CPU Time: 0.250 ms | +| output: 1 rows | +| HasNext() Called Count: 3 | +| Next() Called Count: 2 | +| Estimated Memory Size: : 327680 | +| DeviceNumber: 1 | +| CurrentDeviceIndex: 0 | ++-----------------------------------------------------------------------------------------------+ +``` diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause.md b/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause.md index a1ede9228..1a6da4ae1 100644 --- a/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause.md +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause.md @@ -1,3 +1,6 @@ +--- +redirectTo: Select-Clause_apache.html +--- - -# SELECT 子句 - -## 1. 语法概览 - -```sql -SELECT setQuantifier? selectItem (',' selectItem)* - -selectItem - : expression (AS? identifier)? #selectSingle - | tableName '.' ASTERISK (AS columnAliases)? #selectAll - | ASTERISK #selectAll - ; -setQuantifier - : DISTINCT - | ALL - ; -``` - -- **SELECT 子句**: 指定了查询结果应包含的列,包含聚合函数(如 SUM、AVG、COUNT 等)以及窗口函数,在逻辑上最后执行。 -- **DISTINCT 关键字**: `SELECT DISTINCT column_name` 确保查询结果中的值是唯一的,去除重复项。 -- **COLUMNS 函数**:SELECT 子句中支持使用 COLUMNS 函数进行列筛选,并支持和表达式结合使用,使表达式的效果对所有筛选出的列生效。 - -## 2. 语法详释: - -每个 `selectItem` 可以是以下形式之一: - -- **表达式**: `expression [ [ AS ] column_alias ]` 定义单个输出列,可以指定列别名。 -- **选择某个关系的所有列**: `relation.*` 选择某个关系的所有列,不允许使用列别名。 -- **选择结果集中的所有列**: `*` 选择查询的所有列,不允许使用列别名。 - -`DISTINCT` 的使用场景: - -- **SELECT 语句**:在 SELECT 语句中使用 DISTINCT,查询结果去除重复项。 -- **聚合函数**:与聚合函数一起使用时,DISTINCT 只处理输入数据集中的非重复行。 -- **GROUP BY 子句**:在 GROUP BY 子句中使用 ALL 和 DISTINCT 量词,决定是否每个重复的分组集产生不同的输出行。 - -`COLUMNS` 函数: -- **`COLUMNS(*)`**: 匹配所有列,支持结合表达式进行使用。 -- **`COLUMNS(regexStr) ? AS identifier`**:正则匹配 - - 匹配所有列名满足正则表达式的列,支持结合表达式进行使用。 - - 支持引用正则表达式捕获到的 groups 对列进行重命名,不写 AS 时展示原始列名(即 _coln_原始列名,其中 n 为列在结果表中的 position)。 - - 重命名用法简述: - - regexStr 中使用圆括号设置要捕获的组; - - 在 identifier 中使用 `'$index'` 引用捕获到的组。 - - 注意:使用该功能时,identifier 中会包含特殊字符 '$',所以整个 identifier 要用双引号引起来。 - -## 3. 
示例数据 - -在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 - -### 3.1 选择列表 - -#### 3.1.1 星表达式 - -使用星号(*)可以选取表中的所有列,**注意**,星号表达式不能被大多数函数转换,除了`count(*)`的情况。 - -示例:从表中选择所有列 - -```sql -SELECT * FROM table1; -``` - -执行结果如下: - -```sql -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| -|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| -|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| -|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| -|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -Total line number = 18 -It costs 0.653s -``` - -#### 3.1.2 聚合函数 - -聚合函数将多行数据汇总为单个值。当 SELECT 子句中存在聚合函数时,查询将被视为聚合查询。在聚合查询中,所有表达式必须是聚合函数的一部分或由[GROUP BY子句](../SQL-Manual/GroupBy-Clause.md)指定的分组的一部分。 - -示例1:返回地址表中的总行数: - -```sql -SELECT count(*) FROM table1; -``` - -执行结果如下: - -```sql -+-----+ -|_col0| -+-----+ -| 18| -+-----+ -Total line number = 1 -It costs 0.091s -``` - -示例2:返回按城市分组的地址表中的总行数: - -```sql -SELECT region, count(*) - FROM table1 - GROUP BY region; -``` - -执行结果如下: - -```sql -+------+-----+ -|region|_col1| -+------+-----+ -| 上海| 9| -| 北京| 9| -+------+-----+ -Total line number = 2 -It costs 0.071s -``` - -#### 3.1.3 别名 - -关键字`AS`:为选定的列指定别名,别名将覆盖已存在的列名,以提高查询结果的可读性。 - -示例1:原始表格: - -```sql -IoTDB> SELECT * FROM table1; -``` - -执行结果如下: - -```sql -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| 
-+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| -|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| -|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| -|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| -|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| -|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -Total line number = 18 -It costs 0.653s -``` - -示例2:单列设置别名: - -```sql -IoTDB> SELECT device_id - AS device - FROM table1; -``` - -执行结果如下: - -```sql -+------+ -|device| -+------+ -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 100| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -| 101| -+------+ -Total line number = 18 -It costs 0.053s -``` - -示例3:所有列的别名: - -```sql -IoTDB> SELECT table1.* - AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) - FROM table1; -``` - -执行结果如下: - -```sql -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| -|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| -|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| -|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| -|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| -|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| -|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| -|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| 
-|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| -|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| -|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| -|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| -|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| -|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| -|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| -|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| -|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| -|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| -+-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ -Total line number = 18 -It costs 0.189s -``` - -#### 3.1.4 Object 类型查询 - -> V2.0.8-beta 版本起支持 - -示例一:直接查询 object 类型数据 - -```SQL -IoTDB:database1> select s1 from table1 where device_id = 'tag1' -+------------+ -| s1| -+------------+ -|(Object) 5 B| -+------------+ -Total line number = 1 -It costs 0.428s -``` - -示例二:通过 read\_object 函数查询 Object 类型数据的真实内容 - -```SQL -IoTDB:database1> select read_object(s1) from table1 where device_id = 'tag1' -+------------+ -| _col0| -+------------+ -|0x696f746462| -+------------+ -Total line number = 1 -It costs 0.188s -``` - - -### 3.2 Columns 函数 - -1. 不结合表达式 -```sql --- 查询列名以 'm' 开头的列的数据 -IoTDB:database1> select columns('^m.*') from table1 limit 5 -+--------+-----------+ -|model_id|maintenance| -+--------+-----------+ -| E| 180| -| E| 180| -| C| 90| -| C| 90| -| C| 90| -+--------+-----------+ - - --- 查询列名以 'o' 开头的列,未匹配到任何列,抛出异常 -IoTDB:database1> select columns('^o.*') from table1 limit 5 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' - - --- 查询列名以 'm' 开头的列的数据,并重命名以 'series_' 开头 -IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 -+---------------+------------------+ -|series_model_id|series_maintenance| -+---------------+------------------+ -| E| 180| -| E| 180| -| C| 90| -| C| 90| -| C| 90| -+---------------+------------------+ -``` - -2. 
结合表达式 - -- 单个 COLUMNS 函数 -```sql --- 查询所有列的最小值 -IoTDB:database1> select min(columns(*)) from table1 -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| -+-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ -``` - -- 多个 COLUMNS 函数,出现在同一表达式 - -> 使用限制:出现多个 COLUMNS 函数时,多个 COLUMNS 函数的参数要完全相同 - -```sql --- 查询 'h' 开头列的最小值和最大值之和 -IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 -+--------------+ -|_col0_humidity| -+--------------+ -| 79.899994| -+--------------+ - --- 错误查询,两个 COLUMNS 函数不完全相同 -IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 -Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported -``` - -- 多个 COLUMNS 函数,出现在不同表达式 - -```sql --- 分别查询 'h' 开头列的最小值和最大值 -IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 -+--------------+--------------+ -|_col0_humidity|_col1_humidity| -+--------------+--------------+ -| 34.8| 45.1| -+--------------+--------------+ - --- 分别查询 'h' 开头列的最小值和 'te'开头列的最大值 -IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 -+--------------+-----------------+ -|_col0_humidity|_col1_temperature| -+--------------+-----------------+ -| 34.8| 90.0| -+--------------+-----------------+ -``` - -3. 
在 WHERE 子句中使用 - -```sql --- 查询数据,所有 'h' 开头列的数据必须要大于 40 -IoTDB:database1> select * from table1 where columns('^h.*') > 40 -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ - ---等价于 -IoTDB:database1> select * from table1 where humidity > 40 -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| -|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| -|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| -+-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ -``` - -## 4. 结果集列顺序 - -- **列顺序**: 结果集中的列顺序与 SELECT 子句中指定的顺序相同。 -- **多列排序**: 如果选择表达式返回多个列,它们的排序方式与源关系中的排序方式相同 \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause_apache.md b/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause_apache.md new file mode 100644 index 000000000..a1ede9228 --- /dev/null +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause_apache.md @@ -0,0 +1,414 @@ + + +# SELECT 子句 + +## 1. 语法概览 + +```sql +SELECT setQuantifier? selectItem (',' selectItem)* + +selectItem + : expression (AS? identifier)? #selectSingle + | tableName '.' ASTERISK (AS columnAliases)? #selectAll + | ASTERISK #selectAll + ; +setQuantifier + : DISTINCT + | ALL + ; +``` + +- **SELECT 子句**: 指定了查询结果应包含的列,包含聚合函数(如 SUM、AVG、COUNT 等)以及窗口函数,在逻辑上最后执行。 +- **DISTINCT 关键字**: `SELECT DISTINCT column_name` 确保查询结果中的值是唯一的,去除重复项。 +- **COLUMNS 函数**:SELECT 子句中支持使用 COLUMNS 函数进行列筛选,并支持和表达式结合使用,使表达式的效果对所有筛选出的列生效。 + +## 2. 语法详释: + +每个 `selectItem` 可以是以下形式之一: + +- **表达式**: `expression [ [ AS ] column_alias ]` 定义单个输出列,可以指定列别名。 +- **选择某个关系的所有列**: `relation.*` 选择某个关系的所有列,不允许使用列别名。 +- **选择结果集中的所有列**: `*` 选择查询的所有列,不允许使用列别名。 + +`DISTINCT` 的使用场景: + +- **SELECT 语句**:在 SELECT 语句中使用 DISTINCT,查询结果去除重复项。 +- **聚合函数**:与聚合函数一起使用时,DISTINCT 只处理输入数据集中的非重复行。 +- **GROUP BY 子句**:在 GROUP BY 子句中使用 ALL 和 DISTINCT 量词,决定是否每个重复的分组集产生不同的输出行。 + +`COLUMNS` 函数: +- **`COLUMNS(*)`**: 匹配所有列,支持结合表达式进行使用。 +- **`COLUMNS(regexStr) ? AS identifier`**:正则匹配 + - 匹配所有列名满足正则表达式的列,支持结合表达式进行使用。 + - 支持引用正则表达式捕获到的 groups 对列进行重命名,不写 AS 时展示原始列名(即 _coln_原始列名,其中 n 为列在结果表中的 position)。 + - 重命名用法简述: + - regexStr 中使用圆括号设置要捕获的组; + - 在 identifier 中使用 `'$index'` 引用捕获到的组。 + + 注意:使用该功能时,identifier 中会包含特殊字符 '$',所以整个 identifier 要用双引号引起来。 + +## 3. 
示例数据 + +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 + +### 3.1 选择列表 + +#### 3.1.1 星表达式 + +使用星号(*)可以选取表中的所有列,**注意**,星号表达式不能被大多数函数转换,除了`count(*)`的情况。 + +示例:从表中选择所有列 + +```sql +SELECT * FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +#### 3.1.2 聚合函数 + +聚合函数将多行数据汇总为单个值。当 SELECT 子句中存在聚合函数时,查询将被视为聚合查询。在聚合查询中,所有表达式必须是聚合函数的一部分或由[GROUP BY子句](../SQL-Manual/GroupBy-Clause.md)指定的分组的一部分。 + +示例1:返回地址表中的总行数: + +```sql +SELECT count(*) FROM table1; +``` + +执行结果如下: + +```sql ++-----+ +|_col0| ++-----+ +| 18| ++-----+ +Total line number = 1 +It costs 0.091s +``` + +示例2:返回按城市分组的地址表中的总行数: + +```sql +SELECT region, count(*) + FROM table1 + GROUP BY region; +``` + +执行结果如下: + +```sql ++------+-----+ +|region|_col1| ++------+-----+ +| 上海| 9| +| 北京| 9| ++------+-----+ +Total line number = 2 +It costs 0.071s +``` + +#### 3.1.3 别名 + +关键字`AS`:为选定的列指定别名,别名将覆盖已存在的列名,以提高查询结果的可读性。 + +示例1:原始表格: + +```sql +IoTDB> SELECT * FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| 
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +示例2:单列设置别名: + +```sql +IoTDB> SELECT device_id + AS device + FROM table1; +``` + +执行结果如下: + +```sql ++------+ +|device| ++------+ +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| ++------+ +Total line number = 18 +It costs 0.053s +``` + +示例3:所有列的别名: + +```sql +IoTDB> SELECT table1.* + AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) + FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| +|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| +|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| 
+|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| +|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| +|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| +|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| +|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +Total line number = 18 +It costs 0.189s +``` + +#### 3.1.4 Object 类型查询 + +> V2.0.8-beta 版本起支持 + +示例一:直接查询 object 类型数据 + +```SQL +IoTDB:database1> select s1 from table1 where device_id = 'tag1' ++------------+ +| s1| ++------------+ +|(Object) 5 B| ++------------+ +Total line number = 1 +It costs 0.428s +``` + +示例二:通过 read\_object 函数查询 Object 类型数据的真实内容 + +```SQL +IoTDB:database1> select read_object(s1) from table1 where device_id = 'tag1' ++------------+ +| _col0| ++------------+ +|0x696f746462| ++------------+ +Total line number = 1 +It costs 0.188s +``` + + +### 3.2 Columns 函数 + +1. 不结合表达式 +```sql +-- 查询列名以 'm' 开头的列的数据 +IoTDB:database1> select columns('^m.*') from table1 limit 5 ++--------+-----------+ +|model_id|maintenance| ++--------+-----------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++--------+-----------+ + + +-- 查询列名以 'o' 开头的列,未匹配到任何列,抛出异常 +IoTDB:database1> select columns('^o.*') from table1 limit 5 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' + + +-- 查询列名以 'm' 开头的列的数据,并重命名以 'series_' 开头 +IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 ++---------------+------------------+ +|series_model_id|series_maintenance| ++---------------+------------------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++---------------+------------------+ +``` + +2. 
结合表达式 + +- 单个 COLUMNS 函数 +```sql +-- 查询所有列的最小值 +IoTDB:database1> select min(columns(*)) from table1 ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +``` + +- 多个 COLUMNS 函数,出现在同一表达式 + +> 使用限制:出现多个 COLUMNS 函数时,多个 COLUMNS 函数的参数要完全相同 + +```sql +-- 查询 'h' 开头列的最小值和最大值之和 +IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 ++--------------+ +|_col0_humidity| ++--------------+ +| 79.899994| ++--------------+ + +-- 错误查询,两个 COLUMNS 函数不完全相同 +IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported +``` + +- 多个 COLUMNS 函数,出现在不同表达式 + +```sql +-- 分别查询 'h' 开头列的最小值和最大值 +IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 ++--------------+--------------+ +|_col0_humidity|_col1_humidity| ++--------------+--------------+ +| 34.8| 45.1| ++--------------+--------------+ + +-- 分别查询 'h' 开头列的最小值和 'te'开头列的最大值 +IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 ++--------------+-----------------+ +|_col0_humidity|_col1_temperature| ++--------------+-----------------+ +| 34.8| 90.0| ++--------------+-----------------+ +``` + +3. 
在 WHERE 子句中使用 + +```sql +-- 查询数据,所有 'h' 开头列的数据必须要大于 40 +IoTDB:database1> select * from table1 where columns('^h.*') > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ + +--等价于 +IoTDB:database1> select * from table1 where humidity > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +``` + +## 4. 结果集列顺序 + +- **列顺序**: 结果集中的列顺序与 SELECT 子句中指定的顺序相同。 +- **多列排序**: 如果选择表达式返回多个列,它们的排序方式与源关系中的排序方式相同 \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause_timecho.md b/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause_timecho.md new file mode 100644 index 000000000..3f2c476b1 --- /dev/null +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/Select-Clause_timecho.md @@ -0,0 +1,414 @@ + + +# SELECT 子句 + +## 1. 语法概览 + +```sql +SELECT setQuantifier? selectItem (',' selectItem)* + +selectItem + : expression (AS? identifier)? #selectSingle + | tableName '.' ASTERISK (AS columnAliases)? #selectAll + | ASTERISK #selectAll + ; +setQuantifier + : DISTINCT + | ALL + ; +``` + +- **SELECT 子句**: 指定了查询结果应包含的列,包含聚合函数(如 SUM、AVG、COUNT 等)以及窗口函数,在逻辑上最后执行。 +- **DISTINCT 关键字**: `SELECT DISTINCT column_name` 确保查询结果中的值是唯一的,去除重复项。 +- **COLUMNS 函数**:SELECT 子句中支持使用 COLUMNS 函数进行列筛选,并支持和表达式结合使用,使表达式的效果对所有筛选出的列生效。 + +## 2. 语法详释: + +每个 `selectItem` 可以是以下形式之一: + +- **表达式**: `expression [ [ AS ] column_alias ]` 定义单个输出列,可以指定列别名。 +- **选择某个关系的所有列**: `relation.*` 选择某个关系的所有列,不允许使用列别名。 +- **选择结果集中的所有列**: `*` 选择查询的所有列,不允许使用列别名。 + +`DISTINCT` 的使用场景: + +- **SELECT 语句**:在 SELECT 语句中使用 DISTINCT,查询结果去除重复项。 +- **聚合函数**:与聚合函数一起使用时,DISTINCT 只处理输入数据集中的非重复行。 +- **GROUP BY 子句**:在 GROUP BY 子句中使用 ALL 和 DISTINCT 量词,决定是否每个重复的分组集产生不同的输出行。 + +`COLUMNS` 函数: +- **`COLUMNS(*)`**: 匹配所有列,支持结合表达式进行使用。 +- **`COLUMNS(regexStr) ? AS identifier`**:正则匹配 + - 匹配所有列名满足正则表达式的列,支持结合表达式进行使用。 + - 支持引用正则表达式捕获到的 groups 对列进行重命名,不写 AS 时展示原始列名(即 _coln_原始列名,其中 n 为列在结果表中的 position)。 + - 重命名用法简述: + - regexStr 中使用圆括号设置要捕获的组; + - 在 identifier 中使用 `'$index'` 引用捕获到的组。 + + 注意:使用该功能时,identifier 中会包含特殊字符 '$',所以整个 identifier 要用双引号引起来。 + +## 3. 
示例数据 + +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 + +### 3.1 选择列表 + +#### 3.1.1 星表达式 + +使用星号(*)可以选取表中的所有列,**注意**,星号表达式不能被大多数函数转换,除了`count(*)`的情况。 + +示例:从表中选择所有列 + +```sql +SELECT * FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +#### 3.1.2 聚合函数 + +聚合函数将多行数据汇总为单个值。当 SELECT 子句中存在聚合函数时,查询将被视为聚合查询。在聚合查询中,所有表达式必须是聚合函数的一部分或由[GROUP BY子句](../SQL-Manual/GroupBy-Clause.md)指定的分组的一部分。 + +示例1:返回地址表中的总行数: + +```sql +SELECT count(*) FROM table1; +``` + +执行结果如下: + +```sql ++-----+ +|_col0| ++-----+ +| 18| ++-----+ +Total line number = 1 +It costs 0.091s +``` + +示例2:返回按城市分组的地址表中的总行数: + +```sql +SELECT region, count(*) + FROM table1 + GROUP BY region; +``` + +执行结果如下: + +```sql ++------+-----+ +|region|_col1| ++------+-----+ +| 上海| 9| +| 北京| 9| ++------+-----+ +Total line number = 2 +It costs 0.071s +``` + +#### 3.1.3 别名 + +关键字`AS`:为选定的列指定别名,别名将覆盖已存在的列名,以提高查询结果的可读性。 + +示例1:原始表格: + +```sql +IoTDB> SELECT * FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| modifytime| 
++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-29T18:30:00.000+08:00| 上海| 3002| 100| E| 180| 90.0| 35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T10:00:00.000+08:00| 上海| 3001| 100| C| 90| 85.0| 35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00| 北京| 1001| 100| A| 180| 90.0| 35.1| true|2024-11-26T13:38:25.000+08:00| +|2024-11-30T09:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 35.2| true| null| +|2024-11-30T14:30:00.000+08:00| 上海| 3002| 101| F| 360| 90.0| 34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00| 上海| 3001| 101| D| 360| 85.0| null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| 35.3| null| null| +|2024-11-27T16:40:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00| 北京| 1001| 101| B| 180| 85.0| null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00| 北京| 1001| 101| B| 180| null| 35.2| false| null| +|2024-11-27T16:43:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false| null| +|2024-11-27T16:44:00.000+08:00| 北京| 1001| 101| B| 180| null| null| false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +Total line number = 18 +It costs 0.653s +``` + +示例2:单列设置别名: + +```sql +IoTDB> SELECT device_id + AS device + FROM table1; +``` + +执行结果如下: + +```sql ++------+ +|device| ++------+ +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 100| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| +| 101| ++------+ +Total line number = 18 +It costs 0.053s +``` + +示例3:所有列的别名: + +```sql +IoTDB> SELECT table1.* + AS (timestamp, Reg, Pl, DevID, Mod, Mnt, Temp, Hum, Stat,MTime) + FROM table1; +``` + +执行结果如下: + +```sql ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +| TIMESTAMP| REG| PL|DEVID|MOD|MNT|TEMP| HUM| STAT| MTIME| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +|2024-11-29T11:00:00.000+08:00|上海|3002| 100| E|180|null|45.1| true| null| +|2024-11-29T18:30:00.000+08:00|上海|3002| 100| E|180|90.0|35.4| true|2024-11-29T18:30:15.000+08:00| +|2024-11-28T08:00:00.000+08:00|上海|3001| 100| C| 90|85.0|null| null|2024-11-28T08:00:09.000+08:00| +|2024-11-28T09:00:00.000+08:00|上海|3001| 100| C| 90|null|40.9| true| null| +|2024-11-28T10:00:00.000+08:00|上海|3001| 100| C| 90|85.0|35.2| null|2024-11-28T10:00:11.000+08:00| +|2024-11-28T11:00:00.000+08:00|上海|3001| 100| C| 90|88.0|45.1| true|2024-11-28T11:00:12.000+08:00| +|2024-11-26T13:37:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:37:34.000+08:00| +|2024-11-26T13:38:00.000+08:00|北京|1001| 100| A|180|90.0|35.1| true|2024-11-26T13:38:25.000+08:00| 
+|2024-11-30T09:30:00.000+08:00|上海|3002| 101| F|360|90.0|35.2| true| null| +|2024-11-30T14:30:00.000+08:00|上海|3002| 101| F|360|90.0|34.8| true|2024-11-30T14:30:17.000+08:00| +|2024-11-29T10:00:00.000+08:00|上海|3001| 101| D|360|85.0|null| null|2024-11-29T10:00:13.000+08:00| +|2024-11-27T16:38:00.000+08:00|北京|1001| 101| B|180|null|35.1| true|2024-11-26T16:37:01.000+08:00| +|2024-11-27T16:39:00.000+08:00|北京|1001| 101| B|180|85.0|35.3| null| null| +|2024-11-27T16:40:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:03.000+08:00| +|2024-11-27T16:41:00.000+08:00|北京|1001| 101| B|180|85.0|null| null|2024-11-26T16:37:04.000+08:00| +|2024-11-27T16:42:00.000+08:00|北京|1001| 101| B|180|null|35.2|false| null| +|2024-11-27T16:43:00.000+08:00|北京|1001| 101| B|180|null|null|false| null| +|2024-11-27T16:44:00.000+08:00|北京|1001| 101| B|180|null|null|false|2024-11-26T16:37:08.000+08:00| ++-----------------------------+----+----+-----+---+---+----+----+-----+-----------------------------+ +Total line number = 18 +It costs 0.189s +``` + +#### 3.1.4 Object 类型查询 + +> V2.0.8 版本起支持 + +示例一:直接查询 object 类型数据 + +```SQL +IoTDB:database1> select s1 from table1 where device_id = 'tag1' ++------------+ +| s1| ++------------+ +|(Object) 5 B| ++------------+ +Total line number = 1 +It costs 0.428s +``` + +示例二:通过 read\_object 函数查询 Object 类型数据的真实内容 + +```SQL +IoTDB:database1> select read_object(s1) from table1 where device_id = 'tag1' ++------------+ +| _col0| ++------------+ +|0x696f746462| ++------------+ +Total line number = 1 +It costs 0.188s +``` + + +### 3.2 Columns 函数 + +1. 不结合表达式 +```sql +-- 查询列名以 'm' 开头的列的数据 +IoTDB:database1> select columns('^m.*') from table1 limit 5 ++--------+-----------+ +|model_id|maintenance| ++--------+-----------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++--------+-----------+ + + +-- 查询列名以 'o' 开头的列,未匹配到任何列,抛出异常 +IoTDB:database1> select columns('^o.*') from table1 limit 5 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: No matching columns found that match regex '^o.*' + + +-- 查询列名以 'm' 开头的列的数据,并重命名以 'series_' 开头 +IoTDB:database1> select columns('^m(.*)') AS "series_$0" from table1 limit 5 ++---------------+------------------+ +|series_model_id|series_maintenance| ++---------------+------------------+ +| E| 180| +| E| 180| +| C| 90| +| C| 90| +| C| 90| ++---------------+------------------+ +``` + +2. 
结合表达式 + +- 单个 COLUMNS 函数 +```sql +-- 查询所有列的最小值 +IoTDB:database1> select min(columns(*)) from table1 ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +| _col0_time|_col1_region|_col2_plant_id|_col3_device_id|_col4_model_id|_col5_maintenance|_col6_temperature|_col7_humidity|_col8_status| _col9_arrival_time| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +|2024-11-26T13:37:00.000+08:00| 上海| 1001| 100| A| 180| 85.0| 34.8| false|2024-11-26T13:37:34.000+08:00| ++-----------------------------+------------+--------------+---------------+--------------+-----------------+-----------------+--------------+------------+-----------------------------+ +``` + +- 多个 COLUMNS 函数,出现在同一表达式 + +> 使用限制:出现多个 COLUMNS 函数时,多个 COLUMNS 函数的参数要完全相同 + +```sql +-- 查询 'h' 开头列的最小值和最大值之和 +IoTDB:database1> select min(columns('^h.*')) + max(columns('^h.*')) from table1 ++--------------+ +|_col0_humidity| ++--------------+ +| 79.899994| ++--------------+ + +-- 错误查询,两个 COLUMNS 函数不完全相同 +IoTDB:database1> select min(columns('^h.*')) + max(columns('^t.*')) from table1 +Msg: org.apache.iotdb.jdbc.IoTDBSQLException: 701: Multiple different COLUMNS in the same expression are not supported +``` + +- 多个 COLUMNS 函数,出现在不同表达式 + +```sql +-- 分别查询 'h' 开头列的最小值和最大值 +IoTDB:database1> select min(columns('^h.*')) , max(columns('^h.*')) from table1 ++--------------+--------------+ +|_col0_humidity|_col1_humidity| ++--------------+--------------+ +| 34.8| 45.1| ++--------------+--------------+ + +-- 分别查询 'h' 开头列的最小值和 'te'开头列的最大值 +IoTDB:database1> select min(columns('^h.*')) , max(columns('^te.*')) from table1 ++--------------+-----------------+ +|_col0_humidity|_col1_temperature| ++--------------+-----------------+ +| 34.8| 90.0| ++--------------+-----------------+ +``` + +3. 
在 WHERE 子句中使用 + +```sql +-- 查询数据,所有 'h' 开头列的数据必须要大于 40 +IoTDB:database1> select * from table1 where columns('^h.*') > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ + +--等价于 +IoTDB:database1> select * from table1 where humidity > 40 ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +| time|region|plant_id|device_id|model_id|maintenance|temperature|humidity|status| arrival_time| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +|2024-11-29T11:00:00.000+08:00| 上海| 3002| 100| E| 180| null| 45.1| true| null| +|2024-11-28T09:00:00.000+08:00| 上海| 3001| 100| C| 90| null| 40.9| true| null| +|2024-11-28T11:00:00.000+08:00| 上海| 3001| 100| C| 90| 88.0| 45.1| true|2024-11-28T11:00:12.000+08:00| ++-----------------------------+------+--------+---------+--------+-----------+-----------+--------+------+-----------------------------+ +``` + +## 4. 结果集列顺序 + +- **列顺序**: 结果集中的列顺序与 SELECT 子句中指定的顺序相同。 +- **多列排序**: 如果选择表达式返回多个列,它们的排序方式与源关系中的排序方式相同 \ No newline at end of file diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/overview_apache.md b/src/zh/UserGuide/latest-Table/SQL-Manual/overview_apache.md index ea5a1a9cf..4f8ca7579 100644 --- a/src/zh/UserGuide/latest-Table/SQL-Manual/overview_apache.md +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/overview_apache.md @@ -37,7 +37,7 @@ SELECT ⟨select_list⟩ IoTDB 查询语法提供以下子句: -- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause.md) +- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause_apache.md) - FROM 子句:指出查询的数据源,可以是单个表、多个通过 `JOIN` 子句连接的表,或者是一个子查询。详细语法见:[FROM & JOIN 子句](../SQL-Manual/From-Join-Clause.md) - WHERE 子句:用于过滤数据,只选择满足特定条件的数据行。这个子句在逻辑上紧跟在 FROM 子句之后执行。详细语法见:[WHERE 子句](../SQL-Manual/Where-Clause.md) - GROUP BY 子句:当需要对数据进行聚合时使用,指定了用于分组的列。详细语法见:[GROUP BY 子句](../SQL-Manual/GroupBy-Clause.md) diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/overview_timecho.md b/src/zh/UserGuide/latest-Table/SQL-Manual/overview_timecho.md index 7b6fcb458..0cb0fd1d9 100644 --- a/src/zh/UserGuide/latest-Table/SQL-Manual/overview_timecho.md +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/overview_timecho.md @@ -38,7 +38,7 @@ SELECT ⟨select_list⟩ IoTDB 查询语法提供以下子句: -- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause.md) +- SELECT 子句:查询结果应包含的列。详细语法见:[SELECT子句](../SQL-Manual/Select-Clause_timecho.md) - FROM 子句:指出查询的数据源,可以是单个表、多个通过 `JOIN` 子句连接的表,或者是一个子查询。详细语法见:[FROM & JOIN 子句](../SQL-Manual/From-Join-Clause.md) - WHERE 子句:用于过滤数据,只选择满足特定条件的数据行。这个子句在逻辑上紧跟在 FROM 子句之后执行。详细语法见:[WHERE 子句](../SQL-Manual/Where-Clause.md) - GROUP BY 子句:当需要对数据进行聚合时使用,指定了用于分组的列。详细语法见:[GROUP BY 子句](../SQL-Manual/GroupBy-Clause.md) diff --git 
a/src/zh/UserGuide/latest-Table/Tools-System/Data-Export-Tool_timecho.md b/src/zh/UserGuide/latest-Table/Tools-System/Data-Export-Tool_timecho.md index 4c964c0d7..e6b5aae19 100644 --- a/src/zh/UserGuide/latest-Table/Tools-System/Data-Export-Tool_timecho.md +++ b/src/zh/UserGuide/latest-Table/Tools-System/Data-Export-Tool_timecho.md @@ -46,8 +46,8 @@ |-end_time |--end_time | 将要导出的数据的终止时间,只有`-sql_dialect`为 table 类型时生效。如果填写了`-q`,则此参数不生效。| 否 | - | | -t | --target | 指定输出文件的目标文件夹,如果路径不存在新建文件夹 | √ | | | -pfn | --prefix\_file\_name | 指定导出文件的名称。例如:abc,生成的文件是abc\_0.tsfile、abc\_1.tsfile | 否 | dump\_0.tsfile | -| -q | --query | 要执行的查询语句。自 V2.0.8-beta 起,SQL 语句中的分号将被自动移除,查询执行保持正常。 | 否 | 无 | -| -timeout | --query\_timeout | 会话查询的超时时间(ms) | 否 | `-1`(V2.0.8-beta 之前)
`Long.MAX_VALUE`(V2.0.8-beta 及之后)
范围:`-1~Long.MAX_VALUE` | +| -q | --query | 要执行的查询语句。自 V2.0.8 起,SQL 语句中的分号会被自动移除,查询仍可正常执行。 | 否 | 无 | +| -timeout | --query\_timeout | 会话查询的超时时间(ms) | 否 | `-1`(V2.0.8 之前)<br>
`Long.MAX_VALUE`(V2.0.8 及之后)
范围:`-1~Long.MAX_VALUE` | | -help | --help | 显示帮助信息 | 否 | | ### 2.2 CSV 格式 diff --git a/src/zh/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md b/src/zh/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md index 299107e84..89d214405 100644 --- a/src/zh/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md +++ b/src/zh/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md @@ -31,7 +31,7 @@ * 可通过参数设置审计日志文件的存储周期,包括基于 TTL 实现时间滚动和基于 SpaceTL 实现空间滚动。 * 审计日志文件默认加密存储 -> 注意:该功能从 V2.0.8-beta 版本开始提供。 +> 注意:该功能从 V2.0.8 版本开始提供。 ## 2. 配置参数 diff --git a/src/zh/UserGuide/latest-Table/User-Manual/Black-White-List_timecho.md b/src/zh/UserGuide/latest-Table/User-Manual/Black-White-List_timecho.md index 740828f99..483be04cd 100644 --- a/src/zh/UserGuide/latest-Table/User-Manual/Black-White-List_timecho.md +++ b/src/zh/UserGuide/latest-Table/User-Manual/Black-White-List_timecho.md @@ -39,7 +39,7 @@ IoTDB 是一款针对物联网场景设计的时间序列数据库,支持高 * 编辑配置文件 `iotdb-system.properties`进行维护 * 通过 set configuration 语句进行维护 - * 表模型请参考:[set configuration](../SQL-Manual/SQL-Maintenance-Statements.md#_2-2-更新配置项) + * 表模型请参考:[set configuration](../SQL-Manual/SQL-Maintenance-Statements_timecho.md#_2-2-更新配置项) 相关参数如下: @@ -60,7 +60,7 @@ IoTDB 是一款针对物联网场景设计的时间序列数据库,支持高 * 编辑配置文件 `iotdb-system.properties`进行维护 * 通过 set configuration 语句进行维护 - * 表模型请参考:[set configuration](../SQL-Manual/SQL-Maintenance-Statements.md#_2-2-更新配置项) + * 表模型请参考:[set configuration](../SQL-Manual/SQL-Maintenance-Statements_timecho.md#_2-2-更新配置项) 相关参数如下: diff --git a/src/zh/UserGuide/latest-Table/User-Manual/Maintenance-statement_timecho.md b/src/zh/UserGuide/latest-Table/User-Manual/Maintenance-statement_timecho.md index 41eda769e..aadcb8b29 100644 --- a/src/zh/UserGuide/latest-Table/User-Manual/Maintenance-statement_timecho.md +++ b/src/zh/UserGuide/latest-Table/User-Manual/Maintenance-statement_timecho.md @@ -340,7 +340,7 @@ IoTDB> SHOW REGIONS **含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 -> V2.0.8-beta 起支持该功能 +> V2.0.8 起支持该功能 #### 语法: diff --git a/src/zh/UserGuide/latest/API/Programming-OPC-UA_timecho.md b/src/zh/UserGuide/latest/API/Programming-OPC-UA_timecho.md index 83f2debae..6a7d2678b 100644 --- a/src/zh/UserGuide/latest/API/Programming-OPC-UA_timecho.md +++ b/src/zh/UserGuide/latest/API/Programming-OPC-UA_timecho.md @@ -27,7 +27,7 @@ * **模式一:数据订阅服务 (IoTDB 作为 OPC UA 服务器)**:IoTDB 启动内置的 OPC UA 服务器,被动地允许外部客户端(如 UAExpert)连接并订阅其内部数据。这是传统用法。 * **模式二:数据主动推送 (IoTDB 作为 OPC UA 客户端)**:IoTDB 作为客户端,主动将数据和元数据同步到一个或多个独立部署的外部 OPC UA 服务器。 - > 注意:该模式从 V2.0.8-beta 起支持。 + > 注意:该模式从 V2.0.8 起支持。 **注意:模式互斥** @@ -68,15 +68,15 @@ create pipe p1 | sink.opcua.tcp.port | OPC UA 的 TCP 端口 | Integer: [0, 65536] | 选填 | 12686 | | sink.opcua.https.port | OPC UA 的 HTTPS 端口 | Integer: [0, 65536] | 选填 | 8443 | | sink.opcua.security.dir | OPC UA 的密钥及证书目录 | String: Path,支持绝对及相对目录 | 选填 | 1. iotdb 相关 DataNode 的 conf 目录下的 `opc_security` 文件夹 `/`。2. 如无 iotdb 的 conf 目录(例如 IDEA 中启动 DataNode),则为用户主目录下的 `iotdb_opc_security` 文件夹 `/` | -| opcua.security-policy | OPC UA 连接使用的安全策略,不区分大小写。可以配置多个,用`,`连接。配置一个安全策略后,client 才能用对应的策略连接。当前实现默认支持`None`和`Basic256Sha256`策略,应该默认改为任意的非`None`策略,`None`策略在调试环境中单独配置,因为`None`策略虽然不需移动证书,操作方便,但是不安全,生产环境的 server 不建议支持该策略。注意:V2.0.8-beta 起支持该参数,且仅支持 client-server 模式 | String(安全性依次递增):
`None`
`Basic128Rsa15`
`Basic256`
`Basic256Sha256`
`Aes128_Sha256_RsaOaep`
`Aes256_Sha256_RsaPss` | 选填| `Basic256Sha256`,`Aes128_Sha256_RsaOaep`,`lAes256_Sha256_RsaPss` | +| opcua.security-policy | OPC UA 连接使用的安全策略,不区分大小写。可以配置多个,用`,`连接。仅当配置了某一安全策略后,client 才能使用对应的策略连接。当前实现默认支持`None`和`Basic256Sha256`策略,建议将默认值改为任意非`None`策略:`None`策略虽然无需移动证书、操作方便,但并不安全,建议仅在调试环境中单独配置,生产环境的 server 不建议支持该策略。注意:V2.0.8 起支持该参数,且仅支持 client-server 模式 | String(安全性依次递增):<br>
`None`
`Basic128Rsa15`
`Basic256`
`Basic256Sha256`
`Aes128_Sha256_RsaOaep`
`Aes256_Sha256_RsaPss` | 选填 | `Basic256Sha256`,`Aes128_Sha256_RsaOaep`,`Aes256_Sha256_RsaPss` | +| sink.opcua.enable-anonymous-access | OPC UA 是否允许匿名访问 | Boolean | 选填 | true | +| sink.user | 用户,这里指 OPC UA 的允许用户 | String | 选填 | root | +| sink.password | 密码,这里指 OPC UA 的允许密码 | String | 选填 | TimechoDB@2021(V2.0.6.x 之前默认密码为 root) | +| opcua.with-quality | OPC UA 的测点发布是否为 value + quality 模式。启用配置后,系统将按以下规则处理写入数据:<br>
1. 同时包含 value 和 quality,则直接推送至 OPC UA Server。
2. 仅包含 value,则 quality 自动填充为 UNCERTAIN(默认值,支持自定义配置)。
3. 仅包含 quality,则该写入被忽略,不进行任何处理。
4. 包含非 value/quality 字段,则忽略该数据,并记录警告日志(日志频率可配置,避免高频干扰)。
5. quality 类型限制:目前仅支持布尔类型(true 表示 GOOD,false 表示 BAD); 注意:V2.0.8-beta 起支持该参数,且仅支持 client-server 模式 | Boolean | 选填 | false | -| opcua.value-name | With-quality 为 true 时生效,表示 value 测点的名字。 注意:V2.0.8-beta 起支持该参数,且仅支持 client-server 模式 | String | 选填 | value | -| opcua.quality-name | With-quality 为 true 时生效,表示 quality 测点的名字。 注意:V2.0.8-beta 起支持该参数,且仅支持 client-server 模式 | String | 选填 | quality | -| opcua.default-quality | 没有 quality 时,可以通过 SQL 参数指定`GOOD`/`UNCERTAIN`/`BAD`。 注意:V2.0.8-beta 起支持该参数,且仅支持 client-server 模式 | String:`GOOD`/`UNCERTAIN`/`BAD` | 选填 | `UNCERTAIN` | -| opcua.timeout-seconds | Client 连接 server 的超时秒数,仅在 IoTDB 为 client 时生效 注意:V2.0.8-beta 起支持该参数,且仅支持 client-server 模式 | Long | 选填 | 10L | +| opcua.with-quality | OPC UA 的测点发布是否为 value + quality 模式。启用配置后,系统将按以下规则处理写入数据:
1. 同时包含 value 和 quality,则直接推送至 OPC UA Server。
2. 仅包含 value,则 quality 自动填充为 UNCERTAIN(默认值,支持自定义配置)。
3. 仅包含 quality,则该写入被忽略,不进行任何处理。
4. 包含非 value/quality 字段,则忽略该数据,并记录警告日志(日志频率可配置,避免高频干扰)。
5. quality 类型限制:目前仅支持布尔类型(true 表示 GOOD,false 表示 BAD); 注意:V2.0.8 起支持该参数,且仅支持 client-server 模式 | Boolean | 选填 | false | +| opcua.value-name | With-quality 为 true 时生效,表示 value 测点的名字。 注意:V2.0.8 起支持该参数,且仅支持 client-server 模式 | String | 选填 | value | +| opcua.quality-name | With-quality 为 true 时生效,表示 quality 测点的名字。 注意:V2.0.8 起支持该参数,且仅支持 client-server 模式 | String | 选填 | quality | +| opcua.default-quality | 没有 quality 时,可以通过 SQL 参数指定`GOOD`/`UNCERTAIN`/`BAD`。 注意:V2.0.8 起支持该参数,且仅支持 client-server 模式 | String:`GOOD`/`UNCERTAIN`/`BAD` | 选填 | `UNCERTAIN` | +| opcua.timeout-seconds | Client 连接 server 的超时秒数,仅在 IoTDB 为 client 时生效 注意:V2.0.8 起支持该参数,且仅支持 client-server 模式 | Long | 选填 | 10L | #### 2.1.3 示例 @@ -288,7 +288,7 @@ create pipe p1 > **参数命名注意**:以上参数均支持省略 `opcua.` 前缀,例如 `node-urls` 和 `opcua.node-urls` 等价。 > -> **参数支持说明**:V2.0.8-beta 起支持以上`opcua. `相关参数,且仅支持` client-server` 模式 +> **参数支持说明**:V2.0.8 起支持以上`opcua. `相关参数,且仅支持` client-server` 模式 #### 3.1.3 示例 diff --git a/src/zh/UserGuide/latest/Tools-System/Data-Export-Tool_timecho.md b/src/zh/UserGuide/latest/Tools-System/Data-Export-Tool_timecho.md index 3b98da398..642d29af3 100644 --- a/src/zh/UserGuide/latest/Tools-System/Data-Export-Tool_timecho.md +++ b/src/zh/UserGuide/latest/Tools-System/Data-Export-Tool_timecho.md @@ -40,8 +40,8 @@ | -pw | --password | 密码 | 否 | TimechoDB@2021 (V2.0.6.x 版本之前为 root) | | -t | --target | 指定输出文件的目标文件夹,如果路径不存在新建文件夹 | √ | | | -pfn | --prefix\_file\_name | 指定导出文件的名称。例如:abc,生成的文件是abc\_0.tsfile、abc\_1.tsfile | 否 | dump\_0.tsfile | -| -q | --query | 要执行的查询语句。自 V2.0.8-beta 起,SQL 语句中的分号将被自动移除,查询执行保持正常。 | 否 | 无 | -| -timeout | --query\_timeout | 会话查询的超时时间(ms) | 否 | `-1`(V2.0.8-beta 之前)
`Long.MAX_VALUE`(V2.0.8-beta 及之后)
范围:`-1~Long.MAX_VALUE` | +| -q | --query | 要执行的查询语句。自 V2.0.8 起,SQL 语句中的分号会被自动移除,查询仍可正常执行。 | 否 | 无 | +| -timeout | --query\_timeout | 会话查询的超时时间(ms) | 否 | `-1`(V2.0.8 之前)<br>
`Long.MAX_VALUE`(V2.0.8 及之后)
范围:`-1~Long.MAX_VALUE` | | -help | --help | 显示帮助信息 | 否 | | ### 2.2 Csv 格式 diff --git a/src/zh/UserGuide/latest/User-Manual/Audit-Log_timecho.md b/src/zh/UserGuide/latest/User-Manual/Audit-Log_timecho.md index 787b61c27..a4de129c8 100644 --- a/src/zh/UserGuide/latest/User-Manual/Audit-Log_timecho.md +++ b/src/zh/UserGuide/latest/User-Manual/Audit-Log_timecho.md @@ -31,7 +31,7 @@ * 可通过参数设置审计日志文件的存储周期,包括基于 TTL 实现时间滚动和基于 SpaceTL 实现空间滚动。 * 审计日志文件默认加密存储 -> 注意:该功能从 V2.0.8-beta 版本开始提供。 +> 注意:该功能从 V2.0.8 版本开始提供。 ## 2. 配置参数 diff --git a/src/zh/UserGuide/latest/User-Manual/Maintenance-statement.md b/src/zh/UserGuide/latest/User-Manual/Maintenance-statement.md index 45b37b014..2e0d90de0 100644 --- a/src/zh/UserGuide/latest/User-Manual/Maintenance-statement.md +++ b/src/zh/UserGuide/latest/User-Manual/Maintenance-statement.md @@ -1,3 +1,6 @@ +--- +redirectTo: Maintenance-statement_apache.html +--- -# 运维语句 - -## 1. 状态查看 - -### 1.1 查看连接的模型 - -**含义**:返回当前连接的 sql_dialect 是树模型/表模型。 - -#### 语法: - -```SQL -showCurrentSqlDialectStatement - : SHOW CURRENT_SQL_DIALECT - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW CURRENT_SQL_DIALECT -``` - -执行结果如下: - -```SQL -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TREE| -+-----------------+ -``` - -### 1.2 查看集群版本 - -**含义**:返回当前集群的版本。 - -#### 语法: - -```SQL -showVersionStatement - : SHOW VERSION - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW VERSION -``` - -执行结果如下: - -```SQL -+-------+---------+ -|Version|BuildInfo| -+-------+---------+ -|2.0.1.2| 1ca4008| -+-------+---------+ -``` - -### 1.3 查看集群关键参数 - -**含义**:返回当前集群的关键参数。 - -#### 语法: - -```SQL -showVariablesStatement - : SHOW VARIABLES - ; -``` - -关键参数如下: - -1. **ClusterName**:当前集群的名称。 -2. **DataReplicationFactor**:数据副本的数量,表示每个数据分区(DataRegion)的副本数。 -3. **SchemaReplicationFactor**:元数据副本的数量,表示每个元数据分区(SchemaRegion)的副本数。 -4. **DataRegionConsensusProtocolClass**:数据分区(DataRegion)使用的共识协议类。 -5. **SchemaRegionConsensusProtocolClass**:元数据分区(SchemaRegion)使用的共识协议类。 -6. **ConfigNodeConsensusProtocolClass**:配置节点(ConfigNode)使用的共识协议类。 -7. **TimePartitionOrigin**:数据库时间分区的起始时间戳。 -8. **TimePartitionInterval**:数据库的时间分区间隔(单位:毫秒)。 -9. **ReadConsistencyLevel**:读取操作的一致性级别。 -10. **SchemaRegionPerDataNode**:数据节点(DataNode)上的元数据分区(SchemaRegion)数量。 -11. **DataRegionPerDataNode**:数据节点(DataNode)上的数据分区(DataRegion)数量。 -12. **SeriesSlotNum**:数据分区(DataRegion)的序列槽(SeriesSlot)数量。 -13. **SeriesSlotExecutorClass**:序列槽的实现类。 -14. **DiskSpaceWarningThreshold**:磁盘空间告警阈值(单位:百分比)。 -15. 
**TimestampPrecision**:时间戳精度。 - -#### 示例: - -```SQL -IoTDB> SHOW VARIABLES -``` - -执行结果如下: - -```SQL -+----------------------------------+-----------------------------------------------------------------+ -| Variable| Value| -+----------------------------------+-----------------------------------------------------------------+ -| ClusterName| defaultCluster| -| DataReplicationFactor| 1| -| SchemaReplicationFactor| 1| -| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| -|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| -| TimePartitionOrigin| 0| -| TimePartitionInterval| 604800000| -| ReadConsistencyLevel| strong| -| SchemaRegionPerDataNode| 1| -| DataRegionPerDataNode| 0| -| SeriesSlotNum| 1000| -| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| -| DiskSpaceWarningThreshold| 0.05| -| TimestampPrecision| ms| -+----------------------------------+-----------------------------------------------------------------+ -``` - -### 1.4 查看数据库当前时间 - -#### 语法: - -**含义**:返回数据库当前时间。 - -```SQL -showCurrentTimestampStatement - : SHOW CURRENT_TIMESTAMP - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW CURRENT_TIMESTAMP -``` - -执行结果如下: - -```SQL -+-----------------------------+ -| CurrentTimestamp| -+-----------------------------+ -|2025-02-17T11:11:52.987+08:00| -+-----------------------------+ -``` - -### 1.5 查看正在执行的查询信息 - -**含义**:用于显示所有正在执行的查询信息。 - -#### 语法: - -```SQL -showQueriesStatement - : SHOW (QUERIES | QUERY PROCESSLIST) - (WHERE where=booleanExpression)? - (ORDER BY sortItem (',' sortItem)*)? - limitOffsetClause - ; -``` - -**参数解释**: - -1. **WHERE** 子句:需保证过滤的目标列是结果集中存在的列 -2. **ORDER BY** 子句:需保证`sortKey`是结果集中存在的列 -3. **limitOffsetClause**: - - **含义**:用于限制结果集的返回数量。 - - **格式**:`LIMIT , `, `` 是偏移量,`` 是返回的行数。 -4. 
**QUERIES** 表中的列: - - **time**:查询开始的时间戳,时间戳精度与系统精度一致 - - **queryid**:查询语句的 ID - - **datanodeid**:发起查询语句的 DataNode 的ID - - **elapsedtime**:查询的执行耗时,单位是秒 - - **statement**:查询的 SQL 语句 - - -#### 示例: - -```SQL -IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003 -``` - -执行结果如下: - -```SQL -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -| Time| QueryId|DataNodeId|ElapsedTime| Statement| -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1| 1| 0.006|SHOW QUERIES WHERE elapsedtime > 0.003| -+-----------------------------+-----------------------+----------+-----------+--------------------------------------+ -``` - - -### 1.6 查看分区信息 - -**含义**:返回当前集群的分区信息。 - -#### 语法: - -```SQL -showRegionsStatement - : SHOW REGIONS - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW REGIONS -``` - -执行结果如下: - -```SQL -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -| 9|SchemaRegion|Running|root.__system| 21| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.555| | -| 10| DataRegion|Running|root.__system| 21| 21| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.556| 8.27 KB| -| 65|SchemaRegion|Running| root.ln| 1| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.113| | -| 66| DataRegion|Running| root.ln| 1| 1| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.425| 524 B| -+--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ -``` - -### 1.7 查看可用节点 - -**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 - -> V2.0.8-beta 起支持该功能 - -#### 语法: - -```SQL -showAvailableUrlsStatement - : SHOW AVAILABLE URLS - ; -``` - -#### 示例: - -```SQL -IoTDB> SHOW AVAILABLE URLS -``` - -执行结果如下: - -```SQL -+----------+-------+ -|RpcAddress|RpcPort| -+----------+-------+ -| 0.0.0.0| 6667| -+----------+-------+ -``` - - -## 2. 状态设置 - -### 2.1 设置连接的模型 - -**含义**:将当前连接的 sql_dialect 置为树模型/表模型,在树模型和表模型中均可使用该命令。 - -#### 语法: - -```SQL -SET SQL_DIALECT EQ (TABLE | TREE) -``` - -#### 示例: - -```SQL -IoTDB> SET SQL_DIALECT=TREE -IoTDB> SHOW CURRENT_SQL_DIALECT -``` - -执行结果如下: - -```SQL -+-----------------+ -|CurrentSqlDialect| -+-----------------+ -| TREE| -+-----------------+ -``` - -### 2.2 更新配置项 - -**含义**:用于更新配置项,执行完成后会进行配置项的热加载,对于支持热修改的配置项会立即生效。 - -#### 语法: - -```SQL -setConfigurationStatement - : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? - ; - -propertyAssignments - : property (',' property)* - ; - -property - : identifier EQ propertyValue - ; - -propertyValue - : DEFAULT - | expression - ; -``` - -**参数解释**: - -1. **propertyAssignments** - - **含义**:更新的配置列表,由多个 `property` 组成。 - - 可以更新多个配置列表,用逗号分隔。 - - **取值**: - - `DEFAULT`:将配置项恢复为默认值。 - - `expression`:具体的值,必须是一个字符串。 -2. 
**ON INTEGER_VALUE** - - **含义**:指定要更新配置的节点 ID。 - - **可选性**:可选。如果不指定或指定的值低于 0,则更新所有 ConfigNode 和 DataNode 的配置。 - -#### 示例: - -```SQL -IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1; -``` - -### 2.3 读取手动修改的配置文件 - -**含义**:用于读取手动修改过的配置文件,并对配置项进行热加载,对于支持热修改的配置项会立即生效。 - -#### 语法: - -```SQL -loadConfigurationStatement - : LOAD CONFIGURATION localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **localOrClusterMode** - - **含义**:指定配置热加载的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `LOCAL`:只对客户端直连的 DataNode 进行配置热加载。 - - `CLUSTER`:对集群中所有 DataNode 进行配置热加载。 - -#### 示例: - -```SQL -IoTDB> LOAD CONFIGURATION ON LOCAL; -``` - -### 2.4 设置系统的状态 - -**含义**:用于设置系统的状态。 - -#### 语法: - -```SQL -setSystemStatusStatement - : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **RUNNING | READONLY** - - **含义**:指定系统的新状态。 - - **取值**: - - `RUNNING`:将系统设置为运行状态,允许读写操作。 - - `READONLY`:将系统设置为只读状态,只允许读取操作,禁止写入操作。 -2. **localOrClusterMode** - - **含义**:指定状态变更的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `LOCAL`:仅对客户端直连的 DataNode 生效。 - - `CLUSTER`:对集群中所有 DataNode 生效。 - -#### 示例: - -```SQL -IoTDB> SET SYSTEM TO READONLY ON CLUSTER; -``` - - -## 3. 数据管理 - -### 3.1 刷写内存表中的数据到磁盘 - -**含义**:将内存表中的数据刷写到磁盘上。 - -#### 语法: - -```SQL -flushStatement - : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? - ; - -booleanValue - : TRUE | FALSE - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **identifier** - - **含义**:指定要刷写的路径名称。 - - **可选性**:可选。如果不指定,则默认刷写所有路径。 - - **多个路径**:可以指定多个路径名称,用逗号分隔。例如:`FLUSH root.ln, root.lnm`。 -2. **booleanValue** - - **含义**:指定刷写的内容。 - - **可选性**:可选。如果不指定,则默认刷写顺序和乱序空间的内存。 - - **取值**: - - `TRUE`:只刷写顺序空间的内存表。 - - `FALSE`:只刷写乱序空间的MemTable。 -3. **localOrClusterMode** - - **含义**:指定刷写的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `ON LOCAL`:只刷写客户端直连的 DataNode 上的内存表。 - - `ON CLUSTER`:刷写集群中所有 DataNode 上的内存表。 - -#### 示例: - -```SQL -IoTDB> FLUSH root.ln TRUE ON LOCAL; -``` - -## 4. 数据修复 - -### 4.1 启动后台扫描并修复 tsfile 任务 - -**含义**:启动一个后台任务,开始扫描并修复 tsfile,能够修复数据文件内的时间戳乱序类异常。 - -#### 语法: - -```SQL -startRepairDataStatement - : START REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **localOrClusterMode** - - **含义**:指定数据修复的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `ON LOCAL`:仅对客户端直连的 DataNode 执行。 - - `ON CLUSTER`:对集群中所有 DataNode 执行。 - -#### 示例: - -```SQL -IoTDB> START REPAIR DATA ON CLUSTER; -``` - -### 4.2 暂停后台修复 tsfile 任务 - -**含义**:暂停后台的修复任务,暂停中的任务可通过再次执行 start repair data 命令恢复。 - -#### 语法: - -```SQL -stopRepairDataStatement - : STOP REPAIR DATA localOrClusterMode? - ; - -localOrClusterMode - : (ON (LOCAL | CLUSTER)) - ; -``` - -**参数解释**: - -1. **localOrClusterMode** - - **含义**:指定数据修复的范围。 - - **可选性**:可选。默认值为 `CLUSTER`。 - - **取值**: - - `ON LOCAL`:仅对客户端直连的 DataNode 执行。 - - `ON CLUSTER`:对集群中所有 DataNode 执行。 - -#### 示例: - -```SQL -IoTDB> STOP REPAIR DATA ON CLUSTER; -``` - -## 5. 终止查询 - -### 5.1 主动终止查询 - -**含义**:使用该命令主动地终止查询。 - -#### 语法: - -```SQL -killQueryStatement - : KILL (QUERY queryId=string | ALL QUERIES) - ; -``` - -**参数解释**: - -1. **QUERY queryId=string** - - **含义**:指定要终止的查询的 ID。 `` 是正在执行的查询的唯一标识符。 - - **获取查询 ID**:可以通过 `SHOW QUERIES` 命令获取所有正在执行的查询及其 ID。 -2. 
**ALL QUERIES** - - **含义**:终止所有正在执行的查询。 - -#### 示例: - -通过指定 `queryId` 可以中止指定的查询,为了获取正在执行的查询 id,用户可以使用 show queries 命令,该命令将显示所有正在执行的查询列表。 - -```SQL -IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query -IoTDB> KILL ALL QUERIES; -- 终止所有query -``` \ No newline at end of file +--> \ No newline at end of file diff --git a/src/zh/UserGuide/latest/User-Manual/Maintenance-statement_apache.md b/src/zh/UserGuide/latest/User-Manual/Maintenance-statement_apache.md new file mode 100644 index 000000000..45b37b014 --- /dev/null +++ b/src/zh/UserGuide/latest/User-Manual/Maintenance-statement_apache.md @@ -0,0 +1,558 @@ + +# 运维语句 + +## 1. 状态查看 + +### 1.1 查看连接的模型 + +**含义**:返回当前连接的 sql_dialect 是树模型/表模型。 + +#### 语法: + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT +``` + +执行结果如下: + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 1.2 查看集群版本 + +**含义**:返回当前集群的版本。 + +#### 语法: + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW VERSION +``` + +执行结果如下: + +```SQL ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.3 查看集群关键参数 + +**含义**:返回当前集群的关键参数。 + +#### 语法: + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +关键参数如下: + +1. **ClusterName**:当前集群的名称。 +2. **DataReplicationFactor**:数据副本的数量,表示每个数据分区(DataRegion)的副本数。 +3. **SchemaReplicationFactor**:元数据副本的数量,表示每个元数据分区(SchemaRegion)的副本数。 +4. **DataRegionConsensusProtocolClass**:数据分区(DataRegion)使用的共识协议类。 +5. **SchemaRegionConsensusProtocolClass**:元数据分区(SchemaRegion)使用的共识协议类。 +6. **ConfigNodeConsensusProtocolClass**:配置节点(ConfigNode)使用的共识协议类。 +7. **TimePartitionOrigin**:数据库时间分区的起始时间戳。 +8. **TimePartitionInterval**:数据库的时间分区间隔(单位:毫秒)。 +9. **ReadConsistencyLevel**:读取操作的一致性级别。 +10. **SchemaRegionPerDataNode**:数据节点(DataNode)上的元数据分区(SchemaRegion)数量。 +11. **DataRegionPerDataNode**:数据节点(DataNode)上的数据分区(DataRegion)数量。 +12. **SeriesSlotNum**:数据分区(DataRegion)的序列槽(SeriesSlot)数量。 +13. **SeriesSlotExecutorClass**:序列槽的实现类。 +14. **DiskSpaceWarningThreshold**:磁盘空间告警阈值(单位:百分比)。 +15. 
**TimestampPrecision**:时间戳精度。 + +#### 示例: + +```SQL +IoTDB> SHOW VARIABLES +``` + +执行结果如下: + +```SQL ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.4 查看数据库当前时间 + +#### 语法: + +**含义**:返回数据库当前时间。 + +```SQL +showCurrentTimestampStatement + : SHOW CURRENT_TIMESTAMP + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW CURRENT_TIMESTAMP +``` + +执行结果如下: + +```SQL ++-----------------------------+ +| CurrentTimestamp| ++-----------------------------+ +|2025-02-17T11:11:52.987+08:00| ++-----------------------------+ +``` + +### 1.5 查看正在执行的查询信息 + +**含义**:用于显示所有正在执行的查询信息。 + +#### 语法: + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)? + limitOffsetClause + ; +``` + +**参数解释**: + +1. **WHERE** 子句:需保证过滤的目标列是结果集中存在的列 +2. **ORDER BY** 子句:需保证`sortKey`是结果集中存在的列 +3. **limitOffsetClause**: + - **含义**:用于限制结果集的返回数量。 + - **格式**:`LIMIT , `, `` 是偏移量,`` 是返回的行数。 +4. 
**QUERIES** 表中的列: + - **time**:查询开始的时间戳,时间戳精度与系统精度一致 + - **queryid**:查询语句的 ID + - **datanodeid**:发起查询语句的 DataNode 的ID + - **elapsedtime**:查询的执行耗时,单位是秒 + - **statement**:查询的 SQL 语句 + + +#### 示例: + +```SQL +IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003 +``` + +执行结果如下: + +```SQL ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +| Time| QueryId|DataNodeId|ElapsedTime| Statement| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1| 1| 0.006|SHOW QUERIES WHERE elapsedtime > 0.003| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +``` + + +### 1.6 查看分区信息 + +**含义**:返回当前集群的分区信息。 + +#### 语法: + +```SQL +showRegionsStatement + : SHOW REGIONS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW REGIONS +``` + +执行结果如下: + +```SQL ++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| ++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +| 9|SchemaRegion|Running|root.__system| 21| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.555| | +| 10| DataRegion|Running|root.__system| 21| 21| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.556| 8.27 KB| +| 65|SchemaRegion|Running| root.ln| 1| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.113| | +| 66| DataRegion|Running| root.ln| 1| 1| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.425| 524 B| ++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +``` + +### 1.7 查看可用节点 + +**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 + +> V2.0.8-beta 起支持该功能 + +#### 语法: + +```SQL +showAvailableUrlsStatement + : SHOW AVAILABLE URLS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW AVAILABLE URLS +``` + +执行结果如下: + +```SQL ++----------+-------+ +|RpcAddress|RpcPort| ++----------+-------+ +| 0.0.0.0| 6667| ++----------+-------+ +``` + + +## 2. 状态设置 + +### 2.1 设置连接的模型 + +**含义**:将当前连接的 sql_dialect 置为树模型/表模型,在树模型和表模型中均可使用该命令。 + +#### 语法: + +```SQL +SET SQL_DIALECT EQ (TABLE | TREE) +``` + +#### 示例: + +```SQL +IoTDB> SET SQL_DIALECT=TREE +IoTDB> SHOW CURRENT_SQL_DIALECT +``` + +执行结果如下: + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 2.2 更新配置项 + +**含义**:用于更新配置项,执行完成后会进行配置项的热加载,对于支持热修改的配置项会立即生效。 + +#### 语法: + +```SQL +setConfigurationStatement + : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? + ; + +propertyAssignments + : property (',' property)* + ; + +property + : identifier EQ propertyValue + ; + +propertyValue + : DEFAULT + | expression + ; +``` + +**参数解释**: + +1. **propertyAssignments** + - **含义**:更新的配置列表,由多个 `property` 组成。 + - 可以更新多个配置列表,用逗号分隔。 + - **取值**: + - `DEFAULT`:将配置项恢复为默认值。 + - `expression`:具体的值,必须是一个字符串。 +2. 
**ON INTEGER_VALUE** + - **含义**:指定要更新配置的节点 ID。 + - **可选性**:可选。如果不指定或指定的值低于 0,则更新所有 ConfigNode 和 DataNode 的配置。 + +#### 示例: + +```SQL +IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1; +``` + +### 2.3 读取手动修改的配置文件 + +**含义**:用于读取手动修改过的配置文件,并对配置项进行热加载,对于支持热修改的配置项会立即生效。 + +#### 语法: + +```SQL +loadConfigurationStatement + : LOAD CONFIGURATION localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **localOrClusterMode** + - **含义**:指定配置热加载的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `LOCAL`:只对客户端直连的 DataNode 进行配置热加载。 + - `CLUSTER`:对集群中所有 DataNode 进行配置热加载。 + +#### 示例: + +```SQL +IoTDB> LOAD CONFIGURATION ON LOCAL; +``` + +### 2.4 设置系统的状态 + +**含义**:用于设置系统的状态。 + +#### 语法: + +```SQL +setSystemStatusStatement + : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **RUNNING | READONLY** + - **含义**:指定系统的新状态。 + - **取值**: + - `RUNNING`:将系统设置为运行状态,允许读写操作。 + - `READONLY`:将系统设置为只读状态,只允许读取操作,禁止写入操作。 +2. **localOrClusterMode** + - **含义**:指定状态变更的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `LOCAL`:仅对客户端直连的 DataNode 生效。 + - `CLUSTER`:对集群中所有 DataNode 生效。 + +#### 示例: + +```SQL +IoTDB> SET SYSTEM TO READONLY ON CLUSTER; +``` + + +## 3. 数据管理 + +### 3.1 刷写内存表中的数据到磁盘 + +**含义**:将内存表中的数据刷写到磁盘上。 + +#### 语法: + +```SQL +flushStatement + : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? + ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **identifier** + - **含义**:指定要刷写的路径名称。 + - **可选性**:可选。如果不指定,则默认刷写所有路径。 + - **多个路径**:可以指定多个路径名称,用逗号分隔。例如:`FLUSH root.ln, root.lnm`。 +2. **booleanValue** + - **含义**:指定刷写的内容。 + - **可选性**:可选。如果不指定,则默认刷写顺序和乱序空间的内存。 + - **取值**: + - `TRUE`:只刷写顺序空间的内存表。 + - `FALSE`:只刷写乱序空间的MemTable。 +3. **localOrClusterMode** + - **含义**:指定刷写的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `ON LOCAL`:只刷写客户端直连的 DataNode 上的内存表。 + - `ON CLUSTER`:刷写集群中所有 DataNode 上的内存表。 + +#### 示例: + +```SQL +IoTDB> FLUSH root.ln TRUE ON LOCAL; +``` + +## 4. 数据修复 + +### 4.1 启动后台扫描并修复 tsfile 任务 + +**含义**:启动一个后台任务,开始扫描并修复 tsfile,能够修复数据文件内的时间戳乱序类异常。 + +#### 语法: + +```SQL +startRepairDataStatement + : START REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **localOrClusterMode** + - **含义**:指定数据修复的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `ON LOCAL`:仅对客户端直连的 DataNode 执行。 + - `ON CLUSTER`:对集群中所有 DataNode 执行。 + +#### 示例: + +```SQL +IoTDB> START REPAIR DATA ON CLUSTER; +``` + +### 4.2 暂停后台修复 tsfile 任务 + +**含义**:暂停后台的修复任务,暂停中的任务可通过再次执行 start repair data 命令恢复。 + +#### 语法: + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **localOrClusterMode** + - **含义**:指定数据修复的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `ON LOCAL`:仅对客户端直连的 DataNode 执行。 + - `ON CLUSTER`:对集群中所有 DataNode 执行。 + +#### 示例: + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. 终止查询 + +### 5.1 主动终止查询 + +**含义**:使用该命令主动地终止查询。 + +#### 语法: + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**参数解释**: + +1. **QUERY queryId=string** + - **含义**:指定要终止的查询的 ID。 `` 是正在执行的查询的唯一标识符。 + - **获取查询 ID**:可以通过 `SHOW QUERIES` 命令获取所有正在执行的查询及其 ID。 +2. 
**ALL QUERIES** + - **含义**:终止所有正在执行的查询。 + +#### 示例: + +通过指定 `queryId` 可以中止指定的查询,为了获取正在执行的查询 id,用户可以使用 show queries 命令,该命令将显示所有正在执行的查询列表。 + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query +IoTDB> KILL ALL QUERIES; -- 终止所有query +``` \ No newline at end of file diff --git a/src/zh/UserGuide/latest/User-Manual/Maintenance-statement_timecho.md b/src/zh/UserGuide/latest/User-Manual/Maintenance-statement_timecho.md new file mode 100644 index 000000000..93259a49a --- /dev/null +++ b/src/zh/UserGuide/latest/User-Manual/Maintenance-statement_timecho.md @@ -0,0 +1,558 @@ + +# 运维语句 + +## 1. 状态查看 + +### 1.1 查看连接的模型 + +**含义**:返回当前连接的 sql_dialect 是树模型/表模型。 + +#### 语法: + +```SQL +showCurrentSqlDialectStatement + : SHOW CURRENT_SQL_DIALECT + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW CURRENT_SQL_DIALECT +``` + +执行结果如下: + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 1.2 查看集群版本 + +**含义**:返回当前集群的版本。 + +#### 语法: + +```SQL +showVersionStatement + : SHOW VERSION + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW VERSION +``` + +执行结果如下: + +```SQL ++-------+---------+ +|Version|BuildInfo| ++-------+---------+ +|2.0.1.2| 1ca4008| ++-------+---------+ +``` + +### 1.3 查看集群关键参数 + +**含义**:返回当前集群的关键参数。 + +#### 语法: + +```SQL +showVariablesStatement + : SHOW VARIABLES + ; +``` + +关键参数如下: + +1. **ClusterName**:当前集群的名称。 +2. **DataReplicationFactor**:数据副本的数量,表示每个数据分区(DataRegion)的副本数。 +3. **SchemaReplicationFactor**:元数据副本的数量,表示每个元数据分区(SchemaRegion)的副本数。 +4. **DataRegionConsensusProtocolClass**:数据分区(DataRegion)使用的共识协议类。 +5. **SchemaRegionConsensusProtocolClass**:元数据分区(SchemaRegion)使用的共识协议类。 +6. **ConfigNodeConsensusProtocolClass**:配置节点(ConfigNode)使用的共识协议类。 +7. **TimePartitionOrigin**:数据库时间分区的起始时间戳。 +8. **TimePartitionInterval**:数据库的时间分区间隔(单位:毫秒)。 +9. **ReadConsistencyLevel**:读取操作的一致性级别。 +10. **SchemaRegionPerDataNode**:数据节点(DataNode)上的元数据分区(SchemaRegion)数量。 +11. **DataRegionPerDataNode**:数据节点(DataNode)上的数据分区(DataRegion)数量。 +12. **SeriesSlotNum**:数据分区(DataRegion)的序列槽(SeriesSlot)数量。 +13. **SeriesSlotExecutorClass**:序列槽的实现类。 +14. **DiskSpaceWarningThreshold**:磁盘空间告警阈值(单位:百分比)。 +15. 
**TimestampPrecision**:时间戳精度。 + +#### 示例: + +```SQL +IoTDB> SHOW VARIABLES +``` + +执行结果如下: + +```SQL ++----------------------------------+-----------------------------------------------------------------+ +| Variable| Value| ++----------------------------------+-----------------------------------------------------------------+ +| ClusterName| defaultCluster| +| DataReplicationFactor| 1| +| SchemaReplicationFactor| 1| +| DataRegionConsensusProtocolClass| org.apache.iotdb.consensus.iot.IoTConsensus| +|SchemaRegionConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| ConfigNodeConsensusProtocolClass| org.apache.iotdb.consensus.ratis.RatisConsensus| +| TimePartitionOrigin| 0| +| TimePartitionInterval| 604800000| +| ReadConsistencyLevel| strong| +| SchemaRegionPerDataNode| 1| +| DataRegionPerDataNode| 0| +| SeriesSlotNum| 1000| +| SeriesSlotExecutorClass|org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor| +| DiskSpaceWarningThreshold| 0.05| +| TimestampPrecision| ms| ++----------------------------------+-----------------------------------------------------------------+ +``` + +### 1.4 查看数据库当前时间 + +#### 语法: + +**含义**:返回数据库当前时间。 + +```SQL +showCurrentTimestampStatement + : SHOW CURRENT_TIMESTAMP + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW CURRENT_TIMESTAMP +``` + +执行结果如下: + +```SQL ++-----------------------------+ +| CurrentTimestamp| ++-----------------------------+ +|2025-02-17T11:11:52.987+08:00| ++-----------------------------+ +``` + +### 1.5 查看正在执行的查询信息 + +**含义**:用于显示所有正在执行的查询信息。 + +#### 语法: + +```SQL +showQueriesStatement + : SHOW (QUERIES | QUERY PROCESSLIST) + (WHERE where=booleanExpression)? + (ORDER BY sortItem (',' sortItem)*)? + limitOffsetClause + ; +``` + +**参数解释**: + +1. **WHERE** 子句:需保证过滤的目标列是结果集中存在的列 +2. **ORDER BY** 子句:需保证`sortKey`是结果集中存在的列 +3. **limitOffsetClause**: + - **含义**:用于限制结果集的返回数量。 + - **格式**:`LIMIT , `, `` 是偏移量,`` 是返回的行数。 +4. 
**QUERIES** 表中的列: + - **time**:查询开始的时间戳,时间戳精度与系统精度一致 + - **queryid**:查询语句的 ID + - **datanodeid**:发起查询语句的 DataNode 的ID + - **elapsedtime**:查询的执行耗时,单位是秒 + - **statement**:查询的 SQL 语句 + + +#### 示例: + +```SQL +IoTDB> SHOW QUERIES WHERE elapsedtime > 0.003 +``` + +执行结果如下: + +```SQL ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +| Time| QueryId|DataNodeId|ElapsedTime| Statement| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1| 1| 0.006|SHOW QUERIES WHERE elapsedtime > 0.003| ++-----------------------------+-----------------------+----------+-----------+--------------------------------------+ +``` + + +### 1.6 查看分区信息 + +**含义**:返回当前集群的分区信息。 + +#### 语法: + +```SQL +showRegionsStatement + : SHOW REGIONS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW REGIONS +``` + +执行结果如下: + +```SQL ++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +|RegionId| Type| Status| Database|SeriesSlotNum|TimeSlotNum|DataNodeId|RpcAddress|RpcPort|InternalAddress| Role| CreateTime|TsFileSize| ++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +| 9|SchemaRegion|Running|root.__system| 21| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.555| | +| 10| DataRegion|Running|root.__system| 21| 21| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-01T17:37:01.556| 8.27 KB| +| 65|SchemaRegion|Running| root.ln| 1| 0| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.113| | +| 66| DataRegion|Running| root.ln| 1| 1| 1| 0.0.0.0| 6667| 127.0.0.1|Leader|2025-08-25T14:46:50.425| 524 B| ++--------+------------+-------+-------------+-------------+-----------+----------+----------+-------+---------------+------+-----------------------+----------+ +``` + +### 1.7 查看可用节点 + +**含义**:返回当前集群所有可用的 DataNode 的 RPC 地址和端口。注意:这里对于“可用”的定义为:处于非 REMOVING 状态的 DN 节点。 + +> V2.0.8 起支持该功能 + +#### 语法: + +```SQL +showAvailableUrlsStatement + : SHOW AVAILABLE URLS + ; +``` + +#### 示例: + +```SQL +IoTDB> SHOW AVAILABLE URLS +``` + +执行结果如下: + +```SQL ++----------+-------+ +|RpcAddress|RpcPort| ++----------+-------+ +| 0.0.0.0| 6667| ++----------+-------+ +``` + + +## 2. 状态设置 + +### 2.1 设置连接的模型 + +**含义**:将当前连接的 sql_dialect 置为树模型/表模型,在树模型和表模型中均可使用该命令。 + +#### 语法: + +```SQL +SET SQL_DIALECT EQ (TABLE | TREE) +``` + +#### 示例: + +```SQL +IoTDB> SET SQL_DIALECT=TREE +IoTDB> SHOW CURRENT_SQL_DIALECT +``` + +执行结果如下: + +```SQL ++-----------------+ +|CurrentSqlDialect| ++-----------------+ +| TREE| ++-----------------+ +``` + +### 2.2 更新配置项 + +**含义**:用于更新配置项,执行完成后会进行配置项的热加载,对于支持热修改的配置项会立即生效。 + +#### 语法: + +```SQL +setConfigurationStatement + : SET CONFIGURATION propertyAssignments (ON INTEGER_VALUE)? + ; + +propertyAssignments + : property (',' property)* + ; + +property + : identifier EQ propertyValue + ; + +propertyValue + : DEFAULT + | expression + ; +``` + +**参数解释**: + +1. **propertyAssignments** + - **含义**:更新的配置列表,由多个 `property` 组成。 + - 可以更新多个配置列表,用逗号分隔。 + - **取值**: + - `DEFAULT`:将配置项恢复为默认值。 + - `expression`:具体的值,必须是一个字符串。 +2. 
**ON INTEGER_VALUE** + - **含义**:指定要更新配置的节点 ID。 + - **可选性**:可选。如果不指定或指定的值低于 0,则更新所有 ConfigNode 和 DataNode 的配置。 + +#### 示例: + +```SQL +IoTDB> SET CONFIGURATION 'disk_space_warning_threshold'='0.05','heartbeat_interval_in_ms'='1000' ON 1; +``` + +### 2.3 读取手动修改的配置文件 + +**含义**:用于读取手动修改过的配置文件,并对配置项进行热加载,对于支持热修改的配置项会立即生效。 + +#### 语法: + +```SQL +loadConfigurationStatement + : LOAD CONFIGURATION localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **localOrClusterMode** + - **含义**:指定配置热加载的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `LOCAL`:只对客户端直连的 DataNode 进行配置热加载。 + - `CLUSTER`:对集群中所有 DataNode 进行配置热加载。 + +#### 示例: + +```SQL +IoTDB> LOAD CONFIGURATION ON LOCAL; +``` + +### 2.4 设置系统的状态 + +**含义**:用于设置系统的状态。 + +#### 语法: + +```SQL +setSystemStatusStatement + : SET SYSTEM TO (READONLY | RUNNING) localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **RUNNING | READONLY** + - **含义**:指定系统的新状态。 + - **取值**: + - `RUNNING`:将系统设置为运行状态,允许读写操作。 + - `READONLY`:将系统设置为只读状态,只允许读取操作,禁止写入操作。 +2. **localOrClusterMode** + - **含义**:指定状态变更的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `LOCAL`:仅对客户端直连的 DataNode 生效。 + - `CLUSTER`:对集群中所有 DataNode 生效。 + +#### 示例: + +```SQL +IoTDB> SET SYSTEM TO READONLY ON CLUSTER; +``` + + +## 3. 数据管理 + +### 3.1 刷写内存表中的数据到磁盘 + +**含义**:将内存表中的数据刷写到磁盘上。 + +#### 语法: + +```SQL +flushStatement + : FLUSH identifier? (',' identifier)* booleanValue? localOrClusterMode? + ; + +booleanValue + : TRUE | FALSE + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **identifier** + - **含义**:指定要刷写的路径名称。 + - **可选性**:可选。如果不指定,则默认刷写所有路径。 + - **多个路径**:可以指定多个路径名称,用逗号分隔。例如:`FLUSH root.ln, root.lnm`。 +2. **booleanValue** + - **含义**:指定刷写的内容。 + - **可选性**:可选。如果不指定,则默认刷写顺序和乱序空间的内存。 + - **取值**: + - `TRUE`:只刷写顺序空间的内存表。 + - `FALSE`:只刷写乱序空间的MemTable。 +3. **localOrClusterMode** + - **含义**:指定刷写的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `ON LOCAL`:只刷写客户端直连的 DataNode 上的内存表。 + - `ON CLUSTER`:刷写集群中所有 DataNode 上的内存表。 + +#### 示例: + +```SQL +IoTDB> FLUSH root.ln TRUE ON LOCAL; +``` + +## 4. 数据修复 + +### 4.1 启动后台扫描并修复 tsfile 任务 + +**含义**:启动一个后台任务,开始扫描并修复 tsfile,能够修复数据文件内的时间戳乱序类异常。 + +#### 语法: + +```SQL +startRepairDataStatement + : START REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **localOrClusterMode** + - **含义**:指定数据修复的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `ON LOCAL`:仅对客户端直连的 DataNode 执行。 + - `ON CLUSTER`:对集群中所有 DataNode 执行。 + +#### 示例: + +```SQL +IoTDB> START REPAIR DATA ON CLUSTER; +``` + +### 4.2 暂停后台修复 tsfile 任务 + +**含义**:暂停后台的修复任务,暂停中的任务可通过再次执行 start repair data 命令恢复。 + +#### 语法: + +```SQL +stopRepairDataStatement + : STOP REPAIR DATA localOrClusterMode? + ; + +localOrClusterMode + : (ON (LOCAL | CLUSTER)) + ; +``` + +**参数解释**: + +1. **localOrClusterMode** + - **含义**:指定数据修复的范围。 + - **可选性**:可选。默认值为 `CLUSTER`。 + - **取值**: + - `ON LOCAL`:仅对客户端直连的 DataNode 执行。 + - `ON CLUSTER`:对集群中所有 DataNode 执行。 + +#### 示例: + +```SQL +IoTDB> STOP REPAIR DATA ON CLUSTER; +``` + +## 5. 终止查询 + +### 5.1 主动终止查询 + +**含义**:使用该命令主动地终止查询。 + +#### 语法: + +```SQL +killQueryStatement + : KILL (QUERY queryId=string | ALL QUERIES) + ; +``` + +**参数解释**: + +1. **QUERY queryId=string** + - **含义**:指定要终止的查询的 ID。 `` 是正在执行的查询的唯一标识符。 + - **获取查询 ID**:可以通过 `SHOW QUERIES` 命令获取所有正在执行的查询及其 ID。 +2. 
**ALL QUERIES** +   - **含义**:终止所有正在执行的查询。 + +#### 示例: + +通过指定 `queryId` 可以终止指定的查询;正在执行的查询 ID 可以通过 `SHOW QUERIES` 命令获取,该命令会列出所有正在执行的查询。完整的操作流程可参考文末的示意。 + +```SQL +IoTDB> KILL QUERY 20250108_101015_00000_1; -- 终止指定query +IoTDB> KILL ALL QUERIES; -- 终止所有query +```
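+
+> 以下给出一个从 `SHOW QUERIES` 到 `KILL QUERY` 的完整操作示意。示意中的 QueryId、耗时与查询语句均为假设值,实际请以 `SHOW QUERIES` 的真实返回结果为准:
+
+```SQL
+-- 第一步:查看正在执行的查询,获取目标查询的 QueryId(以下输出为假设值)
+IoTDB> SHOW QUERIES
++-----------------------------+-----------------------+----------+-----------+---------------------+
+|                         Time|                QueryId|DataNodeId|ElapsedTime|            Statement|
++-----------------------------+-----------------------+----------+-----------+---------------------+
+|2025-05-09T15:16:01.293+08:00|20250509_071601_00015_1|         1|      0.006|select * from root.**|
++-----------------------------+-----------------------+----------+-----------+---------------------+
+
+-- 第二步:使用上一步获取的 QueryId 终止该查询
+IoTDB> KILL QUERY 20250509_071601_00015_1;
+```
\ No newline at end of file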