Skip to content

Commit 29d7069

Browse files
authored
Merge branch 'main' into fix_using_0
2 parents 878783b + 5267ad5 commit 29d7069

File tree

55 files changed

+1061
-284
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

55 files changed

+1061
-284
lines changed

Cargo.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
{
2+
"label": "System Functions",
3+
"link": {
4+
"type": "generated-index",
5+
"slug": "/reference/functions/system-functions"
6+
}
7+
}
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
---
2+
title: CLUSTERING_INFORMATION
3+
---
4+
5+
Returns clustering information of a table.
6+
7+
## Syntax
8+
9+
```sql
10+
CLUSTERING_INFORMATION('<database_name>', '<table_name>')
11+
```
12+
13+
## Examples
14+
15+
```sql
16+
CREATE TABLE mytable(a int, b int) CLUSTER BY(a+1);
17+
18+
INSERT INTO mytable VALUES(1,1),(3,3);
19+
INSERT INTO mytable VALUES(2,2),(5,5);
20+
INSERT INTO mytable VALUES(4,4);
21+
22+
SELECT * FROM CLUSTERING_INFORMATION('default', 'mytable');
23+
24+
---
25+
| cluster_by_keys | total_block_count | total_constant_block_count | average_overlaps | average_depth | block_depth_histogram |
26+
|-----------------|-------------------|----------------------------|------------------|---------------|-----------------------|
27+
| ((a + 1)) | 3 | 1 | 1.3333 | 2.0 | {"00002":3} |
28+
```
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
---
2+
title: FUSE_SEGMENT
3+
---
4+
5+
Returns the segment information of a snapshot of a table.
6+
7+
## Syntax
8+
9+
```sql
10+
FUSE_SEGMENT('<database_name>', '<table_name>','<snapshot_id>')
11+
```
12+
13+
## Examples
14+
15+
```sql
16+
CREATE TABLE mytable(c int);
17+
INSERT INTO mytable values(1);
18+
INSERT INTO mytable values(2);
19+
20+
-- Obtain a snapshot ID
21+
SELECT snapshot_id FROM FUSE_SNAPSHOT('default', 'mytable') limit 1;
22+
23+
---
24+
+----------------------------------+
25+
| snapshot_id |
26+
+----------------------------------+
27+
| 82c572947efa476892bd7c0635158ba2 |
28+
+----------------------------------+
29+
30+
SELECT * FROM FUSE_SEGMENT('default', 'mytable', '82c572947efa476892bd7c0635158ba2');
31+
32+
---
33+
+----------------------------------------------------+----------------+-------------+-----------+--------------------+------------------+
34+
| file_location | format_version | block_count | row_count | bytes_uncompressed | bytes_compressed |
35+
+----------------------------------------------------+----------------+-------------+-----------+--------------------+------------------+
36+
| 1/319/_sg/d35fe7bf99584301b22e8f6a8a9c97f9_v1.json | 1 | 1 | 1 | 4 | 184 |
37+
| 1/319/_sg/c261059d47c840e1b749222dabb4b2bb_v1.json | 1 | 1 | 1 | 4 | 184 |
38+
+----------------------------------------------------+----------------+-------------+-----------+--------------------+------------------+
39+
```
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
---
2+
title: FUSE_SNAPSHOT
3+
---
4+
5+
Returns the snapshot information of a table for querying previous versions of your data. For more information, see [AT](../../30-sql/20-query-syntax/dml-at.md).
6+
7+
## Syntax
8+
9+
```sql
10+
FUSE_SNAPSHOT('<database_name>', '<table_name>')
11+
```
12+
13+
## Examples
14+
15+
```sql
16+
CREATE TABLE mytable(a int, b int) CLUSTER BY(a+1);
17+
18+
INSERT INTO mytable VALUES(1,1),(3,3);
19+
INSERT INTO mytable VALUES(2,2),(5,5);
20+
INSERT INTO mytable VALUES(4,4);
21+
22+
SELECT * FROM FUSE_SNAPSHOT('default', 'mytable');
23+
24+
---
25+
| snapshot_id | snapshot_location | format_version | previous_snapshot_id | segment_count | block_count | row_count | bytes_uncompressed | bytes_compressed | timestamp |
26+
|----------------------------------|------------------------------------------------------------|----------------|----------------------------------|---------------|-------------|-----------|--------------------|------------------|----------------------------|
27+
| dd98266f968f4817b470255592d04fda | 670655/670675/_ss/dd98266f968f4817b470255592d04fda_v1.json | 1 | \N | 1 | 1 | 2 | 16 | 290 | 2022-09-07 01:58:55.204997 |
28+
| 2f2d004ff6f842cdb25f5631b2bbb703 | 670655/670675/_ss/2f2d004ff6f842cdb25f5631b2bbb703_v1.json | 1 | dd98266f968f4817b470255592d04fda | 2 | 2 | 4 | 32 | 580 | 2022-09-07 01:59:09.742999 |
29+
| 0aa6dfd5d5364bde80f21161ba48c96e | 670655/670675/_ss/0aa6dfd5d5364bde80f21161ba48c96e_v1.json | 1 | 2f2d004ff6f842cdb25f5631b2bbb703 | 3 | 3 | 5 | 40 | 862 | 2022-09-07 01:59:16.858454 |
30+
```

docs/doc/30-reference/30-sql/00-ddl/40-stage/01-ddl-create-stage.md

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ CREATE STAGE [ IF NOT EXISTS ] <internal_stage_name>
1212
[ FILE_FORMAT = ( { TYPE = { CSV | PARQUET } [ formatTypeOptions ] ) } ]
1313
[ COPY_OPTIONS = ( copyOptions ) ]
1414
[ COMMENT = '<string_literal>' ]
15-
15+
1616
-- External stage
1717
CREATE STAGE [ IF NOT EXISTS ] <external_stage_name>
1818
externalStageParams
@@ -70,8 +70,8 @@ externalStageParams ::=
7070
### formatTypeOptions
7171
```
7272
formatTypeOptions ::=
73-
RECORD_DELIMITER = '<character>'
74-
FIELD_DELIMITER = '<character>'
73+
RECORD_DELIMITER = '<character>'
74+
FIELD_DELIMITER = '<character>'
7575
SKIP_HEADER = <integer>
7676
```
7777

@@ -85,11 +85,13 @@ formatTypeOptions ::=
8585
```
8686
copyOptions ::=
8787
[ SIZE_LIMIT = <num> ]
88+
[ PURGE = <bool> ]
8889
```
8990

9091
| Parameters | Description | Required |
9192
| ----------- | ----------- | --- |
9293
| `SIZE_LIMIT = <num>` | Number (> 0) that specifies the maximum rows of data to be loaded for a given COPY statement. Default `0` | Optional |
94+
| `PURGE = <bool>` | True specifies that the command will purge the files in the stage if they are loaded successfully into the table. Default `false` | Optional |
9395

9496

9597
## Examples

docs/doc/30-reference/30-sql/10-dml/dml-copy-into-location.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,8 +56,8 @@ externalLocation (for Amazon S3) ::=
5656
### formatTypeOptions
5757
```
5858
formatTypeOptions ::=
59-
RECORD_DELIMITER = '<character>'
60-
FIELD_DELIMITER = '<character>'
59+
RECORD_DELIMITER = '<character>'
60+
FIELD_DELIMITER = '<character>'
6161
SKIP_HEADER = <integer>
6262
```
6363

docs/doc/30-reference/30-sql/10-dml/dml-copy-into-table.md

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -97,8 +97,8 @@ A regular expression pattern string, enclosed in single quotes, specifying the f
9797

9898
```
9999
formatTypeOptions ::=
100-
RECORD_DELIMITER = '<character>'
101-
FIELD_DELIMITER = '<character>'
100+
RECORD_DELIMITER = '<character>'
101+
FIELD_DELIMITER = '<character>'
102102
SKIP_HEADER = <integer>
103103
COMPRESSION = AUTO | GZIP | BZ2 | BROTLI | ZSTD | DEFLATE | RAW_DEFLATE | NONE
104104
```
@@ -129,7 +129,7 @@ Default: `NONE`
129129

130130
Values:
131131

132-
| Values | Notes |
132+
| Values | Notes |
133133
|---------------|-----------------------------------------------------------------|
134134
| `AUTO` | Auto detect compression via file extensions |
135135
| `GZIP` | |
@@ -145,11 +145,13 @@ Values:
145145
```
146146
copyOptions ::=
147147
[ SIZE_LIMIT = <num> ]
148+
[ PURGE = <bool> ]
148149
```
149150

150151
| Parameters | Description | Required |
151152
| ----------- | ----------- | --- |
152153
| `SIZE_LIMIT = <num>` | Number (> 0) that specifies the maximum rows of data to be loaded for a given COPY statement. Default `0` | Optional |
154+
| `PURGE = <bool>` | True specifies that the command will purge the files in the stage if they are loaded successfully into the table. Default `false` | Optional |
153155

154156
## Examples
155157

docs/doc/30-reference/30-sql/20-query-syntax/dml-at.md

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
title: AT
33
---
44

5-
The SELECT statement can include an AT clause that allows you to query previous versions of your data by a specific snapshot ID or timestamp.
5+
The SELECT statement can include an AT clause that allows you to query previous versions of your data by a specific snapshot ID or timestamp.
66

77
Databend automatically creates snapshots when data updates occur, so a snapshot can be considered as a view of your data at a time point in the past. You can access a snapshot by the snapshot ID or the timestamp at which the snapshot was created. For how to obtain the snapshot ID and timestamp, see [Obtaining Snapshot ID and Timestamp](#obtaining-snapshot-id-and-timestamp).
88

@@ -26,12 +26,7 @@ SELECT snapshot_id,
2626
FROM fuse_snapshot('<database_name>', '<table_name>');
2727
```
2828

29-
You can find more information related to the snapshots by executing the following statement:
30-
31-
```sql
32-
SELECT *
33-
FROM Fuse_snapshot('<database_name>', '<table_name>');
34-
```
29+
For more information about the FUSE_SNAPSHOT function, see [FUSE_SNAPSHOT](../../20-functions/111-system-functions/fuse_snapshot.md).
3530

3631
## Examples
3732

docs/doc/30-reference/30-sql/70-system-tables/system-query-log.md

Lines changed: 100 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -2,23 +2,71 @@
22
title: system.query_log
33
---
44

5-
A read-only in-memory table stores all the query logs;
5+
A read-only in-memory table stores all the query logs.
66

7-
```sql
8-
SELECT * FROM system.query_log ORDER BY event_time DESC LIMIT 1\G
9-
*************************** 1. row ***************************
7+
8+
## Columns
9+
10+
```
11+
CREATE TABLE `query_log` (
12+
`log_type` TINYINT,
13+
`handler_type` VARCHAR,
14+
`tenant_id` VARCHAR,
15+
`cluster_id` VARCHAR,
16+
`sql_user` VARCHAR,
17+
`sql_user_quota` VARCHAR,
18+
`sql_user_privileges` VARCHAR,
19+
`query_id` VARCHAR,
20+
`query_kind` VARCHAR,
21+
`query_text` VARCHAR,
22+
`event_date` DATE,
23+
`event_time` TIMESTAMP(3),
24+
`current_database` VARCHAR,
25+
`databases` VARCHAR,
26+
`tables` VARCHAR,
27+
`columns` VARCHAR,
28+
`projections` VARCHAR,
29+
`written_rows` BIGINT UNSIGNED,
30+
`written_bytes` BIGINT UNSIGNED,
31+
`written_io_bytes` BIGINT UNSIGNED,
32+
`written_io_bytes_cost_ms` BIGINT UNSIGNED,
33+
`scan_rows` BIGINT UNSIGNED,
34+
`scan_bytes` BIGINT UNSIGNED,
35+
`scan_io_bytes` BIGINT UNSIGNED,
36+
`scan_io_bytes_cost_ms` BIGINT UNSIGNED,
37+
`scan_partitions` BIGINT UNSIGNED,
38+
`total_partitions` BIGINT UNSIGNED,
39+
`result_rows` BIGINT UNSIGNED,
40+
`result_bytes` BIGINT UNSIGNED,
41+
`cpu_usage` INT UNSIGNED,
42+
`memory_usage` BIGINT UNSIGNED,
43+
`client_info` VARCHAR,
44+
`client_address` VARCHAR,
45+
`exception_code` INT,
46+
`exception_text` VARCHAR,
47+
`stack_trace` VARCHAR,
48+
`server_version` VARCHAR,
49+
`session_settings` VARCHAR,
50+
`extra` VARCHAR
51+
)
52+
```
53+
54+
## Examples
55+
56+
```
57+
*************************** 4. row ***************************
1058
log_type: 1
1159
handler_type: MySQL
12-
tenant_id: test_tenant
13-
cluster_id: test_cluster
60+
tenant_id: admin
61+
cluster_id:
1462
sql_user: root
1563
sql_user_quota: UserQuota<cpu:0,mem:0,store:0>
16-
sql_user_privileges: GRANT ALL ON *.* TO 'root'@'127.0.0.1', ROLES: []
17-
query_id: da879c17-94bb-4163-b2ac-ff4786bbe69e
18-
query_kind: SelectPlan
19-
query_text: SELECT * from system.query_log order by event_time desc limit 1
20-
event_date: 2022-03-24
21-
event_time: 2022-03-24 11:13:27.414
64+
sql_user_privileges: GRANT ALL ON *.*, ROLES: []
65+
query_id: eda2a82b-3667-4ffb-b436-953785178c39
66+
query_kind: Query
67+
query_text: select avg(number) from numbers(1000000)
68+
event_date: 2022-09-08
69+
event_time: 2022-09-08 03:32:39.517
2270
current_database: default
2371
databases:
2472
tables:
@@ -34,16 +82,50 @@ written_io_bytes_cost_ms: 0
3482
scan_io_bytes_cost_ms: 0
3583
scan_partitions: 0
3684
total_partitions: 0
37-
result_rows: 0
38-
result_bytes: 0
39-
cpu_usage: 10
40-
memory_usage: 1603
85+
86+
, skip_header=0, sql_dialect=PostgreSQL, storage_read_buffer_size=1048576, timezone=UTC, unquoted_ident_case_sensitive=0, wait_for_async_insert=1, wait_for_async_insert_timeout=100, scope: SESSION
87+
extra:
88+
89+
90+
*************************** 5. row ***************************
91+
log_type: 2
92+
handler_type: MySQL
93+
tenant_id: admin
94+
cluster_id:
95+
sql_user: root
96+
sql_user_quota: UserQuota<cpu:0,mem:0,store:0>
97+
sql_user_privileges: GRANT ALL ON *.*, ROLES: []
98+
query_id: eda2a82b-3667-4ffb-b436-953785178c39
99+
query_kind: Query
100+
query_text: select avg(number) from numbers(1000000)
101+
event_date: 2022-09-08
102+
event_time: 2022-09-08 03:32:39.519
103+
current_database: default
104+
databases:
105+
tables:
106+
columns:
107+
projections:
108+
written_rows: 0
109+
written_bytes: 0
110+
written_io_bytes: 0
111+
written_io_bytes_cost_ms: 0
112+
scan_rows: 1000000
113+
scan_bytes: 8000000
114+
scan_io_bytes: 0
115+
scan_io_bytes_cost_ms: 0
116+
scan_partitions: 0
117+
total_partitions: 0
118+
result_rows: 1
119+
result_bytes: 9
120+
cpu_usage: 24
121+
memory_usage: 0
41122
client_info:
42-
client_address: 127.0.0.1:56744
123+
client_address: 127.0.0.1:53304
43124
exception_code: 0
44125
exception_text:
45126
stack_trace:
46127
server_version:
47-
session_settings: enable_new_processor_framework=1, flight_client_timeout=60, max_block_size=10000, max_threads=8, storage_occ_backoff_init_delay_ms=5, storage_occ_backoff_max_delay_ms=20000, storage_occ_backoff_max_elapsed_ms=120000, storage_read_buffer_size=1048576, scope: SESSION
128+
session_settings: compression=None, empty_as_default=1, enable_async_insert=0, enable_new_processor_framework=1, enable_planner_v2=1, field_delimiter=,, flight_client_timeout=60, group_by_two_level_threshold=10000, max_block_size=10000, max_execute_time=0, max_threads=24, quoted_ident_case_sensitive=1, record_delimiter=
129+
, skip_header=0, sql_dialect=PostgreSQL, storage_read_buffer_size=1048576, timezone=UTC, unquoted_ident_case_sensitive=0, wait_for_async_insert=1, wait_for_async_insert_timeout=100, scope: SESSION
48130
extra:
49131
```

0 commit comments

Comments
 (0)