Commit 21e80b1

Merge branch 'main' into in-improve

2 parents 05024c5 + 5691187

File tree: 42 files changed, +879 −376 lines

Lines changed: 58 additions & 0 deletions

@@ -0,0 +1,58 @@
+name: "Test Stateful Cluster Linux"
+description: "Running stateful tests in cluster mode"
+inputs:
+  profile:
+    description: "The profile for this test"
+    required: true
+    default: "debug"
+  target:
+    description: ""
+    required: true
+    default: "x86_64-unknown-linux-gnu"
+runs:
+  using: "composite"
+  steps:
+    - name: Download artifact
+      uses: ./.github/actions/artifact_download
+      with:
+        profile: ${{ inputs.profile }}
+        sha: ${{ github.sha }}
+        target: ${{ inputs.target }}
+
+    - name: Minio Setup for (ubuntu-latest only)
+      shell: bash
+      run: |
+        docker run -d -p 9900:9000 --name minio \
+          -e "MINIO_ACCESS_KEY=minioadmin" \
+          -e "MINIO_SECRET_KEY=minioadmin" \
+          -v /tmp/data:/data \
+          -v /tmp/config:/root/.minio \
+          minio/minio server /data
+
+        export AWS_ACCESS_KEY_ID=minioadmin
+        export AWS_SECRET_ACCESS_KEY=minioadmin
+        export AWS_EC2_METADATA_DISABLED=true
+
+        aws --endpoint-url http://127.0.0.1:9900/ s3 mb s3://testbucket
+        aws --endpoint-url http://127.0.0.1:9900/ s3 cp tests/data s3://testbucket/admin/data --recursive
+        aws --endpoint-url http://127.0.0.1:9900/ s3 cp tests/data/ontime_200.csv s3://testbucket/admin/data/ontime_200_v1.csv
+        aws --endpoint-url http://127.0.0.1:9900/ s3 cp tests/data/ontime_200.parquet s3://testbucket/admin/data/ontime_200_v1.parquet
+
+    - name: Run Stateful Tests with Cluster mode (ubuntu-latest only)
+      shell: bash
+      run: |
+        ./scripts/ci/ci-run-stateful-tests-cluster-s3.sh
+
+    - name: Upload .stdout files
+      if: failure()
+      uses: actions/upload-artifact@v3
+      with:
+        name: test-stateful-cluster-linux-stdouts
+        path: |
+          ./tests/suites/1_stateful/*/*.stdout
+
+    - name: Upload failure
+      if: failure()
+      uses: ./.github/actions/artifact_failure
+      with:
+        name: test-stateful-cluster-linux
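To reproduce the MinIO step in this action outside CI, the same container and bucket layout can be brought up locally. A minimal sketch, assuming Docker and the AWS CLI are installed and port 9900 is free (this is illustrative, not part of the commit):

```bash
# Mirror of the CI step above: a disposable MinIO server plus the test bucket.
docker run -d -p 9900:9000 --name minio \
  -e "MINIO_ACCESS_KEY=minioadmin" \
  -e "MINIO_SECRET_KEY=minioadmin" \
  minio/minio server /data

export AWS_ACCESS_KEY_ID=minioadmin
export AWS_SECRET_ACCESS_KEY=minioadmin
export AWS_EC2_METADATA_DISABLED=true

aws --endpoint-url http://127.0.0.1:9900/ s3 mb s3://testbucket

# Sanity-check that the bucket is reachable before running the suite.
aws --endpoint-url http://127.0.0.1:9900/ s3 ls s3://testbucket/
```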

.github/mergify.yml

Lines changed: 4 additions & 3 deletions

@@ -10,11 +10,12 @@ queue_rules:
       - check-success~=^build_(aarch64|x86_64)_musl$
       - check-success=test_unit
       - check-success=test_metactl
-      - check-success=test_stateless_standalone_linux
-      - check-success=test_stateless_cluster_linux
-      - check-success=test_stateful_standalone_linux
+      # state tests
+      - check-success~=^test_(stateless|stateful)_(standalone|cluster)_linux$
+      # sqllogic tests
       - check-success=test_sqllogic_base_standalone_linux
       - check-success=test_sqllogic_ydb_standalone_linux
+      - check-success=test_sqllogic_base_cluster_linux

   - name: docs_queue
     conditions:
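The three explicit state-test conditions collapse into a single regex condition that also covers the new cluster job. A quick way to convince yourself the pattern matches exactly the four job names is a shell loop like the following (illustrative only, not part of the commit):

```bash
# Each of the four state-test job names should match the new Mergify regex;
# grep -qE exits 0 on a match, so every name should print.
for job in test_stateless_standalone_linux test_stateless_cluster_linux \
           test_stateful_standalone_linux test_stateful_cluster_linux; do
  echo "$job" | grep -qE '^test_(stateless|stateful)_(standalone|cluster)_linux$' \
    && echo "matched: $job"
done
```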

.github/workflows/dev-linux.yml

Lines changed: 28 additions & 18 deletions

@@ -112,22 +112,6 @@ jobs:
       - uses: actions/checkout@v3
       - uses: ./.github/actions/test_meta_cluster

-  test_stateless_standalone_linux:
-    timeout-minutes: 10
-    runs-on: [self-hosted, X64, Linux, development]
-    needs: build_gnu
-    steps:
-      - uses: actions/checkout@v3
-      - uses: ./.github/actions/test_stateless_standalone_linux
-
-  test_stateless_cluster_linux:
-    timeout-minutes: 15
-    runs-on: [self-hosted, X64, Linux, development]
-    needs: build_gnu
-    steps:
-      - uses: actions/checkout@v3
-      - uses: ./.github/actions/test_stateless_cluster_linux
-
   test_sqllogic_standalone_linux:
     timeout-minutes: 30
     name: test_sqllogic_${{ matrix.dirs }}_standalone_linux

@@ -161,14 +145,40 @@
         with:
           dirs: ${{ matrix.dirs }}

-  test_stateful_standalone_linux:
-    timeout-minutes: 10
+  test_stateless_standalone_linux:
+    timeout-minutes: 20
+    runs-on: [self-hosted, X64, Linux, development]
+    needs: build_gnu
+    steps:
+      - uses: actions/checkout@v3
+      - uses: ./.github/actions/test_stateless_standalone_linux
+
+  test_stateless_cluster_linux:
+    timeout-minutes: 20
     runs-on: [self-hosted, X64, Linux, development]
     needs: build_gnu
+    steps:
+      - uses: actions/checkout@v3
+      - uses: ./.github/actions/test_stateless_cluster_linux
+
+  test_stateful_standalone_linux:
+    timeout-minutes: 20
+    runs-on: [ self-hosted, X64, Linux, development ]
+    needs: build_gnu
     steps:
       - uses: actions/checkout@v3
       - uses: ./.github/actions/test_stateful_standalone_linux

+  # Wait for issue https://github.com/datafuselabs/databend/issues/7684
+  #
+  # test_stateful_cluster_linux:
+  #   timeout-minutes: 20
+  #   runs-on: [ self-hosted, X64, Linux, development ]
+  #   needs: build_gnu
+  #   steps:
+  #     - uses: actions/checkout@v3
+  #     - uses: ./.github/actions/test_stateful_cluster_linux
+
   test_fuzz_standalone_linux:
     timeout-minutes: 10
     runs-on: [self-hosted, X64, Linux, development]
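Given how much the job list moved around in this file, a local syntax check before pushing can save a CI round-trip. One possible sketch, assuming yamllint is installed (it is not part of this commit):

```bash
# Validate that the reshuffled workflow file still parses as YAML.
yamllint .github/workflows/dev-linux.yml
```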

docs/doc/30-reference/20-functions/111-system-functions/fuse_snapshot.md

Lines changed: 2 additions & 2 deletions

@@ -19,12 +19,12 @@ INSERT INTO mytable VALUES(1,1),(3,3);
 INSERT INTO mytable VALUES(2,2),(5,5);
 INSERT INTO mytable VALUES(4,4);

-SELECT * FROM FUSE_SNAPSHOT(default‘,’mytable);
+SELECT * FROM FUSE_SNAPSHOT('default','mytable');

 ---
 | snapshot_id | snapshot_location | format_version | previous_snapshot_id | segment_count | block_count | row_count | bytes_uncompressed | bytes_compressed | timestamp |
 |----------------------------------|------------------------------------------------------------|----------------|----------------------------------|---------------|-------------|-----------|--------------------|------------------|----------------------------|
 | dd98266f968f4817b470255592d04fda | 670655/670675/_ss/dd98266f968f4817b470255592d04fda_v1.json | 1 | \N | 1 | 1 | 2 | 16 | 290 | 2022-09-07 01:58:55.204997 |
 | 2f2d004ff6f842cdb25f5631b2bbb703 | 670655/670675/_ss/2f2d004ff6f842cdb25f5631b2bbb703_v1.json | 1 | dd98266f968f4817b470255592d04fda | 2 | 2 | 4 | 32 | 580 | 2022-09-07 01:59:09.742999 |
 | 0aa6dfd5d5364bde80f21161ba48c96e | 670655/670675/_ss/0aa6dfd5d5364bde80f21161ba48c96e_v1.json | 1 | 2f2d004ff6f842cdb25f5631b2bbb703 | 3 | 3 | 5 | 40 | 862 | 2022-09-07 01:59:16.858454 |
-```
+```

docs/doc/30-reference/30-sql/10-dml/dml-copy-into-location.md

Lines changed: 4 additions & 4 deletions

@@ -5,14 +5,15 @@ description:
   'Unload Data using COPY INTO <location>'
 ---

-`COPY` moves data between Databend and object storage systems (such as Amazon S3).
+`COPY` loads data into Databend or unloads data from Databend.

-Unloads data from a table (or query) into one or more files in one of the following locations:
+This command unloads data from a table (or query) into one or more files in one of the following locations:

 * Named internal stage: The files can be downloaded from the stage using the GET command.
 * Named external stage: An external location (including Amazon S3).
 * External location: An object storage system (including Amazon S3).

+See Also: [COPY INTO table](dml-copy-into-table.md)

 ## Syntax

@@ -102,5 +103,4 @@ copy into @s2 from test_table FILE_FORMAT = (type = 'CSV');

 -- Unload the data from a query into the stage as a Parquet file
 copy into @s2 from (select name, age, id from test_table limit 100) FILE_FORMAT = (type = 'PARQUET');
-```
-
+```

docs/doc/30-reference/30-sql/10-dml/dml-copy-into-table.md

Lines changed: 19 additions & 14 deletions

@@ -3,13 +3,15 @@ title: 'COPY INTO <table>'
 sidebar_label: 'COPY INTO <table>'
 ---

-`COPY` moves data between Databend tables and object storage systems (AWS S3 compatible object storage services and Azure Blob storage).
+`COPY` loads data into Databend or unloads data from Databend.

-This command loads data into a table from files staged in one of the following locations:
+This command loads data into a table from files in one of the following locations:

-* Named internal stage, files can be staged using the [PUT to Stage](../../00-api/10-put-to-stage.md).
-* Named external stage that references an external location (AWS S3 compatible object storage services and Azure Blob storage).
-* External location. This includes AWS S3 compatible object storage services, Azure Blob storage, Google Cloud Storage, Huawei OBS.
+* Named internal stage: Databend internal named stages. Files can be staged using the [PUT to Stage](../../00-api/10-put-to-stage.md) API.
+* Named external stage: Stages created in AWS S3 compatible object storage services and Azure Blob storage.
+* External location: This can be a bucket in AWS S3 compatible object storage services, Azure Blob storage, Google Cloud Storage, or Huawei OBS. The external location can also be just a remote server from which you can access the file by a URL (starting with "https://...").
+
+See Also: [COPY INTO location](dml-copy-into-location.md)

 ## Syntax

@@ -123,10 +125,10 @@ externalLocation ::=
   'https://<url>'
 ```

-Especially, HTTP supports glob patterns. For example, use
+Please note that HTTP supports glob patterns. For example, use

-- `ontime_200{6,7,8}.csv` to represents `ontime_2006.csv`,`ontime_2007.csv`,`ontime_20080.csv`.
-- `ontime_200[6-8].csv` to represents `ontime_2006.csv`,`ontime_2007.csv`,`ontime_20080.csv`.
+- `ontime_200{6,7,8}.csv` to represent `ontime_2006.csv`, `ontime_2007.csv`, `ontime_2008.csv`.
+- `ontime_200[6-8].csv` to represent `ontime_2006.csv`, `ontime_2007.csv`, `ontime_2008.csv`.

 ### FILES = ( 'file_name' [ , 'file_name' ... ] )

@@ -261,14 +263,16 @@ COPY INTO mytable
   FILE_FORMAT = (type = 'CSV' field_delimiter = ',' record_delimiter = '\n' skip_header = 1 compression = GZIP) size_limit=10;
 ```

-This example moves data from a CSV file without specifying the endpoint URL:
+This example loads data from a CSV file without specifying the endpoint URL:
+
 ```sql
 COPY INTO mytable
   FROM 's3://mybucket/data.csv'
   FILE_FORMAT = (type = 'CSV' field_delimiter = ',' record_delimiter = '\n' skip_header = 1) size_limit=10;
 ```
-
-`Parquet` file example:
+
+This example loads data from a `Parquet` file:
+
 ```sql
 COPY INTO mytable
   FROM 's3://mybucket/data.parquet'

@@ -280,7 +284,8 @@ COPY INTO mytable

 **Azure Blob storage**

-This example reads data from a CSV file and inserts them into a table:
+This example reads data from a CSV file and inserts it into a table:
+
 ```sql
 COPY INTO mytable
   FROM 'azblob://mybucket/data.csv'

@@ -294,10 +299,10 @@ COPY INTO mytable

 **HTTP**

-This example reads data from a CSV file and inserts them into a table:
+This example reads data from three CSV files and inserts it into a table:

 ```sql
 COPY INTO mytable
   FROM 'https://repo.databend.rs/dataset/stateful/ontime_200{6,7,8}_200.csv'
   FILE_FORMAT = (type = 'CSV');
-```
+```
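As an aside, the two glob styles documented above behave analogously to shell brace expansion and character ranges, which is a handy way to preview what a pattern covers. A quick sketch (shell semantics only, not Databend's own matcher):

```bash
# Brace expansion enumerates the names directly:
echo ontime_200{6,7,8}.csv
# -> ontime_2006.csv ontime_2007.csv ontime_2008.csv

# The character-range form matches the same names against existing files:
touch ontime_2006.csv ontime_2007.csv ontime_2008.csv
ls ontime_200[6-8].csv
```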
Lines changed: 30 additions & 0 deletions

@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2020-2021 The Databend Authors.
+# SPDX-License-Identifier: Apache-2.0.
+
+echo "*************************************"
+echo "* Setting STORAGE_TYPE to S3.       *"
+echo "*                                   *"
+echo "* Please make sure that S3 backend  *"
+echo "* is ready, and configured properly.*"
+echo "*************************************"
+export STORAGE_TYPE=s3
+export STORAGE_S3_BUCKET=testbucket
+export STORAGE_S3_ROOT=admin
+export STORAGE_S3_ENDPOINT_URL=http://127.0.0.1:9900
+export STORAGE_S3_ACCESS_KEY_ID=minioadmin
+export STORAGE_S3_SECRET_ACCESS_KEY=minioadmin
+export STORAGE_ALLOW_INSECURE=true
+
+echo "Install dependencies"
+python3 -m pip install --quiet mysql-connector-python
+
+echo "calling test suite"
+echo "Starting Cluster databend-query"
+./scripts/ci/deploy/databend-query-cluster-3-nodes.sh
+
+SCRIPT_PATH="$(cd "$(dirname "$0")" >/dev/null 2>&1 && pwd)"
+cd "$SCRIPT_PATH/../../tests" || exit
+
+echo "Starting databend-test"
+./databend-test --mode 'cluster' --run-dir 1_stateful
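The workflow above invokes ./scripts/ci/ci-run-stateful-tests-cluster-s3.sh, which appears to be this new script (the file path itself is not shown in this view, so that mapping is an assumption). A sketch of running it locally, assuming the MinIO setup from the action has already been done and the query binaries are built:

```bash
# From the repository root; assumes a built databend-query and a MinIO
# endpoint at http://127.0.0.1:9900 with the testbucket layout created above.
./scripts/ci/ci-run-stateful-tests-cluster-s3.sh
```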

src/meta/api/src/lib.rs

Lines changed: 26 additions & 27 deletions

@@ -20,7 +20,6 @@ mod id_generator;
 mod kv_api;
 mod kv_api_key;
 mod kv_api_test_suite;
-mod kv_api_utils;
 mod schema_api;
 mod schema_api_impl;
 mod schema_api_keys;

@@ -29,6 +28,8 @@ mod share_api;
 mod share_api_impl;
 mod share_api_keys;
 mod share_api_test_suite;
+pub(crate) mod testing;
+pub(crate) mod util;

 pub use id::Id;
 pub(crate) use id_generator::IdGenerator;

@@ -40,34 +41,32 @@ pub use kv_api::KVApi;
 pub use kv_api_key::KVApiKey;
 pub use kv_api_key::KVApiKeyError;
 pub use kv_api_test_suite::KVApiTestSuite;
-pub use kv_api_utils::db_has_to_exist;
-pub use kv_api_utils::deserialize_struct;
-pub use kv_api_utils::deserialize_u64;
-pub use kv_api_utils::fetch_id;
-pub use kv_api_utils::get_kv_data;
-pub use kv_api_utils::get_object_shared_by_share_ids;
-pub use kv_api_utils::get_share_account_meta_or_err;
-pub use kv_api_utils::get_share_database_id_and_privilege;
-pub use kv_api_utils::get_share_id_to_name_or_err;
-pub use kv_api_utils::get_share_meta_by_id_or_err;
-pub use kv_api_utils::get_share_or_err;
-pub use kv_api_utils::get_struct_value;
-pub use kv_api_utils::get_u64_value;
-pub use kv_api_utils::is_all_db_data_removed;
-pub use kv_api_utils::is_db_need_to_be_remove;
-pub use kv_api_utils::list_keys;
-pub use kv_api_utils::list_u64_value;
-pub use kv_api_utils::send_txn;
-pub use kv_api_utils::serialize_struct;
-pub use kv_api_utils::serialize_u64;
-pub use kv_api_utils::table_has_to_exist;
-pub use kv_api_utils::txn_cond_seq;
-pub use kv_api_utils::txn_op_del;
-pub use kv_api_utils::txn_op_put;
-pub use kv_api_utils::txn_op_put_with_expire;
-pub use kv_api_utils::TXN_MAX_RETRY_TIMES;
 pub use schema_api::SchemaApi;
 pub(crate) use schema_api_impl::get_db_or_err;
 pub use schema_api_test_suite::SchemaApiTestSuite;
 pub use share_api::ShareApi;
 pub use share_api_test_suite::ShareApiTestSuite;
+pub use util::db_has_to_exist;
+pub use util::deserialize_struct;
+pub use util::fetch_id;
+pub use util::get_object_shared_by_share_ids;
+pub use util::get_share_account_meta_or_err;
+pub use util::get_share_database_id_and_privilege;
+pub use util::get_share_id_to_name_or_err;
+pub use util::get_share_meta_by_id_or_err;
+pub use util::get_share_or_err;
+pub use util::get_struct_value;
+pub use util::get_u64_value;
+pub use util::is_all_db_data_removed;
+pub use util::is_db_need_to_be_remove;
+pub use util::list_keys;
+pub use util::list_u64_value;
+pub use util::send_txn;
+pub use util::serialize_struct;
+pub use util::serialize_u64;
+pub use util::table_has_to_exist;
+pub use util::txn_cond_seq;
+pub use util::txn_op_del;
+pub use util::txn_op_put;
+pub use util::txn_op_put_with_expire;
+pub use util::TXN_MAX_RETRY_TIMES;
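Since kv_api_utils is removed and its items now live in util (public re-exports) and testing (the crate-internal get_kv_data used by the test suites below), a quick grep confirms no caller still references the old module. Illustrative only:

```bash
# Any hit here would be a stale import of the removed kv_api_utils module.
grep -rn "kv_api_utils" src/meta/api/src/ || echo "no stale references"
```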

src/meta/api/src/schema_api_test_suite.rs

Lines changed: 1 addition & 1 deletion

@@ -74,9 +74,9 @@ use common_meta_types::UpsertKVReq;
 use tracing::debug;
 use tracing::info;

-use crate::get_kv_data;
 use crate::is_all_db_data_removed;
 use crate::serialize_struct;
+use crate::testing::get_kv_data;
 use crate::ApiBuilder;
 use crate::AsKVApi;
 use crate::KVApi;

src/meta/api/src/share_api_test_suite.rs

Lines changed: 1 addition & 1 deletion

@@ -25,13 +25,13 @@ use common_meta_types::MetaError;
 use enumflags2::BitFlags;
 use tracing::info;

-use crate::get_kv_data;
 use crate::get_object_shared_by_share_ids;
 use crate::get_share_account_meta_or_err;
 use crate::get_share_id_to_name_or_err;
 use crate::get_share_meta_by_id_or_err;
 use crate::get_share_or_err;
 use crate::is_all_db_data_removed;
+use crate::testing::get_kv_data;
 use crate::ApiBuilder;
 use crate::AsKVApi;
 use crate::KVApi;
