
Commit d6d502e

Enable mutation parameter
1 parent 2111265 commit d6d502e

File tree

5 files changed: +62 -20 lines changed


README.md

Lines changed: 10 additions & 2 deletions
@@ -217,6 +217,13 @@ Assuming you started a test environment using docker-compose, please refer to ex
 
 * Mutation
 ```sql
+-- use query parameter
+select * from jdbc('ch-server?mutation', 'drop table if exists system.test_table');
+select * from jdbc('ch-server?mutation', 'create table system.test_table(a String, b UInt8) engine=Memory()');
+select * from jdbc('ch-server?mutation', 'insert into system.test_table values(''a'', 1)');
+select * from jdbc('ch-server?mutation', 'truncate table system.test_table');
+
+-- use JDBC table engine
 drop table if exists system.test_table;
 create table system.test_table (
     a String,
@@ -302,8 +309,9 @@ Assuming you started a test environment using docker-compose, please refer to ex
 Couple of timeout settings you should be aware of:
 1. datasource timeout, for example: `max_execution_time` in MariaDB
 2. JDBC driver timeout, for example: `connectTimeout` and `socketTimeout` in [MariaDB Connector/J](https://mariadb.com/kb/en/about-mariadb-connector-j/)
-3. Vertx timeout - see `config/server.json` and `config/vertx.json`
-4. Client(ClickHouse JDBC driver) timeout - see timeout settings in ClickHouse JDBC driver
+3. JDBC bridge timeout, for examples: `queryTimeout` in `config/server.json`, and `maxWorkerExecuteTime` in `config/vertx.json`
+4. ClickHouse timeout like `max_execution_time` and `keep_alive_timeout` etc.
+5. Client timeout, for example: `socketTimeout` in ClickHouse JDBC driver
 
 
 ## Migration
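Of the timeouts listed in that README hunk, the first two live on the datasource/driver side and are usually passed on the JDBC URL. A minimal sketch, not part of the commit, assuming a MariaDB datasource at a placeholder host; `connectTimeout` and `socketTimeout` are the Connector/J parameters named in the README, while the host, database, and credentials below are illustrative only:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DriverTimeoutSketch {
    public static void main(String[] args) throws SQLException {
        // connectTimeout / socketTimeout are MariaDB Connector/J URL parameters (milliseconds);
        // host, database and credentials are placeholders, and the Connector/J jar is assumed
        // to be on the classpath.
        String url = "jdbc:mariadb://mariadb-server:3306/test"
                + "?connectTimeout=5000&socketTimeout=30000";
        try (Connection conn = DriverManager.getConnection(url, "user", "password");
                Statement stmt = conn.createStatement()) {
            stmt.setQueryTimeout(30); // standard JDBC per-statement timeout, in seconds
            stmt.execute("select 1");
        }
    }
}
```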

src/main/java/ru/yandex/clickhouse/jdbcbridge/JdbcBridgeVerticle.java

Lines changed: 3 additions & 1 deletion
@@ -528,7 +528,9 @@ private void handleWrite(RoutingContext ctx) {
         ds.executeMutation(parser.getRawSchema(), table, parser.getTable(), params, ByteBuffer.wrap(ctx.getBody()),
                 writer);
 
-        resp.end(ByteBuffer.asBuffer(WRITE_RESPONSE));
+        if (writer.isOpen()) {
+            resp.end(ByteBuffer.asBuffer(WRITE_RESPONSE));
+        }
     }
 
     private Repository<NamedDataSource> getDataSourceRepository() {

src/main/java/ru/yandex/clickhouse/jdbcbridge/core/NamedDataSource.java

Lines changed: 30 additions & 0 deletions
@@ -185,6 +185,36 @@ private void writeDebugResult(String schema, String originalQuery, String loaded
         Objects.requireNonNull(writer).write(buffer);
     }
 
+    protected final void writeMutationResult(long effectedRows, ColumnDefinition[] requestColumns,
+            ColumnDefinition[] customColumns, ResponseWriter writer) {
+        ByteBuffer buffer = ByteBuffer.newInstance(100);
+        Map<String, String> values = new HashMap<>();
+        for (ColumnDefinition c : customColumns) {
+            values.put(c.getName(), converter.as(String.class, c.getValue()));
+        }
+
+        String typeName = TableDefinition.MUTATION_COLUMNS.getColumn(0).getName();
+        String rowsName = TableDefinition.MUTATION_COLUMNS.getColumn(1).getName();
+
+        values.put(typeName, this.getType());
+
+        for (ColumnDefinition c : requestColumns) {
+            String name = c.getName();
+            if (rowsName.equals(name)) {
+                buffer.writeUInt64(effectedRows);
+            } else {
+                String str = values.get(name);
+                if (str == null) {
+                    buffer.writeNull();
+                } else {
+                    buffer.writeNonNull().writeString(str);
+                }
+            }
+        }
+
+        Objects.requireNonNull(writer).write(buffer);
+    }
+
 protected void writeMutationResult(String schema, String originalQuery, String loadedQuery, QueryParameters params,
         ColumnDefinition[] requestColumns, ColumnDefinition[] customColumns, DefaultValues defaultValues,
         ResponseWriter writer) {
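The new `writeMutationResult(long, ...)` overload above emits a single result row: for each column the client requested it writes either the affected-row count (the UInt64 `rows` column) or a nullable string value such as the datasource `type`. The standalone sketch below mirrors only that ordering logic with plain Java types; the column names come from the diff, but the class and the tab-separated output are illustrative stand-ins, not the bridge's `ColumnDefinition`/`ByteBuffer` API.

```java
import java.util.HashMap;
import java.util.Map;

public class MutationRowSketch {
    public static void main(String[] args) {
        long effectedRows = 4L;                          // e.g. rows affected by an insert
        String[] requestedColumns = { "type", "rows" };  // column order chosen by the client

        // string-valued columns known up front (here only the datasource type)
        Map<String, String> values = new HashMap<>();
        values.put("type", "jdbc");

        StringBuilder row = new StringBuilder();
        for (String name : requestedColumns) {
            if ("rows".equals(name)) {
                row.append(effectedRows);                // the UInt64 update count
            } else {
                String str = values.get(name);
                row.append(str == null ? "\\N" : str);   // unknown columns are written as null
            }
            row.append('\t');
        }
        System.out.println(row.toString().trim());       // prints: jdbc	4
    }
}
```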

src/main/java/ru/yandex/clickhouse/jdbcbridge/core/TableDefinition.java

Lines changed: 2 additions & 1 deletion
@@ -59,7 +59,8 @@ public class TableDefinition {
             // datasource type: jdbc, config, script etc.
             new ColumnDefinition("type", DataType.Str, true, DEFAULT_LENGTH, DEFAULT_PRECISION, DEFAULT_SCALE),
             // operation: read or write
-            new ColumnDefinition("operation", DataType.Str, true, DEFAULT_LENGTH, DEFAULT_PRECISION, DEFAULT_SCALE),
+            // new ColumnDefinition("operation", DataType.Str, true, DEFAULT_LENGTH,
+            // DEFAULT_PRECISION, DEFAULT_SCALE),
             new ColumnDefinition("rows", DataType.UInt64, false, DEFAULT_LENGTH, DEFAULT_PRECISION, DEFAULT_SCALE));
 
     private static final String COLUMN_HEADER = "columns format version: ";

src/main/java/ru/yandex/clickhouse/jdbcbridge/impl/JdbcDataSource.java

Lines changed: 17 additions & 16 deletions
@@ -517,6 +517,20 @@ protected final void setTimeout(Statement stmt, int expectedTimeout) {
         }
     }
 
+    protected long getFirstMutationResult(Statement stmt) throws SQLException {
+        long count = 0L;
+
+        try {
+            count = stmt.getLargeUpdateCount();
+        } catch (SQLException e) {
+            throw e;
+        } catch (Exception e) {
+            count = stmt.getUpdateCount();
+        }
+
+        return count == -1 ? 0 : count;
+    }
+
     protected ResultSet getFirstQueryResult(Statement stmt, boolean hasResultSet) throws SQLException {
         ResultSet rs = null;
 
@@ -654,27 +668,14 @@ protected void writeMutationResult(String schema, String originalQuery, String l
         try (Connection conn = getConnection(); Statement stmt = createStatement(conn, params)) {
             setTimeout(stmt, this.getQueryTimeout(params.getTimeout()));
 
-            final ResultSet rs = getFirstQueryResult(stmt, stmt.execute(loadedQuery));
-
-            DataTableReader reader = new ResultSetReader(getId(), rs, params);
-            reader.process(getId(), requestColumns, customColumns, getColumnsFromResultSet(rs, params), defaultValues,
-                    getTimeZone(), params, writer);
-
-            /*
-             * if (stmt.execute(loadedQuery)) { // TODO multiple resultsets
-             *
-             * } else if (columns.size() == 1 && columns.getColumn(0).getType() ==
-             * ClickHouseDataType.Int32) {
-             * writer.write(ClickHouseBuffer.newInstance(4).writeInt32(stmt.getUpdateCount()
-             * )); } else { throw new IllegalStateException(
-             * "Not able to handle query result due to incompatible columns: " + columns); }
-             */
+            stmt.execute(loadedQuery);
+            this.writeMutationResult(getFirstMutationResult(stmt), requestColumns, customColumns, writer);
         } catch (SQLException e) {
             throw new DataAccessException(getId(), buildErrorMessage(e), e);
         } catch (DataAccessException e) {
             Throwable cause = e.getCause();
             throw new IllegalStateException(
-                    "Failed to query against [" + this.getId() + "] due to: " + buildErrorMessage(cause), cause);
+                    "Failed to mutate against [" + this.getId() + "] due to: " + buildErrorMessage(cause), cause);
         }
     }
 
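Taken together, the two hunks above change mutation handling from reading a `ResultSet` to executing the statement and reporting its update count. The `getLargeUpdateCount()`/`getUpdateCount()` fallback can be tried in isolation with the standalone sketch below; the in-memory H2 URL is a placeholder for any JDBC datasource and is not part of the bridge.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class MutationCountSketch {
    // Prefer the 64-bit count (JDBC 4.2); fall back to the 32-bit one when the
    // driver does not implement it, and normalize -1 ("no update count") to 0.
    static long firstMutationResult(Statement stmt) throws SQLException {
        long count;
        try {
            count = stmt.getLargeUpdateCount();
        } catch (SQLException e) {
            throw e;                       // genuine SQL errors still propagate
        } catch (Exception e) {
            count = stmt.getUpdateCount(); // e.g. UnsupportedOperationException
        }
        return count == -1 ? 0 : count;
    }

    public static void main(String[] args) throws SQLException {
        // Placeholder in-memory database (H2 driver assumed on the classpath).
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
                Statement stmt = conn.createStatement()) {
            stmt.execute("create table t(a varchar(10))");
            stmt.execute("insert into t values ('x'), ('y')");
            System.out.println(firstMutationResult(stmt)); // prints 2
        }
    }
}
```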