from databricks.sql.thrift_api.TCLIService import ttypes
from databricks.sql.types import Row
from databricks.sql.exc import Error, RequestError, CursorAlreadyClosedError
-from databricks.sql.utils import ColumnTable, ColumnQueue
+from databricks.sql.utils import ColumnTable, ColumnQueue, JsonQueue
from databricks.sql.backend.types import CommandId, CommandState, ExecuteResponse

logger = logging.getLogger(__name__)
@@ -41,10 +41,11 @@ def __init__(
        command_id: CommandId,
        status: CommandState,
        has_been_closed_server_side: bool = False,
-        has_more_rows: bool = False,
        results_queue=None,
        description=None,
        is_staging_operation: bool = False,
+        lz4_compressed: bool = False,
+        arrow_schema_bytes: bytes = b"",
    ):
        """
        A ResultSet manages the results of a single command.
@@ -72,9 +73,10 @@ def __init__(
        self.command_id = command_id
        self.status = status
        self.has_been_closed_server_side = has_been_closed_server_side
-        self.has_more_rows = has_more_rows
        self.results = results_queue
        self._is_staging_operation = is_staging_operation
+        self.lz4_compressed = lz4_compressed
+        self._arrow_schema_bytes = arrow_schema_bytes

    def __iter__(self):
        while True:
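
Taken together, the two hunks above move `has_more_rows` out of the shared base class and promote `lz4_compressed` and `arrow_schema_bytes` to common state. A minimal sketch of the resulting base-class shape follows; any parameter or attribute not visible in the diff's context lines is an assumption, not part of this change:

```python
# Hypothetical, abridged view of the base ResultSet after this change.
# Parameters outside the diff's context lines are assumptions.
class ResultSet:
    def __init__(
        self,
        command_id,
        status,
        has_been_closed_server_side: bool = False,
        results_queue=None,
        description=None,
        is_staging_operation: bool = False,
        lz4_compressed: bool = False,     # new: shared by all backends
        arrow_schema_bytes: bytes = b"",  # new: shared by all backends
    ):
        self.command_id = command_id
        self.status = status
        self.has_been_closed_server_side = has_been_closed_server_side
        self.results = results_queue
        self._is_staging_operation = is_staging_operation
        self.lz4_compressed = lz4_compressed
        self._arrow_schema_bytes = arrow_schema_bytes
        # has_more_rows is no longer tracked here; the Thrift subclass
        # owns it (see the ThriftResultSet hunk below).
```
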
@@ -157,7 +159,10 @@ def __init__(
        buffer_size_bytes: int = 104857600,
        arraysize: int = 10000,
        use_cloud_fetch: bool = True,
-        arrow_schema_bytes: Optional[bytes] = None,
+        t_row_set=None,
+        max_download_threads: int = 10,
+        ssl_options=None,
+        has_more_rows: bool = True,
    ):
        """
        Initialize a ThriftResultSet with direct access to the ThriftDatabricksClient.
@@ -169,12 +174,30 @@ def __init__(
            buffer_size_bytes: Buffer size for fetching results
            arraysize: Default number of rows to fetch
            use_cloud_fetch: Whether to use cloud fetch for retrieving results
-            arrow_schema_bytes: Arrow schema bytes for the result set
+            t_row_set: The TRowSet containing result data (if available)
+            max_download_threads: Maximum number of download threads for cloud fetch
+            ssl_options: SSL options for cloud fetch
+            has_more_rows: Whether there are more rows to fetch
        """
        # Initialize ThriftResultSet-specific attributes
-        self._arrow_schema_bytes = arrow_schema_bytes
        self._use_cloud_fetch = use_cloud_fetch
-        self.lz4_compressed = execute_response.lz4_compressed
+        self.has_more_rows = has_more_rows
+
+        # Build the results queue if t_row_set is provided
+        results_queue = None
+        if t_row_set and execute_response.result_format is not None:
+            from databricks.sql.utils import ThriftResultSetQueueFactory
+
+            # Create the results queue using the provided format
+            results_queue = ThriftResultSetQueueFactory.build_queue(
+                row_set_type=execute_response.result_format,
+                t_row_set=t_row_set,
+                arrow_schema_bytes=execute_response.arrow_schema_bytes or b"",
+                max_download_threads=max_download_threads,
+                lz4_compressed=execute_response.lz4_compressed,
+                description=execute_response.description,
+                ssl_options=ssl_options,
+            )

        # Call parent constructor with common attributes
        super().__init__(
@@ -185,10 +208,11 @@ def __init__(
            command_id=execute_response.command_id,
            status=execute_response.status,
            has_been_closed_server_side=execute_response.has_been_closed_server_side,
-            has_more_rows=execute_response.has_more_rows,
-            results_queue=execute_response.results_queue,
+            results_queue=results_queue,
            description=execute_response.description,
            is_staging_operation=execute_response.is_staging_operation,
+            lz4_compressed=execute_response.lz4_compressed,
+            arrow_schema_bytes=execute_response.arrow_schema_bytes,
        )

        # Initialize results queue if not provided
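
To make the new construction path concrete, here is a sketch of how a backend might now instantiate the result set. `ThriftResultSetQueueFactory.build_queue` and the new keyword parameters come from the hunk above; the leading constructor parameters and any `ExecuteResponse` fields not shown in the diff are assumptions:

```python
# Hypothetical caller-side sketch. The queue is now built inside the
# constructor from the raw TRowSet instead of arriving prebuilt on
# execute_response.results_queue.
result_set = ThriftResultSet(
    connection=connection,            # assumed leading parameters
    execute_response=execute_response,
    thrift_client=thrift_client,
    buffer_size_bytes=104857600,
    arraysize=10000,
    use_cloud_fetch=True,
    t_row_set=resp.results,           # raw Thrift row set, may be None
    max_download_threads=10,          # cloud-fetch parallelism
    ssl_options=ssl_options,          # forwarded to cloud-fetch downloads
    has_more_rows=resp.hasMoreRows,   # now owned by the Thrift subclass
)
```
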
@@ -419,7 +443,7 @@ def map_col_type(type_):


class SeaResultSet(ResultSet):
-    """ResultSet implementation for the SEA backend."""
+    """ResultSet implementation for SEA backend."""

    def __init__(
        self,
@@ -428,17 +452,20 @@ def __init__(
        sea_client: "SeaDatabricksClient",
        buffer_size_bytes: int = 104857600,
        arraysize: int = 10000,
+        result_data=None,
+        manifest=None,
    ):
        """
        Initialize a SeaResultSet with the response from a SEA query execution.

        Args:
            connection: The parent connection
+            execute_response: Response from the execute command
            sea_client: The SeaDatabricksClient instance for direct access
            buffer_size_bytes: Buffer size for fetching results
            arraysize: Default number of rows to fetch
-            execute_response: Response from the execute command (new style)
-            sea_response: Direct SEA response (legacy style)
+            result_data: Result data from SEA response (optional)
+            manifest: Manifest from SEA response (optional)
        """

        super().__init__(
@@ -449,15 +476,15 @@ def __init__(
            command_id=execute_response.command_id,
            status=execute_response.status,
            has_been_closed_server_side=execute_response.has_been_closed_server_side,
-            has_more_rows=execute_response.has_more_rows,
-            results_queue=execute_response.results_queue,
            description=execute_response.description,
            is_staging_operation=execute_response.is_staging_operation,
+            lz4_compressed=execute_response.lz4_compressed,
+            arrow_schema_bytes=execute_response.arrow_schema_bytes,
        )

    def _fill_results_buffer(self):
        """Fill the results buffer from the backend."""
-        raise NotImplementedError("fetchone is not implemented for SEA backend")
+        raise NotImplementedError("fetchall_arrow is not implemented for SEA backend")

    def fetchone(self) -> Optional[Row]:
        """
@@ -480,6 +507,7 @@ def fetchall(self) -> List[Row]:
        """
        Fetch all (remaining) rows of a query result, returning them as a list of rows.
        """
+
        raise NotImplementedError("fetchall is not implemented for SEA backend")

    def fetchmany_arrow(self, size: int) -> Any:
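
Finally, a sketch of how the reworked `SeaResultSet` signature is exercised. Since every fetch path still raises `NotImplementedError` in this change, only construction is meaningful; the client and response objects here are stand-ins, not names from this diff:

```python
# Hypothetical sketch; sea_client, execute_response, and the SEA
# response fields are stand-ins for objects defined outside this diff.
result_set = SeaResultSet(
    connection=connection,
    execute_response=execute_response,
    sea_client=sea_client,
    buffer_size_bytes=104857600,
    arraysize=10000,
    result_data=sea_response.result,   # optional inline result payload
    manifest=sea_response.manifest,    # optional result manifest
)

try:
    result_set.fetchall()
except NotImplementedError:
    # Fetching is intentionally unimplemented for SEA in this change.
    pass
```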