Commit 7d21ad1

make description non Optional, correct docstring, optimize col conversion
Signed-off-by: varun-edachali-dbx <varun.edachali@databricks.com>
1 parent 245aa77 · commit 7d21ad1

2 files changed: +4, -7 lines

src/databricks/sql/result_set.py

Lines changed: 3 additions & 6 deletions
@@ -512,17 +512,14 @@ def _convert_json_to_arrow(self, rows: List[List]) -> "pyarrow.Table":
         if not rows:
             return pyarrow.Table.from_pydict({})
 
-        columns = []
-        num_cols = len(rows[0])
-        for i in range(num_cols):
-            columns.append([row[i] for row in rows])
+        # Transpose rows to columns efficiently using zip
+        columns = list(map(list, zip(*rows)))
         names = [col[0] for col in self.description]
         return pyarrow.Table.from_arrays(columns, names=names)
 
     def _convert_json_types(self, rows: List[List]) -> List[List]:
         """
-        Convert raw data rows to Row objects with named columns based on description.
-        Also converts string values to appropriate Python types based on column metadata.
+        Convert string values to appropriate Python types based on column metadata.
         """
 
         if not self.description or not rows:
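
As an aside, here is a minimal runnable sketch of the zip-based transpose introduced above; the sample rows are hypothetical, not taken from the library:

    # Hypothetical sample data: each inner list is one JSON result row.
    rows = [
        [1, "a", True],
        [2, "b", False],
        [3, "c", True],
    ]

    # zip(*rows) groups the i-th element of every row into one tuple per
    # column; map(list, ...) converts each tuple back to a list, which is
    # the shape pyarrow.Table.from_arrays expects for its column arrays.
    columns = list(map(list, zip(*rows)))

    assert columns == [[1, 2, 3], ["a", "b", "c"], [True, False, True]]

This replaces a Python-level loop over column indices with a single C-level pass, and also drops the separate num_cols bookkeeping, which is presumably the "optimize col conversion" part of the commit message.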

src/databricks/sql/utils.py

Lines changed: 1 addition & 1 deletion
@@ -274,7 +274,7 @@ def __init__(
         start_row_offset: int = 0,
         result_links: Optional[List[TSparkArrowResultLink]] = None,
         lz4_compressed: bool = True,
-        description: Optional[List[Tuple]] = None,
+        description: List[Tuple] = [],
     ):
         """
         A queue-like wrapper over CloudFetch arrow batches.
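
For illustration, a minimal sketch of what the non-Optional default buys, using a hypothetical, simplified stand-in class rather than the real queue class: consumers can iterate description without first guarding against None.

    from typing import List, Tuple

    # Hypothetical, simplified stand-in for the queue class patched above.
    class _QueueSketch:
        def __init__(self, description: List[Tuple] = []):
            # The shared empty-list default is safe only as long as the
            # list is never mutated, which holds in this sketch.
            self.description = description

        def column_names(self) -> List[str]:
            # With a list (never None), iteration needs no None guard,
            # unlike the old Optional[List[Tuple]] = None signature.
            return [col[0] for col in self.description]

    assert _QueueSketch().column_names() == []
    assert _QueueSketch([("id",), ("name",)]).column_names() == ["id", "name"]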
