Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import com.databricks.jdbc.model.core.ColumnInfoTypeName;
import com.databricks.jdbc.model.core.ColumnMetadata;
import com.databricks.jdbc.model.core.ResultManifest;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
Expand All @@ -42,8 +43,9 @@ public class DatabricksResultSetMetaData implements ResultSetMetaData {
private final ImmutableList<ImmutableDatabricksColumn> columns;
private final CaseInsensitiveImmutableMap<Integer> columnNameIndex;
private final long totalRows;
private Long chunkCount;
private final Long chunkCount;
private final boolean isCloudFetchUsed;
private final boolean truncated;

/**
* Constructs a {@code DatabricksResultSetMetaData} object for a SEA result set.
Expand Down Expand Up @@ -142,6 +144,7 @@ public DatabricksResultSetMetaData(
this.totalRows = resultManifest.getTotalRowCount();
this.chunkCount = resultManifest.getTotalChunkCount();
this.isCloudFetchUsed = usesExternalLinks;
this.truncated = MoreObjects.firstNonNull(resultManifest.getTruncated(), false);
}

/**
Expand Down Expand Up @@ -258,6 +261,7 @@ public DatabricksResultSetMetaData(
this.totalRows = rows;
this.chunkCount = chunkCount;
this.isCloudFetchUsed = getIsCloudFetchFromManifest(resultManifest);
this.truncated = false;
}

/**
Expand Down Expand Up @@ -306,7 +310,9 @@ public DatabricksResultSetMetaData(
this.columns = columnsBuilder.build();
this.columnNameIndex = CaseInsensitiveImmutableMap.copyOf(columnNameToIndexMap);
this.totalRows = totalRows;
this.chunkCount = -1L;
this.isCloudFetchUsed = false;
this.truncated = false;
}

/**
Expand Down Expand Up @@ -358,7 +364,9 @@ public DatabricksResultSetMetaData(
this.columns = columnsBuilder.build();
this.columnNameIndex = CaseInsensitiveImmutableMap.copyOf(columnNameToIndexMap);
this.totalRows = totalRows;
this.chunkCount = -1L;
this.isCloudFetchUsed = false;
this.truncated = false;
}

/**
Expand Down Expand Up @@ -416,7 +424,9 @@ public DatabricksResultSetMetaData(
this.columns = columnsBuilder.build();
this.columnNameIndex = CaseInsensitiveImmutableMap.copyOf(columnNameToIndexMap);
this.totalRows = totalRows;
this.chunkCount = -1L;
this.isCloudFetchUsed = false;
this.truncated = false;
}

/**
Expand Down Expand Up @@ -492,8 +502,10 @@ public DatabricksResultSetMetaData(
this.statementId = statementId;
this.isCloudFetchUsed = false;
this.totalRows = -1;
this.chunkCount = -1L;
this.columns = columnsBuilder.build();
this.columnNameIndex = CaseInsensitiveImmutableMap.copyOf(columnNameToIndexMap);
this.truncated = false;
}
Comment on lines 497 to 503
Copy link

Copilot AI Mar 27, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This constructor now sets chunkCount to -1L. Since chunkCount is only used as nullable metadata (e.g., telemetry’s total_chunks_present), using null for "not applicable" avoids negative values showing up in logs/metrics.

Copilot uses AI. Check for mistakes.

@Override
Expand Down Expand Up @@ -643,6 +655,10 @@ private boolean getIsCloudFetchFromManifest(TGetResultSetMetadataResp resultMani
return resultManifest.getResultFormat() == TSparkRowSetType.URL_BASED_SET;
}

/**
 * Returns whether the result manifest reported this result set as truncated.
 * Defaults to {@code false} when the manifest carried no truncation flag.
 */
public boolean getIsTruncated() {
  return this.truncated;
}

/**
 * Returns the total number of result chunks.
 * NOTE(review): some constructors set this to {@code -1L} when chunking is not
 * applicable — callers should treat negative values as "unknown".
 */
public Long getChunkCount() {
  return this.chunkCount;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -461,6 +461,7 @@ public void testGetDispositionThrift(TSparkRowSetType resultFormat) {
} else {
assertFalse(metaData.getIsCloudFetchUsed());
}
assertFalse(metaData.getIsTruncated());
}

@Test
Expand All @@ -476,6 +477,26 @@ public void testCloudFetchUsedSdk() {
assertFalse(metaData.getIsCloudFetchUsed());
}

@Test
public void testSdkTruncated() {
  ResultManifest manifest = getResultManifest();

  // A null truncation flag in the manifest must default to "not truncated".
  manifest.setTruncated(null);
  DatabricksResultSetMetaData metadata =
      new DatabricksResultSetMetaData(STATEMENT_ID, manifest, true, connectionContext);
  assertFalse(metadata.getIsTruncated());

  // An explicit true flag is surfaced as-is.
  manifest.setTruncated(true);
  metadata =
      new DatabricksResultSetMetaData(STATEMENT_ID, manifest, false, connectionContext);
  assertTrue(metadata.getIsTruncated());

  // An explicit false flag is surfaced as-is.
  manifest.setTruncated(false);
  metadata =
      new DatabricksResultSetMetaData(STATEMENT_ID, manifest, false, connectionContext);
  assertFalse(metadata.getIsTruncated());
}

@Test
public void testSEAInlineComplexType() throws SQLException {
ResultManifest resultManifest = new ResultManifest();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ void testHybridSmallQuery() throws SQLException {
assertEquals(maxRows, metaData.getTotalRows());
// For small query, arrow results are received inline in hybrid mode
assertFalse(metaData.getIsCloudFetchUsed());
assertFalse(metaData.getIsTruncated());

// For small query, arrow results are received inline in hybrid mode so no cloud fetch calls
// are made
Expand Down Expand Up @@ -93,6 +94,7 @@ void testHybridLargeQuery() throws SQLException {
assertEquals(maxRows, metaData.getTotalRows());
// For large query, arrow results are fetched using cloud fetch
assertTrue(metaData.getIsCloudFetchUsed());
assertFalse(metaData.getIsTruncated());

// The number of cloud fetch calls should be equal to the number of chunks
final int cloudFetchCalls =
Expand Down
Loading