Skip to content
Merged
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
88 changes: 73 additions & 15 deletions rust/lance-io/src/object_writer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,22 +47,35 @@ fn max_conn_reset_retries() -> u16 {
})
}

/// Maximum part size in GCS and S3: 5GB.
const MAX_UPLOAD_PART_SIZE: usize = 1024 * 1024 * 1024 * 5;

/// Clamps a requested upload part size to the valid [5MB, 5GB] range.
/// Returns the clamped value and whether clamping was necessary.
fn clamp_initial_upload_size(raw: usize) -> (usize, bool) {
let clamped = raw.clamp(INITIAL_UPLOAD_STEP, MAX_UPLOAD_PART_SIZE);
(clamped, clamped != raw)
}

fn initial_upload_size() -> usize {
static LANCE_INITIAL_UPLOAD_SIZE: OnceLock<usize> = OnceLock::new();
*LANCE_INITIAL_UPLOAD_SIZE.get_or_init(|| {
std::env::var("LANCE_INITIAL_UPLOAD_SIZE")
let Some(raw) = std::env::var("LANCE_INITIAL_UPLOAD_SIZE")
.ok()
.and_then(|s| s.parse::<usize>().ok())
.inspect(|size| {
if *size < INITIAL_UPLOAD_STEP {
// Minimum part size in GCS and S3
panic!("LANCE_INITIAL_UPLOAD_SIZE must be at least 5MB");
} else if *size > 1024 * 1024 * 1024 * 5 {
// Maximum part size in GCS and S3
panic!("LANCE_INITIAL_UPLOAD_SIZE must be at most 5GB");
}
})
.unwrap_or(INITIAL_UPLOAD_STEP)
else {
return INITIAL_UPLOAD_STEP;
};
let (clamped, was_clamped) = clamp_initial_upload_size(raw);
if was_clamped {
// OnceLock caches the result, so this warning fires at most once per process.
tracing::warn!(
requested = raw,
clamped,
"LANCE_INITIAL_UPLOAD_SIZE must be between 5MB and 5GB; clamping to valid range"
);
}
clamped
})
}

Expand All @@ -79,6 +92,7 @@ pub struct ObjectWriter {
cursor: usize,
connection_resets: u16,
buffer: Vec<u8>,
upload_size: usize,
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

issue: this change is no longer necessary. Could you remove this and just call initial_upload_size() as before?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

105338e address this

// TODO: use constant size to support R2
use_constant_size_upload_parts: bool,
}
Expand Down Expand Up @@ -157,25 +171,32 @@ impl UploadState {

impl ObjectWriter {
/// Creates a writer that uploads to `path` in `object_store` via a
/// multipart upload.
///
/// The part size is resolved once (honoring `LANCE_INITIAL_UPLOAD_SIZE`)
/// and reused both as the initial buffer capacity and, via the
/// `upload_size` field, for sizing subsequent parts — avoiding a second
/// lookup per part.
pub async fn new(object_store: &LanceObjectStore, path: &Path) -> Result<Self> {
    let upload_size = initial_upload_size();
    Ok(Self {
        state: UploadState::Started(object_store.inner.clone()),
        cursor: 0,
        path: Arc::new(path.clone()),
        connection_resets: 0,
        buffer: Vec::with_capacity(upload_size),
        upload_size,
        use_constant_size_upload_parts: object_store.use_constant_size_upload_parts,
    })
}

/// Returns the contents of `buffer` as a `Bytes` object and resets `buffer`.
/// The new capacity of `buffer` is determined by the current part index.
fn next_part_buffer(buffer: &mut Vec<u8>, part_idx: u16, constant_upload_size: bool) -> Bytes {
fn next_part_buffer(
buffer: &mut Vec<u8>,
part_idx: u16,
constant_upload_size: bool,
upload_size: usize,
) -> Bytes {
let new_capacity = if constant_upload_size {
// The store does not support variable part sizes, so use the initial size.
initial_upload_size()
upload_size
} else {
// Increase the upload size every 100 parts. This gives maximum part size of 2.5TB.
initial_upload_size().max(((part_idx / 100) as usize + 1) * INITIAL_UPLOAD_STEP)
upload_size.max(((part_idx / 100) as usize + 1) * INITIAL_UPLOAD_STEP)
};
let new_buffer = Vec::with_capacity(new_capacity);
let part = std::mem::replace(buffer, new_buffer);
Expand Down Expand Up @@ -222,6 +243,7 @@ impl ObjectWriter {
&mut mut_self.buffer,
0,
mut_self.use_constant_size_upload_parts,
mut_self.upload_size,
);
futures.spawn(Self::put_part(upload.as_mut(), data, 0, None));

Expand Down Expand Up @@ -386,6 +408,7 @@ impl AsyncWrite for ObjectWriter {
&mut mut_self.buffer,
*part_idx,
mut_self.use_constant_size_upload_parts,
mut_self.upload_size,
);
futures.spawn(
Self::put_part(upload.as_mut(), data, *part_idx, None)
Expand Down Expand Up @@ -820,4 +843,39 @@ mod tests {
assert!(!temp_file_path.exists());
assert!(!file_path.exists());
}

#[test]
fn clamp_initial_upload_size_below_min_is_clamped_up() {
    // Any value under the 5MB floor is raised to it and reported as clamped.
    for raw in [0, INITIAL_UPLOAD_STEP - 1] {
        assert_eq!(clamp_initial_upload_size(raw), (INITIAL_UPLOAD_STEP, true));
    }
}

#[test]
fn clamp_initial_upload_size_within_range_is_unchanged() {
    // Both endpoints and an interior value (40MB) pass through untouched.
    let mid = INITIAL_UPLOAD_STEP * 8;
    for raw in [INITIAL_UPLOAD_STEP, mid, MAX_UPLOAD_PART_SIZE] {
        assert_eq!(clamp_initial_upload_size(raw), (raw, false));
    }
}

#[test]
fn clamp_initial_upload_size_above_max_is_clamped_down() {
    // Any value over the 5GB ceiling is lowered to it and reported as clamped.
    for raw in [MAX_UPLOAD_PART_SIZE + 1, usize::MAX] {
        assert_eq!(clamp_initial_upload_size(raw), (MAX_UPLOAD_PART_SIZE, true));
    }
}
}
Loading