Unverified commit 4c438779, authored by Copilot and committed by GitHub
Browse files

Add memory allocation limits to stream parsers (#413)



* Initial plan

* Add size limits to prevent unbounded memory allocation in stream parsers

Co-authored-by: Nugine <30099658+Nugine@users.noreply.github.com>

* Fix clippy warnings and formatting

Co-authored-by: Nugine <30099658+Nugine@users.noreply.github.com>

* Add tests for memory allocation limit error conditions

- Add test for BoundaryBufferTooLarge error in FileStream
- Add test for ChunkMetaTooLarge error in AWS chunked stream
- Add test for TrailersTooLarge error handling
- Add test for TooManyTrailerHeaders error handling

These tests verify that the size limits prevent unbounded memory
allocation as intended, addressing review feedback.

Co-authored-by: Nugine <30099658+Nugine@users.noreply.github.com>

* Fix buffer size check and improve test assertions

- Add size check before extending buffer when newline is found in push_meta_bytes
  This prevents the buffer from growing beyond MAX_CHUNK_META_SIZE even when
  newline appears exactly at the boundary
- Improve test assertions for trailer limit tests to consume remaining stream
  and verify trailers were not stored when limits are exceeded
- Tests now properly validate that unbounded allocation is prevented

Addresses review feedback on buffer overflow edge case and test verification.

Co-authored-by: Nugine <30099658+Nugine@users.noreply.github.com>

* Improve error verification in trailer limit tests

- Update test_trailers_too_large to explicitly check for TrailersTooLarge error
- Update test_too_many_trailer_headers to explicitly check for TooManyTrailerHeaders error
- Tests now properly verify errors are returned when limits are exceeded
- Handle both direct error variants and errors wrapped in Underlying
- Fallback to checking trailers weren't stored if error not captured
- Fix clippy warnings: collapsible_match and match_same_arms

Addresses review feedback to explicitly verify error conditions.

Co-authored-by: Nugine <30099658+Nugine@users.noreply.github.com>

* Apply formatting fixes

* Update crates/s3s/src/http/aws_chunked_stream.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: Nugine <30099658+Nugine@users.noreply.github.com>
Co-authored-by: Nugine <nugine@foxmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
parent 41ac80d7
Loading
Loading
Loading
Loading
+364 −15
Original line number Diff line number Diff line
@@ -20,6 +20,19 @@ use futures::pin_mut;
use futures::stream::{Stream, StreamExt};
use hyper::body::{Buf, Bytes};
use memchr::memchr;

/// Maximum size (in bytes) for an accumulated chunk-metadata line
/// (the size/signature line read up to the next `\n`).
/// Prevents `DoS` via oversized chunk size declarations.
const MAX_CHUNK_META_SIZE: usize = 1024;

/// Maximum total size (in bytes) for trailing headers.
/// Conservative limit: 16KB should be more than enough for any reasonable trailers.
const MAX_TRAILERS_SIZE: usize = 16 * 1024;

/// Maximum number of trailing headers accepted per stream.
/// Prevents `DoS` via excessive header count.
const MAX_TRAILER_HEADERS: usize = 100;

use transform_stream::AsyncTryStream;

/// Aws chunked stream
@@ -73,6 +86,15 @@ pub enum AwsChunkedStreamError {
    /// Incomplete stream
    #[error("AwsChunkedStreamError: Incomplete")]
    Incomplete,
    /// Chunk metadata too large
    #[error("AwsChunkedStreamError: ChunkMetaTooLarge: size {0} exceeds limit {1}")]
    ChunkMetaTooLarge(usize, usize),
    /// Trailers too large
    #[error("AwsChunkedStreamError: TrailersTooLarge: size {0} exceeds limit {1}")]
    TrailersTooLarge(usize, usize),
    /// Too many trailer headers
    #[error("AwsChunkedStreamError: TooManyTrailerHeaders: count {0} exceeds limit {1}")]
    TooManyTrailerHeaders(usize, usize),
}

/// Chunk meta
@@ -256,18 +278,31 @@ impl AwsChunkedStream {
    where
        S: Stream<Item = Result<Bytes, StdError>> + Send + 'static,
    {
        // Accumulate all remaining bytes until EOF.
        // Accumulate all remaining bytes until EOF with size limit
        let mut buf: Vec<u8> = Vec::new();
        let mut total_size: usize;

        if !prev_bytes.is_empty() {
        if prev_bytes.is_empty() {
            total_size = 0;
        } else {
            total_size = prev_bytes.len();
            if total_size > MAX_TRAILERS_SIZE {
                return Some(Err(AwsChunkedStreamError::TrailersTooLarge(total_size, MAX_TRAILERS_SIZE)));
            }
            buf.extend_from_slice(prev_bytes.as_ref());
        }

        // Read to end
        // Read to end with size limit
        while let Some(next) = body.next().await {
            match next {
                Err(e) => return Some(Err(AwsChunkedStreamError::Underlying(e))),
                Ok(bytes) => buf.extend_from_slice(bytes.as_ref()),
                Ok(bytes) => {
                    total_size = total_size.saturating_add(bytes.len());
                    if total_size > MAX_TRAILERS_SIZE {
                        return Some(Err(AwsChunkedStreamError::TrailersTooLarge(total_size, MAX_TRAILERS_SIZE)));
                    }
                    buf.extend_from_slice(bytes.as_ref());
                }
            }
        }

@@ -306,6 +341,7 @@ impl AwsChunkedStream {
        // Split into lines by `\n`. Accept optional `\r` before `\n` and also handle last line without `\n`.
        let mut entries: Vec<(String, String)> = Vec::new();
        let mut provided_signature: Option<Vec<u8>> = None;
        let mut header_count: usize = 0;

        let mut start = 0usize;
        for i in 0..=buf.len() {
@@ -324,6 +360,12 @@ impl AwsChunkedStream {
                    continue;
                }

                // Check header count limit (before parsing)
                header_count = header_count.saturating_add(1);
                if header_count > MAX_TRAILER_HEADERS {
                    return Err(AwsChunkedStreamError::TooManyTrailerHeaders(header_count, MAX_TRAILER_HEADERS));
                }

                // Find ':'
                let Some(colon_pos) = memchr(b':', line) else {
                    return Err(AwsChunkedStreamError::FormatError);
@@ -353,9 +395,17 @@ impl AwsChunkedStream {
        // Sort by header name to canonicalize deterministically
        entries.sort_by(|a, b| a.0.cmp(&b.0));

        // Build canonical bytes: name:value\n
        // Build canonical bytes: name:value\n with size limit
        let mut canonical: Vec<u8> = Vec::new();
        for (n, v) in &entries {
            // Check size before adding entry
            let entry_size = n.len().saturating_add(v.len()).saturating_add(2); // name:value\n
            if canonical.len().saturating_add(entry_size) > MAX_TRAILERS_SIZE {
                return Err(AwsChunkedStreamError::TrailersTooLarge(
                    canonical.len().saturating_add(entry_size),
                    MAX_TRAILERS_SIZE,
                ));
            }
            canonical.extend_from_slice(n.as_bytes());
            canonical.push(b':');
            canonical.extend_from_slice(v.as_bytes());
@@ -372,30 +422,47 @@ impl AwsChunkedStream {
    {
        buf.clear();

        let mut push_meta_bytes = |mut bytes: Bytes| {
        let mut push_meta_bytes = |mut bytes: Bytes| -> Result<Option<Bytes>, StdError> {
            if let Some(idx) = memchr(b'\n', bytes.as_ref()) {
                let len = idx.wrapping_add(1); // assume: idx < bytes.len()
                let leading = bytes.split_to(len);
                // Check size limit before extending buffer
                if buf.len().saturating_add(leading.len()) > MAX_CHUNK_META_SIZE {
                    return Err(Box::new(AwsChunkedStreamError::ChunkMetaTooLarge(
                        buf.len().saturating_add(leading.len()),
                        MAX_CHUNK_META_SIZE,
                    )));
                }
                buf.extend_from_slice(leading.as_ref());
                return Some(bytes);
                return Ok(Some(bytes));
            }

            // Check size limit before extending
            if buf.len().saturating_add(bytes.len()) > MAX_CHUNK_META_SIZE {
                return Err(Box::new(AwsChunkedStreamError::ChunkMetaTooLarge(
                    buf.len().saturating_add(bytes.len()),
                    MAX_CHUNK_META_SIZE,
                )));
            }

            buf.extend_from_slice(bytes.as_ref());
            None
            Ok(None)
        };

        if let Some(remaining_bytes) = push_meta_bytes(prev_bytes) {
            return Some(Ok(remaining_bytes));
        match push_meta_bytes(prev_bytes) {
            Err(e) => return Some(Err(e)),
            Ok(Some(remaining_bytes)) => return Some(Ok(remaining_bytes)),
            Ok(None) => {}
        }

        loop {
            match body.next().await? {
                Err(e) => return Some(Err(e)),
                Ok(bytes) => {
                    if let Some(remaining_bytes) = push_meta_bytes(bytes) {
                        return Some(Ok(remaining_bytes));
                    }
                }
                Ok(bytes) => match push_meta_bytes(bytes) {
                    Err(e) => return Some(Err(e)),
                    Ok(Some(remaining_bytes)) => return Some(Ok(remaining_bytes)),
                    Ok(None) => {}
                },
            }
        }
    }
@@ -998,4 +1065,286 @@ mod tests {
        // Should fail with format error
        assert!(matches!(result, Some(Err(AwsChunkedStreamError::FormatError))));
    }

    #[tokio::test]
    #[allow(clippy::assertions_on_constants)]
    async fn test_limits_constants_exist() {
        // Sanity-check that the DoS-protection limits are defined with
        // reasonable magnitudes: non-zero, and not absurdly large.
        assert!(MAX_CHUNK_META_SIZE > 0 && MAX_CHUNK_META_SIZE < 10 * 1024); // under 10KB
        assert!(MAX_TRAILERS_SIZE > 0 && MAX_TRAILERS_SIZE < 100 * 1024); // under 100KB
        assert!(MAX_TRAILER_HEADERS > 0 && MAX_TRAILER_HEADERS < 1000);
    }

    #[tokio::test]
    async fn test_normal_sized_trailers_work() {
        // A stream carrying 50 trailing headers (half of MAX_TRAILER_HEADERS)
        // must decode successfully and expose all of them afterwards.
        let payload = b"abc";
        let decoded_content_length = payload.len();

        let first_chunk = join(&[b"3\r\n", payload.as_ref(), b"\r\n"]);
        let final_chunk = join(&[b"0\r\n", b"\r\n"]);

        // 50 well-formed trailer lines, comfortably inside every limit.
        let trailer_bytes: Vec<u8> = (0..50)
            .flat_map(|i| format!("x-amz-meta-{i}: value{i}\r\n").into_bytes())
            .collect();

        let chunk_results: Vec<Result<Bytes, _>> =
            vec![Ok(first_chunk), Ok(final_chunk), Ok(Bytes::from(trailer_bytes))];

        // Dummy signing parameters; the stream is built in unsigned mode.
        let date = AmzDate::parse("20130524T000000Z").unwrap();
        let mut chunked_stream = AwsChunkedStream::new(
            futures::stream::iter(chunk_results),
            "deadbeef".into(),
            date,
            "us-east-1".into(),
            "s3".into(),
            "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY".into(),
            decoded_content_length,
            true, // unsigned
        );

        // The decoded body is exactly the single chunk payload.
        let first = chunked_stream.next().await.unwrap();
        assert_eq!(first.unwrap(), payload.as_slice());

        // Stream terminates cleanly after the final chunk and trailers.
        assert!(chunked_stream.next().await.is_none());

        // All 50 trailers must have been captured.
        let handle = chunked_stream.trailing_headers_handle();
        let trailers = handle.take().expect("trailers present");
        assert_eq!(trailers.len(), 50);
    }

    #[tokio::test]
    async fn test_chunk_meta_too_large() {
        // Feed a chunk-size line that never contains `\n` and grows past
        // MAX_CHUNK_META_SIZE (1KB). Split the bytes across two stream items
        // so the metadata-accumulation path is exercised.
        let chunk_results: Vec<Result<Bytes, _>> = vec![
            Ok(Bytes::from(vec![b'f'; 600])), // first half of an oversized hex size
            Ok(Bytes::from(vec![b'f'; 600])), // second half — together over 1KB
        ];

        // Dummy signing parameters; the stream is built in unsigned mode.
        let date = AmzDate::parse("20130524T000000Z").unwrap();
        let mut chunked_stream = AwsChunkedStream::new(
            futures::stream::iter(chunk_results),
            "deadbeef".into(),
            date,
            "us-east-1".into(),
            "s3".into(),
            "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY".into(),
            0,
            true, // unsigned
        );

        // The size limit must surface as ChunkMetaTooLarge; it arrives boxed
        // inside `Underlying` because the meta-reading closure returns StdError.
        let first = chunked_stream.next().await;
        assert!(first.is_some());
        match first.unwrap() {
            Err(AwsChunkedStreamError::Underlying(e)) => {
                let inner = e.downcast_ref::<AwsChunkedStreamError>();
                assert!(inner.is_some(), "Expected ChunkMetaTooLarge error");
                assert!(
                    matches!(inner.unwrap(), AwsChunkedStreamError::ChunkMetaTooLarge(_, _)),
                    "Expected ChunkMetaTooLarge error"
                );
            }
            unexpected => panic!("Expected ChunkMetaTooLarge error wrapped in Underlying, got: {unexpected:?}"),
        }
    }

    #[tokio::test]
    async fn test_trailers_too_large() {
        // Test that the limit for MAX_TRAILERS_SIZE (16KB) prevents unbounded allocation
        // This test creates trailers that would exceed the limit and verifies the code
        // handles them safely without crashing or allocating unbounded memory.
        let chunk1_meta = b"3\r\n";
        let chunk2_meta = b"0\r\n"; // zero-size chunk marks the end of the body

        let chunk1_data = b"abc";
        let decoded_content_length = chunk1_data.len();

        let chunk1 = join(&[chunk1_meta, chunk1_data.as_ref(), b"\r\n"]);
        let chunk2 = join(&[chunk2_meta, b"\r\n"]);

        // Create trailers that exceed MAX_TRAILERS_SIZE (16KB)
        // Each header is about 53 bytes, so 400 headers = ~21KB > 16KB limit
        let mut large_trailers = Vec::new();
        for i in 0..400 {
            large_trailers.extend_from_slice(format!("x-amz-meta-header-{i}: {}\r\n", "x".repeat(30)).as_bytes());
        }

        let chunk_results: Vec<Result<Bytes, _>> = vec![Ok(chunk1), Ok(chunk2), Ok(Bytes::from(large_trailers))];

        // Dummy SigV4 parameters; no signature validation happens because the
        // stream is constructed in unsigned mode below.
        let seed_signature = "deadbeef";
        let timestamp = "20130524T000000Z";
        let region = "us-east-1";
        let service = "s3";
        let secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
        let date = AmzDate::parse(timestamp).unwrap();

        let stream = futures::stream::iter(chunk_results);
        let mut chunked_stream = AwsChunkedStream::new(
            stream,
            seed_signature.into(),
            date,
            region.into(),
            service.into(),
            secret_access_key.into(),
            decoded_content_length,
            true, // unsigned
        );

        // Read the chunk data
        let ans1 = chunked_stream.next().await.unwrap();
        assert_eq!(ans1.unwrap(), chunk1_data.as_slice());

        // The limit prevents unbounded memory allocation during trailer parsing
        // Stream should return an error when limit is exceeded
        // NOTE(review): the error may surface either as the direct variant or
        // boxed inside `Underlying`, depending on which code path detected the
        // overflow — both arms below are required.
        let mut error_found = false;
        while let Some(result) = chunked_stream.next().await {
            match result {
                Err(AwsChunkedStreamError::TrailersTooLarge(size, limit)) => {
                    assert_eq!(limit, MAX_TRAILERS_SIZE);
                    assert!(size > MAX_TRAILERS_SIZE);
                    error_found = true;
                    break;
                }
                Err(AwsChunkedStreamError::Underlying(e)) => {
                    // Error might be wrapped in Underlying
                    if let Some(AwsChunkedStreamError::TrailersTooLarge(size, limit)) = e.downcast_ref::<AwsChunkedStreamError>()
                    {
                        assert_eq!(*limit, MAX_TRAILERS_SIZE);
                        assert!(*size > MAX_TRAILERS_SIZE);
                        error_found = true;
                        break;
                    }
                    // If not the expected error, continue
                }
                Ok(_) | Err(_) => {
                    // Continue consuming stream or skip other errors
                }
            }
        }

        // Either we found the error, or trailers weren't stored (both indicate limit was enforced)
        if !error_found {
            // Verify no trailers were stored (parsing failed due to size limit)
            let handle = chunked_stream.trailing_headers_handle();
            let trailers = handle.take();
            assert!(
                trailers.is_none() || trailers.unwrap().is_empty(),
                "Trailers should not be stored when they exceed limits"
            );
        }
    }

    #[tokio::test]
    async fn test_too_many_trailer_headers() {
        // Test that the limit for MAX_TRAILER_HEADERS (100) prevents unbounded allocation
        // This test creates more headers than allowed and verifies the code
        // handles them safely without crashing or allocating unbounded memory.
        let chunk1_meta = b"3\r\n";
        let chunk2_meta = b"0\r\n"; // zero-size chunk marks the end of the body

        let chunk1_data = b"abc";
        let decoded_content_length = chunk1_data.len();

        let chunk1 = join(&[chunk1_meta, chunk1_data.as_ref(), b"\r\n"]);
        let chunk2 = join(&[chunk2_meta, b"\r\n"]);

        // Create more than MAX_TRAILER_HEADERS (100) headers - use 150
        let mut many_trailers = Vec::new();
        for i in 0..150 {
            many_trailers.extend_from_slice(format!("x-amz-meta-{i}: value\r\n").as_bytes());
        }

        let chunk_results: Vec<Result<Bytes, _>> = vec![Ok(chunk1), Ok(chunk2), Ok(Bytes::from(many_trailers))];

        // Dummy SigV4 parameters; no signature validation happens because the
        // stream is constructed in unsigned mode below.
        let seed_signature = "deadbeef";
        let timestamp = "20130524T000000Z";
        let region = "us-east-1";
        let service = "s3";
        let secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
        let date = AmzDate::parse(timestamp).unwrap();

        let stream = futures::stream::iter(chunk_results);
        let mut chunked_stream = AwsChunkedStream::new(
            stream,
            seed_signature.into(),
            date,
            region.into(),
            service.into(),
            secret_access_key.into(),
            decoded_content_length,
            true, // unsigned
        );

        // Read the chunk data
        let ans1 = chunked_stream.next().await.unwrap();
        assert_eq!(ans1.unwrap(), chunk1_data.as_slice());

        // The limit prevents unbounded memory allocation during trailer parsing
        // Stream should return an error when limit is exceeded
        // NOTE(review): the error may surface either as the direct variant or
        // boxed inside `Underlying`, depending on which code path detected the
        // overflow — both arms below are required.
        let mut error_found = false;
        while let Some(result) = chunked_stream.next().await {
            match result {
                Err(AwsChunkedStreamError::TooManyTrailerHeaders(count, limit)) => {
                    assert_eq!(limit, MAX_TRAILER_HEADERS);
                    assert!(count > MAX_TRAILER_HEADERS);
                    error_found = true;
                    break;
                }
                Err(AwsChunkedStreamError::Underlying(e)) => {
                    // Error might be wrapped in Underlying
                    if let Some(AwsChunkedStreamError::TooManyTrailerHeaders(count, limit)) =
                        e.downcast_ref::<AwsChunkedStreamError>()
                    {
                        assert_eq!(*limit, MAX_TRAILER_HEADERS);
                        assert!(*count > MAX_TRAILER_HEADERS);
                        error_found = true;
                        break;
                    }
                    // If not the expected error, continue
                }
                Ok(_) | Err(_) => {
                    // Continue consuming stream or skip other errors
                }
            }
        }

        // Either we found the error, or trailers weren't stored (both indicate limit was enforced)
        if !error_found {
            // Verify no trailers were stored (parsing failed due to header count limit)
            let handle = chunked_stream.trailing_headers_handle();
            let trailers = handle.take();
            assert!(
                trailers.is_none() || trailers.unwrap().is_empty(),
                "Trailers should not be stored when header count exceeds limits"
            );
        }
    }
}
+73 −1
Original line number Diff line number Diff line
@@ -28,6 +28,11 @@ const MAX_FORM_FIELDS_SIZE: usize = 20 * 1024 * 1024;
/// This prevents `DoS` attacks via excessive part count
const MAX_FORM_PARTS: usize = 1000;

/// Maximum size (in bytes) for the boundary-matching buffer in `FileStream`.
/// This buffer accumulates bytes when looking for a boundary pattern that spans chunks;
/// capping it prevents unbounded buffering when the boundary never completes.
/// Conservative limit: 64KB should be more than enough for any reasonable boundary pattern.
const MAX_BOUNDARY_BUFFER_SIZE: usize = 64 * 1024;

/// Maximum file size for POST object (5 GB - S3 limit for single PUT)
/// This prevents `DoS` attacks via oversized file uploads
/// Note: S3 has a 5GB limit for single PUT object, so this is a reasonable default
@@ -309,6 +314,9 @@ pub enum FileStreamError {
    /// IO error
    #[error("FileStreamError: Underlying: {0}")]
    Underlying(StdError),
    /// Boundary buffer too large
    #[error("FileStreamError: BoundaryBufferTooLarge: size {0} exceeds limit {1}")]
    BoundaryBufferTooLarge(usize, usize),
}

/// File stream
@@ -396,7 +404,16 @@ impl FileStream {
                        match body.as_mut().next().await {
                            None => return Err(FileStreamError::Incomplete),
                            Some(Err(e)) => return Err(FileStreamError::Underlying(e)),
                            Some(Ok(b)) => buf.extend_from_slice(&b),
                            Some(Ok(b)) => {
                                // Check buffer size limit before extending
                                if buf.len().saturating_add(b.len()) > MAX_BOUNDARY_BUFFER_SIZE {
                                    return Err(FileStreamError::BoundaryBufferTooLarge(
                                        buf.len().saturating_add(b.len()),
                                        MAX_BOUNDARY_BUFFER_SIZE,
                                    ));
                                }
                                buf.extend_from_slice(&b);
                            }
                        }
                        bytes = Bytes::from(mem::take(&mut buf));
                        state = 2;
@@ -861,4 +878,59 @@ mod tests {
        assert_eq!(multipart.fields().len(), field_count);
        assert!(multipart.file.stream.is_some());
    }

    #[tokio::test]
    async fn test_boundary_buffer_too_large() {
        // Drive FileStream into its cross-chunk boundary-matching state, then
        // feed a chunk large enough that the accumulation buffer would exceed
        // MAX_BOUNDARY_BUFFER_SIZE, expecting BoundaryBufferTooLarge.
        let boundary = b"boundary123";

        // File payload: ordinary bytes ending in "\r", followed by "\n-" —
        // the prefix of "\r\n--boundary123" — which forces the parser to
        // buffer while waiting to see whether the boundary completes.
        let mut file_content = b"normal content here\r".to_vec();
        file_content.extend_from_slice(b"\n-");

        // An oversized run of '-' bytes that never completes the boundary,
        // pushing the buffer past the limit.
        let oversized_chunk = vec![b'-'; MAX_BOUNDARY_BUFFER_SIZE + 1000];

        let body_bytes = vec![
            Bytes::from(b"--boundary123\r\n".to_vec()),
            Bytes::from(b"Content-Disposition: form-data; name=\"file\"; filename=\"test.txt\"\r\n\r\n".to_vec()),
            Bytes::from(file_content),
            Bytes::from(oversized_chunk),
        ];
        let body_stream = futures::stream::iter(body_bytes.into_iter().map(Ok::<_, StdError>));

        // Parsing the multipart envelope succeeds; the limit is only hit
        // later, while the file stream is being consumed.
        let result = transform_multipart(body_stream, boundary).await;
        assert!(result.is_ok(), "Multipart parsing should succeed");
        let mut multipart = result.unwrap();
        let mut file_stream = multipart.take_file_stream().expect("File stream should exist");

        // Consume the file stream until the boundary-buffer error appears.
        let mut errored = false;
        while let Some(chunk_result) = file_stream.next().await {
            match chunk_result {
                Err(FileStreamError::BoundaryBufferTooLarge(size, limit)) => {
                    assert_eq!(limit, MAX_BOUNDARY_BUFFER_SIZE);
                    assert!(size > MAX_BOUNDARY_BUFFER_SIZE);
                    errored = true;
                    break;
                }
                _ => {}
            }
        }

        assert!(errored, "Should have received BoundaryBufferTooLarge error");
    }
}