Skip to content

Commit 3ac6179

Browse files
committed
pr comments
1 parent 23d6e7a commit 3ac6179

File tree

2 files changed

+27
-5
lines changed

2 files changed

+27
-5
lines changed

sdk/src/Services/S3/Custom/Transfer/Internal/ChunkedBufferStream.cs

Lines changed: 21 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,12 @@ namespace Amazon.S3.Transfer.Internal
4242
/// <item>Present standard Stream interface for easy integration</item>
4343
/// </list>
4444
///
45+
/// <para><strong>Size Limits:</strong></para>
46+
/// <para>
47+
/// Maximum supported stream size is approximately 175TB (int.MaxValue * CHUNK_SIZE bytes).
48+
/// This limit exists because chunk indexing uses int for List indexing.
49+
/// </para>
50+
///
4551
/// <para><strong>Usage Pattern:</strong></para>
4652
/// <code>
4753
/// var stream = new ChunkedBufferStream();
@@ -66,6 +72,12 @@ internal class ChunkedBufferStream : Stream
6672
/// </summary>
6773
private const int CHUNK_SIZE = 81920; // 80KB - safely below 85KB LOH threshold
6874

75+
/// <summary>
76+
/// Maximum supported stream size. This limit exists because chunk indexing uses int for List indexing.
77+
/// With 80KB chunks, this allows approximately 175TB of data.
78+
/// </summary>
79+
private const long MAX_STREAM_SIZE = (long)int.MaxValue * CHUNK_SIZE;
80+
6981
private readonly List<byte[]> _chunks = new List<byte[]>();
7082
private long _length = 0;
7183
private long _position = 0;
@@ -140,6 +152,7 @@ public override long Position
140152
/// <exception cref="NotSupportedException">Thrown if stream is in read mode.</exception>
141153
/// <exception cref="ArgumentNullException">Thrown if buffer is null.</exception>
142154
/// <exception cref="ArgumentOutOfRangeException">Thrown if offset or count is negative or exceeds buffer bounds.</exception>
155+
/// <exception cref="IOException">Thrown if the write would exceed the maximum supported stream size (approximately 175TB).</exception>
143156
public override void Write(byte[] buffer, int offset, int count)
144157
{
145158
ThrowIfDisposed();
@@ -156,6 +169,10 @@ public override void Write(byte[] buffer, int offset, int count)
156169
if (offset + count > buffer.Length)
157170
throw new ArgumentException("Offset and count exceed buffer bounds");
158171

172+
// Check for overflow before writing - prevents chunk index overflow for extremely large streams
173+
if (_length > MAX_STREAM_SIZE - count)
174+
throw new IOException($"Write would exceed maximum supported stream size of {MAX_STREAM_SIZE} bytes (approximately 175TB).");
175+
159176
int remaining = count;
160177
int sourceOffset = offset;
161178

@@ -194,11 +211,11 @@ public override void Write(byte[] buffer, int offset, int count)
194211
/// <remarks>
/// Completes synchronously: the underlying <see cref="Write"/> only copies bytes into
/// in-memory buffers, so there is no genuinely asynchronous work worth scheduling.
/// </remarks>
public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
{
    // Observe cancellation up front; throws before any bytes are buffered.
    cancellationToken.ThrowIfCancellationRequested();

    Write(buffer, offset, count);

    // Nothing was awaited, so hand back the runtime's cached completed task.
    return Task.CompletedTask;
}
203220

204221
/// <summary>
@@ -267,10 +284,10 @@ public override int Read(byte[] buffer, int offset, int count)
267284
/// <remarks>
/// Completes synchronously: <see cref="Read"/> copies from in-memory buffers, so the
/// result is simply wrapped in an already-completed task.
/// </remarks>
public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
{
    // Surface cancellation synchronously before performing the copy.
    cancellationToken.ThrowIfCancellationRequested();

    int bytesRead = Read(buffer, offset, count);
    return Task.FromResult(bytesRead);
}
275292

276293
/// <summary>

sdk/src/Services/S3/Custom/Transfer/Internal/ChunkedPartDataSource.cs

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ namespace Amazon.S3.Transfer.Internal
4545
/// var dataSource = new ChunkedPartDataSource(partNumber, chunkedStream);
4646
///
4747
/// // Added to PartBufferManager
48-
/// _partBufferManager.AddBuffer(dataSource);
48+
/// _partBufferManager.AddDataSource(dataSource);
4949
///
5050
/// // Consumer reads sequentially
5151
/// await dataSource.ReadAsync(buffer, offset, count, ct);
@@ -79,6 +79,7 @@ internal class ChunkedPartDataSource : IPartDataSource
7979
/// <param name="partNumber">The part number for ordering.</param>
8080
/// <param name="stream">The ChunkedBufferStream containing the buffered part data. Must be in read mode.</param>
8181
/// <exception cref="ArgumentNullException">Thrown if stream is null.</exception>
82+
/// <exception cref="InvalidOperationException">Thrown if the stream is not in read mode (CanRead is false).</exception>
8283
public ChunkedPartDataSource(int partNumber, ChunkedBufferStream stream)
8384
{
8485
PartNumber = partNumber;
@@ -100,8 +101,12 @@ public ChunkedPartDataSource(int partNumber, ChunkedBufferStream stream)
100101
/// of bytes read into the buffer. This can be less than the requested count if that many bytes
101102
/// are not currently available, or zero if the end of the stream is reached.
102103
/// </returns>
104+
/// <exception cref="ObjectDisposedException">Thrown if the object has been disposed.</exception>
public async Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
{
    if (!_disposed)
    {
        // Delegate straight to the buffered part stream; it performs the actual copy.
        return await _stream.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false);
    }

    // Fail fast after disposal instead of surfacing an error from the released stream.
    throw new ObjectDisposedException(nameof(ChunkedPartDataSource));
}
107112

0 commit comments

Comments
 (0)