@@ -42,6 +42,12 @@ namespace Amazon.S3.Transfer.Internal
4242 /// <item>Present standard Stream interface for easy integration</item>
4343 /// </list>
4444 ///
45+ /// <para><strong>Size Limits:</strong></para>
46+ /// <para>
47+ /// Maximum supported stream size is approximately 175TB (int.MaxValue * CHUNK_SIZE bytes).
48+ /// This limit exists because chunk indexing uses int for List indexing.
49+ /// </para>
50+ ///
4551 /// <para><strong>Usage Pattern:</strong></para>
4652 /// <code>
4753 /// var stream = new ChunkedBufferStream();
@@ -66,6 +72,12 @@ internal class ChunkedBufferStream : Stream
6672 /// </summary>
6773 private const int CHUNK_SIZE = 81920 ; // 80KB - safely below 85KB LOH threshold
6874
75+ /// <summary>
76+ /// Maximum supported stream size. This limit exists because chunk indexing uses int for List indexing.
77+ /// With 80KB chunks, this allows approximately 175TB of data.
78+ /// </summary>
79+ private const long MAX_STREAM_SIZE = ( long ) int . MaxValue * CHUNK_SIZE ;
80+
6981 private readonly List < byte [ ] > _chunks = new List < byte [ ] > ( ) ;
7082 private long _length = 0 ;
7183 private long _position = 0 ;
@@ -140,6 +152,7 @@ public override long Position
140152 /// <exception cref="NotSupportedException">Thrown if stream is in read mode.</exception>
141153 /// <exception cref="ArgumentNullException">Thrown if buffer is null.</exception>
142154 /// <exception cref="ArgumentOutOfRangeException">Thrown if offset or count is negative or exceeds buffer bounds.</exception>
155+ /// <exception cref="IOException">Thrown if the write would exceed the maximum supported stream size (approximately 175TB).</exception>
143156 public override void Write ( byte [ ] buffer , int offset , int count )
144157 {
145158 ThrowIfDisposed ( ) ;
@@ -156,6 +169,10 @@ public override void Write(byte[] buffer, int offset, int count)
156169 if ( offset + count > buffer . Length )
157170 throw new ArgumentException ( "Offset and count exceed buffer bounds" ) ;
158171
172+ // Check for overflow before writing - prevents chunk index overflow for extremely large streams
173+ if ( _length > MAX_STREAM_SIZE - count )
174+ throw new IOException ( $ "Write would exceed maximum supported stream size of { MAX_STREAM_SIZE } bytes (approximately 175TB).") ;
175+
159176 int remaining = count ;
160177 int sourceOffset = offset ;
161178
@@ -194,11 +211,11 @@ public override void Write(byte[] buffer, int offset, int count)
/// <summary>
/// Asynchronously writes <paramref name="count"/> bytes from <paramref name="buffer"/>
/// by delegating to the synchronous <see cref="Write"/>.
/// </summary>
/// <param name="buffer">The buffer containing the data to write.</param>
/// <param name="offset">The zero-based offset in <paramref name="buffer"/> at which copying begins.</param>
/// <param name="count">The number of bytes to write.</param>
/// <param name="cancellationToken">Token observed before the write begins.</param>
/// <returns>A completed, canceled, or faulted task reflecting the outcome of the write.</returns>
/// <remarks>
/// Delegates to synchronous <see cref="Write"/> as the in-memory chunk operations are fast and
/// don't benefit from async. Per Task-based Asynchronous Pattern guidance, cancellation and
/// write failures are surfaced through the returned task (not thrown synchronously) so callers
/// that store the task before awaiting observe them consistently.
/// </remarks>
public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
{
    // Surface cancellation as a canceled task instead of throwing synchronously.
    if (cancellationToken.IsCancellationRequested)
        return Task.FromCanceled(cancellationToken);

    try
    {
        Write(buffer, offset, count);
        return Task.CompletedTask;
    }
    catch (Exception ex)
    {
        // Fault the task so exceptions flow to awaiters, matching true-async behavior.
        return Task.FromException(ex);
    }
}
203220
204221 /// <summary>
@@ -267,10 +284,10 @@ public override int Read(byte[] buffer, int offset, int count)
/// <summary>
/// Asynchronously reads up to <paramref name="count"/> bytes into <paramref name="buffer"/>
/// by delegating to the synchronous <see cref="Read"/>.
/// </summary>
/// <param name="buffer">The destination buffer for the read data.</param>
/// <param name="offset">The zero-based offset in <paramref name="buffer"/> at which to begin storing data.</param>
/// <param name="count">The maximum number of bytes to read.</param>
/// <param name="cancellationToken">Token observed before the read begins.</param>
/// <returns>A task whose result is the number of bytes read; canceled or faulted on failure.</returns>
/// <remarks>
/// Delegates to synchronous <see cref="Read"/> as the in-memory buffer operations are fast and
/// don't benefit from async. Per Task-based Asynchronous Pattern guidance, cancellation and
/// read failures are surfaced through the returned task (not thrown synchronously) so callers
/// that store the task before awaiting observe them consistently.
/// </remarks>
public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
{
    // Surface cancellation as a canceled task instead of throwing synchronously.
    if (cancellationToken.IsCancellationRequested)
        return Task.FromCanceled<int>(cancellationToken);

    try
    {
        return Task.FromResult(Read(buffer, offset, count));
    }
    catch (Exception ex)
    {
        // Fault the task so exceptions flow to awaiters, matching true-async behavior.
        return Task.FromException<int>(ex);
    }
}
275292
276293 /// <summary>
0 commit comments