diff --git a/google/cloud/storage/_experimental/asyncio/async_appendable_object_writer.py b/google/cloud/storage/_experimental/asyncio/async_appendable_object_writer.py
index 12a7a23ce..b4f40b423 100644
--- a/google/cloud/storage/_experimental/asyncio/async_appendable_object_writer.py
+++ b/google/cloud/storage/_experimental/asyncio/async_appendable_object_writer.py
@@ -21,6 +21,7 @@
 if you want to use these Rapid Storage APIs.
 """
 
+from io import BufferedReader
 from typing import Optional, Union
 
 from google_crc32c import Checksum
@@ -339,6 +340,20 @@ async def append_from_stream(self, stream_obj):
         """
         raise NotImplementedError("append_from_stream is not implemented yet.")
 
-    async def append_from_file(self, file_path: str):
-        """Create a file object from `file_path` and call append_from_stream(file_obj)"""
-        raise NotImplementedError("append_from_file is not implemented yet.")
+    async def append_from_file(
+        self, file_obj: BufferedReader, block_size: int = _DEFAULT_FLUSH_INTERVAL_BYTES
+    ):
+        """
+        Appends data to an Appendable Object from ``file_obj``, a file handle
+        opened for reading in binary mode.
+
+        :type file_obj: io.BufferedReader
+        :param file_obj: A file handle opened in binary mode for reading.
+
+        :type block_size: int
+        :param block_size: (Optional) The maximum number of bytes read from
+            ``file_obj`` per call to ``append``. Defaults to
+            ``_DEFAULT_FLUSH_INTERVAL_BYTES``.
+        """
+        while block := file_obj.read(block_size):
+            await self.append(block)
diff --git a/tests/unit/asyncio/test_async_appendable_object_writer.py b/tests/unit/asyncio/test_async_appendable_object_writer.py
index 32dbed38a..31013f9a7 100644
--- a/tests/unit/asyncio/test_async_appendable_object_writer.py
+++ b/tests/unit/asyncio/test_async_appendable_object_writer.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from io import BytesIO
 import pytest
 from unittest import mock
 
@@ -23,6 +24,7 @@
 )
 from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
     _MAX_CHUNK_SIZE_BYTES,
+    _DEFAULT_FLUSH_INTERVAL_BYTES,
 )
 
 from google.cloud import _storage_v2
@@ -287,9 +289,6 @@ async def test_unimplemented_methods_raise_error(mock_client):
     with pytest.raises(NotImplementedError):
         await writer.append_from_stream(mock.Mock())
 
-    with pytest.raises(NotImplementedError):
-        await writer.append_from_file("file.txt")
-
 
 @pytest.mark.asyncio
 @mock.patch(
@@ -536,9 +535,6 @@ async def test_append_flushes_when_buffer_is_full(
     mock_write_object_stream, mock_client
 ):
     """Test that append flushes the stream when the buffer size is reached."""
-    from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
-        _DEFAULT_FLUSH_INTERVAL_BYTES,
-    )
 
     writer = AsyncAppendableObjectWriter(mock_client, BUCKET, OBJECT)
     writer._is_stream_open = True
@@ -559,9 +555,6 @@
 )
 async def test_append_handles_large_data(mock_write_object_stream, mock_client):
     """Test that append handles data larger than the buffer size."""
-    from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
-        _DEFAULT_FLUSH_INTERVAL_BYTES,
-    )
 
     writer = AsyncAppendableObjectWriter(mock_client, BUCKET, OBJECT)
     writer._is_stream_open = True
@@ -602,3 +595,32 @@ async def test_append_data_two_times(mock_write_object_stream, mock_client):
     total_data_length = len(data1) + len(data2)
     assert writer.offset == total_data_length
     assert writer.simple_flush.await_count == 0
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+    "file_size, block_size",
+    [
+        (10, 4 * 1024),
+        (0, _DEFAULT_FLUSH_INTERVAL_BYTES),
+        (20 * 1024 * 1024, _DEFAULT_FLUSH_INTERVAL_BYTES),
+        (16 * 1024 * 1024, _DEFAULT_FLUSH_INTERVAL_BYTES),
+    ],
+)
+async def test_append_from_file(file_size, block_size, mock_client):
+    # arrange
+    fp = BytesIO(b"a" * file_size)
+    writer = AsyncAppendableObjectWriter(mock_client, BUCKET, OBJECT)
+    writer._is_stream_open = True
+    writer.append = mock.AsyncMock()
+
+    # act
+    await writer.append_from_file(fp, block_size=block_size)
+
+    # assert
+    expected_calls = (
+        file_size // block_size
+        if file_size % block_size == 0
+        else file_size // block_size + 1
+    )
+    assert writer.append.await_count == expected_calls
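
A rough usage sketch of the new coroutine follows, assuming the writer's append stream has already been opened. Only append_from_file(file_obj, block_size=...) and the AsyncAppendableObjectWriter(client, bucket, object) constructor are taken from this change; the upload_file helper, the grpc_client/bucket/object_name/path parameters, and the writer.open()/writer.close() calls are illustrative assumptions.

    from google.cloud.storage._experimental.asyncio.async_appendable_object_writer import (
        AsyncAppendableObjectWriter,
    )

    async def upload_file(grpc_client, bucket, object_name, path):
        # Constructor signature as exercised in the unit tests above.
        writer = AsyncAppendableObjectWriter(grpc_client, bucket, object_name)
        await writer.open()  # hypothetical: open the append stream before writing
        with open(path, "rb") as file_obj:
            # Reads the file in block_size chunks and awaits append() once per chunk;
            # block_size defaults to _DEFAULT_FLUSH_INTERVAL_BYTES.
            await writer.append_from_file(file_obj)
        await writer.close()  # hypothetical: finalize and close the stream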