[WIP] Concurrent read large file #4

Open · wants to merge 1 commit into base: main
alluxio.py (35 changes: 20 additions & 15 deletions)
@@ -7,6 +7,7 @@
 from PIL import Image
 from requests.adapters import HTTPAdapter
 from torch.utils.data import Dataset
+from concurrent.futures import ThreadPoolExecutor


 class AlluxioDataset(Dataset):
@@ -75,6 +76,8 @@ def __init__(
         self.page_size = humanfriendly.parse_size(alluxio_page_size)
         self._logger = _logger
         self.session = self.create_session(concurrency)
+        self.executor = ThreadPoolExecutor(max_workers=concurrency)
+

     def create_session(self, concurrency):
         session = requests.Session()
@@ -84,25 +87,27 @@ def create_session(self, concurrency):
         session.mount("http://", adapter)
         return session

-    def read_whole_file(self, file_path):
+    def read_whole_file(self, file_path, page_number):
         file_id = self.get_file_id(file_path)
         worker_address = self.get_worker_address(file_id)
-        page_index = 0

-        def page_generator():
-            nonlocal page_index
-            while True:
-                page_content = self.read_file(
-                    worker_address, file_id, page_index
-                )
-                if not page_content:
-                    return
-                yield page_content
-                if len(page_content) < self.page_size:  # last page
-                    return
-                page_index += 1
+        def page_generator(page_index):
+            page_content = self.read_file(worker_address, file_id, page_index)
+            if not page_content:
+                return None
+            if len(page_content) < self.page_size:  # last page
+                return page_content
+            return page_content

-        content = b"".join(page_generator())
+        # Use the executor to map the page_generator function to the data
+        pages = list(self.executor.map(page_generator, range(page_number)))
+
+        # Remove None values from the list
+        pages = [page for page in pages if page is not None]
+
+        content = b"".join(pages)

         return content

     def read_file(self, worker_address, file_id, page_index):
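Two observations on the rewritten read loop. First, `ThreadPoolExecutor.map` yields results in the order of its input iterable, so `b"".join(pages)` reassembles the pages in file order even when the underlying HTTP reads finish out of order. Second, the two trailing branches of the new `page_generator` both return `page_content`; the last-page length check is a leftover from the sequential generator and no longer affects control flow. A tightened version of the closure might read (a sketch, not part of this diff):

```python
        def page_generator(page_index):
            # Fetch one page; falsy content (b"" or None) means the index
            # is past the end of the file, so map it to None for filtering.
            page_content = self.read_file(worker_address, file_id, page_index)
            return page_content or None
```

Since the caller now fixes the page range up front, over-estimating `page_number` costs one wasted request per extra index, while under-estimating silently truncates the file.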
benchmark-large-datasets.py (3 changes: 2 additions & 1 deletion)
@@ -55,6 +55,7 @@ def get_args():
         default="1MB",
     )

+
     return parser.parse_args()


@@ -89,7 +90,7 @@ def benchmark_data_loading(self):
                 1,  # Only using one thread
                 self._logger,
             )
-            alluxio_rest.read_whole_file(self.alluxio_ufs_path)
+            alluxio_rest.read_whole_file(self.alluxio_ufs_path, 5094)
         else:
             self._logger.debug("Using alluxio FUSE/local dataset")
             self._logger.info(f"Loading dataset from {self.path}")
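The hard-coded 5094 pins the page count to one particular test file; any other file would be truncated or over-read. A sketch of deriving the count from the file's size instead, assuming the REST API can report a file length (the `get_file_size` helper below is hypothetical and not part of this diff):

```python
import math

def read_whole_file(self, file_path):
    file_id = self.get_file_id(file_path)
    worker_address = self.get_worker_address(file_id)
    # Hypothetical helper: ask the worker for the file length in bytes.
    file_size = self.get_file_size(worker_address, file_id)
    # One read task per page; a final partial page still counts as a page.
    page_number = math.ceil(file_size / self.page_size)

    def page_generator(page_index):
        return self.read_file(worker_address, file_id, page_index)

    # Fetch all pages concurrently and concatenate them.
    return b"".join(self.executor.map(page_generator, range(page_number)))
```

With the count derived per file, the benchmark call reverts to `alluxio_rest.read_whole_file(self.alluxio_ufs_path)`.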