Skip to content
Merged
Show file tree
Hide file tree
Changes from 14 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions src/datasets/arrow_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -1120,6 +1120,7 @@ def from_generator(
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
split: NamedSplit = Split.TRAIN,
fingerprint: Optional[str] = None,
**kwargs,
):
"""Create a Dataset from a generator.
Expand All @@ -1146,6 +1147,12 @@ def from_generator(
Split name to be assigned to the dataset.

<Added version="2.21.0"/>
fingerprint (`str`, *optional*):
Fingerprint that will be used to generate dataset ID.
            By default `fingerprint` is generated by hashing the generator function and all the args, which can be slow
if it uses large objects like AI models.

<Added version="4.3.0"/>
**kwargs (additional keyword arguments):
            Keyword arguments to be passed to [`GeneratorConfig`].

Expand Down Expand Up @@ -1183,6 +1190,7 @@ def from_generator(
gen_kwargs=gen_kwargs,
num_proc=num_proc,
split=split,
fingerprint=fingerprint,
**kwargs,
).read()

Expand Down
13 changes: 8 additions & 5 deletions src/datasets/builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -313,6 +313,7 @@ def __init__(
data_dir: Optional[str] = None,
storage_options: Optional[dict] = None,
writer_batch_size: Optional[int] = None,
config_id: Optional[str] = None,
**config_kwargs,
):
# DatasetBuilder name
Expand Down Expand Up @@ -343,6 +344,7 @@ def __init__(
self.config, self.config_id = self._create_builder_config(
config_name=config_name,
custom_features=features,
config_id=config_id,
**config_kwargs,
)

Expand Down Expand Up @@ -502,7 +504,7 @@ def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> st
return legacy_relative_data_dir

def _create_builder_config(
self, config_name=None, custom_features=None, **config_kwargs
self, config_name=None, custom_features=None, config_id=None, **config_kwargs
) -> tuple[BuilderConfig, str]:
"""Create and validate BuilderConfig object as well as a unique config id for this config.
Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None.
Expand Down Expand Up @@ -570,10 +572,11 @@ def _create_builder_config(
)

# compute the config id that is going to be used for caching
config_id = builder_config.create_config_id(
config_kwargs,
custom_features=custom_features,
)
if config_id is None:
config_id = builder_config.create_config_id(
config_kwargs,
custom_features=custom_features,
)
is_custom = (config_id not in self.builder_configs) and config_id != "default"
if is_custom:
logger.info(f"Using custom data configuration {config_id}")
Expand Down
4 changes: 4 additions & 0 deletions src/datasets/io/generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ def __init__(
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
split: NamedSplit = Split.TRAIN,
fingerprint: Optional[str] = None,
**kwargs,
):
super().__init__(
Expand All @@ -32,6 +33,7 @@ def __init__(
generator=generator,
gen_kwargs=gen_kwargs,
split=split,
config_id="default-fingerprint=" + fingerprint if fingerprint else None,
**kwargs,
)

Expand All @@ -56,4 +58,6 @@ def read(self):
dataset = self.builder.as_dataset(
split=self.builder.config.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
if self.fingerprint:
dataset._fingerprint = self.fingerprint
return dataset
10 changes: 10 additions & 0 deletions tests/test_arrow_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -4114,6 +4114,16 @@ def test_dataset_from_generator_split(split, data_generator, tmp_path):
_check_generator_dataset(dataset, expected_features, expected_split)


@pytest.mark.parametrize("fingerprint", [None, "test-dataset"])
def test_dataset_from_generator_fingerprint(fingerprint, data_generator, tmp_path):
    """Check that an explicitly supplied ``fingerprint`` is propagated to the built dataset."""
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = Dataset.from_generator(
        data_generator,
        cache_dir=tmp_path / "cache",
        fingerprint=fingerprint,
    )
    _check_generator_dataset(dataset, expected_features, NamedSplit("train"))
    # Only assert on the fingerprint when one was passed; otherwise it is auto-computed.
    if fingerprint:
        assert dataset._fingerprint == fingerprint


@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
Expand Down