"""Snowstorm dataset module hosted on the IceCube Collaboration servers."""

import os
import re
from glob import glob
from typing import Any, Dict, List, Optional, Tuple, Union

import pandas as pd
from sklearn.model_selection import train_test_split

from graphnet.data.constants import FEATURES, TRUTH
from graphnet.data.curated_datamodule import IceCubeHostedDataset
from graphnet.data.utilities import query_database
from graphnet.models.graphs import GraphDefinition


class SnowStormDataset(IceCubeHostedDataset):
    """IceCube SnowStorm simulation dataset.

    This is an IceCube Collaboration simulation dataset; downloading it
    requires an IceCube username and password. More information can be
    found at
    https://wiki.icecube.wisc.edu/index.php/SnowStorm_MC#File_Locations
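
    Example (a minimal sketch; the run ID and the KNNGraph/IceCube86
    choices are illustrative, not prescribed by this module)::

        from graphnet.models.detector.icecube import IceCube86
        from graphnet.models.graphs import KNNGraph

        dataset = SnowStormDataset(
            run_ids=[22010],
            graph_definition=KNNGraph(detector=IceCube86()),
            download_dir="/tmp/snowstorm",
            train_dataloader_kwargs={"batch_size": 16, "num_workers": 4},
        )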
    """

    _experiment = "IceCube SnowStorm dataset"
    _creator = "Severin Magel"
    _citation = "arXiv:1909.01530"
    _available_backends = ["sqlite"]

    _pulsemaps = ["SRTInIcePulses"]
    _truth_table = "truth"
    _pulse_truth = None
    _features = FEATURES.SNOWSTORM
    _event_truth = TRUTH.SNOWSTORM
    _data_root_dir = "/data/ana/graphnet/Snowstorm_l2"

    def __init__(
        self,
        run_ids: List[int],
        graph_definition: GraphDefinition,
        download_dir: str,
        truth: Optional[List[str]] = None,
        features: Optional[List[str]] = None,
        train_dataloader_kwargs: Optional[Dict[str, Any]] = None,
        validation_dataloader_kwargs: Optional[Dict[str, Any]] = None,
        test_dataloader_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """Initialize the SnowStorm dataset.

        Args:
            run_ids: List of SnowStorm run IDs to include in the dataset.
            graph_definition: Data representation to apply to the events.
            download_dir: Directory to download and unpack the data to.
            truth: Event-level truth variables to include.
            features: Pulse-level features to include.
            train_dataloader_kwargs: Keyword arguments for the training
                DataLoader.
            validation_dataloader_kwargs: Keyword arguments for the
                validation DataLoader.
            test_dataloader_kwargs: Keyword arguments for the test
                DataLoader.
        """
        self._run_ids = run_ids
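        # The hosted data ships as one tar.gz archive per run ID under
        # `_data_root_dir` on the IceCube data server.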
        self._zipped_files = [
            os.path.join(self._data_root_dir, f"{s}.tar.gz") for s in run_ids
        ]

        super().__init__(
            graph_definition=graph_definition,
            download_dir=download_dir,
            truth=truth,
            features=features,
            backend="sqlite",
            train_dataloader_kwargs=train_dataloader_kwargs,
            validation_dataloader_kwargs=validation_dataloader_kwargs,
            test_dataloader_kwargs=test_dataloader_kwargs,
        )

    def _prepare_args(
        self, backend: str, features: List[str], truth: List[str]
    ) -> Tuple[
        Dict[str, Any],
        Union[List[List[int]], None],
        Union[List[List[int]], None],
    ]:
        """Prepare dataset arguments and train/val/test selections."""
        assert backend == "sqlite"
        dataset_paths = []
        for rid in self._run_ids:
            dataset_paths += glob(
                os.path.join(self.dataset_dir, str(rid), "**/*.db"),
                recursive=True,
            )

        # Gather event numbers from every database, tagging each row
        # with the database it came from.
        event_no = []

        # Pattern that extracts the run ID from a database path.
        pattern = rf"{re.escape(self.dataset_dir)}/(\d+)/.*"
        event_counts: Dict[str, int] = {}
        for path in dataset_paths:

            # Extract the run ID
            match = re.search(pattern, path)
            assert match is not None, f"Could not extract run ID from {path}"
            run_id = match.group(1)

            query_df = query_database(
                database=path,
                query=f"SELECT event_no FROM {self._truth_table}",
            )
            query_df["path"] = path
            event_no.append(query_df)

            # Accumulate the per-run event count for the description.
            event_counts[run_id] = (
                event_counts.get(run_id, 0) + query_df.shape[0]
            )

        event_no = pd.concat(event_no, axis=0)

        # Event numbers are not unique across databases, so the split is
        # performed on (event_no, path) rows; the fixed seed keeps it
        # reproducible.
        train_val, test = train_test_split(
            event_no,
            test_size=0.10,
            random_state=42,
            shuffle=True,
        )

        train_val = train_val.groupby("path")
        test = test.groupby("path")

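        # NOTE: `get_group` raises a KeyError if a database contributed
        # no events to one of the splits; with a shuffled 10% test split
        # this should only affect very small databases.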
        # Parse into the per-database selection format expected by
        # CuratedDataset: one list of event numbers per database path.
        train_val_selection = []
        test_selection = []
        for path in dataset_paths:
            train_val_selection.append(
                train_val["event_no"].get_group(path).tolist()
            )
            test_selection.append(test["event_no"].get_group(path).tolist())

        dataset_args = {
            "truth_table": self._truth_table,
            "pulsemaps": self._pulsemaps,
            "path": dataset_paths,
            "graph_definition": self._graph_definition,
            "features": features,
            "truth": truth,
        }

        self._create_comment(event_counts)

        return dataset_args, train_val_selection, test_selection

    @classmethod
    def _create_comment(
        cls, event_counts: Optional[Dict[str, int]] = None
    ) -> None:
        """Set the dataset comment listing the event count per run ID."""
        event_counts = event_counts or {}
        fixed_string = (
            " Simulation produced by the IceCube Collaboration, "
            "https://wiki.icecube.wisc.edu/index.php/SnowStorm_MC#File_Locations"  # noqa: E501
        )
        tot = 0
        runid_string = ""
        for k, v in event_counts.items():
            runid_string += f"RunID {k} contains {v:10d} events\n"
            tot += v
        cls._comments = (
            f"Contains ~{tot / 1e6:.1f} million events:\n"
            + runid_string
            + fixed_string
        )

    def _get_dir_name(self, source_file_path: str) -> str:
        """Return the directory a downloaded archive is unpacked into."""
        file_name = os.path.basename(source_file_path).split(".")[0]
        return str(os.path.join(self.dataset_dir, file_name))