diff --git a/README.md b/README.md index 087b7d89..2b3464d3 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ Enable [pre-commit hooks](https://pre-commit.com/), which enforces adherence to pre-commit install ``` -Please follow the [same conventions as `ctapipe`](https://cta-observatory.github.io/ctapipe/getting_started/index.html#developing-a-new-feature-or-code-change) regarding settings of Git remotes for pull requests. +Please follow the [same conventions as `ctapipe`](https://ctapipe.readthedocs.io/en/latest/getting_started/) regarding settings of Git remotes, and how to contribute to the code with pull requests. ### Optional DIRAC support @@ -110,8 +110,8 @@ or the [container alternative](#note-to-macos-users) as explained above. All contribution are welcome. -Guidelines are the same as [ctapipe's ones](https://cta-observatory.github.io/ctapipe/development/index.html) -See [here](https://cta-observatory.github.io/ctapipe/development/pullrequests.html) how to make a pull request to contribute. +Guidelines are the same as [ctapipe's ones](https://ctapipe.readthedocs.io/en/latest/getting_started/) +See [here](https://ctapipe.readthedocs.io/en/latest/development/pullrequests.html#pullrequests) how to make a pull request to contribute. 
## Report issue / Ask a question diff --git a/src/nectarchain/calibration/container/charge.py b/src/nectarchain/calibration/container/charge.py index 1c457112..f743cc50 100644 --- a/src/nectarchain/calibration/container/charge.py +++ b/src/nectarchain/calibration/container/charge.py @@ -15,6 +15,7 @@ from .utils import CtaPipeExtractor from .waveforms import WaveformsContainer, WaveformsContainers + logging.basicConfig(format="%(asctime)s %(name)s %(levelname)s %(message)s") log = logging.getLogger(__name__) log.handlers = logging.getLogger("__main__").handlers @@ -22,6 +23,7 @@ __all__ = ["ChargeContainer", "ChargeContainers"] + list_ctapipe_charge_extractor = [ "FullWaveformSum", "FixedWindowSum", @@ -61,7 +63,7 @@ def make_histo(charge, all_range, mask_broken_pix, _mask, hist_ma_data): # print(f"hist_ma_data[0] = {hist_ma_data[0]}") # print(f"mask_broken_pix = {mask_broken_pix}") - if not mask_broken_pix: + if not (mask_broken_pix): # print("this pixel is not broken, let's continue computation") hist, _charge = np.histogram( charge, @@ -123,11 +125,11 @@ def __init__( self._nevents = nevents self._npixels = npixels - self.ucts_timestamp = np.zeros(self.nevents, dtype=np.uint64) - self.ucts_busy_counter = np.zeros(self.nevents, dtype=np.uint16) - self.ucts_event_counter = np.zeros(self.nevents, dtype=np.uint16) - self.event_type = np.zeros(self.nevents, dtype=np.uint8) - self.event_id = np.zeros(self.nevents, dtype=np.uint16) + self.ucts_timestamp = np.zeros((self.nevents), dtype=np.uint64) + self.ucts_busy_counter = np.zeros((self.nevents), dtype=np.uint16) + self.ucts_event_counter = np.zeros((self.nevents), dtype=np.uint16) + self.event_type = np.zeros((self.nevents), dtype=np.uint8) + self.event_id = np.zeros((self.nevents), dtype=np.uint16) self.trig_pattern_all = np.zeros( (self.nevents, self.CAMERA.n_pixels, 4), dtype=bool ) @@ -142,8 +144,8 @@ def from_waveforms( """create a new ChargeContainer from a WaveformsContainer Args: waveformContainer 
(WaveformsContainer): the waveforms - method (str, optional): Ctapipe ImageExtractor method. Defaults to - "FullWaveformSum". + method (str, optional): Ctapipe ImageExtractor method. + Defaults to "FullWaveformSum". Returns: chargeContainer : the ChargeContainer instance @@ -180,11 +182,14 @@ def from_waveforms( return chargeContainer - def write(self, path: Path, **kwargs): + def write(self, path: Path, start=None, end=None, block=None, **kwargs): """method to write in an output FITS file the ChargeContainer. Args: path (str): the directory where you want to save data + start (int): Start of event_id. Default is None. + end (int): End of event_id. Default is None. + block (int): Block number of SPE-WT scan. Default is None. """ suffix = kwargs.get("suffix", "") if suffix != "": @@ -193,35 +198,59 @@ def write(self, path: Path, **kwargs): log.info(f"saving in {path}") os.makedirs(path, exist_ok=True) + # table = Table(self.charge_hg) + # table.meta["pixels_id"] = self._pixels_id + # table.write(Path(path)/f"charge_hg_run{self.run_number}.ecsv",format='ascii.ecsv',overwrite=kwargs.get('overwrite',False)) + # + # table = Table(self.charge_lg) + # table.meta["pixels_id"] = self._pixels_id + # table.write(Path(path)/f"charge_lg_run{self.run_number}.ecsv",format='ascii.ecsv',overwrite=kwargs.get('overwrite',False)) + # + # table = Table(self.peak_hg) + # table.meta["pixels_id"] = self._pixels_id + # table.write(Path(path)/f"peak_hg_run{self.run_number}.ecsv",format='ascii.ecsv',overwrite=kwargs.get('overwrite',False)) + # + # table = Table(self.peak_lg) + # table.meta["pixels_id"] = self._pixels_id + # table.write(Path(path)/f"peak_lg_run{self.run_number}.ecsv",format='ascii.ecsv',overwrite=kwargs.get('overwrite',False)) + hdr = fits.Header() hdr["RUN"] = self._run_number hdr["NEVENTS"] = self.nevents hdr["NPIXELS"] = self.npixels hdr["COMMENT"] = ( - f"The charge containeur for run {self._run_number} with {self._method}" - f" method : primary is the pixels id, then you 
can find HG charge, " + f"The charge containeur for run {self._run_number} with {self._method} " + f"method : primary is the pixels id, then you can find HG charge, " f"LG charge, HG peak and LG peak, 2 last HDU are composed of event " f"properties and trigger patern" ) primary_hdu = fits.PrimaryHDU(self.pixels_id, header=hdr) - charge_hg_hdu = fits.ImageHDU(self.charge_hg, name="HG charge") - charge_lg_hdu = fits.ImageHDU(self.charge_lg, name="LG charge") - peak_hg_hdu = fits.ImageHDU(self.peak_hg, name="HG peak time") - peak_lg_hdu = fits.ImageHDU(self.peak_lg, name="LG peak time") + charge_hg_hdu = fits.ImageHDU(self.charge_hg[start:end], name="HG charge") + charge_lg_hdu = fits.ImageHDU(self.charge_lg[start:end], name="LG charge") + peak_hg_hdu = fits.ImageHDU(self.peak_hg[start:end], name="HG peak time") + peak_lg_hdu = fits.ImageHDU(self.peak_lg[start:end], name="LG peak time") - col1 = fits.Column(array=self.event_id, name="event_id", format="1I") - col2 = fits.Column(array=self.event_type, name="event_type", format="1I") + col1 = fits.Column(array=self.event_id[start:end], name="event_id", format="1J") + col2 = fits.Column( + array=self.event_type[start:end], name="event_type", format="1I" + ) col3 = fits.Column( - array=self.ucts_timestamp, name="ucts_timestamp", format="1K" + array=self.ucts_timestamp[start:end], name="ucts_timestamp", format="1K" ) col4 = fits.Column( - array=self.ucts_busy_counter, name="ucts_busy_counter", format="1I" + array=self.ucts_busy_counter[start:end], + name="ucts_busy_counter", + format="1I", ) col5 = fits.Column( - array=self.ucts_event_counter, name="ucts_event_counter", format="1I" + array=self.ucts_event_counter[start:end], + name="ucts_event_counter", + format="1I", + ) + col6 = fits.Column( + array=self.multiplicity[start:end], name="multiplicity", format="1I" ) - col6 = fits.Column(array=self.multiplicity, name="multiplicity", format="1I") coldefs = fits.ColDefs([col1, col2, col3, col4, col5, col6]) event_properties = 
fits.BinTableHDU.from_columns( @@ -229,13 +258,13 @@ def write(self, path: Path, **kwargs): ) col1 = fits.Column( - array=self.trig_pattern_all, + array=self.trig_pattern_all[start:end], name="trig_pattern_all", format=f"{4 * self.CAMERA.n_pixels}L", dim=f"({self.CAMERA.n_pixels},4)", ) col2 = fits.Column( - array=self.trig_pattern, + array=self.trig_pattern[start:end], name="trig_pattern", format=f"{self.CAMERA.n_pixels}L", ) @@ -255,7 +284,7 @@ def write(self, path: Path, **kwargs): ) try: hdul.writeto( - Path(path) / f"charge_run{self.run_number}{suffix}.fits", + Path(path) / f"charge_run{self.run_number}{suffix}_bloc_{block}.fits", overwrite=kwargs.get("overwrite", False), ) log.info( @@ -329,8 +358,8 @@ def compute_charge( Args: waveformContainer (WaveformsContainer): the waveforms channel (int): channel you want to compute charges - method (str, optional): ctapipe Image Extractor method method. Defaults - to "FullWaveformSum". + method (str, optional): ctapipe Image Extractor method method. + Defaults to "FullWaveformSum". Raises: ArgumentError: extraction method unknown @@ -359,8 +388,8 @@ def compute_charge( ) log.debug( - f"Extracting charges with method {method} and extractor_kwargs " - f"{extractor_kwargs}" + f"Extracting charges with method {method} and " + f"extractor_kwargs {extractor_kwargs}" ) ImageExtractor = eval(method)(waveformContainer.subarray, **extractor_kwargs) @@ -405,9 +434,9 @@ def histo_hg(self, n_bins: int = 1000, autoscale: bool = True) -> np.ndarray: Args: n_bins (int, optional): number of bins in charge (ADC counts). - Defaults to 1000. + Defaults to 1000. autoscale (bool, optional): auto detect number of bins by pixels - (bin witdh = 1 ADC). Defaults to True. + (bin witdh = 1 ADC). Defaults to True. 
Returns: np.ndarray: masked array of charge histograms (histo,charge) @@ -416,8 +445,8 @@ def histo_hg(self, n_bins: int = 1000, autoscale: bool = True) -> np.ndarray: (self.charge_hg == self.charge_hg.mean(axis=0)).mean(axis=0), dtype=bool ) log.debug( - f"there are {mask_broken_pix.sum()} broken pixels (charge stays at same " - f"level for each events)" + f"there are {mask_broken_pix.sum()} broken pixels " + f"(charge stays at same level for each events)" ) if autoscale: @@ -426,6 +455,10 @@ def histo_hg(self, n_bins: int = 1000, autoscale: bool = True) -> np.ndarray: np.uint16(np.max(self.charge_hg.T[~mask_broken_pix].T)) + 1.5, 1, ) + # hist_ma = ma.masked_array(np.zeros((self.charge_hg.shape[1], + # all_range.shape[0]),dtype = np.uint16), + # mask=np.zeros((self.charge_hg.shape[1], + # all_range.shape[0]),dtype = bool)) charge_ma = ma.masked_array( ( all_range.reshape(all_range.shape[0], 1) @@ -437,12 +470,13 @@ def histo_hg(self, n_bins: int = 1000, autoscale: bool = True) -> np.ndarray: ) broxen_pixels_mask = np.array( - [mask_broken_pix for _ in range(charge_ma.shape[1])] + [mask_broken_pix for i in range(charge_ma.shape[1])] ).T + # hist_ma.mask = new_data_mask.T start = time.time() _mask, hist_ma_data = make_histo( self.charge_hg.T, all_range, mask_broken_pix - ) + ) # , charge_ma.data, charge_ma.mask, hist_ma.data, hist_ma.mask) charge_ma.mask = np.logical_or(_mask, broxen_pixels_mask) hist_ma = ma.masked_array(hist_ma_data, mask=charge_ma.mask) log.debug(f"histogram hg computation time : {time.time() - start} sec") @@ -488,8 +522,8 @@ def histo_lg(self, n_bins: int = 1000, autoscale: bool = True) -> np.ndarray: (self.charge_lg == self.charge_lg.mean(axis=0)).mean(axis=0), dtype=bool ) log.debug( - f"there are {mask_broken_pix.sum()} broken pixels (charge stays at same " - f"level for each events)" + f"there are {mask_broken_pix.sum()} broken pixels " + f"(charge stays at same level for each events)" ) if autoscale: @@ -509,7 +543,7 @@ def histo_lg(self, 
n_bins: int = 1000, autoscale: bool = True) -> np.ndarray: ) broxen_pixels_mask = np.array( - [mask_broken_pix for _ in range(charge_ma.shape[1])] + [mask_broken_pix for i in range(charge_ma.shape[1])] ).T # hist_ma.mask = new_data_mask.T start = time.time() @@ -546,13 +580,14 @@ def histo_lg(self, n_bins: int = 1000, autoscale: bool = True) -> np.ndarray: def select_charge_hg(self, pixel_id: np.ndarray): """method to extract charge HG from a list of pixel ids - The output is the charge HG with a shape following the size of the input - pixel_id argument. - Pixel in pixel_id which are not present in the ChargeContaineur pixels_id are - skipped. + The output is the charge HG with a shape following the size + of the input pixel_id argument + Pixel in pixel_id which are not present in the + ChargeContaineur pixels_id are skipped Args: - pixel_id (np.ndarray): array of pixel ids you want to extract the charge + pixel_id (np.ndarray): array of pixel ids you want to + extract the charge Returns: (np.ndarray): charge array in the order of specified pixel_id @@ -562,8 +597,8 @@ def select_charge_hg(self, pixel_id: np.ndarray): ) for pixel in pixel_id[~mask_contain_pixels_id]: log.warning( - f"You asked for pixel_id {pixel} but it is not present in this " - f"ChargeContainer, skip this one" + f"You asked for pixel_id {pixel} but it is not present " + f"in this ChargeContainer, skip this one" ) return np.array( [ @@ -574,13 +609,14 @@ def select_charge_hg(self, pixel_id: np.ndarray): def select_charge_lg(self, pixel_id: np.ndarray): """method to extract charge LG from a list of pixel ids - The output is the charge LG with a shape following the size of the input - pixel_id argument. - Pixel in pixel_id which are not present in the ChargeContaineur pixels_id are - skipped. 
+ The output is the charge LG with a shape following the + size of the input pixel_id argument + Pixel in pixel_id which are not present in the ChargeContaineur + pixels_id are skipped Args: - pixel_id (np.ndarray): array of pixel ids you want to extract the charge + pixel_id (np.ndarray): array of pixel ids you want to extract + the charge Returns: (np.ndarray): charge array in the order of specified pixel_id @@ -590,8 +626,8 @@ def select_charge_lg(self, pixel_id: np.ndarray): ) for pixel in pixel_id[~mask_contain_pixels_id]: log.warning( - f"You asked for pixel_id {pixel} but it is not present in this " - f"ChargeContainer, skip this one" + f"You asked for pixel_id {pixel} but it is not " + f"present in this ChargeContainer, skip this one" ) return np.array( [ @@ -665,8 +701,8 @@ def write(self, path: str, **kwargs): @staticmethod def from_file(path: Path, run_number: int, **kwargs): - """load ChargeContainers from FITS file previously written with - ChargeContainers.write() method + """load ChargeContainers from FITS file previously written + with ChargeContainers.write() method This method will search all the fits files corresponding to {path}/charge_run{run_number}_*.fits scheme @@ -695,8 +731,8 @@ def from_file(path: Path, run_number: int, **kwargs): @property def nChargeContainer(self): - """getter giving the number of chargeContainer into the ChargeContainers - instance + """getter giving the number of chargeContainer into the + ChargeContainers instance Returns: int: the number of chargeContainer diff --git a/src/nectarchain/calibration/container/waveforms.py b/src/nectarchain/calibration/container/waveforms.py index 30f712ae..dbae06a9 100644 --- a/src/nectarchain/calibration/container/waveforms.py +++ b/src/nectarchain/calibration/container/waveforms.py @@ -41,13 +41,14 @@ def __init__( Args: run_number (int): id of the run to be loaded - maxevents (int, optional): max of events to be loaded. Defaults to 0, to - load everythings. 
- nevents (int, optional) : number of events in run if known (parameter used - to save computing time) - merge_file (optional) : if True will load all fits.fz files of the run, else - merge_file can be integer to select the run fits.fz file according to - this number + maxevents (int, optional): max of events to be loaded. + Defaults to 0, to load everythings. + nevents (int, optional) : number of events in run if known + (parameter used to save computing time) + merge_file (optional) : if True will load all fits.fz files of + the run, else merge_file can be integer to select the run fits.fz + file according to this number + """ self.__run_number = run_number @@ -117,19 +118,19 @@ def __compute_broken_pixels(self, **kwargs): @staticmethod def load_run(run_number: int, max_events: int = None, run_file=None): - """Static method to load from $NECTARCAMDATA directory data for specified run - with max_events + """Static method to load from $NECTARCAMDATA directory data for + specified run with max_events Args: run_number (int): run_id - maxevents (int, optional): max of events to be loaded. Defaults to 0, to - load everything. - merge_file (optional) : if True will load all fits.fz files of the run, else - merge_file can be integer to select the run fits.fz file according to - this number + maxevents (int, optional): max of events to be loaded. + Defaults to 0, to load everythings. 
+ merge_file (optional) : if True will load all fits.fz files of + the run, else merge_file can be integer to select the run + fits.fz file according to this number Returns: - List[ctapipe_io_nectarcam.NectarCAMEventSource]: List of EventSource for - each run files + List[ctapipe_io_nectarcam.NectarCAMEventSource]: List of + EventSource for each run files """ if run_file is None: generic_filename, _ = DataManagement.findrun(run_number) @@ -145,9 +146,9 @@ def load_run(run_number: int, max_events: int = None, run_file=None): return eventsource def check_events(self): - """method to check triggertype of events and counting number of events in all - readers. It prints the trigger type when trigger type changes over events (to - avoid writing one line by events) + """method to check triggertype of events and counting + number of events in all readers it prints the trigger type when + trigger type change over events (to not write one line by events) Returns: int : number of events @@ -168,12 +169,12 @@ def check_events(self): n_events += 1 return n_events - def load_wfs(self, compute_trigger_pattern=False): + def load_wfs(self, start=None, end=None, compute_trigger_patern=False): """mathod to extract waveforms data from the EventSource Args: - compute_trigger_pattern (bool, optional): To recompute on our side the - trigger pattern. Defaults to False. + compute_trigger_patern (bool, optional): To recompute on + our side the trigger patern. Defaults to False. 
""" if not (hasattr(self, "wfs_hg")): self.__init_arrays() @@ -181,45 +182,59 @@ def load_wfs(self, compute_trigger_pattern=False): wfs_hg_tmp = np.zeros((self.npixels, self.nsamples), dtype=np.uint16) wfs_lg_tmp = np.zeros((self.npixels, self.nsamples), dtype=np.uint16) + # n_traited_events = 0 + event_start, event_end = start, end + log.info(f" Loading Events ID from {event_start} to {event_end}") + for i, event in enumerate(self.__reader): - if i % 100 == 0: + if i % 5000 == 0: log.info(f"reading event number {i}") - self.event_id[i] = np.uint16(event.index.event_id) - self.ucts_timestamp[i] = event.nectarcam.tel[ - WaveformsContainer.TEL_ID - ].evt.ucts_timestamp - self.event_type[i] = event.trigger.event_type.value - self.ucts_busy_counter[i] = event.nectarcam.tel[ - WaveformsContainer.TEL_ID - ].evt.ucts_busy_counter - self.ucts_event_counter[i] = event.nectarcam.tel[ - WaveformsContainer.TEL_ID - ].evt.ucts_event_counter - - self.trig_pattern_all[i] = event.nectarcam.tel[ - WaveformsContainer.TEL_ID - ].evt.trigger_pattern.T - - for pix in range(self.npixels): - wfs_lg_tmp[pix] = event.r0.tel[0].waveform[1, self.pixels_id[pix]] - wfs_hg_tmp[pix] = event.r0.tel[0].waveform[0, self.pixels_id[pix]] - - self.wfs_hg[i] = wfs_hg_tmp - self.wfs_lg[i] = wfs_lg_tmp + if ( + event.nectarcam.tel[WaveformsContainer.TEL_ID].evt.event_id + >= event_start + and event.nectarcam.tel[WaveformsContainer.TEL_ID].evt.event_id + < event_end + ): + self.event_id[i] = np.uint16(event.index.event_id) + self.ucts_timestamp[i] = event.nectarcam.tel[ + WaveformsContainer.TEL_ID + ].evt.ucts_timestamp + self.event_type[i] = event.trigger.event_type.value + self.ucts_busy_counter[i] = event.nectarcam.tel[ + WaveformsContainer.TEL_ID + ].evt.ucts_busy_counter + self.ucts_event_counter[i] = event.nectarcam.tel[ + WaveformsContainer.TEL_ID + ].evt.ucts_event_counter + + self.trig_pattern_all[i] = event.nectarcam.tel[ + WaveformsContainer.TEL_ID + ].evt.trigger_pattern.T + + for pix in 
range(self.npixels): + wfs_lg_tmp[pix] = event.r0.tel[0].waveform[1, self.pixels_id[pix]] + wfs_hg_tmp[pix] = event.r0.tel[0].waveform[0, self.pixels_id[pix]] + + self.wfs_hg[i] = wfs_hg_tmp + self.wfs_lg[i] = wfs_lg_tmp self.__compute_broken_pixels() - # if compute_trigger_pattern and np.max(self.trig_pattern) == 0: - # self.compute_trigger_pattern() + # if compute_trigger_patern and np.max(self.trig_pattern) == 0: + # self.compute_trigger_patern() - def write(self, path: str, **kwargs): - """method to write in an output FITS file the WaveformsContainer. - Two files are created, one FITS representing the data and one HDF5 file - representing the subarray configuration. + def write(self, path: str, start=None, end=None, block=None, **kwargs): + """method to write in an output FITS file the + WaveformsContainer. Two files are created, one FITS + representing the data and one HDF5 file representing + the subarray configuration Args: path (str): the directory where you want to save data + start (int): Start of event_id. Default is None + end (int): End of event_is. Default is None + block (int): Block number of SPE-WT scan. Default is None """ suffix = kwargs.get("suffix", "") if suffix != "": @@ -241,28 +256,37 @@ def write(self, path: str, **kwargs): ) hdr["COMMENT"] = ( - f"The waveforms container for run {self.__run_number} : primary is the " - f"pixels id, 2nd HDU : high gain waveforms, 3rd HDU : low gain waveforms, " - f"4th HDU : event properties and 5th HDU trigger patterns." + f"The waveforms containeur for run {self.__run_number} : " + f"primary is the pixels id, 2nd HDU : high gain waveforms, " + f"3rd HDU : low gain waveforms, 4th HDU : event properties " + f"and 5th HDU trigger paterns." 
) primary_hdu = fits.PrimaryHDU(self.pixels_id, header=hdr) - wfs_hg_hdu = fits.ImageHDU(self.wfs_hg, name="HG Waveforms") - wfs_lg_hdu = fits.ImageHDU(self.wfs_lg, name="LG Waveforms") + wfs_hg_hdu = fits.ImageHDU(self.wfs_hg[start:end], name="HG Waveforms") + wfs_lg_hdu = fits.ImageHDU(self.wfs_lg[start:end], name="LG Waveforms") - col1 = fits.Column(array=self.event_id, name="event_id", format="1J") - col2 = fits.Column(array=self.event_type, name="event_type", format="1I") + col1 = fits.Column(array=self.event_id[start:end], name="event_id", format="1J") + col2 = fits.Column( + array=self.event_type[start:end], name="event_type", format="1I" + ) col3 = fits.Column( - array=self.ucts_timestamp, name="ucts_timestamp", format="1K" + array=self.ucts_timestamp[start:end], name="ucts_timestamp", format="1K" ) col4 = fits.Column( - array=self.ucts_busy_counter, name="ucts_busy_counter", format="1J" + array=self.ucts_busy_counter[start:end], + name="ucts_busy_counter", + format="1J", ) col5 = fits.Column( - array=self.ucts_event_counter, name="ucts_event_counter", format="1J" + array=self.ucts_event_counter[start:end], + name="ucts_event_counter", + format="1J", + ) + col6 = fits.Column( + array=self.multiplicity[start:end], name="multiplicity", format="1I" ) - col6 = fits.Column(array=self.multiplicity, name="multiplicity", format="1I") coldefs = fits.ColDefs([col1, col2, col3, col4, col5, col6]) event_properties = fits.BinTableHDU.from_columns( @@ -270,25 +294,26 @@ def write(self, path: str, **kwargs): ) col1 = fits.Column( - array=self.trig_pattern_all, + array=self.trig_pattern_all[start:end], name="trig_pattern_all", format=f"{4 * self.CAMERA.n_pixels}L", dim=f"({self.CAMERA.n_pixels},4)", ) col2 = fits.Column( - array=self.trig_pattern, + array=self.trig_pattern[start:end], name="trig_pattern", format=f"{self.CAMERA.n_pixels}L", ) coldefs = fits.ColDefs([col1, col2]) - trigger_pattern = fits.BinTableHDU.from_columns(coldefs, name="trigger pattern") + 
trigger_patern = fits.BinTableHDU.from_columns(coldefs, name="trigger patern") hdul = fits.HDUList( - [primary_hdu, wfs_hg_hdu, wfs_lg_hdu, event_properties, trigger_pattern] + [primary_hdu, wfs_hg_hdu, wfs_lg_hdu, event_properties, trigger_patern] ) try: hdul.writeto( - Path(path) / f"waveforms_run{self.run_number}{suffix}.fits", + Path(path) + / f"waveforms_run{self.run_number}{suffix}_bloc_{block}.fits", overwrite=kwargs.get("overwrite", False), ) log.info( @@ -302,11 +327,11 @@ def write(self, path: str, **kwargs): @staticmethod def load(path: str): - """load WaveformsContainer from FITS file previously written with - WaveformsContainer.write() method. - Note : 2 files are loaded, the FITS one representing the waveforms data and - an HDF5 file representing the subarray configuration. - This second file has to be next to the FITS file. + """load WaveformsContainer from FITS file previously + written with WaveformsContainer.write() method + Note : 2 files are loaded, the FITS one representing + the waveforms data and a HDF5 file representing the subarray + configuration. This second file has to be next to the FITS file. Args: path (str): path of the FITS file @@ -345,12 +370,13 @@ def load(path: str): return cls - def compute_trigger_pattern(self): - """(preliminary) function to compute the trigger pattern""" + def compute_trigger_patern(self): + """(preliminary) function to compute the trigger patern""" # mean.shape nevents * npixels mean, std = np.mean(self.wfs_hg, axis=2), np.std(self.wfs_hg, axis=2) self.trig_pattern = self.wfs_hg.max(axis=2) > (mean + 3 * std) + # methods used to display def display(self, evt, cmap="gnuplot2"): """plot camera display @@ -387,12 +413,13 @@ def plot_waveform_hg(self, evt, **kwargs): def select_waveforms_hg(self, pixel_id: np.ndarray): """method to extract waveforms HG from a list of pixel ids - The output is the waveforms HG with a shape following the size of the input - pixel_id argument. 
- Pixels in pixel_id which are not present in the WaveformsContainer pixels_id - are skipped. + The output is the waveforms HG with a shape following the + size of the input pixel_id argument + Pixel in pixel_id which are not present in the + WaveformsContaineur pixels_id are skipped Args: - pixel_id (np.ndarray): array of pixel ids you want to extract the waveforms + pixel_id (np.ndarray): array of pixel ids you want to + extract the waveforms Returns: (np.ndarray): waveforms array in the order of specified pixel_id """ @@ -401,8 +428,8 @@ def select_waveforms_hg(self, pixel_id: np.ndarray): ) for pixel in pixel_id[~mask_contain_pixels_id]: log.warning( - f"You asked for pixel_id {pixel} but it is not present in this " - f"WaveformsContainer, skip this one" + f"You asked for pixel_id {pixel} but it is not present " + f"in this WaveformsContainer, skip this one" ) res = np.array( [ @@ -415,13 +442,14 @@ def select_waveforms_hg(self, pixel_id: np.ndarray): def select_waveforms_lg(self, pixel_id: np.ndarray): """method to extract waveforms LG from a list of pixel ids - The output is the waveforms LG with a shape following the size of the input - pixel_id argument. - Pixels in pixel_id which are not present in the WaveformsContainer pixels_id - are skipped. 
+ The output is the waveforms LG with a shape following the + size of the input pixel_id argument + Pixel in pixel_id which are not present in the + WaveformsContaineur pixels_id are skipped Args: - pixel_id (np.ndarray): array of pixel ids you want to extract the waveforms + pixel_id (np.ndarray): array of pixel ids you want to + extract the waveforms Returns: (np.ndarray): waveforms array in the order of specified pixel_id @@ -431,8 +459,8 @@ def select_waveforms_lg(self, pixel_id: np.ndarray): ) for pixel in pixel_id[~mask_contain_pixels_id]: log.warning( - f"You asked for pixel_id {pixel} but it is not present in this " - f"WaveformsContainer, skip this one" + f"You asked for pixel_id {pixel} but it is not present " + f"in this WaveformsContainer, skip this one" ) res = np.array( [ @@ -502,8 +530,8 @@ def trig_pattern(self): class WaveformsContainers: - """This class is to be used for computing waveforms of a run treating run files one - by one""" + """This class is to be used for computing waveforms of a + run treating run files one by one""" def __new__(cls, *args, **kwargs): """base class constructor @@ -543,17 +571,17 @@ def __init__( if not (max_events is None) and max_events <= 0: break - def load_wfs(self, compute_trigger_pattern=False, index: int = None): + def load_wfs(self, compute_trigger_patern=False, index: int = None): """load waveforms from the Eventsource reader Args: - compute_trigger_pattern (bool, optional): to compute manually the trigger - pattern. Defaults to False. - index (int, optional): to only load waveforms from EventSource for the - WaveformsContainer at index, this parameters can be used to load, - write or perform some work step by step. Thus, all the event are not - necessarily loaded at the same time. In such a case, memory usage can be - saved. Defaults to None. + compute_trigger_patern (bool, optional): to compute + manually the trigger patern. Defaults to False. 
+ index (int, optional): to only load waveforms from + EventSource for the waveformsContainer at index, this parameters + can be used to load, write or perform some work step by step. + Thus all the event are not nessessary loaded at the same time + In such case memory can be saved. Defaults to None. Raises: IndexError: the index is > nWaveformsContainer or < 0 @@ -561,12 +589,12 @@ def load_wfs(self, compute_trigger_pattern=False, index: int = None): if index is None: for i in range(self.__nWaveformsContainer): self.waveformsContainer[i].load_wfs( - compute_trigger_pattern=compute_trigger_pattern + compute_trigger_patern=compute_trigger_patern ) else: if index < self.__nWaveformsContainer and index >= 0: self.waveformsContainer[index].load_wfs( - compute_trigger_pattern=compute_trigger_pattern + compute_trigger_patern=compute_trigger_patern ) else: raise IndexError( @@ -578,8 +606,8 @@ def write(self, path: str, index: int = None, **kwargs): Args: path (str): path where to save the waveforms - index (int, optional): index of the waveforms list you want to write. - Defaults to None. + index (int, optional): index of the waveforms list you want + to write. Defaults to None. 
Raises: IndexError: the index is > nWaveformsContainer or < 0 @@ -602,7 +630,8 @@ def load(path: str): """method to load the waveforms list from splited fits files Args: - path (str): path where data should be, it contain filename without extension + path (str): path where data should be, it contain filename + without extension Raises: e: File not found @@ -626,8 +655,8 @@ def load(path: str): @property def nWaveformsContainer(self): - """getter giving the number of waveformsContainer into the WaveformsContainers - instance + """getter giving the number of waveformsContainer into the + waveformsContainers instance Returns: int: the number of WaveformsContainer diff --git a/src/nectarchain/tests/test_stats.py b/src/nectarchain/tests/test_stats.py new file mode 100644 index 00000000..747f19df --- /dev/null +++ b/src/nectarchain/tests/test_stats.py @@ -0,0 +1,169 @@ +import pytest + +def test_stats_simple(): + import numpy as np + from nectarchain.utils.stats import Stats + + s = Stats() + s.add(1) + s.add(2) + s.add(3) + + np.testing.assert_allclose( s.mean, np.array([2.]) ) + np.testing.assert_allclose( s.std, np.array([1.]) ) + np.testing.assert_allclose( s.max, np.array([3.])) + np.testing.assert_allclose( s.min, np.array([1.])) + np.testing.assert_allclose( s.count, np.array([3])) + +def test_stats_camera(): + import numpy as np + from nectarchain.utils.stats import Stats + + s = Stats( shape = (5) ) + s.add( np.array([0,0,0,0,0]), validmask = np.array( [False,False,False,False,False] ) ) + s.add( np.array([0,1,2,3,4]), validmask = np.array( [True,True,True,True,True] ) ) + s.add( np.array([1,2,3,4,5]), validmask = np.array( [True,True,True,False,False] ) ) + s.add( np.array([2,3,4,5,6]), validmask = None ) + s.add( np.array([3,4,5,6,7]), validmask = np.array( [False,False,False,False,False] ) ) + + np.testing.assert_allclose( s.count, np.array([3,3,3,2,2]) ) + np.testing.assert_allclose( s.mean, np.array([1,2,3,4,5]) ) + np.testing.assert_allclose( s.variance, 
np.array([1,1,1,2,2]) ) + np.testing.assert_allclose( s.min, np.array([0,1,2,3,4]) ) + np.testing.assert_allclose( s.max, np.array([2,3,4,5,6]) ) + +def test_stats_copy(): + import numpy as np + from nectarchain.utils.stats import Stats, CameraStats, CameraSampleStats + + a = Stats() + assert id(a) != id(a.copy()) + + a = CameraStats() + assert id(a) != id(a.copy()) + + a = CameraSampleStats() + assert id(a) != id(a.copy()) + +def test_stats_lowcounts(): + + import numpy as np + from nectarchain.utils.stats import Stats + + s = Stats( shape = (5) ) + s.add( np.array([0,0,0,0,0]), validmask = np.array( [True,True,True,True,False] )) + s.add( np.array([1,1,1,1,1])) + s.add( np.array([2,2,2,2,2])) + + np.testing.assert_array_equal( s.get_lowcount_mask(3), np.array([False,False,False,False,True]) ) + + +def test_stats_merge(): + import numpy as np + from nectarchain.utils.stats import Stats + + s = Stats( shape = (5) ) + s.add( np.array([0,0,0,0,0]), validmask = np.array( [False,False,False,False,False] ) ) + s.add( np.array([0,1,2,3,4]), validmask = np.array( [True,True,True,True,True] ) ) + + s2 = Stats( shape = (5) ) + s2.add( np.array([1,2,3,4,5]), validmask = np.array( [True,True,True,False,False] ) ) + s2.add( np.array([2,3,4,5,6]), validmask = None ) + s2.add( np.array([3,4,5,6,7]), validmask = np.array( [False,False,False,False,False] ) ) + + s.merge(s2) + + np.testing.assert_allclose( s.count, np.array([3,3,3,2,2]) ) + np.testing.assert_allclose( s.mean, np.array([1,2,3,4,5]) ) + np.testing.assert_allclose( s.variance, np.array([1,1,1,2,2]) ) + np.testing.assert_allclose( s.min, np.array([0,1,2,3,4]) ) + np.testing.assert_allclose( s.max, np.array([2,3,4,5,6]) ) + + +def test_stats_merge2(): + import numpy as np + from nectarchain.utils.stats import Stats + + s = Stats( shape = (5) ) + s.add( np.array([0,0,0,0,0]), validmask = np.array( [False,False,False,False,False] ) ) + s.add( np.array([0,1,2,3,4]), validmask = np.array( [True,True,True,True,True] ) ) + + 
s2 = Stats( shape = (5) ) + s2.add( np.array([1,2,3,4,5]), validmask = np.array( [True,True,True,False,False] ) ) + s2.add( np.array([2,3,4,5,6]), validmask = None ) + s2.add( np.array([3,4,5,6,7]), validmask = np.array( [False,False,False,False,False] ) ) + + s += s2 + + np.testing.assert_allclose( s.count, np.array([3,3,3,2,2]) ) + np.testing.assert_allclose( s.mean, np.array([1,2,3,4,5]) ) + np.testing.assert_allclose( s.variance, np.array([1,1,1,2,2]) ) + np.testing.assert_allclose( s.min, np.array([0,1,2,3,4]) ) + np.testing.assert_allclose( s.max, np.array([2,3,4,5,6]) ) + + +def test_stats_merge3(): + import numpy as np + from nectarchain.utils.stats import Stats + + s1 = Stats( shape = (5) ) + s1.add( np.array([0,0,0,0,0]), validmask = np.array( [False,False,False,False,False] ) ) + s1.add( np.array([0,1,2,3,4]), validmask = np.array( [True,True,True,True,True] ) ) + + s2 = Stats( shape = (5) ) + s2.add( np.array([1,2,3,4,5]), validmask = np.array( [True,True,True,False,False] ) ) + s2.add( np.array([2,3,4,5,6]), validmask = None ) + s2.add( np.array([3,4,5,6,7]), validmask = np.array( [False,False,False,False,False] ) ) + + s = s1 + s2 + + assert id(s) != id(s1) + assert id(s) != id(s2) + + np.testing.assert_allclose( s.count, np.array([3,3,3,2,2]) ) + np.testing.assert_allclose( s.mean, np.array([1,2,3,4,5]) ) + np.testing.assert_allclose( s.variance, np.array([1,1,1,2,2]) ) + np.testing.assert_allclose( s.min, np.array([0,1,2,3,4]) ) + np.testing.assert_allclose( s.max, np.array([2,3,4,5,6]) ) + +def test_stats_shape(): + import numpy as np + from nectarchain.utils.stats import Stats, CameraStats, CameraSampleStats + from ctapipe_io_nectarcam import constants as nc + + s = Stats() + assert s.shape == (1,) + + s = CameraStats() + assert s.shape == (nc.N_GAINS,nc.N_PIXELS) + + s = CameraSampleStats() + assert s.shape == (nc.N_GAINS,nc.N_PIXELS,nc.N_SAMPLES) + + +def test_stats_print(): + import numpy as np + from nectarchain.utils.stats import Stats, 
CameraStats, CameraSampleStats + + s = Stats() + s.add(1) + s.add(2) + s.add(3) + + + assert s.__str__() == 'mean: [2.]\nstd: [1.]\nmin: [1.]\nmax: [3.]\ncount: [3]\nshape: (1,)' + assert s.__repr__() == s.__str__() + +def test_stats_badmerge(): + import numpy as np + from nectarchain.utils.stats import Stats + + s = Stats() + s.add(1) + + s2 = Stats(5) + s2.add([1,2,3,4,5]) + + + with pytest.raises(ValueError, match="Trying to merge from a different shape this:.*"): + s.merge(s2) diff --git a/src/nectarchain/user_scripts/sona-patel/split_wfs_charge_spe_wt.py b/src/nectarchain/user_scripts/sona-patel/split_wfs_charge_spe_wt.py new file mode 100644 index 00000000..bf9c1e00 --- /dev/null +++ b/src/nectarchain/user_scripts/sona-patel/split_wfs_charge_spe_wt.py @@ -0,0 +1,225 @@ +import argparse +import glob +import json +import logging as log + +import ctapipe +import matplotlib.patches as patches +import numpy as np + +# from astropy import time as astropytime +from ctapipe.containers import EventType +from ctapipe.coordinates import EngineeringCameraFrame +from ctapipe.instrument import CameraGeometry + +# from ctapipe.io import EventSeeker, EventSource +# from ctapipe.visualization import CameraDisplay +from ctapipe_io_nectarcam import NectarCAMEventSource, constants + +# from matplotlib.path import Path +from traitlets.config import Config + +from nectarchain.calibration.container import ChargeContainer, WaveformsContainer + +ctapipe.__version__ + +parser = argparse.ArgumentParser( + prog="split_wfs_charge_spe_wt", + description="This program load waveforms from fits.fz run files and " + "compute the charge for a given start and end event_id ", +) + +parser.add_argument( + "--spe_run_number", + nargs="+", + help="SPE run number", + type=int, +) + +parser.add_argument( + "--nBlock", nargs="+", help="No. 
of block to be analysed", default="1", type=int +) + +parser.add_argument( + "--path", + nargs="+", + help="The path of NectarCAM raw data files", + default="./", + type=str, +) + +parser.add_argument( + "--extractorMethod", + choices=[ + "FullWaveformSum", + "FixedWindowSum", + "GlobalPeakWindowSum", + "LocalPeakWindowSum", + "SlidingWindowMaxSum", + "TwoPassWindowSum", + ], + default="LocalPeakWindowSum", + help="charge extractor method", + type=str, +) + +parser.add_argument( + "--extractor_kwargs", + default={"window_width": 16, "window_shift": 4}, + help="charge extractor kwargs", + type=json.loads, +) + +args = vars(parser.parse_args()) + + +def get_event_id_list(): + """ + Function to get the start event_id of each SPE-WT scan position + + :return: The list of start event_id + """ + + # npix = constants.N_PIXELS + + abspath = args["path"] + log.info(f"{abspath[0]}/NectarCAM.*.fits.fz") + fits_fz_list = glob.glob(f"{abspath[0]}/NectarCAM.*.fits.fz") + fits_fz_list.sort() + log.info(fits_fz_list) + + config = Config( + dict( + NectarCAMEventSource=dict( + NectarCAMR0Corrections=dict( + calibration_path=None, + apply_flatfield=False, + select_gain=False, + ) + ) + ) + ) + + reader = NectarCAMEventSource( + input_url=f"{abspath[0]}/NectarCAM.*.fits.fz", config=config + ) + + prev_ucts_timestamp = 0 + + ucts_timestamp_all_events = [] + delta_t_all = [] + events_id_list = [] + + n_events = 0 + + for j, event in enumerate(reader): # loop over reader + n_events = n_events + 1 + + if ( + event.trigger.event_type == EventType.SINGLE_PE + ): # For not mis-identifying the next block + if len(events_id_list) == 0: + events_id_list.append(event.nectarcam.tel[0].evt.event_id) + + ucts_timestamp = event.nectarcam.tel[0].evt.ucts_timestamp / 1e9 + if j == 0: + prev_ucts_timestamp = ucts_timestamp + + delta_t = ucts_timestamp - prev_ucts_timestamp + + ucts_timestamp_all_events.append(ucts_timestamp) + delta_t_all.append(delta_t) + + # waveform = event.r0.tel[0].waveform[0] + 
# print(np.shape(waveform)) + + if delta_t > 0.5: + print( + "New postion at Event number:", + j, + "id", + event.nectarcam.tel[0].evt.event_id, + ) + events_id_list.append(event.nectarcam.tel[0].evt.event_id) + + prev_ucts_timestamp = ucts_timestamp + + print("#Events:", n_events) + print("Events id list:", events_id_list) + + return events_id_list + + +def pixels_under_wt(x, y): + """ + This function finds the pixels geometrically under the white-target + + :param x: x-coordinate of white-target position + :param y: y-coordinate of white-target position + :return: The list of pixels + """ + coords = 0.001 * np.array( + [ + [17, -214], + [-93, -214], + [-197, -110], + [-197, 110], + [-93, 214], + [17, 214], + [197, 110], + [197, -110], + ] + ) + coords[:, 0] += x + coords[:, 1] += y + + wt = patches.Polygon(xy=coords) + # ax = plt.gca() + # ax.add_patch(Polygon(xy=coords, alpha=1, ec='k', facecolor='none')) + + geom = CameraGeometry.from_name("NectarCam", version=3).transform_to( + EngineeringCameraFrame() + ) + geomdata = geom.to_table() + + pixels_under_wt = [] + for i in range(constants.N_PIXELS): + if wt.contains_point([geomdata["pix_x"][i], geomdata["pix_y"][i]]): + pixels_under_wt.append(i) + # plt.plot(geomdata["pix_x"][i], geomdata["pix_y"][i], "b.") + + # plt.show() + + return pixels_under_wt + + +if __name__ == "__main__": + """ + centroids_file = (' + /home/patel/Sonal/NectarCAM/SPE_ana/spe_scan_centroids_20220209b.dat + ') + + centroids = pd.read_csv( + centroids_file, sep=None, names=['x', 'y'], + index_col=False, skiprows=1 + ) + + for i in range(40): + # print("Bloc:", i, centroids["x"][i], centroids["y"][i]) + pixels_under_wt(centroids["x"][i], centroids["y"][i]) + """ + + events_id_list = get_event_id_list() + + for i in range(args["nBlock"][0]): + block = i + start, end = events_id_list[i], events_id_list[i + 1] + + wfs = WaveformsContainer(args["spe_run_number"][0]) + wfs.load_wfs(start=start, end=end) + wfs.write(args["path"][0], start, end, 
block, overwrite=True) + + charge = ChargeContainer.from_waveforms(wfs, method=args["extractorMethod"]) + charge.write(args["path"][0], start, end, block, overwrite=True) + + del wfs diff --git a/src/nectarchain/user_scripts/vmarandon/CalibrationData.py b/src/nectarchain/user_scripts/vmarandon/CalibrationData.py index a146ee5b..f574f8cf 100644 --- a/src/nectarchain/user_scripts/vmarandon/CalibrationData.py +++ b/src/nectarchain/user_scripts/vmarandon/CalibrationData.py @@ -13,12 +13,14 @@ class CalibrationCameraDisplay(CameraDisplay): def __init__(self,*args, **kwargs): super().__init__(*args,**kwargs) + self.clickfunc = None def set_function(self,func_name): self.clickfunc = func_name def on_pixel_clicked(self, pix_id): - self.clickfunc(pix_id) + if self.clickfunc is not None: + self.clickfunc(pix_id) class CalibInfo: @@ -156,8 +158,25 @@ def ShowPedestal(self): plt.show() - - +class XYTableDataElement(TimedInfo): + '''Class to store waveforms for each position of the XY tables''' + def __init__(self,startTime=None, endTime=None,bloc=None): + super().__init__(startTime,endTime) + self.waveforms = None + self.masks = None + self.averaged_waveform = None + self.bloc_number = bloc + +class XYTableDataIntegratedElement(TimedInfo): + '''Class to store integrated waveforms for each position of the XY tables''' + def __init__(self,startTime=None,endTime=None,bloc=None): + super().__init__(startTime,endTime) + self.pixels = None + self.times = None + self.pedestals = None + self.pedwidths = None + self.integrated = None + self.bloc_number = bloc class FlatFieldInfo(TimedInfo): diff --git a/src/nectarchain/user_scripts/vmarandon/DBHandler.py b/src/nectarchain/user_scripts/vmarandon/DBHandler.py index a468d03d..523daddb 100644 --- a/src/nectarchain/user_scripts/vmarandon/DBHandler.py +++ b/src/nectarchain/user_scripts/vmarandon/DBHandler.py @@ -96,14 +96,14 @@ def __get_selection_condition__(self,table): #df1b = pd.read_sql(f"SELECT * FROM 'monitoring_drawer_temperatures' 
WHERE time>datetime({int(test_time.timestamp())}, 'unixepoch')", con=sqlite3.connect(db_url)) time_cond = "" if self.time_start is not None: - if isinstance( self.time_start, datetime ): + if isinstance( self.time_start, datetime.datetime ): time_cond = f" WHERE time >= datetime({self.time_start.timestamp()}, 'unixepoch') " else: print(f"WARNING> {self.time_start} of type {type(self.time_start)} is of a non handled type ==> Won't be used (please correct code)") if self.time_end is not None: - if isinstance( self.time_end, datetime ): + if isinstance( self.time_end, datetime.datetime ): link_word = " WHERE " if not time_cond else " AND " time_cond = f" {link_word} time <= datetime({self.time_end.timestamp()}, 'unixepoch') " else: diff --git a/src/nectarchain/user_scripts/vmarandon/DataUtils.py b/src/nectarchain/user_scripts/vmarandon/DataUtils.py index 3e396956..a91a0ec1 100644 --- a/src/nectarchain/user_scripts/vmarandon/DataUtils.py +++ b/src/nectarchain/user_scripts/vmarandon/DataUtils.py @@ -108,7 +108,8 @@ def GetLongRunTimeEdges(run,path=None,event_type=None,delta_t_second=10.): #print(nEvents) data = DataReader(run,path=path) - data.Connect("trigger") + if not data.Connect("trigger"): + data = GetNectarCamEvents(run=run,path=path,applycalib=False) times_edges = list() @@ -119,26 +120,27 @@ def GetLongRunTimeEdges(run,path=None,event_type=None,delta_t_second=10.): # if there is a time gap of more than delta_t seconds, then we consider that this is the end of a data block delta_t = TimeDelta(delta_t_second,format="sec") - - for evt in tqdm(data,total=nEvents): - current_time = data.trigger.time - - if time_start is None: - time_start = current_time - - if evt.trigger.event_type == EventType.SKY_PEDESTAL: - - if previous_time is None: - previous_time = current_time + try: + for evt in tqdm(data,total=nEvents): + current_time = evt.trigger.time - if current_time - previous_time > delta_t: - #print(f"time: {time} previous time: {previous_time} delta: {(time - 
previous_time).to_value('s')}") - #if (previous_time - time) > delta_t: - times_edges.append( (time_start,previous_time) ) + if time_start is None: time_start = current_time + + if evt.trigger.event_type == event_type: - previous_time = current_time - + if previous_time is None: + previous_time = current_time + + if current_time - previous_time > delta_t: + #print(f"time: {time} previous time: {previous_time} delta: {(time - previous_time).to_value('s')}") + #if (previous_time - time) > delta_t: + times_edges.append( (time_start,previous_time) ) + time_start = current_time + + previous_time = current_time + except Exception as err: + print(f"Error while reading file: [{err}]") times_edges.append( (time_start,current_time) ) # write the last time #print(f"There is : {len(times_edges)} intervals") diff --git a/src/nectarchain/user_scripts/vmarandon/ExtractInformation2.py b/src/nectarchain/user_scripts/vmarandon/ExtractInformation2.py index 7cf3b18d..f00f6917 100644 --- a/src/nectarchain/user_scripts/vmarandon/ExtractInformation2.py +++ b/src/nectarchain/user_scripts/vmarandon/ExtractInformation2.py @@ -16,6 +16,7 @@ from ctapipe.containers import EventType from multiprocessing.dummy import Pool as ThreadPool + #from multiprocessing import Pool as ThreadPool @@ -33,7 +34,7 @@ # data_path = FindDataPath(run,args.dataPath) -def ExtractInformationSingleRun(run,data_path,dest_path,data_block,applycalib=True,keepR1=True,nnint=False): +def ExtractInformationSingleRun(run,data_path,dest_path,data_block,applycalib=True,keepR1=True,nnint=False,onlytrigger=False): sleep_time = random.uniform(0.,1.) 
#print(sleep_time) @@ -68,6 +69,9 @@ def ExtractInformationSingleRun(run,data_path,dest_path,data_block,applycalib=Tr doIntegration = nnint + if onlytrigger: + doIntegration = False + for evt in tqdm(events): #if data_block != 42 and data_block!=8: # break @@ -113,7 +117,10 @@ def ExtractInformationSingleRun(run,data_path,dest_path,data_block,applycalib=Tr for k in evt.keys(): if k not in exclusion: - dd[k].dump( copy.deepcopy(getattr(evt,k)), time=event_time ) + if onlytrigger and k!="trigger": + continue + else: + dd[k].dump( copy.deepcopy(getattr(evt,k)), time=event_time ) def TrueOrFalse(arg): ua = str(arg).upper() @@ -140,6 +147,7 @@ def ExtractInformation(arglist): p.add_argument("--keep-r1",dest='keepR1',type=str,default="True",help="Save the R1 data if True") p.add_argument("--split",dest='split',action='store_true',help='Split the files per groups. 0-1 together in a file, 2-3 in another, etc... Need the ctapipe_io_nectarcam version compatible with ctapipe 0.18') p.add_argument("--nnint",dest='nnint',action='store_true',help='Do an integration of the data using Next Neighbor Peak Search. At the moment hard coded to be 10 ns -4 and +6 ns after the max. Will create charge and TO data set') + p.add_argument("--only-trigger",dest='onlytrig',action='store_true',help='Extract only the trigger information to a file. 
Useful for big runs') args = p.parse_args(arglist) @@ -164,7 +172,7 @@ def ExtractInformation(arglist): dest_path = args.destPath - if args.split: + if False and args.split: runs = list() paths = list() @@ -173,6 +181,7 @@ def ExtractInformation(arglist): calib = list() keepR1 = list() nnints = list() + trigonly = list() for block in range(GetNumberOfDataBlocks(run,data_path)): #for block in range(8): @@ -184,13 +193,14 @@ def ExtractInformation(arglist): calib.append(applyCalib) keepR1.append( keepR1) nnints.append(args.nnint) + trigonly.append(args.onlytrig) # Make the Pool of workers - pool = ThreadPool(1) + pool = ThreadPool(4) # Open the URLs in their own threads # and return the results - results = pool.starmap(ExtractInformationSingleRun, zip(runs,paths,dest_paths,blocks,calib,keepR1,nnints) ) + results = pool.starmap(ExtractInformationSingleRun, zip(runs,paths,dest_paths,blocks,calib,keepR1,nnints,trigonly) ) # Close the pool and wait for the work to finish pool.close() @@ -198,7 +208,14 @@ def ExtractInformation(arglist): #ExtractInformationSingleRun(run,args.dataPath,args.destPath,data_block=block) else: - ExtractInformationSingleRun(run=run,data_path=data_path,dest_path=dest_path,data_block=-1,applycalib=applyCalib,keepR1=keepR1,nnint=args.nnint) + if args.split: + nBlocks = GetNumberOfDataBlocks(run,data_path) + for block in range(nBlocks): + print(f'block: {block+1}/{nBlocks}') + ExtractInformationSingleRun(run=run,data_path=data_path,dest_path=dest_path,data_block=block,applycalib=applyCalib,keepR1=keepR1,nnint=args.nnint,onlytrigger=args.onlytrig) + else: + ExtractInformationSingleRun(run=run,data_path=data_path,dest_path=dest_path,data_block=-1,applycalib=applyCalib,keepR1=keepR1,nnint=args.nnint,onlytrigger=args.onlytrig) + #def ExtractInformationSingleRun(run,data_path,dest_path,data_block,applycalib=True,keepR1=True): diff --git a/src/nectarchain/user_scripts/vmarandon/FitSPEFFMultiProcess.py 
b/src/nectarchain/user_scripts/vmarandon/FitSPEFFMultiProcess.py new file mode 100644 index 00000000..a52f3ad3 --- /dev/null +++ b/src/nectarchain/user_scripts/vmarandon/FitSPEFFMultiProcess.py @@ -0,0 +1,410 @@ +try: + import sys + import numpy as np + import numpy.ma as ma + from scipy.stats import poisson, norm + from DataUtils import GetLongRunTimeEdges, CountEventTypes, GetTotalEventCounts + from Utils import GetCamera, SignalIntegration, IntegrationMethod, ConvertTitleToName, CustomFormatter, GetDefaultDataPath + from ctapipe.containers import EventType + from FileHandler import DataReader, GetNectarCamEvents + from Stats import CameraSampleStats, CameraStats + from tqdm import tqdm + from FitUtils import JPT2FitFunction + from iminuit import Minuit + import time + import copy + from matplotlib import pyplot as plt + from ctapipe.visualization import CameraDisplay + #from multiprocessing import Pool + import pickle + #from multiprocessing.dummy import Pool as ThreadPool + from multiprocessing import Pool + import argparse + + from IPython import embed +except ImportError as e: + print(e) + raise SystemExit + + +# %matplotlib tk +#ma.array(t0max[0],mask=css.get_lowcount_mask()[0,:,0]) +class FitResult(): + def __init__(self,res,params): + self.minuit_result = res + self.fit_parameters = params + +def split_array(a, n): + k, m = divmod(len(a), n) + return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)) + +def save_data(datas, fileName): + """Save info in a file using pickle""" + with open(fileName,'wb') as file: + pickle.dump(datas,file) + +def FitPart(pixels,charges,sigpeds,peds): + results = list() + for p,c,s,ped in tqdm(zip(pixels,charges,sigpeds,peds)): + res = FitPixel(c,s,ped) + if res is not None: + results.append( (p,) + res ) + return results + + +def FitPixel(hicharges,sigped=None,ped=None): + start = time.time() + pix_datas = hicharges + bins = np.linspace( 
np.min(pix_datas)-0.5,np.max(pix_datas)+0.5,int(np.max(pix_datas)-np.min(pix_datas)+2)) + vals,x_edges = np.histogram(pix_datas,bins=bins) + + if len(bins)<=100: + #print(f"Pixel: {pix} --> Too few bins... bad pixel ? --> SKIP !") + return #continue + + jpt2 = JPT2FitFunction(x_edges,vals) + amplitude = float(np.sum(vals)) + if ped is None: + hiPed = x_edges[ np.argmax(vals) ] + 0.5 + else: + hiPed = ped + + if sigped is None: + sigmaHiPed = 14. + else: + sigmaHiPed = sigped + + ns = 1. + meanillu = 1. + gain = 65. + sigmaGain = 0.45*gain + start_parameters = [amplitude,hiPed,sigmaHiPed,ns,meanillu,gain,sigmaGain] + + m = Minuit( jpt2.Minus2LogLikelihood, start_parameters, name=("Norm","Ped","SigmaPed","Ns","Illu","Gain","SigmaGain") ) + + m.errors["Norm"] = 0.1*amplitude + m.errors["Ped"] = 1.* sigmaHiPed + m.errors["SigmaPed"] = 0.2*sigmaHiPed + m.errors["Ns"] = 0.1 + m.errors["Illu"] = 0.3 + m.errors["Gain"] = 0.3*gain + m.errors["SigmaGain"] = 0.3*sigmaGain + + m.limits['Norm'] = (0.,None) + m.limits['Illu'] = (0.,8.) + m.limits['Gain'] = (10.,200.) + m.limits['SigmaGain'] = (10.,None) + m.limits['Ns'] = (0.,None) + m.limits['SigmaPed'] = (0.,None) + + try: + min_result = m.migrad() + fit_params = m.params + + end = time.time() + fitTime = end-start + #pixelFitTime[pix] = end-start + #pixelFitResult[pix] = FitResult( copy.deepcopy(min_result), copy.deepcopy(fit_params) ) + return FitResult( copy.deepcopy(min_result), copy.deepcopy(fit_params) ), fitTime + + except ValueError as err: + #print(f"Pixel: {pix} Error: {err}") + pass + + +def DoSPEFFFit(arglist): + + p = argparse.ArgumentParser(description='Perform SPE Fit for FF data', + epilog='examples:\n' + '\t python %(prog)s --run 3750 \n', + formatter_class=CustomFormatter) + + p.add_argument("--run", dest='run', type=int, help="Run number") + p.add_argument("--data-path",dest='dataPath',type=str,default=GetDefaultDataPath(),help="Path to the rawdata directory. 
The program will recursively search in all directory for matching rawdata") + p.add_argument("--njobs", dest='njobs', type=int, default=1, help="Number of CPU to use") + + args = p.parse_args(arglist) + + if args.run is None: + p.print_help() + return -1 + + run = args.run + path = args.dataPath + + # path = '/Users/vm273425/Programs/NectarCAM/data/' + + data = DataReader(run,path) + ok = data.Connect("trigger","r0","mon") + if not ok: + data = GetNectarCamEvents(run,path,applycalib=False) + + nEvents = GetTotalEventCounts(run,path) + + css = CameraSampleStats() + + + counter = 0 + nUsedEntries = 10000000 + for evt in tqdm(data,total=min(nEvents,nUsedEntries)): + css.add( evt.r0.tel[0].waveform, validmask = ~evt.mon.tel[0].pixel_status.hardware_failing_pixels) + counter += 1 + if counter>=nUsedEntries: + break + + mean_waveform = css.mean + + fig1 = plt.figure() + pixs = [777,747,320,380,123,1727,427,74] + for p in pixs: + plt.plot( mean_waveform[0,p,:], label=f'{p}') + plt.grid() + plt.legend() + + + + fig2 = plt.figure() + #pixs = [777,747,320,380,123,1727,427,74] + pixs = [923,483,1573,1751,1491,482,720] + for p in pixs: + plt.plot( mean_waveform[0,p,:], label=f'{p}') + plt.grid() + plt.legend() + + + t0max = np.argmax(mean_waveform,axis=2) + + fig3 = plt.figure() + cam_tom_hg = CameraDisplay(geometry=GetCamera(),image=ma.array(t0max[0],mask=css.get_lowcount_mask()[0,:,0]),cmap='coolwarm',title='High-Gain Mean ToM',allow_pick=True) + cam_tom_hg.highlight_pixels(range(1855),color='grey') + cam_tom_hg.add_colorbar() + #cam_tom_hg.show() + + + fig4 = plt.figure() + histdata = t0max[0] + bins = np.linspace(-0.05,50.05,502) + good_histdata = histdata[ histdata>0. 
] + _ = plt.hist(good_histdata,bins=bins) + plt.xlim(np.min(good_histdata)-1,np.max(good_histdata)+1) + plt.title("ToM Average High Gain Waveform") + plt.grid() + + + data = DataReader(run,path) + ok = data.Connect("trigger","r0","mon") + if not ok: + data = GetNectarCamEvents(run,path,applycalib=False) + + nEvents = GetTotalEventCounts(run,path) + + camera = GetCamera() + + + ped_stats = CameraStats() + hicharges = list() + counter = 0 + + #data.rewind() + + counter = 0 + for evt in tqdm(data,total=nEvents): + charges, times = SignalIntegration( evt.r0.tel[0].waveform, exclusion_mask=evt.mon.tel[0].pixel_status.hardware_failing_pixels, peakpositions=t0max,method=IntegrationMethod.USERPEAK,left_bound=5,right_bound=11,camera=camera) + + ped = np.sum( evt.r0.tel[0].waveform[:,:,:16], axis=2 ) + ped_stats.add( ped, validmask=~evt.mon.tel[0].pixel_status.hardware_failing_pixels ) + + hicharges.append( charges[0] ) + counter += 1 +# if counter >=1000: +# break + + hicharges = np.array(hicharges) + + + + fig5 = plt.figure() + cam_pedw_hg = CameraDisplay(geometry=GetCamera(),image=ma.array(ped_stats.stddev[0],mask=ped_stats.get_lowcount_mask()[0]),cmap='coolwarm',title='High-Gain Mean Ped Width',allow_pick=True) + cam_pedw_hg.highlight_pixels(range(1855),color='grey') + cam_pedw_hg.add_colorbar() + #cam_pedw_hg.show() + + + + epedwidth = ped_stats.stddev[0] + eped = ped_stats.mean[0] + badpedwidth = ped_stats.get_lowcount_mask()[0] + + pixelFitResult = dict() + pixelFitTime = dict() + + use_multiprocess = args.njobs>1 + simple_multiprocess = False + # HiCharges = hicharges + # BadPedWidth = badpedwidth + # EPedWidth = epedwidth + + start_fit = time.time() + print(f"Starting the fit at : {start_fit}") + if use_multiprocess and simple_multiprocess: + print("Using the fit by pixel method") + pixlist = [pix for pix in range(1855)] + + with Pool(args.njobs) as p: + datas = list() + sigpeds = list() + peds = list() + for pix in pixlist: + datas.append( hicharges[:,pix] ) + 
sigpeds.append( 14. if badpedwidth[pix] else epedwidth[pix] ) + peds.append( None if badpedwidth[pix] else eped[pix] ) + + print(len(datas)) + results = p.starmap(FitPixel,zip(datas,sigpeds,peds)) + + p.close() + p.join() + print(len(results)) + + for pix in pixlist: + res = results[pix] + if res is not None: + pixelFitResult[pix] = res[0] + pixelFitTime[pix] = res[1] + elif use_multiprocess and not simple_multiprocess: + print("Using the fit by part method") + nPart = args.njobs + + npix2use = 1855 + part_pixels = list( split_array( [pix for pix in range(npix2use)], nPart ) ) + + part_charges = list( split_array( [hicharges[:,pix] for pix in range(npix2use) ], nPart ) ) + part_sigpeds = list( split_array( [ 14. if badpedwidth[pix] else epedwidth[pix] for pix in range(npix2use)], nPart ) ) + part_peds = list( split_array( [ None if badpedwidth[pix] else eped[pix] for pix in range(npix2use)], nPart ) ) + + #embed() + #for part in part_pixels: + # pcharges + # for pix in part: + with Pool(nPart) as p: + results = p.starmap(FitPart,zip(part_pixels,part_charges,part_sigpeds,part_peds)) + for rs in results: + for res in rs: + if res is not None: + cur_pixel = res[0] + pixelFitResult[cur_pixel] = res[1] + pixelFitTime[cur_pixel] = res[2] + + + + + else: + print("Using the standard approach") + for pix in tqdm(range(1855)): + sigped = 14. 
if badpedwidth[pix] else epedwidth[pix] + ped = None if badpedwidth[pix] else eped[pix] + res = FitPixel(hicharges[:,pix],sigped,ped ) + if res is not None: + pixelFitResult[pix] = res[0] + pixelFitTime[pix] = res[1] + #if pix>=100 : + # break + + end_fit = time.time() + + print(f"Finishing the fit at : {end_fit}") + print(f"Time to do it : {end_fit-start_fit} s") + + + print(len(pixelFitResult)) + + gains = list() + for p, f in pixelFitResult.items(): + fit_gain = f.fit_parameters['Gain'].value + gains.append(fit_gain) + + + cam_gain_values = np.zeros(1855) + for p, f in pixelFitResult.items(): + fit_gain = f.fit_parameters['Gain'].value + cam_gain_values[p] = fit_gain + + + fig6 = plt.figure(figsize=(8,8)) + cam_gain = CameraDisplay(geometry=GetCamera(),image=ma.array(cam_gain_values,mask=cam_gain_values<40.),cmap='turbo',title=f'High-Gain Gain (VIM Edition)\nRun: {run}',show_frame=False) + cam_gain.highlight_pixels(range(1855),color='grey') + cam_gain.add_colorbar() + cam_gain.show() + #fig.savefig(f'run_{run}_gain_camera.png') + fig6.savefig(f'run_{run}_FittedGain_VIMEdition.png') + + fig7 = plt.figure(figsize=(8,8)) + mean_gain = np.mean(gains) + std_gain = np.std(gains) + _ = plt.hist(gains,bins=60,label=f'$\mu:$ {mean_gain:.2f}\n$\sigma:$ {std_gain:.2f}') + plt.title(f"Fitted Gain Distribution (Run: {run})") + plt.xlabel('Gain (ADC per pe)') + plt.grid() + plt.legend() + #fig.savefig(f'run_{run}_gain_distribution.png') + fig7.savefig(f'run_{run}_FittedGainDistribution_VIMEdition.png') + + cam_illu_values = np.zeros(1855) + for p, f in pixelFitResult.items(): + fit_illu = f.fit_parameters['Illu'].value + cam_illu_values[p] = fit_illu + + fig8 = plt.figure(figsize=(8,8)) + cam_illu = CameraDisplay(geometry=GetCamera(),image=ma.array(cam_illu_values,mask=cam_gain_values<40.),cmap='turbo',title=f'Mean Illumination (VIM Edition)\nRun: {run}',show_frame=False) + cam_illu.highlight_pixels(range(1855),color='grey') + cam_illu.add_colorbar() + cam_illu.show() + 
fig8.savefig(f'run_{run}_FittedIllumination_VIMEdition.png') + + cam_ns_values = np.zeros(1855) + for p, f in pixelFitResult.items(): + fit_ns = f.fit_parameters['Ns'].value + cam_ns_values[p] = fit_ns + + fig9 = plt.figure(figsize=(8,8)) + cam_ns = CameraDisplay(geometry=GetCamera(),image=ma.array(cam_ns_values,mask=cam_gain_values<40.),cmap='turbo',title=f'Ns Parameter (VIM Edition)\nRun: {run}',show_frame=False) + cam_ns.highlight_pixels(range(1855),color='grey') + cam_ns.add_colorbar() + cam_ns.show() + + cam_pedw_values = np.zeros(1855) + for p, f in pixelFitResult.items(): + fit_pedw = f.fit_parameters['SigmaPed'].value + cam_pedw_values[p] = fit_pedw + + fig9 = plt.figure(figsize=(8,8)) + cam_pedw = CameraDisplay(geometry=GetCamera(),image=ma.array(cam_pedw_values,mask=cam_gain_values<40.),cmap='turbo',title=f'Pedestal Width (VIM Edition)\nRun: {run}',show_frame=False) + cam_pedw.highlight_pixels(range(1855),color='grey') + cam_pedw.add_colorbar() + cam_pedw.show() + fig9.savefig(f'run_{run}_FittedPedestalWidth_VIMEdition.png') + + + ## Save results : + outdataname = f'run_{run}_FittedSPEResult.dat' + + with open(outdataname,'w') as outfile: + outfile.write(f"pix Ped SigmaPed Ns Illu Gain SigmaGain\n") + for p, f in pixelFitResult.items(): + if f.minuit_result.valid: + outfile.write(f"{p} {f.fit_parameters['Ped'].value} {f.fit_parameters['SigmaPed'].value} {f.fit_parameters['Ns'].value} {f.fit_parameters['Illu'].value} {f.fit_parameters['Gain'].value} {f.fit_parameters['SigmaGain'].value}\n") + + # m.errors["Norm"] = 0.1*amplitude + # m.errors["Ped"] = 1.* sigmaHiPed + # m.errors["SigmaPed"] = 0.2*sigmaHiPed + # m.errors["Ns"] = 0.1 + # m.errors["Illu"] = 0.3 + # m.errors["Gain"] = 0.3*gain + # m.errors["SigmaGain"] = 0.3*sigmaGain + + plt.show() + + +if __name__ == '__main__': + DoSPEFFFit(sys.argv[1:]) + diff --git a/src/nectarchain/user_scripts/vmarandon/FitUtils.py b/src/nectarchain/user_scripts/vmarandon/FitUtils.py new file mode 100644 index 
00000000..199203d6 --- /dev/null +++ b/src/nectarchain/user_scripts/vmarandon/FitUtils.py @@ -0,0 +1,95 @@ +try: + import sys + import numpy as np + from scipy.stats import poisson, norm + + from IPython import embed + +except ImportError as e: + print(e) + raise SystemExit + + +class GaussHistoFitFunction(): + def __init__(self,bin_edges,bin_contents,integrate=True): # Assume that the bin centers are "integers" and that they are spaced at 1.... refinement will be for later + self.xcentre = 0.5 * (bin_edges[1:] + bin_edges[:-1]) + self.xedge = bin_edges + self.y = bin_contents + self.use_integration = integrate + + def ExpectedValues(self,normalisation,mu,sigma): + y = normalisation*np.exp(-0.5*np.power((self.xcentre-mu)/sigma,2.))/(np.sqrt(2.*np.pi)*sigma) + return y + + + def IntegratedExpectedValues(self,normalisation,mu,sigma): + yup = norm.cdf(self.xedge[1:],loc=mu,scale=sigma) + ylow = norm.cdf(self.xedge[:-1],loc=mu,scale=sigma) + return normalisation*(yup-ylow) + + + def Minus2LogLikelihood(self,parameters): + normalisation = parameters[0] + mu = parameters[1] + sigma = parameters[2] + if self.use_integration: + y_fit = self.IntegratedExpectedValues(normalisation,mu,sigma) + else: + y_fit = self.ExpectedValues(normalisation,mu,sigma) + #print(y_fit) + log_likes = poisson.logpmf(self.y,y_fit) + total = np.sum(log_likes) + return -2.*total + + +class JPT2FitFunction(): + def __init__(self,bin_edges,bin_contents): + self.xcentre = 0.5 * (bin_edges[1:] + bin_edges[:-1]) + self.xedge = bin_edges + self.y = bin_contents.astype(float) + + def Prediction(self,parameters): + amplitude = parameters[0] + hiPed = parameters[1] + sigmaHiPed = parameters[2] + ns = parameters[3] + meanillu = parameters[4] + gain = parameters[5] + sigmaGain = parameters[6] + + ## Compute the maximum illumination to consider + nMuUsed = int(5.*np.sqrt(meanillu+1)) + 2 + + ## Precompute some parameters that will be needed for the full iteration + imu = np.arange(nMuUsed) + sig2 = imu * 
(sigmaGain * sigmaGain) + sigmaHiPed*sigmaHiPed + pos_x = imu * gain + hiPed + poiG = poisson.pmf(imu,meanillu) / np.sqrt(2.*np.pi*sig2) + poiG[1:] *= ns + + sig2x2 = 2.*sig2 + ## Loop on pixels: + + + predicted_entries = np.zeros_like( self.y ) + + #embed() + for i in range(self.xcentre.shape[0]): + predicted_entries[i] = np.sum( poiG * np.exp( -np.power( self.xcentre[i] - pos_x,2.)/sig2x2 ) ) + + predicted_entries *= amplitude + return predicted_entries + + + def Minus2LogLikelihood(self,parameters): + #print(f'Minuit2 Called {parameters}') + y_pred = self.Prediction(parameters) + + LogLikeBins = poisson.logpmf(self.y,y_pred) + LogLike = np.sum( LogLikeBins ) + #if np.isnan(LogLike): + # print("--> Nan") + Minus2LogLike = -2.*LogLike + return Minus2LogLike + + diff --git a/src/nectarchain/user_scripts/vmarandon/ShowHardwareProblem.py b/src/nectarchain/user_scripts/vmarandon/ShowHardwareProblem.py new file mode 100644 index 00000000..a902519c --- /dev/null +++ b/src/nectarchain/user_scripts/vmarandon/ShowHardwareProblem.py @@ -0,0 +1,166 @@ +try: + import sys + + import numpy as np + import numpy.ma as ma + + import argparse + + import matplotlib + from matplotlib import pyplot as plt + + from FileHandler import DataReader, GetNectarCamEvents + from DataUtils import GetTotalEventCounts + from Utils import GetEventTypeFromString, GetCamera, CustomFormatter, GetDefaultDataPath + from FitUtils import GaussHistoFitFunction + from Stats import CameraSampleStats, CameraStats + from CalibrationData import CalibrationCameraDisplay + + from scipy.optimize import curve_fit + from scipy.stats import norm + from iminuit import Minuit + + from ctapipe.visualization import CameraDisplay + + from tqdm import tqdm + + from IPython import embed + +except ImportError as e: + print(e) + raise SystemExit + +class HardwareProblemStatsMaker: + ''' + Class to compute stats on possible hardware problems + ''' + def __init__(self,telid = 0): + self.telid = telid + self.missing = None + 
def ShowHardwareProblem(arglist):
    """Show statistics on the `hardware_failing_pixels` monitoring flag of a run.

    Reads every event of a run, accumulates per-pixel statistics of the
    hardware-failing flag, and displays camera maps of the failing counts
    and failing fraction for both gain channels.

    Parameters
    ----------
    arglist : list of str
        Command-line style arguments, e.g. ``sys.argv[1:]``.

    Returns
    -------
    int or None
        -1 on argument error or when no event could be read, else None.
    """
    p = argparse.ArgumentParser(description='Show stats on the hardware_failing_pixels flag',
                                epilog='examples:\n'
                                '\t python %(prog)s --run 123456 \n',
                                formatter_class=CustomFormatter)

    p.add_argument("--run", dest='run', type=int, help="Run number to be converted")
    p.add_argument("--data-path", dest='dataPath', type=str, default=None, help="Path to the rawdata directory. The program will recursively search in all directory for matching rawdata")
    p.add_argument("--telid", dest="telId", type=int, default=0, help="Telescope id for which we show the data distributions")
    p.add_argument("--list", dest="listEvent", action='store_true', help="List pixels for which there is hardware problems")
    p.add_argument("--savefig", dest="savefig", action='store_true', help='Save figure')
    p.add_argument("--debug", dest="debug", action='store_true', help='Debug mode (only 10000 events processed)')

    args = p.parse_args(arglist)

    if args.run is None:
        p.print_help()
        return -1

    path = GetDefaultDataPath() if args.dataPath is None else args.dataPath

    # Try the pre-processed containers first, fall back to raw events.
    data = DataReader(args.run, path)
    ok = data.Connect("mon", "trigger")
    if not ok:
        data = GetNectarCamEvents(args.run, path, applycalib=False)

    dataevents = GetTotalEventCounts(args.run, path)

    camstats = CameraStats()

    sum_failing = None
    counter = 0
    try:
        for evt in tqdm(data, total=dataevents):
            hardware_failing = evt.mon.tel[args.telId].pixel_status.hardware_failing_pixels
            camstats.add(hardware_failing)

            if sum_failing is None:
                sum_failing = hardware_failing.copy().astype(int)
            else:
                sum_failing += hardware_failing

            if args.debug:
                counter += 1
                if counter > 10000:
                    break
    except Exception as e:
        # Best effort: keep whatever statistics were accumulated before the failure.
        print(e)

    if sum_failing is None:
        # BUGFIX: the original crashed below when no event was read at all.
        print(f"No event read for run {args.run}: nothing to display")
        return -1

    lw = 0.2

    fig_counts, axs_counts = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))

    cam_count_hg = CameraDisplay(geometry=GetCamera(), cmap='turbo', image=ma.array(camstats.mean[0] * camstats.count[0], mask=camstats.mean[0] == 0.), title=f'HG Hardware Failing Counts\nrun {args.run}', ax=axs_counts[0], show_frame=False, allow_pick=True)
    cam_count_hg.highlight_pixels(range(1855), linewidth=lw, color='grey')
    cam_count_hg.add_colorbar()

    cam_count_lg = CameraDisplay(geometry=GetCamera(), cmap='turbo', image=ma.array(camstats.mean[1] * camstats.count[1], mask=camstats.mean[1] == 0.), title=f'LG Hardware Failing Counts\nrun {args.run}', ax=axs_counts[1], show_frame=False, allow_pick=True)
    cam_count_lg.highlight_pixels(range(1855), linewidth=lw, color='grey')
    cam_count_lg.add_colorbar()

    if args.savefig:
        figname = f'run_{args.run}_Hardware_Failing_Pixels_Counter.png'
        # BUGFIX: the figure name was computed but the figure was never written.
        fig_counts.savefig(figname)

    fig_counts.show()

    fig_counts2, axs_counts2 = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))

    cam_count_hg2 = CameraDisplay(geometry=GetCamera(), cmap='turbo', image=ma.array(sum_failing[0], mask=sum_failing[0] == 0.), title=f'HG Hardware Failing Counts 2\nrun {args.run}', ax=axs_counts2[0], show_frame=False, allow_pick=True)
    cam_count_hg2.highlight_pixels(range(1855), linewidth=lw, color='grey')
    cam_count_hg2.add_colorbar()

    cam_count_lg2 = CameraDisplay(geometry=GetCamera(), cmap='turbo', image=ma.array(sum_failing[1], mask=sum_failing[1] == 0.), title=f'LG Hardware Failing Counts 2\nrun {args.run}', ax=axs_counts2[1], show_frame=False, allow_pick=True)
    cam_count_lg2.highlight_pixels(range(1855), linewidth=lw, color='grey')
    cam_count_lg2.add_colorbar()

    fig_counts2.show()

    fig_frac, axs_frac = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))

    cam_frac_hg = CameraDisplay(geometry=GetCamera(), cmap='turbo', image=ma.array(camstats.mean[0] * 100, mask=camstats.mean[0] == 0.), title=f'HG Hardware Failing Fraction (%)\nrun {args.run}', ax=axs_frac[0], show_frame=False, allow_pick=True)
    cam_frac_hg.add_colorbar()
    cam_frac_hg.highlight_pixels(range(1855), linewidth=lw, color='grey')
    cam_frac_hg.colorbar.set_label('%')

    cam_frac_lg = CameraDisplay(geometry=GetCamera(), cmap='turbo', image=ma.array(camstats.mean[1] * 100, mask=camstats.mean[1] == 0.), title=f'LG Hardware Failing Fraction (%)\nrun {args.run}', ax=axs_frac[1], show_frame=False, allow_pick=True)
    cam_frac_lg.add_colorbar()
    cam_frac_lg.highlight_pixels(range(1855), linewidth=lw, color='grey')
    cam_frac_lg.colorbar.set_label('%')

    if args.savefig:
        figname = f'run_{args.run}_Hardware_Failing_Pixels_Fraction.png'
        # BUGFIX: same as above — actually write the requested figure.
        fig_frac.savefig(figname)

    fig_frac.show()

    # To go further one could use this information:
    #   module_status[ evt.nectarcam.tel[0].svc.module_ids ] = evt.nectarcam.tel[0].evt.module_status
    # to know the module that was expected to be there during acquisition in
    # order to avoid false positive. There is a similar info for pixels:
    #   pixel_status = np.zeros(N_PIXELS)
    #   pixel_status[self.camera_config.expected_pixels_id] = event.pixel_status
    #   status_container.hardware_failing_pixels[:] = pixel_status == 0

    # Keep the figures alive until the user is done looking at them.
    input("Press Enter to quit\n")

    embed()


if __name__ == "__main__":
    ShowHardwareProblem(sys.argv[1:])
def ConvertTitleToName(title: str):
    """Convert a figure title into a filesystem-friendly name.

    Spaces, newlines and punctuation become underscores, comparison signs
    become words ('>' -> 'Above', '<' -> 'Below') and dots become 'd'
    (so '0.5' -> '0d5'); runs of underscores are collapsed to one.
    """
    # Order matters: '_-_' can only appear once spaces have become '_',
    # and '.' must be translated after everything that could produce one.
    for old, new in ((' ', '_'), ("\n", "_"), ("(", "_"), (")", "_"),
                     (":", "_"), (",", "_"), ("=", "_"), ("_-_", "_"),
                     (">", "Above"), ("<", "Below"), (".", "d")):
        title = title.replace(old, new)

    # Collapse any remaining run of '_' down to a single one.
    while '__' in title:
        title = title.replace("__", "_")
    return title


def GetDefaultDataPath():
    """Return the data directory: $NECTARCAMDATA if set, else a hard-coded default."""
    return os.environ.get('NECTARCAMDATA', '/Users/vm273425/Programs/NectarCAM/data')


@njit(parallel=True)
def UserPeakIntegration(waveform, peakpositions, left_bound=5, right_bound=7):
    """Integrate each trace in a fixed-width window around a user-given peak.

    Parameters
    ----------
    waveform : ndarray, shape (n_channel, n_pixels, n_samples)
        Waveform samples per gain channel and pixel.
    peakpositions : ndarray, shape (n_channel, n_pixels)
        Peak sample index for each trace (assumed integer-valued —
        TODO confirm against callers).
    left_bound, right_bound : int
        Samples kept before/after the peak; the window is clamped to the
        trace edges while keeping a constant width of left_bound+right_bound.

    Returns
    -------
    (integrated_signal, signal_timeslice)
        Both of shape (n_channel, n_pixels); the timeslice is the peak
        position actually used.
    """
    wvf_shape = waveform.shape
    n_channel = wvf_shape[0]
    n_pixels = wvf_shape[1]
    n_samples = wvf_shape[2]

    integrated_signal = np.zeros((n_channel, n_pixels))
    signal_timeslice = np.zeros((n_channel, n_pixels))

    integ_window = left_bound + right_bound

    # BUGFIX: iterate over the real number of channels; the original used a
    # hard-coded `prange(2)`, silently ignoring any channel beyond the second
    # (and indexing out of range for fewer than two).
    for chan in prange(n_channel):
        chan_wvf = waveform[chan, :, :]
        for pix in range(n_pixels):
            trace = chan_wvf[pix, :]
            peak_pos = peakpositions[chan, pix]
            if (peak_pos - left_bound) < 0:
                # Window would start before the trace: pin it to the left edge.
                lo_bound = 0
                up_bound = integ_window
            elif (peak_pos + right_bound) > n_samples:
                # Window would end past the trace: pin it to the right edge.
                lo_bound = n_samples - integ_window
                up_bound = n_samples
            else:
                lo_bound = peak_pos - left_bound
                up_bound = peak_pos + right_bound
            integrated_signal[chan, pix] = np.sum(trace[lo_bound:up_bound])
            signal_timeslice[chan, pix] = peak_pos

    return integrated_signal, signal_timeslice


def save_simple_data(datas, fileName):
    """Save info in a file using pickle (lz4-compressed stream)."""
    with lz4.frame.open(fileName, 'wb') as f:
        pickle.dump(datas, f)


def read_simple_data(fileName):
    """Read info from a file using pickle (lz4-compressed stream)."""
    with lz4.frame.open(fileName, 'rb') as f:
        return pickle.load(f)
class Stats:
    """class Stats
    Accumulator object for Welfords online / parallel variance algorithm.

    Examples
    --------
    Example with only one variable
    >>> from nectarchain.utils.stats import Stats
    >>> s = Stats()
    >>> s.add(1)
    >>> s.add(2)
    >>> s.add(3)
    >>> s.mean
    2
    >>> s.std
    1
    """

    def __init__(self, shape=(1,)):
        """__init__

        Initialize with an optional data.
        For the calculation efficiency, Welford's method is not used on the initialization process.
        """
        # Initialize instance attributes
        self._shape = shape
        # Number of accumulated entries per cell.
        self._count = np.zeros(shape, dtype=int)
        # Running mean (Welford's "M" accumulator).
        self._m = np.zeros(shape, dtype=float)
        # Running sum of squared deviations (Welford's "S" accumulator) —
        # presumably; the update method is not visible in this chunk.
        self._s = np.zeros(shape, dtype=float)
        # Extrema start at +/- inf so any first sample replaces them.
        self._min = np.full(shape, np.inf)
        self._max = np.full(shape, -np.inf)

    def __str__(self):
        # Human-readable dump of all accumulated quantities.
        infos = ""
        infos += f"mean: {self.mean}" + "\n"
        infos += f"std: {self.stddev}" + "\n"
        infos += f"min: {self.min}" + "\n"
        infos += f"max: {self.max}" + "\n"
        infos += f"count: {self.count}" + "\n"
        infos += f"shape: {self.shape}"
        return infos

    def __repr__(self):
        return self.__str__()

    def copy(self):
        # Deep copy so the returned accumulator is fully independent.
        return deepcopy(self)

    def __add__(self, other):
        # `a + b` merges two accumulators into a new one
        # (parallel-variance merge; `merge` is not visible in this chunk).
        r = self.copy()
        r.merge(other)
        return r

    def __iadd__(self, other):
        # In-place merge.
        self.merge(other)
        return self

    @property
    def shape(self):
        return self._shape

    @property
    def count(self):
        return self._count

    @property
    def mean(self):
        return self._m

    @property
    def variance(self):
        # NOTE(review): `_getvars` is not visible in this chunk (source is
        # truncated below); presumably it derives the variance from `_s`,
        # `_count` and `ddof` — confirm in the full file.
        return self._getvars(ddof=1)

    @property
    def stddev(self):
        return np.sqrt(self._getvars(ddof=1))

    @property
    def std(self):
        # Alias for `stddev`.
        return self.stddev

    @property
    def min(self):
        return self._min

    @property
    def max(self):
        return self._max

    def get_lowcount_mask(self, mincount=3):
        # NOTE(review): the source text is garbled at this point
        # ("return self._count>>"): everything between this comparison and the
        # CameraSampleStats docstring was lost in extraction (the add/merge
        # methods and the CameraStats class referenced elsewhere are not
        # visible). The intended body is presumably the element-wise
        # low-statistics mask below — confirm against the original file.
        return self._count < mincount


class CameraSampleStats(Stats):
    # NOTE(review): the class header and the start of the docstring fall in
    # the lost span; the base class is inferred from the `super().__init__`
    # call and the module context — confirm against the original file.
    """Per-sample statistics accumulator sized for the full camera.

    >>> from nectarchain.utils.stats import CameraSampleStats
    >>> from ctapipe_io_nectarcam import NectarCAMEventSource

    >>> reader = NectarCAMEventSource(input_url='NectarCAM.Run4560.00??.fits.fz')

    >>> s = CameraSampleStats()
    >>> for event in reader:
    >>>     s.add( event.r0.tel[0].waveform, validmask=~evt.mon.tel[0].pixel_status.hardware_failing_pixels )
    >>> print(s.mean)
    """

    def __init__(self, shape=(nc.N_GAINS, nc.N_PIXELS, nc.N_SAMPLES), *args, **kwargs):
        # Default shape covers the whole camera: (gain channel, pixel, sample).
        super().__init__(shape, *args, **kwargs)