diff --git a/CHANGES.rst b/CHANGES.rst index 39f3bd3..7bb8bd1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -2,6 +2,7 @@ Change Log ========== Version 7 ---------- +- **7.12**: tukey from scipy.signal.windows and nan instead of NaN for numpy - **7.11**: quick fix for Phillips staging import - **7.10**: re-re-release - **7.09**: re-release diff --git a/wonambi/VERSION b/wonambi/VERSION index 67271a4..1e0d11a 100644 --- a/wonambi/VERSION +++ b/wonambi/VERSION @@ -1 +1 @@ -7.11 +7.12 diff --git a/wonambi/datatype.py b/wonambi/datatype.py index ed0296a..4aa13c7 100644 --- a/wonambi/datatype.py +++ b/wonambi/datatype.py @@ -10,7 +10,7 @@ from logging import getLogger from pathlib import Path -from numpy import arange, array, empty, ix_, NaN, squeeze, where +from numpy import arange, array, empty, ix_, nan, squeeze, where lg = getLogger() @@ -171,7 +171,7 @@ def __call__(self, trial=None, tolerance=None, **axes): output_shape.append(n_values) output[cnt] = empty(output_shape, dtype=self.data[i].dtype) - output[cnt].fill(NaN) + output[cnt].fill(nan) if all([len(x) > 0 for x in idx_data]): ix_output = ix_(*idx_output) diff --git a/wonambi/detect/spindle.py b/wonambi/detect/spindle.py index 4418dfc..73d9771 100644 --- a/wonambi/detect/spindle.py +++ b/wonambi/detect/spindle.py @@ -1,16 +1,17 @@ """Module to detect spindles. """ from logging import getLogger -from numpy import (absolute, arange, argmax, argmin, around, asarray, - concatenate, cos, diff, exp, empty, histogram, - hstack, insert, invert, log10, logical_and, mean, median, - nan, ones, percentile, pi, ptp, real, sqrt, square, std, +from numpy import (absolute, arange, argmax, argmin, around, asarray, + concatenate, cos, diff, exp, empty, histogram, + hstack, insert, invert, log10, logical_and, mean, median, + nan, ones, percentile, pi, ptp, real, sqrt, square, std, sum, vstack, where, zeros) from numpy.fft import rfftfreq from scipy.ndimage.filters import gaussian_filter -from scipy.signal import (argrelmax, butter, cheby2, filtfilt, - fftconvolve, hilbert, periodogram, remez, - sosfiltfilt, spectrogram, tukey) +from scipy.signal import (argrelmax, butter, cheby2, filtfilt, + fftconvolve, hilbert, periodogram, remez, + sosfiltfilt, spectrogram) +from scipy.signal.windows import tukey from scipy.fftpack import next_fast_len try: from PyQt5.QtCore import Qt @@ -37,34 +38,34 @@ class DetectSpindle: duration : tuple of float min and max duration of spindles merge : bool - if True, then after events are detected on every channel, events on - different channels that are separated by less than min_interval will be - merged into a single event, with 'chan' = the chan of the earlier-onset + if True, then after events are detected on every channel, events on + different channels that are separated by less than min_interval will be + merged into a single event, with 'chan' = the chan of the earlier-onset event. - + Attributes ---------- tolerance : float - during detection and prior to applying the duration criterion, - candidate events separated by less than this time interval are merged. - In this way, the detector becomes tolerant to short dips below the - eligibility threshold (e.g. if the spindle power drops for a split + during detection and prior to applying the duration criterion, + candidate events separated by less than this time interval are merged. + In this way, the detector becomes tolerant to short dips below the + eligibility threshold (e.g. if the spindle power drops for a split second). 
min_interval : float after the duration criterion is applied, events separated by less than this interval are merged into a single event, with 'chan' = the chan of the earlier-onset event. power_peaks : str or None - for peak power statistics. 'peak' or 'interval'. If None, values will + for peak power statistics. 'peak' or 'interval'. If None, values will all be NaN - + Notes ----- See individual detect_* functions for other attribute descriptions. """ def __init__(self, method='Moelle2011', frequency=None, duration=None, merge=False): - + self.method = method self.frequency = frequency self.merge = merge @@ -72,7 +73,7 @@ def __init__(self, method='Moelle2011', frequency=None, duration=None, self.min_interval = 0 self.power_peaks = 'interval' self.rolloff = None - + if method == 'Ferrarelli2007': if self.frequency is None: self.frequency = (11, 15) @@ -83,7 +84,7 @@ def __init__(self, method='Moelle2011', frequency=None, duration=None, } self.det_thresh = 8 self.sel_thresh = 2 - + elif method == 'Moelle2011': if self.frequency is None: self.frequency = (12, 15) @@ -97,7 +98,7 @@ def __init__(self, method='Moelle2011', frequency=None, duration=None, self.smooth = {'dur': .2, 'win': 'flat'} self.det_thresh = 1.5 - + elif method == 'Nir2011': if self.frequency is None: self.frequency = (9.2, 16.8) @@ -109,8 +110,8 @@ def __init__(self, method='Moelle2011', frequency=None, duration=None, self.smooth = {'dur': .04} # is in fact sigma self.det_thresh = 3 self.sel_thresh = 1 - - + + elif method == 'Wamsley2012': if self.frequency is None: self.frequency = (12, 15) @@ -135,7 +136,7 @@ def __init__(self, method='Moelle2011', frequency=None, duration=None, self.moving_rms = {'dur': .25, 'step': .25} self.det_thresh = 95 - + elif method == 'Ray2015': if self.frequency is None: self.frequency = (11, 16) @@ -153,7 +154,7 @@ def __init__(self, method='Moelle2011', frequency=None, duration=None, 'pcl_range': None} self.det_thresh = 2.33 self.sel_thresh = 0.1 - + elif method == 'Lacourse2018': if self.frequency is None: self.frequency = (11, 16) @@ -184,7 +185,7 @@ def __init__(self, method='Moelle2011', frequency=None, duration=None, self.rel_pow_thresh = 1.6 self.covar_thresh = 1.3 self.corr_thresh = 0.69 - + elif 'FASST' in method: if self.frequency is None: self.frequency = (11, 18) @@ -197,7 +198,7 @@ def __init__(self, method='Moelle2011', frequency=None, duration=None, self.smooth = {'dur': .1, 'win': 'flat'} self.det_thresh = 90 - + elif method == 'UCSD': if self.frequency is None: self.frequency = (10, 16) @@ -234,13 +235,13 @@ def __init__(self, method='Moelle2011', frequency=None, duration=None, self.det_thresh_hi = 10 self.tolerance = 0.2 self.sel_thresh = 1 - + else: raise ValueError('Unknown method') - + if frequency is not None: self.frequency = frequency - + if duration is not None: self.duration = duration @@ -265,22 +266,22 @@ def __call__(self, data, parent=None): description of the detected spindles """ if parent is not None: - progress = QProgressDialog('Finding spindles', 'Abort', + progress = QProgressDialog('Finding spindles', 'Abort', 0, data.number_of('chan')[0], parent) progress.setWindowModality(Qt.ApplicationModal) - + spindle = Spindles() spindle.chan_name = data.axis['chan'][0] spindle.det_values = empty(data.number_of('chan')[0], dtype='O') spindle.density = zeros(data.number_of('chan')[0]) - + if self.duration[1] is None: self.duration = self.duration[0], MAX_DURATION all_spindles = [] i = 0 for i, chan in enumerate(data.axis['chan'][0]): - + lg.info('Detecting spindles 
on channel %s', chan) time = hstack(data.axis['time']) dat_orig = hstack(data(chan=chan)) @@ -291,60 +292,60 @@ def __call__(self, data, parent=None): data.s_freq, time, self) - + elif self.method == 'Moelle2011': sp_in_chan, values, density = detect_Moelle2011(dat_orig, data.s_freq, time, self) - + elif self.method == 'Nir2011': sp_in_chan, values, density = detect_Nir2011(dat_orig, data.s_freq, time, self) - - + + elif self.method == 'Wamsley2012': sp_in_chan, values, density = detect_Wamsley2012(dat_orig, data.s_freq, time, self) - + elif self.method == 'Martin2013': sp_in_chan, values, density = detect_Martin2013(dat_orig, data.s_freq, time, self) - + elif self.method == 'Ray2015': sp_in_chan, values, density = detect_Ray2015(dat_orig, data.s_freq, time, self) - + elif self.method == 'Lacourse2018': sp_in_chan, values, density = detect_Lacourse2018(dat_orig, data.s_freq, - time, self) - + time, self) + elif self.method == 'FASST': sp_in_chan, values, density = detect_FASST(dat_orig, data.s_freq, time, self, submethod='abs') - + elif self.method == 'FASST2': sp_in_chan, values, density = detect_FASST(dat_orig, data.s_freq, time, self, submethod='rms') - + elif self.method == 'UCSD': sp_in_chan, values, density = detect_UCSD(dat_orig, data.s_freq, time, self) - + elif self.method == 'Concordia': sp_in_chan, values, density = detect_Concordia(dat_orig, data.s_freq, time, self) - + else: raise ValueError('Unknown method') @@ -355,7 +356,7 @@ def __call__(self, data, parent=None): sp.update({'chan': chan}) all_spindles.extend(sp_in_chan) - + if parent is not None: progress.setValue(i) if progress.wasCanceled(): @@ -369,8 +370,8 @@ def __call__(self, data, parent=None): spindle.events = merge_close(spindle.events, self.min_interval) if parent is not None: - progress.setValue(i + 1) - + progress.setValue(i + 1) + return spindle @@ -411,7 +412,7 @@ def detect_Ferrarelli2007(dat_orig, s_freq, time, opts): """ dat_det = transform_signal(dat_orig, s_freq, 'remez', opts.det_remez) dat_det = transform_signal(dat_det, s_freq, 'abs') - + idx_env = peaks_in_time(dat_det) envelope = dat_det[idx_env] idx_peak = idx_env[peaks_in_time(envelope)] # in raw data time @@ -420,22 +421,22 @@ def detect_Ferrarelli2007(dat_orig, s_freq, time, opts): troughs[idx_trough] = envelope[idx_trough] # all non-trough values are -1 det_value = define_threshold(dat_det, s_freq, 'mean', opts.det_thresh) - sel_value = define_threshold(dat_det[idx_peak], s_freq, 'histmax', + sel_value = define_threshold(dat_det[idx_peak], s_freq, 'histmax', opts.sel_thresh, nbins=120) - + events_env = detect_events(envelope, 'above_thresh', det_value) - + if events_env is not None: - events_env = _merge_close(envelope, events_env, time[idx_env], + events_env = _merge_close(envelope, events_env, time[idx_env], opts.tolerance) - events_env = select_events(troughs, events_env, - 'Ferrarelli2007', sel_value) + events_env = select_events(troughs, events_env, + 'Ferrarelli2007', sel_value) events = idx_env[events_env] # merging is necessary, because detected spindles may overlap if the - # signal envelope does not dip below sel_thresh between two peaks above + # signal envelope does not dip below sel_thresh between two peaks above # det_thresh events = _merge_close(dat_det, events, time, opts.min_interval) - events = within_duration(events, time, opts.duration) + events = within_duration(events, time, opts.duration) events = remove_straddlers(events, time, s_freq) power_peaks = peak_in_power(events, dat_orig, s_freq, opts.power_peaks) @@ -580,7 
+581,7 @@ def detect_Nir2011(dat_orig, s_freq, time, opts): if events is not None: events = _merge_close(dat_det, events, time, opts.tolerance) events = select_events(dat_det, events, 'above_thresh', sel_value) - + events = within_duration(events, time, opts.duration) events = _merge_close(dat_det, events, time, opts.min_interval) events = remove_straddlers(events, time, s_freq) @@ -650,7 +651,7 @@ def detect_Wamsley2012(dat_orig, s_freq, time, opts): power_peaks = peak_in_power(events, dat_orig, s_freq, opts.power_peaks) powers = power_in_band(events, dat_orig, s_freq, opts.frequency) - sp_in_chan = make_spindles(events, power_peaks, powers, + sp_in_chan = make_spindles(events, power_peaks, powers, absolute(dat_wav), dat_orig, time, s_freq) else: @@ -666,7 +667,7 @@ def detect_Wamsley2012(dat_orig, s_freq, time, opts): def detect_Martin2013(dat_orig, s_freq, time, opts): """Spindle detection based on Martin et al. 2013 - + Parameters ---------- dat_orig : ndarray (dtype='float') @@ -682,7 +683,7 @@ def detect_Martin2013(dat_orig, s_freq, time, opts): parameters for 'moving_rms' 'det_thresh' : float percentile for detection threshold - + Returns ------- list of dict @@ -700,11 +701,11 @@ def detect_Martin2013(dat_orig, s_freq, time, opts): dat_filt = transform_signal(dat_orig, s_freq, 'remez', opts.det_remez) dat_det = transform_signal(dat_filt, s_freq, 'moving_rms', opts.moving_rms) # downsampled - + det_value = percentile(dat_det, opts.det_thresh) - + events = detect_events(dat_det, 'above_thresh', det_value) - + if events is not None: events *= int(around(s_freq * opts.moving_rms['step'])) # upsample events = _merge_close(dat_filt, events, time, opts.tolerance) @@ -730,7 +731,7 @@ def detect_Martin2013(dat_orig, s_freq, time, opts): def detect_Ray2015(dat_orig, s_freq, time, opts): """Spindle detection based on Ray et al., 2015 - + Parameters ---------- dat_orig : ndarray (dtype='float') @@ -771,21 +772,21 @@ def detect_Ray2015(dat_orig, s_freq, time, opts): """ dat_det = transform_signal(dat_orig, s_freq, 'butter', opts.det_butter) dat_det = transform_signal(dat_det, s_freq, 'cdemod', opts.cdemod) - dat_det = transform_signal(dat_det, s_freq, 'low_butter', + dat_det = transform_signal(dat_det, s_freq, 'low_butter', opts.det_low_butter) dat_det = transform_signal(dat_det, s_freq, 'smooth', opts.smooth) dat_det = transform_signal(dat_det, s_freq, 'abs_complex') dat_det = transform_signal(dat_det, s_freq, 'moving_zscore', opts.zscore) - + det_value = opts.det_thresh sel_value = opts.sel_thresh - + events = detect_events(dat_det, 'above_thresh', det_value) - + if events is not None: events = _merge_close(dat_det, events, time, opts.tolerance) events = select_events(dat_det, events, 'above_thresh', sel_value) - + events = within_duration(events, time, opts.duration) events = _merge_close(dat_det, events, time, opts.min_interval) events = remove_straddlers(events, time, s_freq) @@ -808,7 +809,7 @@ def detect_Ray2015(dat_orig, s_freq, time, opts): def detect_Lacourse2018(dat_orig, s_freq, time, opts): """Spindle detection based on Lacourse et al., 2018 - + Parameters ---------- dat_orig : ndarray (dtype='float') @@ -826,13 +827,13 @@ def detect_Lacourse2018(dat_orig, s_freq, time, opts): 'step' for downsampling and 'dur' for moving window duration 'moving_ms' : dict parameters for 'moving_rms' - 'moving_power_ratio' : + 'moving_power_ratio' : parameters for 'moving_power_ratio' 'zscore' : parameters for 'moving_zscore' - 'moving_covar' : + 'moving_covar' : parameters for 'moving_covar' - 
'moving_sd' : + 'moving_sd' : parameters for 'moving_sd' 'smooth' : dict parameters for 'smooth' @@ -844,7 +845,7 @@ def detect_Lacourse2018(dat_orig, s_freq, time, opts): covariance threshold 'corr_thresh' : float coorelation threshold - + Returns ------- list of dict @@ -866,44 +867,44 @@ def detect_Lacourse2018(dat_orig, s_freq, time, opts): opts.tolerance *= step else: ds_freq = s_freq - + # Absolute sigma power - dat_sigma = transform_signal(dat_orig, s_freq, 'double_sosbutter', + dat_sigma = transform_signal(dat_orig, s_freq, 'double_sosbutter', opts.det_butter) dat_det = transform_signal(dat_sigma, s_freq, 'moving_ms', opts.moving_ms) dat_det[dat_det <= 0] = 0.000000001 # arbitrarily small value abs_sig_pow = log10(dat_det) # Option to adapt the absolute threshold, for low-amplitude recordings if opts.abs_pow_thresh < 0: - opts.abs_pow_thresh = (mean(abs_sig_pow) - + opts.abs_pow_thresh = (mean(abs_sig_pow) - opts.abs_pow_thresh * std(abs_sig_pow)) abs_sig_pow = transform_signal(abs_sig_pow, ds_freq, 'smooth', opts.smooth) - + # Relative sigma power - dat_det = transform_signal(dat_orig, s_freq, 'moving_power_ratio', + dat_det = transform_signal(dat_orig, s_freq, 'moving_power_ratio', opts.moving_power_ratio) dat_det[dat_det <= 0] = 0.000000001 dat_det = log10(dat_det) - rel_sig_pow = transform_signal(dat_det, ds_freq, 'moving_zscore', + rel_sig_pow = transform_signal(dat_det, ds_freq, 'moving_zscore', opts.zscore) rel_sig_pow = transform_signal(rel_sig_pow, ds_freq, 'smooth', opts.smooth) - + # Sigma covariance - dat_broad = transform_signal(dat_orig, s_freq, 'double_sosbutter', + dat_broad = transform_signal(dat_orig, s_freq, 'double_sosbutter', opts.det_butter2) - dat_covar = transform_signal(dat_sigma, s_freq, 'moving_covar', + dat_covar = transform_signal(dat_sigma, s_freq, 'moving_covar', opts.moving_covar, dat2=dat_broad) dat_det = dat_covar.copy() dat_det[dat_det < 0] = 0 # negative covariances are discarded dat_det = log10(dat_det + 1) # add 1 to avoid -inf - sigma_covar = transform_signal(dat_det, ds_freq, 'moving_zscore', + sigma_covar = transform_signal(dat_det, ds_freq, 'moving_zscore', opts.zscore) sigma_covar = transform_signal(sigma_covar, ds_freq, 'smooth', opts.smooth) - + # Sigma correlation - dat_sd_broad = transform_signal(dat_broad, s_freq, 'moving_sd', + dat_sd_broad = transform_signal(dat_broad, s_freq, 'moving_sd', opts.moving_sd) - dat_sd_sigma = transform_signal(dat_sigma, s_freq, 'moving_sd', + dat_sd_sigma = transform_signal(dat_sigma, s_freq, 'moving_sd', opts.moving_sd) dat_sd_broad[dat_sd_broad == 0] = 0.000000001 dat_sd_sigma[dat_sd_sigma == 0] = 0.000000001 @@ -915,17 +916,17 @@ def detect_Lacourse2018(dat_orig, s_freq, time, opts): sigma_covar >= opts.covar_thresh) concensus = logical_and.reduce((rel_sig_pow >= opts.rel_pow_thresh, sigma_corr >= opts.corr_thresh, - abs_and_cov)) + abs_and_cov)) events = detect_events(concensus, 'custom') # at s_freq * 0.1 - + if events is not None: events = _merge_close(dat_sigma, events, time, opts.tolerance) events = _select_period(events, abs_and_cov) + 1 - + if step: events = events * (s_freq * step) # upsample events = asarray(around(events), dtype=int) - + events = within_duration(events, time, opts.duration) events = _merge_close(dat_sigma, events, time, opts.min_interval) events = remove_straddlers(events, time, s_freq) @@ -939,8 +940,8 @@ def detect_Lacourse2018(dat_orig, s_freq, time, opts): lg.info('No spindle found') sp_in_chan = [] - values = {'abs_pow_thresh': opts.abs_pow_thresh, - 'rel_pow_thresh': 
opts.rel_pow_thresh, + values = {'abs_pow_thresh': opts.abs_pow_thresh, + 'rel_pow_thresh': opts.rel_pow_thresh, 'covar_thresh': opts.covar_thresh, 'corr_thresh': opts.corr_thresh} @@ -950,9 +951,9 @@ def detect_Lacourse2018(dat_orig, s_freq, time, opts): def detect_FASST(dat_orig, s_freq, time, opts, submethod='rms'): - """Spindle detection based on FASST method, itself based on Moelle et al. + """Spindle detection based on FASST method, itself based on Moelle et al. (2002). - + Parameters ---------- dat_orig : ndarray (dtype='float') @@ -988,17 +989,17 @@ def detect_FASST(dat_orig, s_freq, time, opts, submethod='rms'): Leclercq, Y. et al. Compu. Intel. and Neurosci. (2011). """ dat_det = transform_signal(dat_orig, s_freq, 'butter', opts.det_butter) - + det_value = percentile(dat_det, opts.det_thresh) - + if submethod == 'abs': dat_det = transform_signal(dat_det, s_freq, 'abs') elif submethod == 'rms': - dat_det = transform_signal(dat_det, s_freq, 'moving_rms', + dat_det = transform_signal(dat_det, s_freq, 'moving_rms', opts.moving_rms) - + dat_det = transform_signal(dat_det, s_freq, 'smooth', opts.smooth) - + events = detect_events(dat_det, 'above_thresh', det_value) if events is not None: @@ -1173,11 +1174,11 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): s_freq : float sampling frequency method : str - one of 'abs', 'abs_complex', 'butter', 'cdemod', 'cheby2', - 'double_butter', 'double_sosbutter', 'gaussian', 'hilbert', + one of 'abs', 'abs_complex', 'butter', 'cdemod', 'cheby2', + 'double_butter', 'double_sosbutter', 'gaussian', 'hilbert', 'high_butter', 'low_butter', 'morlet', 'moving_covar', 'moving_ms', - 'moving_periodogram', 'moving_power_ratio', 'moving_rms', 'moving_sd', - 'moving_zscore', 'remez', 'smooth', 'sosbutter', 'spectrogram', + 'moving_periodogram', 'moving_power_ratio', 'moving_rms', 'moving_sd', + 'moving_zscore', 'remez', 'smooth', 'sosbutter', 'spectrogram', 'wavelet_real'. method_opt : dict depends on methods @@ -1191,15 +1192,15 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): Notes ----- - double_butter implements an effective bandpass by applying a highpass, - followed by a lowpass. This method reduces filter instability, due to + double_butter implements an effective bandpass by applying a highpass, + followed by a lowpass. This method reduces filter instability, due to underlying numerical instability arising from nyquist / freq at low freq. - + Wavelets pass only absolute values already, it does not make sense to store the complex values. Methods - ------- + ------- butter has parameters: freq : tuple of float low and high values for bandpass @@ -1300,8 +1301,8 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): dur : float duration of the z-score sliding window (sec) pcl_range : tuple of float, or None - if not None, only data within this percentile range will be used - for determining the standard deviation for calculation of the + if not None, only data within this percentile range will be used + for determining the standard deviation for calculation of the z-score step: float step between consecutive windows (sec) @@ -1316,7 +1317,7 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): smooth has parameters: dur : float - duration of the convolution window (sec). For 'triangle', base of + duration of the convolution window (sec). For 'triangle', base of isosceles triangle. 
wavelet_real has parameters: @@ -1338,7 +1339,7 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): if 'butter' == method: freq = method_opt['freq'] N = method_opt['order'] - + nyquist = s_freq / 2 Wn = asarray(freq) / nyquist b, a = butter(N, Wn, btype='bandpass') @@ -1346,14 +1347,14 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): if 'cdemod' == method: carr_freq = method_opt['freq'] - + carr_sig = exp(-1j * 2 * pi * carr_freq * arange(0, len(dat)) / s_freq) - dat = dat * carr_sig + dat = dat * carr_sig if 'cheby2' == method: freq = method_opt['freq'] N = method_opt['order'] - + Rs = 40 nyquist = s_freq / 2 Wn = asarray(freq) / nyquist @@ -1364,12 +1365,12 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): freq = method_opt['freq'] N = method_opt['order'] nyquist = s_freq / 2 - + # Highpass Wn = freq[0] / nyquist b, a = butter(N, Wn, btype='highpass') dat = filtfilt(b, a, dat) - + # Lowpass Wn = freq[1] / nyquist b, a = butter(N, Wn, btype='lowpass') @@ -1379,12 +1380,12 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): freq = method_opt['freq'] N = method_opt['order'] nyquist = s_freq / 2 - + # Highpass Wn = freq[0] / nyquist sos = butter(N, Wn, btype='highpass', output='sos') dat = sosfiltfilt(sos, dat) - + # Lowpass Wn = freq[1] / nyquist sos = butter(N, Wn, btype='lowpass', output='sos') @@ -1392,33 +1393,33 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): if 'gaussian' == method: sigma = method_opt['dur'] - - dat = gaussian_filter(dat, sigma) + + dat = gaussian_filter(dat, sigma) if 'hilbert' == method: N = len(dat) dat = hilbert(dat, N=next_fast_len(N)) # much faster this way dat = dat[:N] # truncate away zero-padding - - + + if 'high_butter' == method: freq = method_opt['freq'] N = method_opt['order'] - + nyquist = s_freq / 2 Wn = freq / nyquist b, a = butter(N, Wn, btype='highpass') dat = filtfilt(b, a, dat) - + if 'low_butter' == method: freq = method_opt['freq'] N = method_opt['order'] - + nyquist = s_freq / 2 Wn = freq / nyquist b, a = butter(N, Wn, btype='lowpass') dat = filtfilt(b, a, dat) - + if 'morlet' == method: f0 = method_opt['f0'] sd = method_opt['sd'] @@ -1428,24 +1429,24 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): wm = _wmorlet(f0, sd, s_freq, dur) dat = fftconvolve(dat, wm, mode='same') if 'absolute' == output: - dat = absolute(dat) + dat = absolute(dat) if 'moving' in method: dur = method_opt['dur'] halfdur = dur / 2 total_dur = len(dat) / s_freq last = len(dat) - 1 - + if method_opt['step']: step = method_opt['step'] len_out = int(len(dat) / (step * s_freq)) else: step = 1 / s_freq len_out = len(dat) - + out = zeros((len_out)) - - if 'moving_covar' == method: + + if 'moving_covar' == method: for i, j in enumerate(arange(0, total_dur, step)[:-1]): beg = max(0, int((j - halfdur) * s_freq)) end = min(last, int((j + halfdur) * s_freq)) @@ -1453,15 +1454,15 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): win2 = dat2[beg:end] out[i] = mean((win1 - mean(win1)) * (win2 - mean(win2))) dat = out - - if 'moving_periodogram' == method: + + if 'moving_periodogram' == method: nfft = next_fast_len(dur * s_freq) sf = rfftfreq(nfft, 1 / s_freq) freq = method_opt['freq'] f0 = asarray([abs(x - freq[0]) for x in sf]).argmin() f1 = asarray([abs(x - freq[1]) for x in sf]).argmin() out = zeros((len_out, f1 - f0)) - + for i, j in enumerate(arange(0, total_dur, step)[:-1]): beg = max(0, int((j - halfdur) * s_freq)) 
end = min(last, int((j + halfdur) * s_freq)) @@ -1469,34 +1470,34 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): sf, psd = periodogram(windat, s_freq, 'hann', nfft=nfft, detrend='constant') out[i, :] = psd[f0:f1] - + dat = out - + if 'moving_power_ratio' == method: freq1 = method_opt['freq_narrow'] freq2 = method_opt['freq_broad'] fft_dur = method_opt['fft_dur'] nfft = int(s_freq * fft_dur) - + for i, j in enumerate(arange(0, total_dur, step)[:-1]): beg = max(0, int((j - halfdur) * s_freq)) end = min(last, int((j + halfdur) * s_freq)) windat = dat[beg:end] - + sf, psd = periodogram(windat, s_freq, 'hann', nfft=nfft, detrend='constant') f0 = asarray([abs(x - freq1[0]) for x in sf]).argmin() f1 = asarray([abs(x - freq1[1]) for x in sf]).argmin() pow1 = sum(psd[f0:f1]) - + f0 = asarray([abs(x - freq2[0]) for x in sf]).argmin() f1 = asarray([abs(x - freq2[1]) for x in sf]).argmin() pow2 = sum(psd[f0:f1]) - + out[i] = pow1 / pow2 - + dat = out - + if 'moving_sd' == method: for i, j in enumerate(arange(0, total_dur, step)[:-1]): beg = max(0, int((j - halfdur) * s_freq)) @@ -1504,67 +1505,67 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): win = dat[beg:end] out[i] = std(win) dat = out - - if 'moving_zscore' == method: + + if 'moving_zscore' == method: pcl_range = method_opt['pcl_range'] if pcl_range is not None: lo = percentile(dat, pcl_range[0]) hi = percentile(dat, pcl_range[1]) - + for i, j in enumerate(arange(0, total_dur, step)[:-1]): beg = max(0, int((j - halfdur) * s_freq)) end = min(last, int((j + halfdur) * s_freq)) windat = stddat = dat[beg:end] - + if pcl_range is not None: stddat = windat[logical_and(windat > lo, windat < hi)] out[i] = (dat[i] - mean(windat)) / std(stddat) dat = out - + if method in ['moving_rms', 'moving_ms']: for i, j in enumerate(arange(0, total_dur, step)[:-1]): beg = max(0, int((j - halfdur) * s_freq)) end = min(last, int((j + halfdur) * s_freq)) - out[i] = mean(square(dat[beg:end])) + out[i] = mean(square(dat[beg:end])) if method == 'moving_rms': out = sqrt(out) dat = out - + if 'remez' == method: Fp1, Fp2 = method_opt['freq'] rolloff = method_opt['rolloff'] dur = method_opt['dur'] - + N = int(s_freq * dur) nyquist = s_freq / 2 Fs1, Fs2 = Fp1 - rolloff, Fp2 + rolloff dens = 20 - bpass = remez(N, [0, Fs1, Fp1, Fp2, Fs2, nyquist], [0, 1, 0], + bpass = remez(N, [0, Fs1, Fp1, Fp2, Fs2, nyquist], [0, 1, 0], grid_density=dens, fs=s_freq) dat = filtfilt(bpass, 1, dat) - if 'smooth' == method: + if 'smooth' == method: dur = method_opt['dur'] win = method_opt['win'] - + if 'flat' in win: flat = ones(int(dur * s_freq)) H = flat / sum(flat) - + if 'flat_left' == win: H = concatenate((H, zeros(len(H)))) elif 'flat_right' == win: H = concatenate((zeros(len(H) - 1), H)) - + elif 'triangle' == win: T = int(dur * s_freq / 2) a = arange(T, 0, -1) - + H = hstack([a[-1:0:-1], a]) H = H / sum(H) - + dat = fftconvolve(dat, H, mode='same') - + if 'sosbutter' == method: freq = method_opt['freq'] N = method_opt['order'] @@ -1573,15 +1574,15 @@ def transform_signal(dat, s_freq, method, method_opt=None, dat2=None): Wn = asarray(freq) / nyquist sos = butter(N, Wn, btype='bandpass', output='sos') dat = sosfiltfilt(sos, dat) - + if 'spectrogram' == method: nperseg = method_opt['dur'] * s_freq noverlap = method_opt['step'] * s_freq detrend = method_opt['detrend'] - - dat = spectrogram(dat, fs=s_freq, nperseg=nperseg, noverlap=noverlap, + + dat = spectrogram(dat, fs=s_freq, nperseg=nperseg, noverlap=noverlap, detrend=detrend) - + if 
'wavelet_real' == method: freqs = method_opt['freqs'] dur = method_opt['dur'] @@ -1641,33 +1642,33 @@ def define_threshold(dat, s_freq, method, value, nbins=120): def peaks_in_time(dat, troughs=False): """Find indices of peaks or troughs in data. - + Parameters ---------- dat : ndarray (dtype='float') vector with the data troughs : bool if True, will return indices of troughs instead of peaks - + Returns ------- nadarray of int indices of peaks (or troughs) in dat - + Note ---- - This function does not deal well with flat signal; when the signal is not + This function does not deal well with flat signal; when the signal is not increasing, it is assumed to be descreasing. As a result, this function - finds troughs where the signal begins to increase after either decreasing + finds troughs where the signal begins to increase after either decreasing or remaining constant """ diff_dat = diff(dat) increasing = zeros(len(diff_dat)) increasing[diff_dat > 0] = 1 # mask for all points where dat is increasing flipping = diff(increasing) # peaks are -1, troughs are 1, the rest is zero - + target = -1 if not troughs else 1 - + return where(flipping == target)[0] + 1 @@ -1708,14 +1709,14 @@ def detect_events(dat, method, value=None): below_det = dat < value[1] between_det = logical_and(above_det, below_det) detected = _detect_start_end(between_det) - + if method == 'custom': detected = _detect_start_end(dat) if detected is None: return None - - if method in ['above_thresh', 'custom']: + + if method in ['above_thresh', 'custom']: # add the location of the peak in the middle detected = insert(detected, 1, 0, axis=1) for i in detected: @@ -1768,7 +1769,7 @@ def select_events(dat, detected, method, value): positive = dat >= 0 below_sel_positive = invert(logical_and(below_sel, positive)) detected = _select_period(detected, below_sel_positive) - + return detected @@ -1838,10 +1839,10 @@ def within_duration(events, time, limits): N x M matrix with start sample first and end samples last on M """ min_dur = max_dur = ones(events.shape[0], dtype=bool) - + if limits[0] is not None: min_dur = time[events[:, -1] - 1] - time[events[:, 0]] >= limits[0] - + if limits[1] is not None: max_dur = time[events[:, -1] - 1] - time[events[:, 0]] <= limits[1] @@ -1849,7 +1850,7 @@ def within_duration(events, time, limits): def remove_straddlers(events, time, s_freq, tolerance=0.1): - """Reject an event if it straddles a stitch, by comparing its + """Reject an event if it straddles a stitch, by comparing its duration to its timespan. Parameters @@ -1870,9 +1871,9 @@ def remove_straddlers(events, time, s_freq, tolerance=0.1): """ dur = (events[:, -1] - 1 - events[:, 0]) / s_freq continuous = time[events[:, -1] - 1] - time[events[:, 0]] - dur < tolerance - + return events[continuous, :] - + def power_ratio(events, dat, s_freq, limits, ratio_thresh): @@ -2132,7 +2133,7 @@ def _detect_start_end(true_values): N x 2 matrix with starting and ending times. 
""" neg = zeros((1), dtype='bool') - int_values = asarray(concatenate((neg, true_values[:-1], neg)), + int_values = asarray(concatenate((neg, true_values[:-1], neg)), dtype='int') # must discard last value to avoid axis out of bounds cross_threshold = diff(int_values) @@ -2209,7 +2210,7 @@ def _merge_close(dat, events, time, min_interval): """ if not events.any(): return events - + no_merge = time[events[1:, 0] - 1] - time[events[:-1, 2]] >= min_interval if no_merge.any(): @@ -2240,7 +2241,7 @@ def _wmorlet(f0, sd, sampling_rate, ns=5): sd : standard deviation of frequency sampling_rate : samplingrate ns : window length in number of standard deviations - + Returns ------- ndarray diff --git a/wonambi/ioeeg/abf.py b/wonambi/ioeeg/abf.py index d604c95..460963a 100644 --- a/wonambi/ioeeg/abf.py +++ b/wonambi/ioeeg/abf.py @@ -2,7 +2,7 @@ Adapted from axonrawio.py in python-neo. Strongly simplified. """ from datetime import datetime, timedelta -from numpy import c_, empty, float64, NaN, memmap, dtype, newaxis, array +from numpy import c_, empty, float64, nan, memmap, dtype, newaxis, array from os import SEEK_SET from struct import unpack, calcsize @@ -123,13 +123,13 @@ def return_dat(self, chan, begsam, endsam): if begsam < 0: pad = empty((dat.shape[0], 0 - begsam)) - pad.fill(NaN) + pad.fill(nan) dat = c_[pad, dat] if endsam >= self.n_samples: pad = empty((dat.shape[0], endsam - self.n_samples)) - pad.fill(NaN) + pad.fill(nan) dat = c_[dat, pad] return dat diff --git a/wonambi/ioeeg/bci2000.py b/wonambi/ioeeg/bci2000.py index f79b40c..06a77d4 100644 --- a/wonambi/ioeeg/bci2000.py +++ b/wonambi/ioeeg/bci2000.py @@ -12,7 +12,7 @@ empty, hstack, ndarray, - NaN, + nan, vstack, where, dtype, @@ -142,13 +142,13 @@ def return_dat(self, chan, begsam, endsam): if begsam < 0: pad = empty((dat.shape[0], 0 - begsam)) - pad.fill(NaN) + pad.fill(nan) dat = c_[pad, dat] if endsam >= self.n_samples: pad = empty((dat.shape[0], endsam - self.n_samples)) - pad.fill(NaN) + pad.fill(nan) dat = c_[dat, pad] return dat[chan, :] * self.gain[chan][:, None] # apply gain diff --git a/wonambi/ioeeg/blackrock.py b/wonambi/ioeeg/blackrock.py index f222fb6..1c61f81 100644 --- a/wonambi/ioeeg/blackrock.py +++ b/wonambi/ioeeg/blackrock.py @@ -5,7 +5,7 @@ from struct import unpack from pathlib import Path -from numpy import (asarray, empty, expand_dims, fromfile, iinfo, NaN, ones, +from numpy import (asarray, empty, expand_dims, fromfile, iinfo, nan, ones, reshape, where) lg = getLogger(__name__) @@ -178,12 +178,12 @@ def _read_nsx(filename, BOData, sess_begin, sess_end, factor, begsam, endsam): ----- Tested on NEURALCD - It returns NaN if you select an interval outside of the data + It returns nan if you select an interval outside of the data """ n_chan = factor.shape[0] dat = empty((n_chan, endsam - begsam)) - dat.fill(NaN) + dat.fill(nan) sess_to_read = where((begsam < sess_end) & (endsam > sess_begin))[0] diff --git a/wonambi/ioeeg/brainvision.py b/wonambi/ioeeg/brainvision.py index 6963a16..a10ae65 100644 --- a/wonambi/ioeeg/brainvision.py +++ b/wonambi/ioeeg/brainvision.py @@ -6,7 +6,7 @@ array, c_, empty, - NaN, + nan, float64, ) import wonambi @@ -204,13 +204,13 @@ def _read_memmap(filename, dat_shape, begsam, endsam, datatype='double', if begsam < 0: pad = empty((dat.shape[0], 0 - begsam)) - pad.fill(NaN) + pad.fill(nan) dat = c_[pad, dat] if endsam >= n_samples: pad = empty((dat.shape[0], endsam - n_samples)) - pad.fill(NaN) + pad.fill(nan) dat = c_[dat, pad] return dat diff --git a/wonambi/ioeeg/edf.py 
b/wonambi/ioeeg/edf.py index 0afd634..a1aa499 100644 --- a/wonambi/ioeeg/edf.py +++ b/wonambi/ioeeg/edf.py @@ -15,7 +15,7 @@ iinfo, ones, max, - NaN, + nan, newaxis, repeat, ) @@ -199,7 +199,7 @@ def return_dat(self, chan, begsam, endsam): assert begsam < endsam dat = empty((len(chan), endsam - begsam)) - dat.fill(NaN) + dat.fill(nan) with self.filename.open('rb') as f: @@ -239,14 +239,14 @@ def _read_record(self, f, blk, chans): f.seek(offset) x = fromfile(f, count=n_smp_per_chan, dtype=EDF_FORMAT) - ratio = self.max_smp / n_smp_per_chan + ratio = self.max_smp / n_smp_per_chan if ratio.is_integer(): - dat_in_rec[i_ch_in_dat, :] = repeat(x, int(ratio)) + dat_in_rec[i_ch_in_dat, :] = repeat(x, int(ratio)) else: fract = round(Fraction(ratio), 2) up, down = fract.numerator, fract.denominator dat_in_rec[i_ch_in_dat, :] = resample_poly(x, up, down) - i_ch_in_dat += 1 + i_ch_in_dat += 1 return dat_in_rec @@ -283,7 +283,7 @@ def return_markers(self): return markers -def write_edf(data, filename, subj_id='X X X X', physical_max=1000, +def write_edf(data, filename, subj_id='X X X X', physical_max=1000, physical_min=None): """Export data to FieldTrip. @@ -321,7 +321,7 @@ def write_edf(data, filename, subj_id='X X X X', physical_max=1000, if physical_min is None: physical_min = -1 * physical_max - + dat = data.data[0] / physical_max * DIGITAL_MAX dat = dat.astype(EDF_FORMAT) dat[dat > DIGITAL_MAX] = DIGITAL_MAX diff --git a/wonambi/ioeeg/eeglab.py b/wonambi/ioeeg/eeglab.py index a8a1e12..3181fbf 100644 --- a/wonambi/ioeeg/eeglab.py +++ b/wonambi/ioeeg/eeglab.py @@ -1,5 +1,5 @@ from datetime import datetime -from numpy import c_, NaN, empty, memmap, float64 +from numpy import c_, nan, empty, memmap, float64 from pathlib import Path from scipy.io import loadmat @@ -86,13 +86,13 @@ def return_hdr(self): EEG_starttime = [int(x[0]) for x in EEG['etc']['T0']] start_time = datetime(*EEG_starttime) elif 'rec_startdate' in list(EEG['etc']): - EEG_starttime = EEG['etc']['rec_startdate'][()] - start_time_char = EEG_starttime.tobytes().decode('utf-16-le') - start_time = datetime.fromisoformat(start_time_char) + EEG_starttime = EEG['etc']['rec_startdate'][()] + start_time_char = EEG_starttime.tobytes().decode('utf-16-le') + start_time = datetime.fromisoformat(start_time_char) #EEG_starttime = EEG['etc']['rec_startdate'] - + #start_time_char = ''.join([chr(x) for x in EEG_starttime]) #start_time = datetime.fromisoformat(start_time_char) else: @@ -128,13 +128,13 @@ def return_dat(self, chan, begsam, endsam): if begsam < 0: pad = empty((dat.shape[0], 0 - begsam)) - pad.fill(NaN) + pad.fill(nan) dat = c_[pad, dat] if endsam >= n_samples: pad = empty((dat.shape[0], endsam - n_samples)) - pad.fill(NaN) + pad.fill(nan) dat = c_[dat, pad] return dat[chan, :] diff --git a/wonambi/ioeeg/egimff.py b/wonambi/ioeeg/egimff.py index 6d3a29f..70b2e0b 100644 --- a/wonambi/ioeeg/egimff.py +++ b/wonambi/ioeeg/egimff.py @@ -5,7 +5,7 @@ from struct import unpack from xml.etree.ElementTree import parse -from numpy import (append, asarray, cumsum, diff, empty, NaN, sum, +from numpy import (append, asarray, cumsum, diff, empty, nan, sum, where, ndarray, unique) from .utils import DEFAULT_DATETIME @@ -130,7 +130,7 @@ def return_dat(self, chan, begsam, endsam): assert begsam < endsam data = empty((len(chan), endsam - begsam)) - data.fill(NaN) + data.fill(nan) chan = asarray(chan) diff --git a/wonambi/ioeeg/fieldtrip.py b/wonambi/ioeeg/fieldtrip.py index 19bf9e3..ae8c383 100644 --- a/wonambi/ioeeg/fieldtrip.py +++ 
b/wonambi/ioeeg/fieldtrip.py @@ -1,6 +1,6 @@ from datetime import datetime from logging import getLogger -from numpy import around, c_, empty, float64, NaN +from numpy import around, c_, empty, float64, nan from scipy.io import loadmat, savemat from .utils import read_hdf5_chan_name @@ -9,7 +9,7 @@ try: from h5py import File except ImportError as err: - File = MissingDependency(err) + File = MissingDependency(err) lg = getLogger(__name__) VAR = 'data' @@ -80,8 +80,8 @@ def return_hdr(self): s_freq = int(f[VAR]['fsample'].value.squeeze()) except: s_freq = int(f[VAR]['fsample'][0].squeeze()) - - + + chan_name = read_hdf5_chan_name(f, f[VAR]['label']) n_samples = int(around(f[f[VAR]['trial'][0].item()].shape[0])) @@ -106,14 +106,14 @@ def return_dat(self, chan, begsam, endsam): """ TRL = 0 - + try: ft_data = loadmat(self.filename, struct_as_record=True, squeeze_me=True) - + n_samples = ft_data['shape'][1] ft_data = ft_data[VAR] - + data = ft_data['trial'].item(TRL) @@ -126,17 +126,17 @@ def return_dat(self, chan, begsam, endsam): except: data = f[f[VAR]['trial'][TRL].item()][:].T n_samples = data.shape[1] - + dat = data[:, begsam:endsam] - + if begsam < 0: pad = empty((dat.shape[0], 0 - begsam)) - pad.fill(NaN) + pad.fill(nan) dat = c_[pad, dat] if endsam >= n_samples: pad = empty((dat.shape[0], endsam - n_samples)) - pad.fill(NaN) + pad.fill(nan) dat = c_[dat, pad] return dat[chan, :] diff --git a/wonambi/ioeeg/ktlx.py b/wonambi/ioeeg/ktlx.py index 2c68106..abbbb04 100644 --- a/wonambi/ioeeg/ktlx.py +++ b/wonambi/ioeeg/ktlx.py @@ -31,7 +31,7 @@ expand_dims, fromfile, int32, - NaN, + nan, ones, unpackbits, where, @@ -541,7 +541,7 @@ def _read_erd(erd_file, begsam, endsam): When we save the data as memory-mapped, we only save the real channels. However, the data in the output have both shorted and non-shorted channels. - Shorted channels have NaN's only. + Shorted channels have nan's only. About the actual implementation, we always follow the python convention that the first sample is included and the last sample is not. @@ -562,7 +562,7 @@ def _read_erd(erd_file, begsam, endsam): n_smp = endsam - begsam data = empty((n_allchan, n_smp)) - data.fill(NaN) + data.fill(nan) # it includes the sample in both cases etc = _read_etc(erd_file.with_suffix('.etc')) @@ -594,11 +594,11 @@ def _read_erd(erd_file, begsam, endsam): data[:, d1:d2] = dat[:, begpos_rec:endpos_rec] - # fill up the output data, put NaN for shorted channels + # fill up the output data, put nan for shorted channels if n_shorted > 0: full_channels = where(asarray([x == 0 for x in shorted]))[0] output = empty((n_allchan, n_smp)) - output.fill(NaN) + output.fill(nan) output[full_channels, :] = data else: output = data @@ -907,7 +907,7 @@ def return_dat(self, chan, begsam, endsam): Returns ------- ndarray - 2-d matrix with data (might contain NaN) + 2-d matrix with data (might contain nan) Notes ----- @@ -920,7 +920,7 @@ def return_dat(self, chan, begsam, endsam): hundreds samples are nan. 
""" dat = empty((len(chan), endsam - begsam)) - dat.fill(NaN) + dat.fill(nan) stc, all_stamp = _read_stc(self._filename.with_suffix('.stc')) diff --git a/wonambi/ioeeg/micromed.py b/wonambi/ioeeg/micromed.py index 63f9426..797e486 100644 --- a/wonambi/ioeeg/micromed.py +++ b/wonambi/ioeeg/micromed.py @@ -3,7 +3,7 @@ from logging import getLogger from struct import unpack -from numpy import array, dtype, empty, fromfile, iinfo, memmap, NaN, pad, where +from numpy import array, dtype, empty, fromfile, iinfo, memmap, nan, pad, where from numpy.lib.recfunctions import append_fields N_ZONES = 15 @@ -94,7 +94,7 @@ def return_dat(self, chan, begsam, endsam): if (begsam >= self._n_smp) or (endsam < 0): dat = empty((len(chan), endsam - begsam)) - dat.fill(NaN) + dat.fill(nan) return dat if begsam < 0: @@ -116,7 +116,7 @@ def return_dat(self, chan, begsam, endsam): shape=dshape, offset=offset).astype('float') dat = pad(dat[chan, :], ((0, 0), (begpad, endpad)), mode='constant', - constant_values=NaN) + constant_values=nan) return (dat - self._offset[chan, None]) * self._factors[chan, None] diff --git a/wonambi/ioeeg/moberg.py b/wonambi/ioeeg/moberg.py index 008b53b..1d7ebdd 100644 --- a/wonambi/ioeeg/moberg.py +++ b/wonambi/ioeeg/moberg.py @@ -2,7 +2,7 @@ from xml.etree.ElementTree import parse from datetime import datetime, timedelta, timezone -from numpy import NaN, pad, reshape, zeros +from numpy import nan, pad, reshape, zeros TIMEZONE = timezone.utc # 24bit precision @@ -136,7 +136,7 @@ def return_dat(self, chan, begsam, endsam): dat = reshape(dat, (self.n_chan, -1), 'F') dat = self.convertion(dat[chan, :]) dat = pad(dat, ((0, 0), (begpad, endpad)), - mode='constant', constant_values=NaN) + mode='constant', constant_values=nan) return dat diff --git a/wonambi/ioeeg/openephys.py b/wonambi/ioeeg/openephys.py index 45c7d9c..a9f6944 100644 --- a/wonambi/ioeeg/openephys.py +++ b/wonambi/ioeeg/openephys.py @@ -13,7 +13,7 @@ from re import search, match from xml.etree import ElementTree -from numpy import array, empty, NaN, fromfile, unique, where, arange, hstack, vstack, concatenate +from numpy import array, empty, nan, fromfile, unique, where, arange, hstack, vstack, concatenate lg = getLogger(__name__) @@ -156,7 +156,7 @@ def return_dat(self, chan, begsam, endsam): """ data_length = endsam - begsam dat = empty((len(chan), data_length)) - dat.fill(NaN) + dat.fill(nan) all_blocks = _select_blocks(self.blocks_dat, begsam, endsam) for i_chan, sel_chan in enumerate(chan): diff --git a/wonambi/ioeeg/wonambi.py b/wonambi/ioeeg/wonambi.py index fe61ed1..1148e46 100644 --- a/wonambi/ioeeg/wonambi.py +++ b/wonambi/ioeeg/wonambi.py @@ -3,7 +3,7 @@ from datetime import datetime, timedelta from json import dump, load from pathlib import Path -from numpy import c_, empty, float64, NaN, memmap +from numpy import c_, empty, float64, nan, memmap class Wonambi: @@ -72,7 +72,7 @@ def return_dat(self, chan, begsam, endsam): Notes ----- - When asking for an interval outside the data boundaries, it returns NaN + When asking for an interval outside the data boundaries, it returns nan for those values. It then converts the memmap to a normal numpy array, I think, and so it reads the data into memory. However, I'm not 100% sure that this is what happens. 
@@ -90,13 +90,13 @@ def return_dat(self, chan, begsam, endsam): if begsam < 0: pad = empty((dat.shape[0], 0 - begsam)) - pad.fill(NaN) + pad.fill(nan) dat = c_[pad, dat] if endsam >= n_smp: pad = empty((dat.shape[0], endsam - n_smp)) - pad.fill(NaN) + pad.fill(nan) dat = c_[dat, pad] return dat
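
Note (not part of the patch): the two renames this release standardizes on - importing tukey from scipy.signal.windows instead of scipy.signal, and spelling the NumPy constant nan instead of NaN - track upstream deprecations (newer SciPy releases no longer expose the window functions at the scipy.signal level, and NumPy 2.0 removed the NaN alias). Below is a minimal, hypothetical sketch of the compatible import pattern, written for illustration only and not taken verbatim from the wonambi code:

# Compatibility sketch (illustrative only, not part of this diff).
# Assumes a SciPy version that ships the scipy.signal.windows namespace;
# the fallback branch only matters on very old SciPy, where tukey still
# lived directly under scipy.signal.
from numpy import empty, nan   # numpy.NaN alias was removed in NumPy 2.0

try:
    from scipy.signal.windows import tukey   # current location
except ImportError:                           # very old SciPy only
    from scipy.signal import tukey

pad = empty((4, 10))
pad.fill(nan)                  # same padding idiom used in the wonambi/ioeeg readers
taper = tukey(128, alpha=0.5)  # tapered-cosine window, unchanged behaviour

The lower-case nan spelling is what the ioeeg readers above now use when padding samples requested outside the recording boundaries.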