Utilities API Reference

This module contains helper functions and utilities for nelpy.

PrettyBytes

Bases: int

Prints number of bytes in a more readable format

Source code in nelpy/utils.py
class PrettyBytes(int):
    """Prints number of bytes in a more readable format"""

    def __init__(self, val):
        self.val = val

    def __str__(self):
        if self.val < 1024:
            return "{} bytes".format(self.val)
        elif self.val < 1024**2:
            return "{:.3f} kilobytes".format(self.val / 1024)
        elif self.val < 1024**3:
            return "{:.3f} megabytes".format(self.val / 1024**2)
        elif self.val < 1024**4:
            return "{:.3f} gigabytes".format(self.val / 1024**3)
        else:
            # fall back to terabytes so that __str__ never returns None
            return "{:.3f} terabytes".format(self.val / 1024**4)

    def __repr__(self):
        return self.__str__()
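
For illustration, a short doctest-style example (str() triggers the pretty formatting):

>>> from nelpy.utils import PrettyBytes
>>> print(PrettyBytes(512))
512 bytes
>>> print(PrettyBytes(2048))
2.000 kilobytes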

PrettyDuration

Bases: float

Time duration with pretty print.

Behaves like a float, and can always be cast to a float.

Source code in nelpy/utils.py
class PrettyDuration(float):
    """Time duration with pretty print.

    Behaves like a float, and can always be cast to a float.
    """

    def __init__(self, seconds):
        self.duration = seconds

    def __str__(self):
        return self.time_string(self.duration)

    def __repr__(self):
        return self.time_string(self.duration)

    @staticmethod
    def to_dhms(seconds):
        """
        Convert seconds into days, hours, minutes, seconds, and milliseconds.

        Parameters
        ----------
        seconds : float
            Time duration in seconds.

        Returns
        -------
        namedtuple
            Named tuple with fields: pos (bool), dd (days), hh (hours),
            mm (minutes), ss (seconds), ms (milliseconds).

        Examples
        --------
        >>> PrettyDuration.to_dhms(3661.5)
        Time(pos=True, dd=0, hh=1, mm=1, ss=1, ms=500.0)
        """
        pos = seconds >= 0
        if not pos:
            seconds = -seconds
        ms = seconds % 1
        ms = round(ms * 10000) / 10
        seconds = floor(seconds)
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)
        Time = namedtuple("Time", "pos dd hh mm ss ms")
        time = Time(pos=pos, dd=d, hh=h, mm=m, ss=s, ms=ms)
        return time

    @staticmethod
    def time_string(seconds):
        """
        Return a formatted time string.

        Parameters
        ----------
        seconds : float
            Time duration in seconds.

        Returns
        -------
        str
            Formatted time string (e.g., "1:01:01:500 hours", "30.5 seconds").

        Examples
        --------
        >>> PrettyDuration.time_string(3661.5)
        '1:01:01:500 hours'
        >>> PrettyDuration.time_string(30.5)
        '30.5 seconds'
        """
        if np.isinf(seconds):
            return "inf"
        pos, dd, hh, mm, ss, s = PrettyDuration.to_dhms(seconds)
        if s > 0:
            if mm == 0:
                # in this case, represent milliseconds in terms of
                # seconds (i.e. a decimal)
                sstr = str(s / 1000).lstrip("0")
                if s >= 999.5:
                    ss += 1
                    s = 0
                    sstr = ""
                    # now propagate the carry:
                    if ss == 60:
                        mm += 1
                        ss = 0
                    if mm == 60:
                        hh += 1
                        mm = 0
                    if hh == 24:
                        dd += 1
                        hh = 0
            else:
                # for all other cases, milliseconds will be represented
                # as an integer
                if s >= 999.5:
                    ss += 1
                    s = 0
                    sstr = ""
                    # now propagate the carry:
                    if ss == 60:
                        mm += 1
                        ss = 0
                    if mm == 60:
                        hh += 1
                        mm = 0
                    if hh == 24:
                        dd += 1
                        hh = 0
                else:
                    sstr = ":{:03d}".format(int(s))
        else:
            sstr = ""
        if dd > 0:
            daystr = "{:01d} days ".format(dd)
        else:
            daystr = ""
        if hh > 0:
            timestr = daystr + "{:01d}:{:02d}:{:02d}{} hours".format(hh, mm, ss, sstr)
        elif mm > 0:
            timestr = daystr + "{:01d}:{:02d}{} minutes".format(mm, ss, sstr)
        elif ss > 0:
            timestr = daystr + "{:01d}{} seconds".format(ss, sstr)
        else:
            timestr = daystr + "{} milliseconds".format(s)
        if not pos:
            timestr = "-" + timestr
        return timestr

    def __add__(self, other):
        """
        Add another value to this duration.

        Parameters
        ----------
        other : float or PrettyDuration
            Value to add.

        Returns
        -------
        PrettyDuration
            New duration object.
        """
        return PrettyDuration(self.duration + other)

    def __radd__(self, other):
        """
        Add this duration to another value (right addition).

        Parameters
        ----------
        other : float or PrettyDuration
            Value to add to.

        Returns
        -------
        PrettyDuration
            New duration object.
        """
        return self.__add__(other)

    def __sub__(self, other):
        """
        Subtract another value from this duration.

        Parameters
        ----------
        other : float or PrettyDuration
            Value to subtract.

        Returns
        -------
        PrettyDuration
            New duration object.
        """
        return PrettyDuration(self.duration - other)

    def __rsub__(self, other):
        """
        Subtract this duration from another value (right subtraction).

        Parameters
        ----------
        other : float or PrettyDuration
            Value to subtract from.

        Returns
        -------
        PrettyDuration
            New duration object.
        """
        return other - self.duration

    def __mul__(self, other):
        """
        Multiply this duration by another value.

        Parameters
        ----------
        other : float
            Value to multiply by.

        Returns
        -------
        PrettyDuration
            New duration object.
        """
        return PrettyDuration(self.duration * other)

    def __rmul__(self, other):
        """
        Multiply another value by this duration (right multiplication).

        Parameters
        ----------
        other : float
            Value to multiply.

        Returns
        -------
        PrettyDuration
            New duration object.
        """
        return self.__mul__(other)

    def __truediv__(self, other):
        """
        Divide this duration by another value.

        Parameters
        ----------
        other : float
            Value to divide by.

        Returns
        -------
        PrettyDuration
            New duration object.
        """
        return PrettyDuration(self.duration / other)
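
A short usage sketch: a PrettyDuration behaves like a float but pretty-prints, and arithmetic returns new PrettyDuration objects:

>>> from nelpy.utils import PrettyDuration
>>> print(PrettyDuration(90))
1:30 minutes
>>> print(PrettyDuration(90) + 30)
2:00 minutes
>>> float(PrettyDuration(90))
90.0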

time_string(seconds) staticmethod

Return a formatted time string.

Parameters:

seconds : float
    Time duration in seconds. (required)

Returns:

str
    Formatted time string (e.g., "1:01:01:500 hours", "30.5 seconds").

Examples:

>>> PrettyDuration.time_string(3661.5)
'1:01:01:500 hours'
>>> PrettyDuration.time_string(30.5)
'30.5 seconds'
Source code in nelpy/utils.py
@staticmethod
def time_string(seconds):
    """
    Return a formatted time string.

    Parameters
    ----------
    seconds : float
        Time duration in seconds.

    Returns
    -------
    str
        Formatted time string (e.g., "1:01:01:500 hours", "30.5 seconds").

    Examples
    --------
    >>> PrettyDuration.time_string(3661.5)
    '1:01:01:500 hours'
    >>> PrettyDuration.time_string(30.5)
    '30.5 seconds'
    """
    if np.isinf(seconds):
        return "inf"
    pos, dd, hh, mm, ss, s = PrettyDuration.to_dhms(seconds)
    if s > 0:
        if mm == 0:
            # in this case, represent milliseconds in terms of
            # seconds (i.e. a decimal)
            sstr = str(s / 1000).lstrip("0")
            if s >= 999.5:
                ss += 1
                s = 0
                sstr = ""
                # now propagate the carry:
                if ss == 60:
                    mm += 1
                    ss = 0
                if mm == 60:
                    hh += 1
                    mm = 0
                if hh == 24:
                    dd += 1
                    hh = 0
        else:
            # for all other cases, milliseconds will be represented
            # as an integer
            if s >= 999.5:
                ss += 1
                s = 0
                sstr = ""
                # now propagate the carry:
                if ss == 60:
                    mm += 1
                    ss = 0
                if mm == 60:
                    hh += 1
                    mm = 0
                if hh == 24:
                    dd += 1
                    hh = 0
            else:
                sstr = ":{:03d}".format(int(s))
    else:
        sstr = ""
    if dd > 0:
        daystr = "{:01d} days ".format(dd)
    else:
        daystr = ""
    if hh > 0:
        timestr = daystr + "{:01d}:{:02d}:{:02d}{} hours".format(hh, mm, ss, sstr)
    elif mm > 0:
        timestr = daystr + "{:01d}:{:02d}{} minutes".format(mm, ss, sstr)
    elif ss > 0:
        timestr = daystr + "{:01d}{} seconds".format(ss, sstr)
    else:
        timestr = daystr + "{} milliseconds".format(s)
    if not pos:
        timestr = "-" + timestr
    return timestr

to_dhms(seconds) staticmethod

Convert seconds into days, hours, minutes, seconds, and milliseconds.

Parameters:

seconds : float
    Time duration in seconds. (required)

Returns:

namedtuple
    Named tuple with fields: pos (bool), dd (days), hh (hours), mm (minutes), ss (seconds), ms (milliseconds).

Examples:

>>> PrettyDuration.to_dhms(3661.5)
Time(pos=True, dd=0, hh=1, mm=1, ss=1, ms=500.0)
Source code in nelpy/utils.py
@staticmethod
def to_dhms(seconds):
    """
    Convert seconds into days, hours, minutes, seconds, and milliseconds.

    Parameters
    ----------
    seconds : float
        Time duration in seconds.

    Returns
    -------
    namedtuple
        Named tuple with fields: pos (bool), dd (days), hh (hours),
        mm (minutes), ss (seconds), ms (milliseconds).

    Examples
    --------
    >>> PrettyDuration.to_dhms(3661.5)
    Time(pos=True, dd=0, hh=1, mm=1, ss=1, ms=500.0)
    """
    pos = seconds >= 0
    if not pos:
        seconds = -seconds
    ms = seconds % 1
    ms = round(ms * 10000) / 10
    seconds = floor(seconds)
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    Time = namedtuple("Time", "pos dd hh mm ss ms")
    time = Time(pos=pos, dd=d, hh=h, mm=m, ss=s, ms=ms)
    return time

PrettyInt

Bases: int

Prints integers in a more readable format

Source code in nelpy/utils.py
class PrettyInt(int):
    """Prints integers in a more readable format"""

    def __init__(self, val):
        self.val = val

    def __str__(self):
        return "{:,}".format(self.val)

    def __repr__(self):
        return "{:,}".format(self.val)

argsort(seq)

Return indices that would sort a sequence.

Parameters:

seq : sequence
    Sequence to sort (list, tuple, array, etc.). (required)

Returns:

indices : list
    List of indices that would sort the sequence.

Examples:

>>> argsort([3, 1, 4, 1, 5])
[1, 3, 0, 2, 4]
>>> seq = ["c", "a", "b"]
>>> [seq[i] for i in argsort(seq)]
['a', 'b', 'c']
Notes

Based on http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python

Source code in nelpy/utils.py
def argsort(seq):
    """
    Return indices that would sort a sequence.

    Parameters
    ----------
    seq : sequence
        Sequence to sort (list, tuple, array, etc.).

    Returns
    -------
    indices : list
        List of indices that would sort the sequence.

    Examples
    --------
    >>> argsort([3, 1, 4, 1, 5])
    [1, 3, 0, 2, 4]
    >>> seq = ["c", "a", "b"]
    >>> [seq[i] for i in argsort(seq)]
    ['a', 'b', 'c']

    Notes
    -----
    Based on http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
    """
    return sorted(range(len(seq)), key=seq.__getitem__)

asa_indices_within_epochs(asa, intervalarray)

Return indices of ASA within epochs.

Parameters:

asa : AnalogSignalArray
    AnalogSignalArray object. (required)
intervalarray : IntervalArray
    IntervalArray containing epochs of interest. (required)

Returns:

indices : np.ndarray
    Array of shape (n_epochs, 2) containing [start, stop] indices for each
    epoch, so that data can be associated with asa._data[:, start:stop] for
    each epoch.

Examples:

>>> epochs = nelpy.EpochArray([[0, 10], [20, 30]])
>>> indices = asa_indices_within_epochs(asa, epochs)
>>> data_in_epochs = [asa._data[:, start:stop] for start, stop in indices]
Source code in nelpy/utils.py
def asa_indices_within_epochs(asa, intervalarray):
    """
    Return indices of ASA within epochs.

    Parameters
    ----------
    asa : AnalogSignalArray
        AnalogSignalArray object.
    intervalarray : IntervalArray
        IntervalArray containing epochs of interest.

    Returns
    -------
    indices : np.ndarray
        Array of shape (n_epochs, 2) containing [start, stop] indices
        for each epoch, so that data can be associated with
        asa._data[:,start:stop] for each epoch.

    Examples
    --------
    >>> epochs = nelpy.EpochArray([[0, 10], [20, 30]])
    >>> indices = asa_indices_within_epochs(asa, epochs)
    >>> data_in_epochs = [asa._data[:, start:stop] for start, stop in indices]
    """
    indices = []
    intervalarray = intervalarray[asa.support]
    for interval in intervalarray.merge().data:
        a_start = interval[0]
        a_stop = interval[1]
        frm, to = np.searchsorted(asa._abscissa_vals, (a_start, a_stop))
        indices.append((frm, to))
    indices = np.array(indices, ndmin=2)

    return indices

cartesian(xcenters, ycenters)

Find every combination of elements in two arrays (Cartesian product).

Parameters:

xcenters : np.ndarray
    First array of values. (required)
ycenters : np.ndarray
    Second array of values. (required)

Returns:

cartesian : np.ndarray
    Array of shape (n_samples, 2) containing all combinations.

Examples:

>>> x = np.array([1, 2])
>>> y = np.array([3, 4, 5])
>>> cartesian(x, y)
array([[1, 3],
       [2, 3],
       [1, 4],
       [2, 4],
       [1, 5],
       [2, 5]])
Source code in nelpy/utils.py
def cartesian(xcenters, ycenters):
    """
    Find every combination of elements in two arrays (Cartesian product).

    Parameters
    ----------
    xcenters : np.ndarray
        First array of values.
    ycenters : np.ndarray
        Second array of values.

    Returns
    -------
    cartesian : np.ndarray
        Array of shape (n_samples, 2) containing all combinations.

    Examples
    --------
    >>> x = np.array([1, 2])
    >>> y = np.array([3, 4, 5])
    >>> cartesian(x, y)
    array([[1, 3],
           [2, 3],
           [1, 4],
           [2, 4],
           [1, 5],
           [2, 5]])
    """
    return np.transpose(
        [np.tile(xcenters, len(ycenters)), np.repeat(ycenters, len(xcenters))]
    )

collapse_time(obj, gap=0)

Collapse all epochs in an object into a single, contiguous object.

Parameters:

obj : SpikeTrainArray or AnalogSignalArray
    Object with multiple epochs to collapse. (required)
gap : float, optional
    Gap to insert between epochs in the collapsed object. Default is 0.

Returns:

obj
    New object of the same type with all epochs collapsed into a single
    contiguous epoch.

Examples:

>>> # Collapse a SpikeTrainArray with multiple epochs
>>> collapsed_st = collapse_time(spiketrain)
>>> # Collapse an AnalogSignalArray with gaps between epochs
>>> collapsed_asa = collapse_time(analogsignal, gap=0.1)
Notes

For SpikeTrainArrays, gaps are not yet supported. The function adjusts spike times or signal times to create a continuous timeline while preserving the original epoch boundaries information.

Source code in nelpy/utils.py
def collapse_time(obj, gap=0):
    """
    Collapse all epochs in an object into a single, contiguous object.

    Parameters
    ----------
    obj : SpikeTrainArray or AnalogSignalArray
        Object with multiple epochs to collapse.
    gap : float, optional
        Gap to insert between epochs in the collapsed object. Default is 0.

    Returns
    -------
    obj
        New object of the same type with all epochs collapsed into a single
        contiguous epoch.

    Examples
    --------
    >>> # Collapse a SpikeTrainArray with multiple epochs
    >>> collapsed_st = collapse_time(spiketrain)
    >>> # Collapse an AnalogSignalArray with gaps between epochs
    >>> collapsed_asa = collapse_time(analogsignal, gap=0.1)

    Notes
    -----
    For SpikeTrainArrays, gaps are not yet supported.
    The function adjusts spike times or signal times to create a continuous
    timeline while preserving the original epoch boundaries information.
    """

    # TODO: redo SpikeTrainArray so as to keep the epochs separate!, and to support gaps!

    # We'll have to adjust all the spikes per epoch... and we'll have to compute a new support. Also set a flag!

    # If it's a SpikeTrainArray, then we left-shift the spike times. If it's an AnalogSignalArray, then we
    # left-shift the time and tdata.

    # Also set a new attribute, with the boundaries in seconds.

    if isinstance(obj, core.RegularlySampledAnalogSignalArray):
        new_obj = type(obj)(empty=True)
        new_obj._data = obj._data

        durations = obj.support.durations
        starts = np.insert(np.cumsum(durations + gap), 0, 0)[:-1]
        stops = starts + durations
        newsupport = type(obj._abscissa.support)(np.vstack((starts, stops)).T)
        new_obj._support = newsupport

        new_time = obj.time.astype(float)  # fast copy
        time_idx = np.insert(np.cumsum(obj.lengths), 0, 0)

        new_offset = 0
        for epidx in range(obj.n_epochs):
            if epidx > 0:
                new_time[time_idx[epidx] : time_idx[epidx + 1]] = (
                    new_time[time_idx[epidx] : time_idx[epidx + 1]]
                    - obj.time[time_idx[epidx]]
                    + new_offset
                    + gap
                )
                new_offset += durations[epidx] + gap
            else:
                new_time[time_idx[epidx] : time_idx[epidx + 1]] = (
                    new_time[time_idx[epidx] : time_idx[epidx + 1]]
                    - obj.time[time_idx[epidx]]
                    + new_offset
                )
                new_offset += durations[epidx]
        new_obj._time = new_time

        new_obj._fs = obj._fs

    elif isinstance(obj, core.EventArray):
        if gap > 0:
            raise ValueError("gaps not supported for SpikeTrainArrays yet!")
        new_obj = type(obj)(empty=True)
        new_time = [[] for _ in range(obj.n_series)]
        duration = 0
        for st_ in obj:
            le = st_.support.start
            for unit_ in range(obj.n_series):
                new_time[unit_].extend(st_._data[unit_] - le + duration)
            duration += st_.support.duration
        new_time = np.asanyarray(
            [np.asanyarray(unittime) for unittime in new_time], dtype=object
        )
        new_obj._data = new_time
        new_obj.support = type(obj._abscissa.support)([0, duration])
        new_obj._series_ids = obj._series_ids
        new_obj._series_labels = obj._series_labels
        new_obj._series_tags = obj._series_tags
    elif isinstance(obj, core.BinnedEventArray):
        raise NotImplementedError(
            "BinnedEventArrays are not yet supported, but bst.data is essentially already collapsed!"
        )
    else:
        raise TypeError("unsupported type for collapse_time")

    return new_obj
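
A minimal, hedged sketch of the intended workflow. The AnalogSignalArray constructor signature (data, abscissa_vals, fs) and automatic gap detection are assumptions that may vary across nelpy versions:

import numpy as np
import nelpy as nel
from nelpy.utils import collapse_time

# Two one-second chunks of data separated by a 9-second gap; nelpy is
# assumed to detect the gap and create a two-epoch support.
t = np.concatenate([np.arange(0, 1, 0.1), np.arange(10, 11, 0.1)])
asa = nel.AnalogSignalArray(np.sin(t), abscissa_vals=t, fs=10)

# Collapsing removes the gap: the result spans a single contiguous
# epoch of about 2 seconds starting at time 0.
collapsed = collapse_time(asa)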

ddt_asa(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None, norm=False)

Numerical differentiation of a regularly sampled AnalogSignalArray.

Optionally also smooths result with a Gaussian kernel.

Smoothing is applied in time, and the same smoothing is applied to each signal in the AnalogSignalArray.

Differentiation (and, if requested, smoothing) is applied within each epoch.

Parameters:

asa : nelpy.RegularlySampledAnalogSignalArray
    Input object. (required)
fs : float, optional
    Sampling rate (in Hz) of the input RSASA. If not provided, it will be
    obtained from asa.fs.
smooth : bool, optional
    If True, the result will be smoothed. Default is False.
rectify : bool, optional
    If True, the absolute value of the derivative is computed. Default is True.
sigma : float, optional
    Standard deviation of the Gaussian kernel, in seconds. Default is 0.05
    (50 ms).
truncate : float, optional
    Bandwidth outside of which the filter value will be zero. Default is 4.0.
norm : bool, optional
    If True, apply the L2 norm to the result. Default is False.

Returns:

out : nelpy.RegularlySampledAnalogSignalArray
    A RegularlySampledAnalogSignalArray with derivative data (in units per
    second).

Notes

Central differences are used here.

Source code in nelpy/utils.py
@keyword_deprecation(replace_x_with_y={"bw": "truncate"})
def ddt_asa(
    asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None, norm=False
):
    """Numerical differentiation of a regularly sampled AnalogSignalArray.

    Optionally also smooths result with a Gaussian kernel.

    Smoothing is applied in time, and the same smoothing is applied to each
    signal in the AnalogSignalArray.

    Differentiation (and, if requested, smoothing) is applied within each epoch.

    Parameters
    ----------
    asa : nelpy.RegularlySampledAnalogSignalArray
        Input object.
    fs : float, optional
        Sampling rate (in Hz) of input RSASA. If not provided, it will be obtained
        from asa.fs.
    smooth : bool, optional
        If true, result will be smoothed. Default is False
    rectify : bool, optional
        If True, absolute value of derivative is computed. Default is True.
    sigma : float, optional
        Standard deviation of Gaussian kernel, in seconds. Default is 0.05
        (50 ms).
    truncate : float, optional
        Bandwidth outside of which the filter value will be zero. Default is 4.0
    norm : bool, optional
        If True, then apply the L2 norm to the result.

    Returns
    -------
    out : nelpy.RegularlySampledAnalogSignalArray
        A RegularlySampledAnalogSignalArray with derivative data (in units
        per second) is returned.

    Notes
    -----
    Central differences are used here.
    """

    if not (
        isinstance(asa, core.RegularlySampledAnalogSignalArray)
        or isinstance(asa, core._analogsignalarray.PositionArray)
    ):
        raise TypeError("Input object must be a RegularlySampledAnalogSignalArray!")
    if fs is None:
        fs = asa.fs
    if sigma is None:
        sigma = 0.05  # 50 ms default

    out = asa.copy()
    cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0)

    # ensure that datatype is float
    # TODO: this will break complex data
    out._data = out.data.astype(float)

    # now obtain the derivative for each epoch separately
    for idx in range(asa.n_epochs):
        # if 1D:
        if asa.n_signals == 1:
            if (cum_lengths[idx + 1] - cum_lengths[idx]) < 2:
                # only single sample
                out._data[[0], cum_lengths[idx] : cum_lengths[idx + 1]] = 0
            else:
                out._data[[0], cum_lengths[idx] : cum_lengths[idx + 1]] = np.gradient(
                    asa._data[[0], cum_lengths[idx] : cum_lengths[idx + 1]], axis=1
                )
        else:
            if (cum_lengths[idx + 1] - cum_lengths[idx]) < 2:
                # only single sample
                out._data[:, cum_lengths[idx] : cum_lengths[idx + 1]] = 0
            else:
                out._data[:, cum_lengths[idx] : cum_lengths[idx + 1]] = np.gradient(
                    asa._data[:, cum_lengths[idx] : cum_lengths[idx + 1]], axis=1
                )

    out._data = out._data * fs

    if norm:
        out._data = np.atleast_2d(np.linalg.norm(out._data, axis=0))

    if rectify:
        out._data = np.abs(out._data)

    if smooth:
        out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate)

    return out
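
A hedged sketch of a common use: computing movement speed from a 2D trajectory. The AnalogSignalArray constructor signature is an assumption (see the collapse_time sketch above); norm=True collapses the x- and y-derivatives into a single magnitude:

import numpy as np
import nelpy as nel
from nelpy.utils import ddt_asa

# Hypothetical 2D trajectory sampled at 10 Hz.
t = np.arange(0, 10, 0.1)
xy = np.vstack((np.cos(t), np.sin(t)))
pos = nel.AnalogSignalArray(xy, abscissa_vals=t, fs=10)

# The L2 norm of the per-signal derivatives gives speed in units per second.
speed = ddt_asa(pos, norm=True, smooth=True, sigma=0.2)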

dxdt_AnalogSignalArray(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None)

Numerical differentiation of a regularly sampled AnalogSignalArray.

Deprecated: calling this function raises DeprecationWarning("use ddt_asa instead!"); use ddt_asa.

Optionally also smooths result with a Gaussian kernel.

Smoothing is applied in time, and the same smoothing is applied to each signal in the AnalogSignalArray.

Differentiation (and, if requested, smoothing) is applied within each epoch.

Parameters:

asa : AnalogSignalArray
    Input AnalogSignalArray. (required)
fs : float, optional
    Sampling rate (in Hz) of the AnalogSignalArray. If not provided, it will
    be obtained from asa.fs.
smooth : bool, optional
    If True, the result will be smoothed. Default is False.
rectify : bool, optional
    If True, the absolute value of the derivative is computed. Default is True.
sigma : float, optional
    Standard deviation of the Gaussian kernel, in seconds. Default is 0.05
    (50 ms).
truncate : float, optional
    Bandwidth outside of which the filter value will be zero. Default is 4.0.

Returns:

out : AnalogSignalArray
    An AnalogSignalArray with derivative data (in units per second).

Source code in nelpy/utils.py
@keyword_deprecation(replace_x_with_y={"bw": "truncate"})
def dxdt_AnalogSignalArray(
    asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None
):
    """Numerical differentiation of a regularly sampled AnalogSignalArray.

    Optionally also smooths result with a Gaussian kernel.

    Smoothing is applied in time, and the same smoothing is applied to each
    signal in the AnalogSignalArray.

    Differentiation (and, if requested, smoothing) is applied within each epoch.

    Parameters
    ----------
    asa : AnalogSignalArray
    fs : float, optional
        Sampling rate (in Hz) of AnalogSignalArray. If not provided, it will
        be obtained from asa.fs
    smooth : bool, optional
        If true, result will be smoothed. Default is False
    rectify : bool, optional
        If True, absolute value of derivative is computed. Default is True.
    sigma : float, optional
        Standard deviation of Gaussian kernel, in seconds. Default is 0.05
        (50 ms).
    truncate : float, optional
        Bandwidth outside of which the filter value will be zero. Default is 4.0

    Returns
    -------
    out : AnalogSignalArray
        An AnalogSignalArray with derivative data (in units per second) is returned.
    """

    raise DeprecationWarning("use ddt_asa instead!")

    if fs is None:
        fs = asa.fs
    if fs is None:
        raise ValueError(
            "fs must either be specified, or must be contained in the AnalogSignalArray!"
        )
    if sigma is None:
        sigma = 0.05  # 50 ms default

    out = copy.deepcopy(asa)
    cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0)

    # ensure that datatype is float
    out._data = out.data.astype(float)

    if asa.n_signals == 2:
        out._data = out._data[[0], :]

    # now obtain the derivative for each epoch separately
    for idx in range(asa.n_epochs):
        # if 1D:
        if asa.n_signals == 1:
            if (cum_lengths[idx + 1] - cum_lengths[idx]) < 2:
                # only single sample
                out._data[[0], cum_lengths[idx] : cum_lengths[idx + 1]] = 0
            else:
                out._data[[0], cum_lengths[idx] : cum_lengths[idx + 1]] = np.gradient(
                    asa._data[[0], cum_lengths[idx] : cum_lengths[idx + 1]], axis=1
                )
        elif asa.n_signals == 2:
            if (cum_lengths[idx + 1] - cum_lengths[idx]) < 2:
                # only single sample
                out._data[[0], cum_lengths[idx] : cum_lengths[idx + 1]] = 0
            else:
                out._data[[0], cum_lengths[idx] : cum_lengths[idx + 1]] = (
                    np.linalg.norm(
                        np.gradient(
                            asa._data[:, cum_lengths[idx] : cum_lengths[idx + 1]],
                            axis=1,
                        ),
                        axis=0,
                    )
                )
        else:
            raise TypeError("more than 2D not currently supported!")

    out._data = out._data * fs

    if rectify:
        out._data = np.abs(out._data)

    if smooth:
        out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate)

    return out

find_nearest_idx(array, val)

Find the index of the nearest value in an array.

Parameters:

array : np.ndarray
    Input array to search in. (required)
val : float
    Value to find the nearest index for. (required)

Returns:

int
    Index into array that is closest to val.

Examples:

>>> array = np.array([1, 3, 5, 7, 9])
>>> find_nearest_idx(array, 4)
1
>>> find_nearest_idx(array, 6)
2
Notes

TODO: A more efficient version using searchsorted should be incorporated: Based on answer here: http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array

Source code in nelpy/utils.py
def find_nearest_idx(array, val):
    """
    Find the index of the nearest value in an array.

    Parameters
    ----------
    array : np.ndarray
        Input array to search in.
    val : float
        Value to find the nearest index for.

    Returns
    -------
    int
        Index into array that is closest to val.

    Examples
    --------
    >>> array = np.array([1, 3, 5, 7, 9])
    >>> find_nearest_idx(array, 4)
    1
    >>> find_nearest_idx(array, 6)
    2

    Notes
    -----
    TODO: A more efficient version using searchsorted should be incorporated:
    Based on answer here: http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    """
    return (np.abs(array - val)).argmin()

find_nearest_indices(array, vals)

Find indices of nearest values in an array for multiple values.

Parameters:

array : np.ndarray
    Input array to search in. (required)
vals : np.ndarray
    Array of values to find nearest indices for. (required)

Returns:

np.ndarray
    Array of indices into array that are closest to vals.

Examples:

>>> array = np.array([1, 3, 5, 7, 9])
>>> vals = np.array([2, 4, 6, 8])
>>> find_nearest_indices(array, vals)
array([0, 1, 2, 3])
Notes

Wrapper around find_nearest_idx().

Source code in nelpy/utils.py
def find_nearest_indices(array, vals):
    """
    Find indices of nearest values in an array for multiple values.

    Parameters
    ----------
    array : np.ndarray
        Input array to search in.
    vals : np.ndarray
        Array of values to find nearest indices for.

    Returns
    -------
    np.ndarray
        Array of indices into array that are closest to vals.

    Examples
    --------
    >>> array = np.array([1, 3, 5, 7, 9])
    >>> vals = np.array([2, 4, 6, 8])
    >>> find_nearest_indices(array, vals)
    array([0, 1, 2, 3])

    Notes
    -----
    Wrapper around find_nearest_idx().
    """
    return np.array([find_nearest_idx(array, val) for val in vals], dtype=int)

find_threshold_crossing_events(x, threshold, *, mode='above')

Find threshold crossing events. INCLUSIVE

Parameters:

x : np.ndarray
    Input data. (required)
threshold : float
    The value whose crossing triggers an event. (required)
mode : str, optional, one of ['above', 'below']
    Event triggering above or below threshold. Default is 'above'.

Returns:

eventlist : list
    List containing the indices corresponding to threshold crossings.
eventmax : list
    List containing the maximum value of each event.

Source code in nelpy/utils.py
def find_threshold_crossing_events(x, threshold, *, mode="above"):
    """Find threshold crossing events. INCLUSIVE

    Parameters
    ----------
    x : numpy array
        Input data
    threshold : float
        The value whose crossing triggers an event
    mode : string, optional in ['above', 'below']; default 'above'
        event triggering above, or below threshold

    Returns
    -------
    eventlist : list
        List containing the indices corresponding to threshold crossings
    eventmax : list
        List containing the maximum value of each event
    """
    from itertools import groupby
    from operator import itemgetter

    if mode == "below":
        cross_threshold = np.where(x <= threshold, 1, 0)
    elif mode == "above":
        cross_threshold = np.where(x >= threshold, 1, 0)
    else:
        raise NotImplementedError(
            "mode {} not understood for find_threshold_crossing_events".format(
                str(mode)
            )
        )
    eventlist = []
    eventmax = []
    for k, v in groupby(enumerate(cross_threshold), key=itemgetter(1)):
        if k:
            v = list(v)
            eventlist.append([v[0][0], v[-1][0]])
            try:
                eventmax.append(x[v[0][0] : (v[-1][0] + 1)].max())
            except Exception:
                print(v, x[v[0][0] : v[-1][0]])
    eventmax = np.asarray(eventmax)
    eventlist = np.asarray(eventlist)
    return eventlist, eventmax
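
A doctest-style illustration (values traced against the source above); crossings are inclusive, so samples equal to the threshold count as part of an event:

>>> import numpy as np
>>> from nelpy.utils import find_threshold_crossing_events
>>> x = np.array([0, 1, 3, 1, 0, 2, 0])
>>> eventlist, eventmax = find_threshold_crossing_events(x, 2)
>>> eventlist
array([[2, 2],
       [5, 5]])
>>> eventmax
array([3, 2])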

frange(start, stop, step)

Generate a range of floating point values with a given step size.

Parameters:

start : float
    Start value. (required)
stop : float
    Stop value (exclusive). (required)
step : float
    Step size. (required)

Returns:

out : np.ndarray
    Array of values from start to stop (exclusive) with given step.

Examples:

>>> frange(0, 1, 0.2)
array([0. , 0.2, 0.4, 0.6, 0.8])
Source code in nelpy/utils.py
def frange(start, stop, step):
    """
    Generate a range of floating point values with a given step size.

    Parameters
    ----------
    start : float
        Start value.
    stop : float
        Stop value (exclusive).
    step : float
        Step size.

    Returns
    -------
    out : np.ndarray
        Array of values from start to stop (exclusive) with given step.

    Examples
    --------
    >>> frange(0, 1, 0.2)
    array([0. , 0.2, 0.4, 0.6, 0.8])
    """
    num_steps = int(np.floor((stop - start) / step))
    return np.linspace(start, stop, num=num_steps, endpoint=False)

gaussian_filter(obj, *, fs=None, sigma=None, truncate=None, inplace=False, mode=None, cval=None, within_intervals=False)

Smooths with a Gaussian kernel.

Smoothing is applied along the abscissa, and the same smoothing is applied to each signal in the RegularlySampledAnalogSignalArray, or to each unit in a BinnedSpikeTrainArray.

Smoothing is applied ACROSS intervals, but smoothing WITHIN intervals is also supported.

Parameters:

obj : RegularlySampledAnalogSignalArray or BinnedSpikeTrainArray
    Input object to smooth. (required)
fs : float, optional
    Sampling rate (in obj.base_unit^-1) of obj. If not provided, it will be
    inferred.
sigma : float, optional
    Standard deviation of the Gaussian kernel, in obj.base_units. Default is
    0.05 (50 ms if base_unit=seconds).
truncate : float, optional
    Bandwidth outside of which the filter value will be zero. Default is 4.0.
inplace : bool, optional
    If True, the data will be replaced with the smoothed data. Default is
    False.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
    Determines how the array borders are handled, where cval is the value
    used when mode is 'constant'. Default is 'reflect'.
cval : scalar, optional
    Value to fill past edges of input if mode is 'constant'. Default is 0.0.
within_intervals : bool, optional
    If True, smooth within each epoch; otherwise smooth across epochs.
    Default is False. Note that when mode='wrap', smoothing within epochs is
    not affected by wrapping.

Returns:

out : same type as obj
    An object with smoothed data.

Source code in nelpy/utils.py
@keyword_deprecation(replace_x_with_y={"bw": "truncate"})
def gaussian_filter(
    obj,
    *,
    fs=None,
    sigma=None,
    truncate=None,
    inplace=False,
    mode=None,
    cval=None,
    within_intervals=False,
):
    """Smooths with a Gaussian kernel.

    Smoothing is applied along the abscissa, and the same smoothing is applied to each
    signal in the RegularlySampledAnalogSignalArray, or to each unit in a BinnedSpikeTrainArray.

    Smoothing is applied ACROSS intervals, but smoothing WITHIN intervals is also supported.

    Parameters
    ----------
    obj : RegularlySampledAnalogSignalArray or BinnedSpikeTrainArray.
    fs : float, optional
        Sampling rate (in obj.base_unit^-1) of obj. If not provided, it will
        be inferred.
    sigma : float, optional
        Standard deviation of Gaussian kernel, in obj.base_units. Default is 0.05
        (50 ms if base_unit=seconds).
    truncate : float, optional
        Bandwidth outside of which the filter value will be zero. Default is 4.0.
    inplace : bool
        If True the data will be replaced with the smoothed data.
        Default is False.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled,
        where cval is the value when mode is equal to 'constant'. Default is
        'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if mode is 'constant'. Default is 0.0.
    within_intervals : boolean, optional
        If True, then smooth within each epoch. Otherwise smooth across epochs.
        Default is False.
        Note that when mode = 'wrap', then smoothing within epochs aren't affected
        by wrapping.

    Returns
    -------
    out : same type as obj
        An object with smoothed data is returned.

    """
    if sigma is None:
        sigma = 0.05
    if truncate is None:
        truncate = 4
    if mode is None:
        mode = "reflect"
    if cval is None:
        cval = 0.0

    if not inplace:
        out = copy.deepcopy(obj)
    else:
        out = obj

    if isinstance(out, core.RegularlySampledAnalogSignalArray) or isinstance(
        out, core._analogsignalarray.PositionArray
    ):
        if fs is None:
            fs = out.fs
        if fs is None:
            raise ValueError(
                "fs must either be specified, or must be contained in the {}!".format(
                    out.type_name
                )
            )
    elif isinstance(out, core.BinnedEventArray) or isinstance(
        out, core._valeventarray.BinnedValueEventArray
    ):
        bst = out
        if fs is None:
            fs = 1 / bst.ds
        if fs is None:
            raise ValueError(
                "fs must either be specified, or must be contained in the {}!".format(
                    out.type_name
                )
            )
    else:
        raise NotImplementedError(
            "gaussian_filter for {} is not yet supported!".format(str(type(out)))
        )

    sigma = sigma * fs

    if not within_intervals:
        # see https://stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python
        # (1) if smoothing across intervals, we work on a merged support
        # (2) build abscissa_vals, including existing ones, and out-of-support ones
        # (3) to smooth U, build auxiliary arrays V and W, with (V=U).nan=0, and (W=1).nan=0
        # (4) Z = smooth(V)/smooth(W)
        # (5) only keep original support, and original abscissa_vals

        if isinstance(
            out,
            (
                core.RegularlySampledAnalogSignalArray,
                core.BinnedEventArray,
                core._analogsignalarray.PositionArray,
                core._valeventarray.BinnedValueEventArray,
            ),
        ):
            support = out._abscissa.support.merge()
            if not support.domain.is_finite:
                support.domain = (
                    support.start,
                    support.stop,
                )  # TODO: #FIXME might come from abscissa definition, and not from support

            missing_abscissa_vals = []
            for interval in ~support:
                missing_vals = frange(interval.start, interval.stop, 1 / fs)
                missing_abscissa_vals.extend(missing_vals)

            if isinstance(
                out,
                (
                    core.RegularlySampledAnalogSignalArray,
                    core._analogsignalarray.PositionArray,
                ),
            ):
                n_signals = out.n_signals
                n_samples = out.n_samples
                V = np.zeros((n_signals, n_samples + len(missing_abscissa_vals)))
                W = np.ones(V.shape)
                all_abscissa_vals = np.sort(
                    np.append(out._abscissa_vals, missing_abscissa_vals)
                )
                data_idx = np.searchsorted(all_abscissa_vals, out._abscissa_vals)
                missing_idx = np.searchsorted(all_abscissa_vals, missing_abscissa_vals)
                V[:, data_idx] = out.data
                W[:, missing_idx] = 0

                VV = scipy.ndimage.gaussian_filter(
                    V, sigma=(0, sigma), truncate=truncate, mode=mode, cval=cval
                )
                WW = scipy.ndimage.gaussian_filter(
                    W, sigma=(0, sigma), truncate=truncate, mode=mode, cval=cval
                )

                Z = VV[:, data_idx] / WW[:, data_idx]
                out._data = Z
            elif isinstance(
                out, (core.BinnedEventArray, core._valeventarray.BinnedValueEventArray)
            ):
                n_signals = out.n_series
                n_samples = out.n_bins
                if out.data.ndim == 2:
                    # (n_series, n_bins) - legacy BinnedEventArray
                    V = np.zeros((n_signals, n_samples + len(missing_abscissa_vals)))
                    W = np.ones(V.shape)
                    all_abscissa_vals = np.sort(
                        np.append(out._abscissa_vals, missing_abscissa_vals)
                    )
                    data_idx = np.searchsorted(all_abscissa_vals, out._abscissa_vals)
                    missing_idx = np.searchsorted(
                        all_abscissa_vals, missing_abscissa_vals
                    )
                    V[:, data_idx] = out.data
                    W[:, missing_idx] = 0

                    VV = scipy.ndimage.gaussian_filter(
                        V, sigma=(0, sigma), truncate=truncate, mode=mode, cval=cval
                    )
                    WW = scipy.ndimage.gaussian_filter(
                        W, sigma=(0, sigma), truncate=truncate, mode=mode, cval=cval
                    )

                    Z = VV[:, data_idx] / WW[:, data_idx]
                    out._data = Z
                elif out.data.ndim == 3:
                    # (n_series, n_bins, n_values) - BinnedValueEventArray
                    n_values = out.data.shape[2]
                    V = np.zeros(
                        (n_signals, n_samples + len(missing_abscissa_vals), n_values)
                    )
                    W = np.ones(V.shape)
                    all_abscissa_vals = np.sort(
                        np.append(out._abscissa_vals, missing_abscissa_vals)
                    )
                    data_idx = np.searchsorted(all_abscissa_vals, out._abscissa_vals)
                    missing_idx = np.searchsorted(
                        all_abscissa_vals, missing_abscissa_vals
                    )
                    V[:, data_idx, :] = out.data
                    W[:, missing_idx, :] = 0
                    VV = np.empty_like(V)
                    WW = np.empty_like(W)
                    for v in range(n_values):
                        VV[..., v] = scipy.ndimage.gaussian_filter(
                            V[..., v],
                            sigma=(0, sigma),
                            truncate=truncate,
                            mode=mode,
                            cval=cval,
                        )
                        WW[..., v] = scipy.ndimage.gaussian_filter(
                            W[..., v],
                            sigma=(0, sigma),
                            truncate=truncate,
                            mode=mode,
                            cval=cval,
                        )
                    Z = VV[:, data_idx, :] / WW[:, data_idx, :]
                    out._data = Z
                else:
                    raise ValueError(
                        "Unsupported data shape for BinnedValueEventArray: {}".format(
                            out.data.shape
                        )
                    )
        else:
            raise NotImplementedError(
                "gaussian_filter across intervals for {} is not yet supported!".format(
                    str(type(out))
                )
            )
    else:  # within intervals:
        cum_lengths = np.insert(np.cumsum(out.lengths), 0, 0)
        out._data = out._data.astype(float)

        if isinstance(out, core.RegularlySampledAnalogSignalArray):
            # now smooth each interval separately
            for idx in range(out.n_intervals):
                out._data[:, cum_lengths[idx] : cum_lengths[idx + 1]] = (
                    scipy.ndimage.gaussian_filter(
                        out._data[:, cum_lengths[idx] : cum_lengths[idx + 1]],
                        sigma=(0, sigma),
                        truncate=truncate,
                    )
                )
        elif isinstance(out, core.BinnedSpikeTrainArray):
            # now smooth each interval separately
            for idx in range(out.n_epochs):
                out._data[:, cum_lengths[idx] : cum_lengths[idx + 1]] = (
                    scipy.ndimage.gaussian_filter(
                        out._data[:, cum_lengths[idx] : cum_lengths[idx + 1]],
                        sigma=(0, sigma),
                        truncate=truncate,
                    )
                )
                # out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = self._smooth_array(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], w=w)
        elif isinstance(out, core._valeventarray.BinnedValueEventArray):
            # now smooth each interval separately for each value type
            if out.data.ndim == 3:
                for idx in range(out.n_intervals):
                    for v in range(out.data.shape[2]):
                        out._data[:, cum_lengths[idx] : cum_lengths[idx + 1], v] = (
                            scipy.ndimage.gaussian_filter(
                                out._data[
                                    :, cum_lengths[idx] : cum_lengths[idx + 1], v
                                ],
                                sigma=(0, sigma),
                                truncate=truncate,
                            )
                        )
            else:
                # fallback to 2D smoothing (shouldn't happen for BinnedValueEventArray, but for safety)
                for idx in range(out.n_intervals):
                    out._data[:, cum_lengths[idx] : cum_lengths[idx + 1]] = (
                        scipy.ndimage.gaussian_filter(
                            out._data[:, cum_lengths[idx] : cum_lengths[idx + 1]],
                            sigma=(0, sigma),
                            truncate=truncate,
                        )
                    )

    return out
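
A brief, hedged usage sketch (same assumed AnalogSignalArray constructor as the sketches above). Note that sigma is specified in base units (seconds here), not samples; it is converted internally via sigma * fs:

import numpy as np
import nelpy as nel
from nelpy.utils import gaussian_filter

# Hypothetical noisy signal sampled at 100 Hz.
t = np.arange(0, 10, 0.01)
noisy = np.sin(2 * np.pi * t) + 0.3 * np.random.randn(t.size)
asa = nel.AnalogSignalArray(noisy, abscissa_vals=t, fs=100)

# Smooth with a 50 ms Gaussian kernel, across epoch boundaries (default).
smoothed = gaussian_filter(asa, sigma=0.05)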

get_PBEs(data, fs=None, ds=None, sigma=None, truncate=None, unsorted_id=0, min_active=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None)

Determine PBEs from multiunit activity or spike trains.

Definitions

MUA : multiunit activity
PBE : population burst event

Summary

This function can be used to identify PBE epochs from spike trains, binned spike trains, or multiunit activity (in the form of an AnalogSignalArray).

It is recommended to either pass in a SpikeTrainArray or a BinnedSpikeTrainArray, so that a min_active number of sorted units can be set.

It is also recommended that the unsorted units (but not noise artifacts!) should be included in the spike train that is used to estimate the PBEs. By default, unit_id=0 is assumed to be unsorted, but this can be changed, or if no unsorted units are present, you can set unsorted_id=None. Equivalently, if min_active=0, then no restriction will apply, and the unsorted_id will have no effect on the final PBE epochs.

Examples:

PBE_epochs = get_PBEs(mua_asa)
PBE_epochs = get_PBEs(spiketrain, min_active=5)
PBE_epochs = get_PBEs(binnedspiketrain, min_active=5)

Parameters:

data : AnalogSignalArray, SpikeTrainArray, or BinnedSpikeTrainArray (required)
    If an AnalogSignalArray: one signal, namely the multiunit firing rate
    [in Hz].
    If a SpikeTrainArray: multiple units, including unsorted unit(s), but
    excluding any noise artifacts.
    If a BinnedSpikeTrainArray: multiunit activity.
fs : float, optional
    Sampling frequency of mua, in Hz. If not specified, it will be inferred
    from data.
ds : float, optional
    Time step in which to bin spikes. Default is 1 ms.
sigma : float, optional
    Standard deviation (in seconds) of Gaussian smoothing kernel. Default is
    10 ms. If sigma == 0 then no smoothing is applied.
truncate : float, optional
    Bandwidth of the Gaussian filter. Default is 6.
unsorted_id : int, optional
    unit_id of the unsorted unit. Default is 0. If no unsorted unit is
    present, then set unsorted_id = None.
min_active : int, optional
    Minimum number of active units per event, excluding unsorted unit.
    Default is 5.
minLength : float, optional
    Minimum event duration in seconds. Default is 50 ms.
maxLength : float, optional
    Maximum event duration in seconds. Default is 750 ms.
PrimaryThreshold : float, optional
    Primary threshold to exceed. Default is mean() + 3*std().
SecondaryThreshold : float, optional
    Secondary threshold to fall back to. Default is mean().
minThresholdLength : float, optional
    Minimum duration to stay above PrimaryThreshold. Default is 0 ms.

Returns:

PBE_epochs : EpochArray
    EpochArray containing all the PBEs.

Future improvements

As of now, it is possible, but not easy, to specify the Primary and Secondary thresholds for event detection. A slight change in the API might be needed to make this specification more flexible.

Source code in nelpy/utils.py
@keyword_deprecation(replace_x_with_y={"bw": "truncate"})
def get_PBEs(
    data,
    fs=None,
    ds=None,
    sigma=None,
    truncate=None,
    unsorted_id=0,
    min_active=None,
    minLength=None,
    maxLength=None,
    PrimaryThreshold=None,
    minThresholdLength=None,
    SecondaryThreshold=None,
):
    """Determine PBEs from multiunit activity or spike trains.

    Definitions
    -----------
    MUA : multiunit activity
    PBE : population burst event

    Summary
    -------
    This function can be used to identify PBE epochs from spike trains, binned
    spike trains, or multiunit activity (in the form of an AnalogSignalArray).

    It is recommended to either pass in a SpikeTrainArray or a
    BinnedSpikeTrainArray, so that a `min_active` number of sorted units can be
    set.

    It is also recommended that the unsorted units (but not noise artifacts!)
    should be included in the spike train that is used to estimate the PBEs. By
    default, unit_id=0 is assumed to be unsorted, but this can be changed, or if
    no unsorted units are present, you can set unsorted_id=None. Equivalently,
    if min_active=0, then no restriction will apply, and the unsorted_id will
    have no effect on the final PBE epochs.

    Examples
    --------
    PBE_epochs = get_PBEs(mua_asa)
    PBE_epochs = get_PBEs(spiketrain, min_active=5)
    PBE_epochs = get_PBEs(binnedspiketrain, min_active=5)

    Parameters
    ----------
    data : AnalogSignalArray
        AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
     -- OR --
    data : SpikeTrainArray
        SpikeTrainArray with multiple units, including unsorted unit(s), but
        excluding any noise artifacts.
     -- OR --
    data : BinnedSpikeTrainArray
        BinnedSpikeTrainArray containing multiunit activity.
    fs : float, optional
        Sampling frequency of mua, in Hz. If not specified, it will be inferred
        from data.
    ds : float, optional
        Time step in which to bin spikes. Default is 1 ms.
    sigma : float, optional
        Standard deviation (in seconds) of Gaussian smoothing kernel.
        Default is 10 ms. If sigma==0 then no smoothing is applied.
    truncate : float, optional
        Number of standard deviations at which to truncate the Gaussian
        smoothing kernel. Default is 6.
    unsorted_id : int, optional
        unit_id of the unsorted unit. Default is 0. If no unsorted unit is
        present, then set unsorted_id = None
    min_active : int, optional
        Minimum number of active units per event, excluding unsorted unit.
        Default is 5.
    minLength : float, optional
        Minimum event duration in seconds. Default is 50 ms.
    maxLength : float, optional
        Maximum event duration in seconds. Default is 750 ms.
    PrimaryThreshold : float, optional
        Primary threshold to exceed. Default is mean() + 3*std()
    SecondaryThreshold : float, optional
        Secondary threshold to fall back to. Default is mean().
    minThresholdLength : float, optional
        Minimum duration to stay above PrimaryThreshold. Default is 0 ms.

    Returns
    -------
    PBE_epochs : EpochArray
        EpochArray containing all the PBEs.

    Future improvements
    -------------------
    As of now, it is possible, but not easy to specify the Primary and Secondary
    thresholds for event detection. A slight change in API might be needed to
    make this specification more flexible.
    """

    if sigma is None:
        sigma = 0.01  # 10 ms standard deviation
    if truncate is None:
        truncate = 6

    if isinstance(data, core.AnalogSignalArray):
        # if we have only mua, then we cannot set (ds, unsorted_id, min_active)
        if ds is not None:
            raise ValueError(
                "if data is an AnalogSignalArray then ds cannot be specified!"
            )
        if unsorted_id:
            raise ValueError(
                "if data is an AnalogSignalArray then unsorted_id cannot be specified!"
            )
        if min_active is not None:
            raise ValueError(
                "if data is an AnalogSignalArray then min_active cannot be specified!"
            )
        mua = data
        mua._data = mua._data.astype(float)
        if (sigma != 0) and (truncate > 0):
            mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)

    elif isinstance(data, (core.EventArray, core.BinnedEventArray)):
        # set default parameter values:
        if ds is None:
            ds = 0.001  # default 1 ms
        if min_active is None:
            min_active = 5
        mua = get_mua(data, ds=ds, sigma=sigma, truncate=truncate, _fast=True)
    else:
        raise TypeError(
            "data has to be one of (AnalogSignalArray, SpikeTrainArray, BinnedSpikeTrainArray)"
        )

    # set default parameter values:
    if fs is None:
        fs = mua.fs
    if minLength is None:
        minLength = 0.050  # 50 ms minimum event duration
    if maxLength is None:
        maxLength = 0.750  # 750 ms maximum event duration
    if minThresholdLength is None:
        minThresholdLength = 0.0
    # PrimaryThreshold and SecondaryThreshold defaults (mean + 3*std, and the
    # mean, respectively) are filled in downstream, by get_mua_events():
    PBE_epochs = get_mua_events(
        mua=mua,
        fs=fs,
        minLength=minLength,
        maxLength=maxLength,
        PrimaryThreshold=PrimaryThreshold,
        minThresholdLength=minThresholdLength,
        SecondaryThreshold=SecondaryThreshold,
    )

    # now require min_active number of sorted cells
    if isinstance(data, (core.EventArray, core.BinnedEventArray)):
        if min_active > 0:
            if unsorted_id is not None:
                # remove unsorted unit, if present:
                unit_ids = copy.deepcopy(data.unit_ids)
                try:
                    unit_ids.remove(unsorted_id)
                except ValueError:
                    pass
                # data_ = data._unit_subset(unit_ids)
                data_ = data.loc[:, unit_ids]
            else:
                data_ = data
            # determine number of active units per epoch:
            n_active = np.array([snippet.n_active for snippet in data_[PBE_epochs]])
            active_epochs_idx = np.argwhere(n_active > min_active).squeeze()
            # only keep those epochs where sufficiently many units are active:
            PBE_epochs = PBE_epochs[active_epochs_idx]
    return PBE_epochs

get_contiguous_segments(data, *, step=None, assume_sorted=None, in_core=True, index=False, inclusive=False, fs=None, sort=None, in_memory=None)

Compute contiguous segments (separated by step) in a list.

Note! This function requires a sorted list. It first checks, in O(n) time, whether the list is sorted, and only sorts it (an O(n log n) operation) if necessary. If you know that the list is already sorted, pass assume_sorted=True to skip the O(n) check.

Returns an array of size (n_segments, 2), with each row being of the form ([start, stop]) [inclusive, exclusive].

NOTE: when possible, use assume_sorted=True, and step=1 as explicit arguments to function call.

WARNING! Step is robustly computed in-core (i.e., when in_core is True), but is assumed to be 1 when out-of-core.

Examples:

>>> data = [1, 2, 3, 4, 10, 11, 12]
>>> get_contiguous_segments(data)
([1,5], [10,13])
>>> get_contiguous_segments(data, index=True)
([0,4], [4,7])
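
Passing inclusive=True (which requires index=True) closes both ends of each index pair; continuing the example above:

>>> get_contiguous_segments(data, index=True, inclusive=True)
([0,3], [4,6])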

Parameters:

Name Type Description Default
data array - like

1D array of sequential data, typically assumed to be integral (sample numbers).

required
step float

Expected step size for neighboring samples. Default uses numpy to find the median, but it is much faster and memory efficient to explicitly pass in step=1.

None
assume_sorted bool

If assume_sorted == True, then data is not inspected or re-ordered. This can be significantly faster, especially for out-of-core computation, but it should only be used when you are confident that the data is indeed sorted, otherwise the results from get_contiguous_segments will not be reliable.

None
in_core bool

If True, then we use np.diff which requires all the data to fit into memory simultaneously, otherwise we use groupby, which uses a generator to process potentially much larger chunks of data, but is also much slower.

True
index bool

If True, the indices of segment boundaries will be returned. Otherwise, the segment boundaries will be returned in terms of the data itself. Default is False.

False
inclusive bool

If True, the boundaries are returned as [(inclusive idx, inclusive idx)]. Default is False, and can only be used when index==True.

False
Source code in nelpy/utils.py
def get_contiguous_segments(
    data,
    *,
    step=None,
    assume_sorted=None,
    in_core=True,
    index=False,
    inclusive=False,
    fs=None,
    sort=None,
    in_memory=None,
):
    """Compute contiguous segments (seperated by step) in a list.

    Note! This function requires that a sorted list is passed.
    It first checks if the list is sorted O(n), and only sorts O(n log(n))
    if necessary. But if you know that the list is already sorted,
    you can pass assume_sorted=True, in which case it will skip
    the O(n) check.

    Returns an array of size (n_segments, 2), with each row
    being of the form ([start, stop]) [inclusive, exclusive].

    NOTE: when possible, use assume_sorted=True, and step=1 as explicit
          arguments to function call.

    WARNING! Step is robustly computed in-core (i.e., when in_core is
        True), but is assumed to be 1 when out-of-core.

    Examples
    --------
    >>> data = [1, 2, 3, 4, 10, 11, 12]
    >>> get_contiguous_segments(data)
    ([1,5], [10,13])
    >>> get_contiguous_segments(data, index=True)
    ([0,4], [4,7])

    Parameters
    ----------
    data : array-like
        1D array of sequential data, typically assumed to be integral (sample
        numbers).
    step : float, optional
        Expected step size for neighboring samples. Default uses numpy to find
        the median, but it is much faster and memory efficient to explicitly
        pass in step=1.
    assume_sorted : bool, optional
        If assume_sorted == True, then data is not inspected or re-ordered. This
        can be significantly faster, especially for out-of-core computation, but
        it should only be used when you are confident that the data is indeed
        sorted, otherwise the results from get_contiguous_segments will not be
        reliable.
    in_core : bool, optional
        If True, then we use np.diff which requires all the data to fit
        into memory simultaneously, otherwise we use groupby, which uses
        a generator to process potentially much larger chunks of data,
        but is also much slower.
    index : bool, optional
        If True, the indices of segment boundaries will be returned. Otherwise,
        the segment boundaries will be returned in terms of the data itself.
        Default is False.
    inclusive : bool, optional
        If True, the boundaries are returned as [(inclusive idx, inclusive idx)].
        Default is False, and can only be used when index==True.

    Deprecated
    ----------
    in_memory : bool, optional
        This is equivalent to the new 'in_core'.
    sort : bool, optional
        This is equivalent to the new 'assume_sorted'.
    fs : float, optional
        Sampling rate (Hz), used to extend the half-open interval support by
        1/fs; equivalent to passing step=1/fs.
    """

    # handle deprecated API calls:
    if in_memory:
        in_core = in_memory
        logging.warning("'in_memory' has been deprecated; use 'in_core' instead")
    if sort:
        assume_sorted = sort
        logging.warning("'sort' has been deprecated; use 'assume_sorted' instead")
    if fs:
        step = 1 / fs
        logging.warning("'fs' has been deprecated; use 'step' instead")

    if inclusive:
        assert index, "option 'inclusive' can only be used with 'index=True'"
    if in_core:
        data = np.asarray(data)

        if not assume_sorted:
            if not is_sorted(data):
                data = np.sort(data)  # algorithm assumes sorted list

        if step is None:
            step = np.median(np.diff(data))

        # assuming that data(t1) is sampled somewhere on [t, t+1/fs) we have a 'continuous' signal as long as
        # data(t2 = t1+1/fs) is sampled somewhere on [t+1/fs, t+2/fs). In the most extreme case, it could happen
        # that t1 = t and t2 = t + 2/fs, i.e. a difference of 2 steps.

        if np.any(np.diff(data) < step):
            logging.warning(
                "some steps in the data are smaller than the requested step size."
            )

        breaks = np.argwhere(np.diff(data) >= 2 * step)
        starts = np.insert(breaks + 1, 0, 0)
        stops = np.append(breaks, len(data) - 1)
        bdries = np.vstack((data[starts], data[stops] + step)).T
        if index:
            if inclusive:
                indices = np.vstack((starts, stops)).T
            else:
                indices = np.vstack((starts, stops + 1)).T
            return indices
    else:
        from itertools import groupby
        from operator import itemgetter

        if not assume_sorted:
            if not is_sorted(data):
                # data = np.sort(data)  # algorithm assumes sorted list
                raise NotImplementedError(
                    "out-of-core sorting has not been implemented yet..."
                )

        if step is None:
            step = 1

        bdries = []

        if not index:
            for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
                f = itemgetter(1)
                gen = (f(x) for x in g)
                start = next(gen)
                stop = start
                for stop in gen:
                    pass
                bdries.append([start, stop + step])
        else:
            counter = 0
            for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
                f = itemgetter(1)
                gen = (f(x) for x in g)
                _ = next(gen)
                start = counter
                stop = start
                for _ in gen:
                    stop += 1
                if inclusive:
                    bdries.append([start, stop])
                else:
                    bdries.append([start, stop + 1])
                counter = stop + 1

    return np.asarray(bdries)

get_direction(asa, *, sigma=None)

Return epochs during which an animal was running left to right, or right to left.

Parameters:

Name Type Description Default
asa AnalogSignalArray 1D

AnalogSignalArray containing the 1D position data.

required
sigma float

Smoothing to apply to position (x) before computing gradient estimate. Default is 0.

None

Returns:

Type Description
l2r, r2l : EpochArrays

EpochArrays corresponding to left-to-right and right-to-left movement.
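
For example, a hedged usage sketch (pos stands for an existing 1D AnalogSignalArray of position):

l2r, r2l = get_direction(pos, sigma=0.1)  # smooth position with a 100 ms kernel first
pos_l2r = pos[l2r]  # restrict position to the left-to-right runs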

Source code in nelpy/utils.py
def get_direction(asa, *, sigma=None):
    """Return epochs during which an animal was running left to right, or right
    to left.

    Parameters
    ----------
    asa : AnalogSignalArray 1D
        AnalogSignalArray containing the 1D position data.
    sigma : float, optional
        Smoothing to apply to position (x) before computing gradient estimate.
        Default is 0.

    Returns
    -------
    l2r, r2l : EpochArrays
        EpochArrays corresponding to left-to-right and right-to-left movement.
    """
    if sigma is None:
        sigma = 0
    if not isinstance(asa, core.AnalogSignalArray):
        raise TypeError("AnalogSignalArray expected!")
    assert asa.n_signals == 1, "1D AnalogSignalArray expected!"

    direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma), rectify=False).data
    direction[direction >= 0] = 1
    direction[direction < 0] = -1
    direction = direction.squeeze()

    l2r = get_contiguous_segments(np.argwhere(direction > 0).squeeze(), step=1)
    l2r[:, 1] -= (
        1  # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
    )
    l2r = core.EpochArray(asa.abscissa_vals[l2r])

    r2l = get_contiguous_segments(np.argwhere(direction < 0).squeeze(), step=1)
    r2l[:, 1] -= (
        1  # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
    )
    r2l = core.EpochArray(asa.abscissa_vals[r2l])

    return l2r, r2l

get_events_boundaries(x, *, PrimaryThreshold=None, SecondaryThreshold=None, minThresholdLength=None, minLength=None, maxLength=None, ds=None, mode='above')

get event boundaries such that event.max >= PrimaryThreshold and the event extent is defined by SecondaryThreshold.

Note that when PrimaryThreshold==SecondaryThreshold, then this is a simple threshold crossing algorithm.

NB. minLength and maxLength are applied to the SecondaryThreshold events, whereas minThresholdLength is applied to the PrimaryThreshold events.

Parameters:

Name Type Description Default
x numpy array

Input data

required
mode string, optional in ['above', 'below']; default 'above'

event triggering above, or below threshold

'above'
PrimaryThreshold float

If mode=='above', requires that event.max >= PrimaryThreshold If mode=='below', requires that event.min <= PrimaryThreshold

None
SecondaryThreshold float

The value that defines the event extent

None
minThresholdLength float

Minimum duration for which the PrimaryThreshold is crossed

None
minLength float

Minimum duration for which the SecondaryThreshold is crossed

None
maxLength float

Maximum duration for which the SecondaryThreshold is crossed

None
ds float

Time step of the input data x

None

Returns:

Type Description
returns bounds, maxes, events

where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive
      maxes  <==> maximum value during each event
      events <==> PrimaryThreshold to PrimaryThreshold, inclusive
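
A small synthetic sketch (values chosen by hand for illustration; ds is supplied since the duration criteria need it):

import numpy as np
from nelpy.utils import get_events_boundaries

x = np.zeros(200)
x[50:70] = 5.0  # a single burst well above threshold
bounds, maxes, events = get_events_boundaries(
    x, PrimaryThreshold=3.0, SecondaryThreshold=1.0, ds=0.001
)
# expected: bounds -> [[50, 69]], maxes -> [5.0], events -> [[50, 69]]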

Source code in nelpy/utils.py
def get_events_boundaries(
    x,
    *,
    PrimaryThreshold=None,
    SecondaryThreshold=None,
    minThresholdLength=None,
    minLength=None,
    maxLength=None,
    ds=None,
    mode="above",
):
    """get event boundaries such that event.max >= PrimaryThreshold
    and the event extent is defined by SecondaryThreshold.

    Note that when PrimaryThreshold==SecondaryThreshold, then this is a
    simple threshold crossing algorithm.

    NB. minLength and maxLength are applied to the SecondaryThreshold
        events, whereas minThresholdLength is applied to the
        PrimaryThreshold events.

    Parameters
    ----------
    x : numpy array
        Input data
    mode : string, optional in ['above', 'below']; default 'above'
        event triggering above, or below threshold
    PrimaryThreshold : float, optional
        If mode=='above', requires that event.max >= PrimaryThreshold
        If mode=='below', requires that event.min <= PrimaryThreshold
    SecondaryThreshold : float, optional
        The value that defines the event extent
    minThresholdLength : float, optional
        Minimum duration for which the PrimaryThreshold is crossed
    minLength : float, optional
        Minimum duration for which the SecondaryThreshold is crossed
    maxLength : float, optional
        Maximum duration for which the SecondaryThreshold is crossed
    ds : float, optional
        Time step of the input data x

    Returns
    -------
    returns bounds, maxes, events
        where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive
              maxes  <==> maximum value during each event
              events <==> PrimaryThreshold to PrimaryThreshold, inclusive
    """

    # TODO: x must be a numpy array
    # TODO: ds is often used, but we have no default, and no check for when
    #       it is left as None.
    # TODO: the Docstring should equally be improved.

    x = x.squeeze()
    if x.ndim > 1:
        raise TypeError("multidimensional arrays not supported!")

    if PrimaryThreshold is None:  # by default, threshold is 3 SDs above mean of x
        PrimaryThreshold = np.mean(x) + 3 * np.std(x)

    if SecondaryThreshold is None:  # by default, revert back to mean of x
        SecondaryThreshold = np.mean(x)  # + 0*np.std(x)

    events, primary_maxes = find_threshold_crossing_events(
        x=x, threshold=PrimaryThreshold, mode=mode
    )

    # apply minThresholdLength criterion:
    if minThresholdLength is not None and len(events) > 0:
        durations = (events[:, 1] - events[:, 0] + 1) * ds
        events = events[durations >= minThresholdLength]

    if len(events) == 0:
        bounds, maxes, events = [], [], []
        logging.warning("no events satisfied criteria")
        return bounds, maxes, events

    # Find periods where value is > SecondaryThreshold; note that the previous periods should be within these!
    if mode == "above":
        assert SecondaryThreshold <= PrimaryThreshold, (
            "Secondary Threshold by definition should include more data than Primary Threshold"
        )
    elif mode == "below":
        assert SecondaryThreshold >= PrimaryThreshold, (
            "Secondary Threshold by definition should include more data than Primary Threshold"
        )
    else:
        raise NotImplementedError(
            "mode {} not understood for find_threshold_crossing_events".format(
                str(mode)
            )
        )

    bounds, broader_maxes = find_threshold_crossing_events(
        x=x, threshold=SecondaryThreshold, mode=mode
    )

    # Find corresponding big windows for potential events
    #  Specifically, look for closest left edge that is just smaller
    outer_boundary_indices = np.searchsorted(bounds[:, 0], events[:, 0], side="right")
    #  searchsorted finds the index after, so subtract one to get index before
    outer_boundary_indices = outer_boundary_indices - 1

    # Find extended boundaries for events by pairing to larger windows
    #   (Note that there may be repeats if the larger window contains multiple > 3SD sections)
    bounds = bounds[outer_boundary_indices, :]
    maxes = broader_maxes[outer_boundary_indices]

    if minLength is not None and len(events) > 0:
        durations = (bounds[:, 1] - bounds[:, 0] + 1) * ds
        # TODO: refactor [durations <= maxLength] but be careful about edge cases
        bounds = bounds[durations >= minLength]
        maxes = maxes[durations >= minLength]
        events = events[durations >= minLength]

    if maxLength is not None and len(events) > 0:
        durations = (bounds[:, 1] - bounds[:, 0] + 1) * ds
        # TODO: refactor [durations <= maxLength] but be careful about edge cases
        bounds = bounds[durations <= maxLength]
        maxes = maxes[durations <= maxLength]
        events = events[durations <= maxLength]

    if len(events) == 0:
        bounds, maxes, events = [], [], []
        logging.warning("no events satisfied criteria")
        return bounds, maxes, events

    # Now, since all we care about are the larger windows, we should get rid of repeats
    _, unique_idx = np.unique(bounds[:, 0], return_index=True)
    bounds = bounds[unique_idx, :]  # SecondaryThreshold to SecondaryThreshold
    maxes = maxes[unique_idx]  # maximum value during event
    events = events[unique_idx, :]  # PrimaryThreshold to PrimaryThreshold

    return bounds, maxes, events

get_inactive_epochs(speed, v1=5, v2=7)

Return epochs where animal is running no faster than specified by v1 and v2.

Parameters:

Name Type Description Default
speed AnalogSignalArray

AnalogSignalArray containing single channel speed, in units/sec

required
v1 float

Speed (in same units as speed) that the animal must slow down to (or below) during an event. Default is 5 [units/sec]

5
v2 float

Speed that defines the event boundaries. Default is 7 [units/sec]

7

Returns:

Name Type Description
inactive_epochs EpochArray

EpochArray with all the epochs where speed satisfied the criteria.

Source code in nelpy/utils.py
def get_inactive_epochs(speed, v1=5, v2=7):
    """Return epochs where animal is running no faster than specified by
    v1 and v2.

    Parameters
    ----------
    speed : AnalogSignalArray
        AnalogSignalArray containing single channel speed, in units/sec
    v1 : float, optional
        Speed (in same units as speed) that the animal must slow down to (or
        below) during an event. Default is 5 [units/sec]
    v2 : float, optional
        Speed that defines the event boundaries. Default is 7 [units/sec]

    Returns
    -------
    inactive_epochs : EpochArray
        EpochArray with all the epochs where speed satisfied the criteria.
    """
    inactive_epochs = get_threshold_crossing_epochs(
        asa=speed, t1=v1, t2=v2, mode="below"
    )
    return inactive_epochs

get_mua(st, ds=None, sigma=None, truncate=None, _fast=True)

Compute the multiunit activity (MUA) from a spike train.

Parameters:

Name Type Description Default
st SpikeTrainArray

SpikeTrainArray containing one or more units. -- OR --

required
st BinnedSpikeTrainArray

BinnedSpikeTrainArray containing multiunit activity.

required
ds float

Time step in which to bin spikes. Default is 1 ms.

None
sigma float

Standard deviation (in seconds) of Gaussian smoothing kernel. Default is 10 ms. If sigma==0 then no smoothing is applied.

None
truncate float

Number of standard deviations at which to truncate the Gaussian smoothing kernel. Default is 6.

None

Returns:

Name Type Description
mua AnalogSignalArray

AnalogSignalArray with MUA.
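
For instance, a hedged sketch (st stands for an existing SpikeTrainArray):

from nelpy.utils import get_mua

mua = get_mua(st, ds=0.001, sigma=0.01)  # 1 ms bins, 10 ms smoothing kernel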

Source code in nelpy/utils.py
@keyword_deprecation(replace_x_with_y={"bw": "truncate"})
def get_mua(st, ds=None, sigma=None, truncate=None, _fast=True):
    """Compute the multiunit activity (MUA) from a spike train.

    Parameters
    ----------
    st : SpikeTrainArray
        SpikeTrainArray containing one or more units.
     -- OR --
    st : BinnedSpikeTrainArray
        BinnedSpikeTrainArray containing multiunit activity.
    ds : float, optional
        Time step in which to bin spikes. Default is 1 ms.
    sigma : float, optional
        Standard deviation (in seconds) of Gaussian smoothing kernel.
        Default is 10 ms. If sigma==0 then no smoothing is applied.
    truncate : float, optional
        Number of standard deviations at which to truncate the Gaussian
        smoothing kernel. Default is 6.

    Returns
    -------
    mua : AnalogSignalArray
        AnalogSignalArray with MUA.
    """

    if ds is None:
        ds = 0.001  # 1 ms bin size
    if sigma is None:
        sigma = 0.01  # 10 ms standard deviation
    if truncate is None:
        truncate = 6

    if isinstance(st, core.EventArray):
        # bin spikes, so that we can count the spikes
        mua_binned = st.bin(ds=ds).flatten()
    elif isinstance(st, core.BinnedEventArray):
        mua_binned = st.flatten()
        ds = mua_binned.ds
    else:
        raise TypeError("st has to be one of (SpikeTrainArray, BinnedSpikeTrainArray)")

    # make sure data type is float, so that smoothing works, and convert to rate
    mua_binned._data = mua_binned._data.astype(float) / ds

    # TODO: now that we can simply cast from BST to ASA and back, the following logic could be simplified:
    # put mua rate inside an AnalogSignalArray
    if _fast:
        mua = core.AnalogSignalArray([], empty=True)
        mua._data = mua_binned.data
        mua._abscissa_vals = mua_binned.bin_centers
        mua._abscissa.support = mua_binned.support
    else:
        mua = core.AnalogSignalArray(
            mua_binned.data, timestamps=mua_binned.bin_centers, fs=1 / ds
        )

    mua._fs = 1 / ds

    if (sigma != 0) and (truncate > 0):
        mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)

    return mua

get_mua_events(mua, fs=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None)

Determine MUA/PBEs from multiunit activity.

MUA : multiunit activity
PBE : population burst event

Parameters:

Name Type Description Default
mua AnalogSignalArray

AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].

required
fs float

Sampling frequency of mua, in Hz. If not specified, it will be inferred from mua.fs

None
minLength float
None
maxLength float
None
PrimaryThreshold float
None
SecondaryThreshold float
None
minThresholdLength float
None

Returns:

Name Type Description
mua_epochs EpochArray

EpochArray containing all the MUA events / PBEs.

Examples:

mua = get_mua(spiketrain)
mua_epochs = get_mua_events(mua)
PBEs = get_PBEs(spiketrain, min_active=5)
     = get_PBEs(get_mua_events(get_mua(*)), spiketrain, min_active=5)

Source code in nelpy/utils.py
def get_mua_events(
    mua,
    fs=None,
    minLength=None,
    maxLength=None,
    PrimaryThreshold=None,
    minThresholdLength=None,
    SecondaryThreshold=None,
):
    """Determine MUA/PBEs from multiunit activity.

    MUA : multiunit activity
    PBE : population burst event

    Parameters
    ----------
    mua : AnalogSignalArray
        AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
    fs : float, optional
        Sampling frequency of mua, in Hz. If not specified, it will be inferred from
        mua.fs
    minLength : float, optional
    maxLength : float, optional
    PrimaryThreshold : float, optional
    SecondaryThreshold : float, optional
    minThresholdLength : float, optional

    Returns
    -------
    mua_epochs : EpochArray
        EpochArray containing all the MUA events / PBEs.

    Examples
    --------
    mua = get_mua(spiketrain)
    mua_epochs = get_mua_events(mua)
    PBEs = get_PBEs(spiketrain, min_active=5)
         = get_PBEs(get_mua_events(get_mua(*)), spiketrain, min_active=5)
    """

    if fs is None:
        fs = mua.fs
    if fs is None:
        raise ValueError("fs must either be specified, or must be contained in mua!")

    if PrimaryThreshold is None:
        PrimaryThreshold = mua.mean() + 3 * mua.std()
    if SecondaryThreshold is None:
        SecondaryThreshold = mua.mean()
    if minLength is None:
        minLength = 0.050  # 50 ms minimum event duration
    if maxLength is None:
        maxLength = 0.750  # 750 ms maximum event duration
    if minThresholdLength is None:
        minThresholdLength = 0.0

    # determine MUA event bounds:
    mua_bounds_idx, maxes, _ = get_events_boundaries(
        x=mua.data,
        PrimaryThreshold=PrimaryThreshold,
        SecondaryThreshold=SecondaryThreshold,
        minThresholdLength=minThresholdLength,
        minLength=minLength,
        maxLength=maxLength,
        ds=1 / fs,
    )

    if len(mua_bounds_idx) == 0:
        logging.warning("no mua events detected")
        return core.EpochArray(empty=True)

    # store MUA bounds in an EpochArray
    mua_epochs = core.EpochArray(mua.time[mua_bounds_idx])

    return mua_epochs

get_run_epochs(speed, v1=10, v2=8)

Return epochs where animal is running at least as fast as specified by v1 and v2.

Parameters:

Name Type Description Default
speed AnalogSignalArray

AnalogSignalArray containing single channel speed, in units/sec

required
v1 float

Minimum speed (in same units as speed) that has to be reached / exceeded during an event. Default is 10 [units/sec]

10
v2 float

Speed that defines the event boundaries. Default is 8 [units/sec]

8

Returns:

Name Type Description
run_epochs EpochArray

EpochArray with all the epochs where speed satisfied the criteria.

Source code in nelpy/utils.py
def get_run_epochs(speed, v1=10, v2=8):
    """Return epochs where animal is running at least as fast as
    specified by v1 and v2.

    Parameters
    ----------
    speed : AnalogSignalArray
        AnalogSignalArray containing single channel speed, in units/sec
    v1 : float, optional
        Minimum speed (in same units as speed) that has to be reached /
        exceeded during an event. Default is 10 [units/sec]
    v2 : float, optional
        Speed that defines the event boundaries. Default is 8 [units/sec]

    Returns
    -------
    run_epochs : EpochArray
        EpochArray with all the epochs where speed satisfied the criteria.
    """

    run_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode="above")

    return run_epochs

get_sort_idx(tuning_curves)

Find indices to sort neurons by maximum firing rate location in tuning curve.

Parameters:

Name Type Description Default
tuning_curves list of lists

List where each inner list contains the tuning curve for an individual neuron.

required

Returns:

Name Type Description
sorted_idx list

List of integers that correspond to neurons sorted by the location of their maximum firing rate in the tuning curve.

Examples:

>>> tuning_curves = [[1, 5, 2], [4, 1, 3], [2, 3, 6]]
>>> get_sort_idx(tuning_curves)
[1, 0, 2]  # sorted by position of max firing rate
Source code in nelpy/utils.py
def get_sort_idx(tuning_curves):
    """
    Find indices to sort neurons by maximum firing rate location in tuning curve.

    Parameters
    ----------
    tuning_curves : list of lists
        List where each inner list contains the tuning curve for an individual
        neuron.

    Returns
    -------
    sorted_idx : list
        List of integers that correspond to neurons sorted by the location
        of their maximum firing rate in the tuning curve.

    Examples
    --------
    >>> tuning_curves = [[1, 5, 2], [4, 1, 3], [2, 3, 6]]
    >>> get_sort_idx(tuning_curves)
    [1, 0, 2]  # sorted by position of max firing rate
    """
    tc_max_loc = []
    for i, neuron_tc in enumerate(tuning_curves):
        tc_max_loc.append((i, np.where(neuron_tc == np.max(neuron_tc))[0][0]))
    sorted_by_tc = sorted(tc_max_loc, key=lambda x: x[1])

    sorted_idx = []
    for idx in sorted_by_tc:
        sorted_idx.append(idx[0])

    return sorted_idx

get_threshold_crossing_epochs(asa, t1=None, t2=None, mode='above')

Return epochs where a signal crosses a compound threshold specified by t1 and t2.

Parameters:

Name Type Description Default
asa AnalogSignalArray

AnalogSignalArray containing a single channel

required
t1 float

Primary threshold. Minimum signal value that has to be reached / exceeded during an event. Default is 3 standard deviations above signal mean.

None
t2 float

Secondary threshold. Signal value that defines the event boundaries. Default is signal mean.

None
mode string

Mode of operation. One of ['above', 'below']. If 'above', then return epochs where the signal exceeds the compound threshold, and if 'below', then return epochs where the signal falls below the compound threshold. Default is 'above'.

'above'

Returns:

Name Type Description
epochs EpochArray

EpochArray with all the epochs where the signal satisfied the criteria.
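
For example, a hedged sketch (speed stands for an existing single-channel AnalogSignalArray); this is the same compound-threshold logic that get_run_epochs and get_inactive_epochs wrap:

run_epochs = get_threshold_crossing_epochs(speed, t1=10, t2=8, mode="above")
still_epochs = get_threshold_crossing_epochs(speed, t1=5, t2=7, mode="below")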

Source code in nelpy/utils.py
def get_threshold_crossing_epochs(asa, t1=None, t2=None, mode="above"):
    """Return epochs where a signal crosses a compound threshold specified by t1
    and t2.

    Parameters
    ----------
    asa : AnalogSignalArray
        AnalogSignalArray containing a single channel
    t1 : float, optional
        Primary threshold. Minimum signal value that has to be reached /
        exceeded during an event. Default is 3 standard deviations above signal
        mean.
    t2 : float, optional
        Secondary threshold. Signal value that defines the event boundaries.
        Default is signal mean.
    mode : string, optional
        Mode of operation. One of ['above', 'below']. If 'above', then return
        epochs where the signal exceeds the compound threshold, and if 'below',
        then return epochs where the signal falls below the compound threshold.
        Default is 'above'.

    Returns
    -------
    epochs : EpochArray
        EpochArray with all the epochs where the signal satisfied the criteria.
    """

    if asa.n_signals > 1:
        raise TypeError("multidimensional AnalogSignalArrays not supported!")
    x = asa.data.squeeze()

    if t1 is None:  # by default, threshold is 3 SDs above mean of x
        t1 = np.mean(x) + 3 * np.std(x)

    if t2 is None:  # by default, revert back to mean of x
        t2 = np.mean(x)

    # compute periods where signal exceeds compound threshold
    epoch_bounds, _, _ = get_events_boundaries(
        x=x, PrimaryThreshold=t1, SecondaryThreshold=t2, mode=mode
    )
    # convert bounds to time in seconds
    epoch_bounds = asa.time[epoch_bounds]
    if len(epoch_bounds) == 0:
        return type(asa._abscissa.support)(empty=True)
    # add 1/fs to stops for open interval
    epoch_bounds[:, 1] += 1 / asa.fs
    # create EpochArray with threshold-exceeding bounds
    epochs = type(asa._abscissa.support)(epoch_bounds)
    return epochs

information_rate(ratemap, Pi=1)

Compute the spatial information rate of a cell's spiking with respect to a variable x, in bits/second.

Parameters:

Name Type Description Default
ratemap ndarray

A firing rate map, any number of dimensions.

required
Pi ndarray

A probability distribution over the bins of the rate map. If not provided, it is assumed to be uniform.

1

Returns:

Name Type Description
ir ndarray

The information rate of the cell, in bits/second.
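
A small numerical sketch (the rate map and occupancy below are made-up values; the rate map has shape (n_units, n_bins)):

import numpy as np
from nelpy.utils import information_rate

ratemap = np.array([[2.0, 0.5, 1.5]])  # one unit, three spatial bins
Pi = np.array([0.25, 0.5, 0.25])  # occupancy distribution over the bins
ir = information_rate(ratemap, Pi)  # bits/second; one value per unit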

Source code in nelpy/utils.py
def information_rate(ratemap, Pi=1):
    """
    Compute the spatial information rate of a cell's spiking with respect to a
    variable x, in bits/second.

    Parameters
    ----------
    ratemap : numpy.ndarray
        A firing rate map, any number of dimensions.
    Pi : numpy.ndarray
        A probability distribution over the bins of the rate map. If not
        provided, it is assumed to be uniform.

    Returns
    -------
    ir : numpy.ndarray
        The information rate of the cell, in bits/second.

    """
    # coerce Pi to a float array first, so that the element-wise fixup below
    # also works for the scalar default (Pi=1); then convert Pi to a
    # probability distribution
    Pi = np.atleast_1d(np.asarray(Pi, dtype=float))
    Pi[np.isnan(Pi) | (Pi == 0)] = np.finfo(float).eps
    Pi = Pi / (np.sum(Pi) + np.finfo(float).eps)

    # Handle N-dimensional ratemaps (n_units, *spatial_dims)
    if len(ratemap.shape) < 2:
        raise TypeError(
            "rate map must have at least 2 dimensions (n_units, *spatial_dims)!"
        )

    # Sum over all spatial dimensions to get mean firing rate for each unit
    spatial_axes = tuple(range(1, len(ratemap.shape)))
    R = (ratemap * Pi).sum(axis=spatial_axes)
    return spatial_information(ratemap, Pi) * R

is_odd(n)

Check if a number is odd.

Parameters:

Name Type Description Default
n int

Integer to check.

required

Returns:

Type Description
bool

True if n is odd, False if n is even.

Examples:

>>> is_odd(3)
True
>>> is_odd(4)
False
>>> is_odd(-1)
True
Source code in nelpy/utils.py
def is_odd(n):
    """
    Check if a number is odd.

    Parameters
    ----------
    n : int
        Integer to check.

    Returns
    -------
    bool
        True if n is odd, False if n is even.

    Examples
    --------
    >>> is_odd(3)
    True
    >>> is_odd(4)
    False
    >>> is_odd(-1)
    True
    """
    return bool(n & 1)

is_sorted(x, chunk_size=None)

Check if a 1D array, list, or tuple is monotonically increasing (sorted).

Parameters:

Name Type Description Default
x array - like

1D array, list, or tuple to check.

required
chunk_size int

Size of chunks to check at a time (for large arrays).

None

Returns:

Name Type Description
sorted bool

True if x is sorted, False otherwise.

Examples:

>>> is_sorted([1, 2, 3])
True
>>> is_sorted([1, 3, 2])
False
Source code in nelpy/utils.py
def is_sorted(x, chunk_size=None):
    """
    Check if a 1D array, list, or tuple is monotonically increasing (sorted).

    Parameters
    ----------
    x : array-like
        1D array, list, or tuple to check.
    chunk_size : int, optional
        Size of chunks to check at a time (for large arrays).

    Returns
    -------
    sorted : bool
        True if x is sorted, False otherwise.

    Examples
    --------
    >>> is_sorted([1, 2, 3])
    True
    >>> is_sorted([1, 3, 2])
    False
    """

    if not isinstance(x, (tuple, list, np.ndarray)):
        raise TypeError("Unsupported type {}".format(type(x)))

    x = np.atleast_1d(np.array(x).squeeze())
    if x.ndim > 1:
        raise ValueError("Input x must be 1-dimensional")

    if chunk_size is None:
        chunk_size = 500000
    stop = x.size
    for chunk_start in range(0, stop, chunk_size):
        chunk_stop = int(min(stop, chunk_start + chunk_size + 1))
        chunk = x[chunk_start:chunk_stop]
        if not np.all(chunk[:-1] <= chunk[1:]):
            return False
    return True

is_sorted_general(iterable, key=lambda a, b: a <= b)

Check if an iterable is sorted according to a custom key function.

Parameters:

Name Type Description Default
iterable iterable

Sequence to check.

required
key callable

Function that takes two elements and returns True if they are in order. Default is lambda a, b: a <= b (monotonic increasing).

lambda a, b: a <= b

Returns:

Type Description
bool

True if iterable is sorted according to key function.

Examples:

>>> is_sorted_general([1, 2, 3, 4])
True
>>> is_sorted_general([4, 3, 2, 1])
False
>>> is_sorted_general([4, 3, 2, 1], key=lambda a, b: a >= b)  # decreasing
True
Source code in nelpy/utils.py
def is_sorted_general(iterable, key=lambda a, b: a <= b):
    """
    Check if an iterable is sorted according to a custom key function.

    Parameters
    ----------
    iterable : iterable
        Sequence to check.
    key : callable, optional
        Function that takes two elements and returns True if they are in order.
        Default is lambda a, b: a <= b (monotonic increasing).

    Returns
    -------
    bool
        True if iterable is sorted according to key function.

    Examples
    --------
    >>> is_sorted_general([1, 2, 3, 4])
    True
    >>> is_sorted_general([4, 3, 2, 1])
    False
    >>> is_sorted_general([4, 3, 2, 1], key=lambda a, b: a >= b)  # decreasing
    True
    """
    return all(key(a, b) for a, b in pairwise(iterable))

linear_merge(list1, list2)

Merge two sorted lists in linear time.

Parameters:

Name Type Description Default
list1 list or ndarray

First sorted list.

required
list2 list or ndarray

Second sorted list.

required

Returns:

Name Type Description
merged generator

Generator yielding merged, sorted elements.

Examples:

>>> list(linear_merge([1, 3, 5], [2, 4, 6]))
[1, 2, 3, 4, 5, 6]
>>> list(linear_merge([1, 2, 2, 3], [2, 2, 4, 4]))
[1, 2, 2, 2, 2, 3, 4, 4]
Source code in nelpy/utils.py
def linear_merge(list1, list2):
    """
    Merge two sorted lists in linear time.

    Parameters
    ----------
    list1 : list or np.ndarray
        First sorted list.
    list2 : list or np.ndarray
        Second sorted list.

    Returns
    -------
    merged : generator
        Generator yielding merged, sorted elements.

    Examples
    --------
    >>> list(linear_merge([1, 3, 5], [2, 4, 6]))
    [1, 2, 3, 4, 5, 6]
    >>> list(linear_merge([1, 2, 2, 3], [2, 2, 4, 4]))
    [1, 2, 2, 2, 2, 3, 4, 4]
    """

    # if any of the lists are empty, return the other (possibly also
    # empty) list: (this is necessary because having either list1 or
    # list2 be empty makes this quite a bit more complicated...)
    if isinstance(list1, (list, np.ndarray)):
        if len(list1) == 0:
            list2 = iter(list2)
            while True:
                try:
                    yield next(list2)
                except StopIteration:
                    return
    if isinstance(list2, (list, np.ndarray)):
        if len(list2) == 0:
            list1 = iter(list1)
            while True:
                try:
                    yield next(list1)
                except StopIteration:
                    return

    list1 = iter(list1)
    list2 = iter(list2)

    value1 = next(list1)
    value2 = next(list2)

    # We'll normally exit this loop from a next() call raising
    # StopIteration, which is how a generator function exits anyway.
    while True:
        if value1 <= value2:
            # Yield the lower value.
            try:
                yield value1
            except StopIteration:
                return
            try:
                # Grab the next value from list1.
                value1 = next(list1)
            except StopIteration:
                # list1 is empty.  Yield the last value we received from list2, then
                # yield the rest of list2.
                try:
                    yield value2
                except StopIteration:
                    return
                while True:
                    try:
                        yield next(list2)
                    except StopIteration:
                        return
        else:
            try:
                yield value2
            except StopIteration:
                return
            try:
                value2 = next(list2)

            except StopIteration:
                # list2 is empty.
                try:
                    yield value1
                except StopIteration:
                    return
                while True:
                    try:
                        yield next(list1)
                    except StopIteration:
                        return

nextfastpower(n)

Return the next integral power of small factors greater than the given number. Specifically, return m such that m >= n and m == 2**x * 3**y * 5**z, where x, y, and z are integers. This is useful for ensuring fast FFT sizes.

From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)

See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html
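
For illustration (outputs worked out by hand from the definition above):

>>> nextfastpower(1000)  # 1000 == 2**3 * 5**3 is already a fast size
1000
>>> nextfastpower(1001)  # the next 5-smooth size is 2**10
1024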

Source code in nelpy/utils.py
def nextfastpower(n):
    """Return the next integral power of small factors greater than the given
    number.  Specifically, return m such that
        m >= n
        m == 2**x * 3**y * 5**z
    where x, y, and z are integers.
    This is useful for ensuring fast FFT sizes.

    From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)

    See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html
    """
    if n < 7:
        return max(n, 1)

    # x, y, and z are all bounded from above by the formula of nextpower.
    # Compute all possible combinations for powers of 3 and 5.
    # (Not too many for reasonable FFT sizes.)
    def power_series(x, base):
        nmax = int(ceil(log(x) / log(base)))
        return np.logspace(0.0, nmax, num=nmax + 1, base=base)

    n35 = np.outer(power_series(n, 3.0), power_series(n, 5.0))
    n35 = n35[n35 <= n]
    # Lump the powers of 3 and 5 together and solve for the powers of 2.
    n2 = nextpower(n / n35)
    return int(min(n2 * n35))

nextpower(n, base=2.0)

Return the next integral power of two greater than the given number. Specifically, return m such that m >= n and m == 2**x, where x is an integer. Use the base argument to specify a base other than 2. This is useful for ensuring fast FFT sizes.

From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)
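
For illustration (outputs worked out by hand from the definition above):

>>> nextpower(100)  # 2**7 == 128 is the next power of two >= 100
128
>>> nextpower(100, base=3.0)  # 3**5 == 243
243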

Source code in nelpy/utils.py
def nextpower(n, base=2.0):
    """Return the next integral power of two greater than the given number.
    Specifically, return m such that
        m >= n
        m == 2**x
    where x is an integer. Use base argument to specify a base other than 2.
    This is useful for ensuring fast FFT sizes.

    From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)
    """
    x = base ** ceil(log(n) / log(base))
    if isinstance(n, np.ndarray):
        return np.asarray(x, dtype=int)
    else:
        return int(x)

pairwise(iterable)

Return a zip of all neighboring pairs in an iterable.

Parameters:

Name Type Description Default
iterable iterable

Input iterable.

required

Returns:

Name Type Description
pairs zip

Iterator of pairs (a, b).

Examples:

>>> list(pairwise([2, 3, 6, 8, 7]))
[(2, 3), (3, 6), (6, 8), (8, 7)]
Source code in nelpy/utils.py
def pairwise(iterable):
    """
    Return a zip of all neighboring pairs in an iterable.

    Parameters
    ----------
    iterable : iterable
        Input iterable.

    Returns
    -------
    pairs : zip
        Iterator of pairs (a, b).

    Examples
    --------
    >>> list(pairwise([2, 3, 6, 8, 7]))
    [(2, 3), (3, 6), (6, 8), (8, 7)]
    """
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

ragged_array(arr)

Convert a list of arrays into a ragged array (object dtype).

Parameters:

Name Type Description Default
arr list of np.ndarray

List of arrays to combine into a ragged array.

required

Returns:

Name Type Description
out ndarray

Ragged array of dtype object.

Examples:

>>> ragged_array([np.arange(3), np.arange(5)])
array([array([0, 1, 2]), array([0, 1, 2, 3, 4])], dtype=object)
Source code in nelpy/utils.py
def ragged_array(arr):
    """
    Convert a list of arrays into a ragged array (object dtype).

    Parameters
    ----------
    arr : list of np.ndarray
        List of arrays to combine into a ragged array.

    Returns
    -------
    out : np.ndarray
        Ragged array of dtype object.

    Examples
    --------
    >>> ragged_array([np.arange(3), np.arange(5)])
    array([array([0, 1, 2]), array([0, 1, 2, 3, 4])], dtype=object)
    """
    n_elem = len(arr)
    out = np.array(n_elem * [None])
    for ii in range(out.shape[0]):
        out[ii] = arr[ii]
    return out

shrinkMatColsTo(mat, numCols)

Shrink a matrix by reducing the number of columns using interpolation.

Parameters:

Name Type Description Default
mat ndarray

Input matrix of shape (N, M1).

required
numCols int

Target number of columns M2, where M2 <= M1.

required

Returns:

Type Description
ndarray

Resized matrix of shape (N, numCols).

Examples:

>>> mat = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
>>> shrinkMatColsTo(mat, 2)
array([[1.5, 3.5],
       [5.5, 7.5]])
Notes

Uses scipy.ndimage.zoom with order=1 (linear interpolation).

Source code in nelpy/utils.py
def shrinkMatColsTo(mat, numCols):
    """
    Shrink a matrix by reducing the number of columns using interpolation.

    Parameters
    ----------
    mat : np.ndarray
        Input matrix of shape (N, M1).
    numCols : int
        Target number of columns M2, where M2 <= M1.

    Returns
    -------
    np.ndarray
        Resized matrix of shape (N, numCols).

    Examples
    --------
    >>> mat = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
    >>> shrinkMatColsTo(mat, 2)
    array([[1.5, 3.5],
           [5.5, 7.5]])

    Notes
    -----
    Uses scipy.ndimage.zoom with order=1 (linear interpolation).
    """
    import scipy.ndimage

    numCells = mat.shape[0]
    numColsMat = mat.shape[1]
    a = np.zeros((numCells, numCols))
    for row in np.arange(numCells):
        # note: scipy.ndimage.interpolation.zoom was removed from modern SciPy;
        # scipy.ndimage.zoom is the current location
        resized_row = scipy.ndimage.zoom(
            input=mat[row, :], zoom=(numCols / numColsMat), order=1
        )
        a[row, :] = resized_row
    return a

signal_envelope1D(data, *, sigma=None, fs=None)

Find the signal envelope using the Hilbert transform (deprecated).

Parameters:

Name Type Description Default
data numpy array, list, or RegularlySampledAnalogSignalArray

Input data.

required
sigma float

Standard deviation of Gaussian smoothing kernel in seconds.

None
fs float

Sampling rate of the signal.

None

Returns:

Name Type Description
out same type as input

Signal envelope.

Notes

This function is deprecated. Use signal_envelope_1d instead.

Source code in nelpy/utils.py
def signal_envelope1D(data, *, sigma=None, fs=None):
    """
    Find the signal envelope using the Hilbert transform (deprecated).

    Parameters
    ----------
    data : numpy array, list, or RegularlySampledAnalogSignalArray
        Input data.
    sigma : float, optional
        Standard deviation of Gaussian smoothing kernel in seconds.
    fs : float, optional
        Sampling rate of the signal.

    Returns
    -------
    out : same type as input
        Signal envelope.

    Notes
    -----
    This function is deprecated. Use `signal_envelope_1d` instead.
    """
    logging.warning(
        "'signal_envelope1D' is deprecated; use 'signal_envelope_1d' instead!"
    )
    return signal_envelope_1d(data, sigma=sigma, fs=fs)

signal_envelope_1d(data, *, sigma=None, fs=None)

Finds the signal envelope by taking the absolute value of the Hilbert transform.

Parameters:

data : numpy array, list, or RegularlySampledAnalogSignalArray
    Input data. A numpy array is expected to have shape (n_signals, n_samples); a list is expected to have length n_signals, where each sublist has length n_samples (i.e., the data are not jagged). Required.
sigma : float, optional
    Standard deviation of the Gaussian kernel used to smooth the envelope after applying the Hilbert transform. Units of seconds. Default: None, which is interpreted as 4 ms.
fs : float, optional
    Sampling rate of the signal. Default: None.

Returns:

out : same type as the input object
    An object containing the signal envelope.

Notes

This function is epoch-aware: for RegularlySampledAnalogSignalArray input, the envelope is computed separately within each epoch.
Source code in nelpy/utils.py
def signal_envelope_1d(data, *, sigma=None, fs=None):
    """Finds the signal envelope by taking the absolute value
    of the Hilbert transform

    Parameters
    ----------
    data : numpy array, list, or RegularlySampledAnalogSignalArray
        Input data
        If data is a numpy array, it is expected to have shape
        (n_signals, n_samples)
        If data is a list, it is expected to have length n_signals,
        where each sublist has length n_samples, i.e. data is not
        jagged
    sigma : float, optional
        Standard deviation of the Gaussian kernel used to
        smooth the envelope after applying the Hilbert transform.
        Units of seconds. Default is 4 ms
    fs : float, optional
        Sampling rate of the signal

    Returns
    -------
    out : same type as the input object
        An object containing the signal envelope

    Notes
    -----
    This function is epoch-aware: for RegularlySampledAnalogSignalArray
    input, the envelope is computed separately within each epoch.
    """

    if sigma is None:
        sigma = 0.004  # 4 ms standard deviation
    if fs is None:
        if isinstance(data, (np.ndarray, list)):
            raise ValueError("sampling frequency must be specified!")
        elif isinstance(data, core.RegularlySampledAnalogSignalArray):
            fs = data.fs

    if isinstance(data, (np.ndarray, list)):
        data_array = np.array(data)
        n_dims = data_array.ndim
        assert n_dims <= 2, "Only 1D and 2D input supported!"
        if n_dims == 1:
            input_data = data_array.reshape((1, data_array.size))
        else:
            input_data = data_array
        n_signals, n_samples = input_data.shape
        # Compute number of samples to compute fast FFTs
        padlen = next_fast_len(n_samples) - n_samples
        # Pad data
        paddeddata = np.hstack((input_data, np.zeros((n_signals, padlen))))
        # Use hilbert transform to get an envelope
        envelope = np.absolute(hilbert(paddeddata, axis=-1))
        # free up memory
        del paddeddata
        # Truncate results back to original length
        envelope = envelope[..., :n_samples]
        if sigma:
            # Smooth envelope with a gaussian (sigma = 4 ms default)
            EnvelopeSmoothingSD = sigma * fs
            smoothed_envelope = scipy.ndimage.gaussian_filter1d(
                envelope, EnvelopeSmoothingSD, mode="constant", axis=-1
            )
            envelope = smoothed_envelope
        if isinstance(data, list):
            envelope = envelope.tolist()
        return envelope
    elif isinstance(data, core.RegularlySampledAnalogSignalArray):
        # Only ASA data of shape (n_signals, n_timepoints) -> 2D currently supported
        assert data.data.ndim == 2
        cum_lengths = np.insert(np.cumsum(data.lengths), 0, 0)

        newasa = data.copy()
        # for segment in data:
        for idx in range(data.n_epochs):
            # print('hilberting epoch {}/{}'.format(idx+1, data.n_epochs))
            segment_data = data._data[:, cum_lengths[idx] : cum_lengths[idx + 1]]
            n_signals, n_samples = segment_data.shape
            # Compute number of samples to compute fast FFTs:
            padlen = next_fast_len(n_samples) - n_samples
            # Pad data
            paddeddata = np.hstack((segment_data, np.zeros((n_signals, padlen))))
            # Use hilbert transform to get an envelope
            envelope = np.absolute(hilbert(paddeddata, axis=-1))
            # free up memory
            del paddeddata
            # Truncate results back to original length
            envelope = envelope[..., :n_samples]
            if sigma:
                # Smooth envelope with a gaussian (sigma = 4 ms default)
                EnvelopeSmoothingSD = sigma * fs
                smoothed_envelope = scipy.ndimage.gaussian_filter1d(
                    envelope, EnvelopeSmoothingSD, mode="constant", axis=-1
                )
                envelope = smoothed_envelope
            newasa._data[:, cum_lengths[idx] : cum_lengths[idx + 1]] = np.atleast_2d(
                envelope
            )
        return newasa
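
A minimal usage sketch (synthetic data; all values assumed for illustration): the envelope of an amplitude-modulated sine should recover the slow modulation.

import numpy as np
from nelpy.utils import signal_envelope_1d

fs = 1000.0                                           # assumed sampling rate, Hz
t = np.arange(0, 2.0, 1.0 / fs)                       # 2 s of samples
modulation = 1.0 + 0.5 * np.sin(2 * np.pi * 1.0 * t)  # slow envelope
x = modulation * np.sin(2 * np.pi * 40.0 * t)         # 40 Hz carrier

# fs is required for plain array input; sigma=0.004 (4 ms) is the default
env = signal_envelope_1d(x, sigma=0.004, fs=fs)
print(env.shape)  # (1, 2000): 1D input is treated as a single signal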

spatial_information(ratemap, Pi=1)

Compute the spatial information (in bits per spike) for each unit.

The specificity index examines the amount of information (in bits) that a single spike conveys about the animal's location (i.e., how well cell firing predicts the animal's location). The spatial information content of cell discharge was calculated using the formula: information content = \sum_i P_i (R_i/R) \log_2(R_i/R), where i is the bin number, P_i is the probability of occupancy of bin i, R_i is the mean firing rate for bin i, and R is the overall mean firing rate.

In order to account for the effects of low firing rates (with fewer spikes there is a tendency toward higher information content) or random bursts of firing, the spike firing time-series was randomly offset in time from the rat location time-series, and the information content was calculated. A distribution of the information content based on 100 such random shifts was obtained and was used to compute a standardized score (Z-score) of information content for that cell. While the distribution is not composed of independent samples, it was nominally normally distributed, and a Z value of 2.29 was chosen as a cut-off for significance (the equivalent of a one-tailed t-test with P = 0.01 under a normal distribution).

Reference(s)

Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L., and Skaggs, W. E. (1994). "Spatial information content and reliability of hippocampal CA1 neurons: effects of visual input", Hippocampus, 4(4), 410-421.

Parameters:

ratemap : array of shape (n_units, n_bins)
    Rate map in Hz. Required.
Pi : ndarray, optional
    A probability distribution over the bins of the rate map. If not provided, it is assumed to be uniform. Default: 1.

Returns:

si : array of shape (n_units,)
    Spatial information (in bits per spike).

Source code in nelpy/utils.py
def spatial_information(ratemap, Pi=1):
    """Compute the spatial information and firing sparsity...

    The specificity index examines the amount of information
    (in bits) that a single spike conveys about the animal's
    location (i.e., how well cell firing predicts the animal's
    location).The spatial information content of cell discharge was
    calculated using the formula:
        information content = \\Sum P_i(R_i/R)log_2(R_i/R)
    where i is the bin number, P_i, is the probability for occupancy
    of bin i, R_i, is the mean firing rate for bin i, and R is the
    overall mean firing rate.

    In order to account for the effects of low firing rates (with
    fewer spikes there is a tendency toward higher information
    content) or random bursts of firing, the spike firing
    time-series was randomly offset in time from the rat location
    time-series, and the information content was calculated. A
    distribution of the information content based on 100 such random
    shifts was obtained and was used to compute a standardized score
    (Z-score) of information content for that cell. While the
    distribution is not composed of independent samples, it was
    nominally normally distributed, and a Z value of 2.29 was chosen
    as a cut-off for significance (the equivalent of a one-tailed
    t-test with P = 0.01 under a normal distribution).

    Reference(s)
    ------------
    Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L.,
        and Skaggs, W. E. (1994). "Spatial information content and
        reliability of hippocampal CA1 neurons: effects of visual
        input", Hippocampus, 4(4), 410-421.

    Parameters
    ----------
    ratemap : array of shape (n_units, n_bins)
        Rate map in Hz.

    Pi : numpy.ndarray
        A probability distribution over the bins of the rate map. If not
        provided, it is assumed to be uniform.

    Returns
    -------
    si : array of shape (n_units,)
        spatial information (in bits per spike)
    """
    # convert Pi to a probability distribution; a scalar (the default Pi=1)
    # is treated as uniform occupancy over the spatial bins, and array input
    # is copied to avoid mutating the caller's array
    Pi = np.array(Pi, dtype=float)
    if Pi.ndim == 0:
        Pi = np.ones(ratemap.shape[1:])
    Pi[np.isnan(Pi) | (Pi == 0)] = np.finfo(float).eps
    Pi = Pi / (np.sum(Pi) + np.finfo(float).eps)

    ratemap = copy.deepcopy(ratemap)
    # ensure that the ratemap always has nonzero firing rates,
    # otherwise the spatial information might return NaNs:
    ratemap[np.isnan(ratemap) | (ratemap == 0)] = np.finfo(float).eps

    # Handle N-dimensional ratemaps (n_units, *spatial_dims)
    if len(ratemap.shape) < 2:
        raise TypeError(
            "rate map must have at least 2 dimensions (n_units, *spatial_dims)!"
        )

    # Sum over all spatial dimensions to get mean firing rate for each unit
    spatial_axes = tuple(range(1, len(ratemap.shape)))
    R = (ratemap * Pi).sum(axis=spatial_axes)  # mean firing rate for each unit

    # Compute spatial information for each unit
    # We need to compute sum over spatial bins of: Pi * (Ri/R) * log2(Ri/R)
    # where Ri is the firing rate in each spatial bin

    si = np.zeros(ratemap.shape[0])  # one value per unit

    for unit_idx in range(ratemap.shape[0]):
        unit_ratemap = ratemap[unit_idx]  # spatial dimensions only
        unit_mean_rate = R[unit_idx]

        # Compute (Ri / R) * log2(Ri / R) for each spatial bin
        rate_ratio = unit_ratemap / unit_mean_rate
        log_term = rate_ratio * np.log2(rate_ratio)

        # Weight by occupancy probability and sum over all spatial dimensions
        si[unit_idx] = np.sum(Pi * log_term)

    return si
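
A small worked example (toy numbers, not from the library docs): a unit that fires in exactly one of four equally occupied bins should convey log2(4) = 2 bits per spike.

import numpy as np
from nelpy.utils import spatial_information

ratemap = np.array([[4.0, 0.0, 0.0, 0.0]])  # (n_units=1, n_bins=4), in Hz
Pi = np.ones(4)                             # uniform occupancy
si = spatial_information(ratemap, Pi=Pi)
print(si)  # approximately [2.]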

spatial_selectivity(ratemap, Pi=1)

The selectivity measure is max(rate)/mean(rate) for each cell. The more tightly concentrated the cell's activity, the higher the selectivity. A cell with no spatial tuning at all will have a selectivity of 1.

Parameters:

ratemap : ndarray
    A firing rate map, any number of dimensions. Required.
Pi : ndarray, optional
    A probability distribution of the occupancy of each bin in the rate map. If not provided, the occupancy will be assumed to be uniform. Default: 1.

Returns:

out : array of shape (n_units,)
    Selectivity for each unit.

Source code in nelpy/utils.py
def spatial_selectivity(ratemap, Pi=1):
    """
    The selectivity measure max(rate)/mean(rate)  of the cell. The more
    tightly concentrated the cell's activity, the higher the selectivity.
    A cell with no spatial tuning at all will have a selectivity of 1.

    Parameters
    ----------
    rate_map : numpy.ndarray
        A firing rate map, any number of dimensions.
    Pi : numpy.ndarray
        A probability distribution of the occupancy of each bin in the
        rate map. If not provided, the occupancy will be assumed to be uniform.

    Returns
    -------
    out : float
        selectivity
    """
    # convert Pi to a probability distribution; a scalar (the default Pi=1)
    # is treated as uniform occupancy over the spatial bins, and array input
    # is copied to avoid mutating the caller's array
    Pi = np.array(Pi, dtype=float)
    if Pi.ndim == 0:
        Pi = np.ones(ratemap.shape[1:])
    Pi[np.isnan(Pi) | (Pi == 0)] = np.finfo(float).eps
    Pi = Pi / (np.sum(Pi) + np.finfo(float).eps)

    # Handle N-dimensional ratemaps (n_units, *spatial_dims)
    if len(ratemap.shape) < 2:
        raise TypeError(
            "rate map must have at least 2 dimensions (n_units, *spatial_dims)!"
        )

    # Sum over all spatial dimensions to get mean firing rate for each unit
    spatial_axes = tuple(range(1, len(ratemap.shape)))
    R = (ratemap * Pi).sum(axis=spatial_axes)

    # Get maximum rate over all spatial dimensions for each unit
    max_rate = np.max(ratemap, axis=spatial_axes)
    return max_rate / R
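
With the same toy rate map as above (values assumed for illustration), the selectivity is max(rate)/mean(rate) = 4 Hz / 1 Hz = 4:

import numpy as np
from nelpy.utils import spatial_selectivity

ratemap = np.array([[4.0, 0.0, 0.0, 0.0]])  # (n_units=1, n_bins=4), in Hz
sel = spatial_selectivity(ratemap, Pi=np.ones(4))
print(sel)  # approximately [4.]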

spatial_sparsity(ratemap, Pi=1)

Compute the firing sparsity of a rate map. The sparsity measure is an adaptation to space: it measures the fraction of the environment in which a cell is active. A sparsity of 0.1 means that the place field of the cell occupies 1/10 of the area the subject traverses.

Parameters:

ratemap : array of shape (n_units, n_bins)
    Rate map in Hz. Required.
Pi : array of shape (n_bins,), optional
    A probability distribution over the bins of the rate map (the occupancy of the animal). If not provided, it is assumed to be uniform. Default: 1.

Returns:

sparsity : array of shape (n_units,)
    Sparsity (between 0 and 1) for each unit.

References

.. [2] Skaggs, W. E., McNaughton, B. L., Wilson, M., & Barnes, C. (1996). Theta phase precession in hippocampal neuronal populations and the compression of temporal sequences. Hippocampus, 6, 149-172.

Source code in nelpy/utils.py
def spatial_sparsity(ratemap, Pi=1):
    """Compute the firing sparsity...
    Compute sparsity of a rate map, The sparsity  measure is an adaptation
    to space. The adaptation measures the fraction of the environment  in which
    a cell is  active. A sparsity of, 0.1 means that the place field of the
    cell occupies 1/10 of the area the subject traverses

    Parameters
    ----------
    rate_map : numpy.ndarray
        A firing rate map, any number of dimensions.
    Pi : numpy.ndarray
        A probability distribution over the bins of the rate map. If not
        provided, it is assumed to be uniform.
    Returns
    -------
    out : float
        sparsity

    References
    ----------
    .. [2] Skaggs, W. E., McNaughton, B. L., Wilson, M., & Barnes, C. (1996).
    Theta phase precession in hippocampal neuronal populations and the
    compression of temporal sequences. Hippocampus, 6, 149-172.

    Parameters
    ----------

    ratemap : array of shape (n_units, n_bins)
        Rate map in Hz.
    Pi : array of shape (n_bins,)
        Occupancy of the animal.
    Returns
    -------
    sparsity: array of shape (n_units,)
        sparsity (in percent) for each unit
    """

    # convert Pi to a probability distribution; a scalar (the default Pi=1)
    # is treated as uniform occupancy over the spatial bins, and array input
    # is copied to avoid mutating the caller's array
    Pi = np.array(Pi, dtype=float)
    if Pi.ndim == 0:
        Pi = np.ones(ratemap.shape[1:])
    Pi[np.isnan(Pi) | (Pi == 0)] = np.finfo(float).eps
    Pi = Pi / (np.sum(Pi) + np.finfo(float).eps)

    ratemap = copy.deepcopy(ratemap)
    # ensure that the ratemap always has nonzero firing rates,
    # otherwise the spatial information might return NaNs:
    ratemap[np.isnan(ratemap) | (ratemap == 0)] = np.finfo(float).eps

    # Handle N-dimensional ratemaps (n_units, *spatial_dims)
    if len(ratemap.shape) < 2:
        raise TypeError(
            "rate map must have at least 2 dimensions (n_units, *spatial_dims)!"
        )

    # Sum over all spatial dimensions to get mean firing rate for each unit
    spatial_axes = tuple(range(1, len(ratemap.shape)))
    R = (ratemap * Pi).sum(axis=spatial_axes)

    # Compute average squared rate over all spatial dimensions
    avg_sqr_rate = np.sum(ratemap**2 * Pi, axis=spatial_axes)

    return R**2 / avg_sqr_rate
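
For the same toy map (values assumed), sparsity = R^2 / <R^2> = 1 / 4 = 0.25, i.e. the field occupies a quarter of the traversed area:

import numpy as np
from nelpy.utils import spatial_sparsity

ratemap = np.array([[4.0, 0.0, 0.0, 0.0]])  # (n_units=1, n_bins=4), in Hz
sp = spatial_sparsity(ratemap, Pi=np.ones(4))
print(sp)  # approximately [0.25]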

spiketrain_union(st1, st2)

Join two spiketrains together.

Parameters:

st1 : SpikeTrainArray
    First spiketrain. Required.
st2 : SpikeTrainArray
    Second spiketrain. Required.

Returns:

SpikeTrainArray
    Combined spiketrain with joined support.

Examples:

>>> combined_st = spiketrain_union(st1, st2)

Notes

WARNING! This function should be improved a lot! Currently assumes both spiketrains have the same number of units.

Source code in nelpy/utils.py
def spiketrain_union(st1, st2):
    """
    Join two spiketrains together.

    Parameters
    ----------
    st1 : SpikeTrainArray
        First spiketrain.
    st2 : SpikeTrainArray
        Second spiketrain.

    Returns
    -------
    SpikeTrainArray
        Combined spiketrain with joined support.

    Examples
    --------
    >>> combined_st = spiketrain_union(st1, st2)

    Notes
    -----
    WARNING! This function should be improved a lot!
    Currently assumes both spiketrains have the same number of units.
    """
    assert st1.n_units == st2.n_units
    support = st1.support.join(st2.support)

    newdata = []
    for unit in range(st1.n_units):
        newdata.append(np.append(st1.time[unit], st2.time[unit]))

    fs = None
    if st1.fs == st2.fs:
        fs = st1.fs

    return core.SpikeTrainArray(newdata, support=support, fs=fs)
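
The merge itself is a plain per-unit concatenation, which is one reason for the warning above; in numpy terms (hypothetical spike times):

import numpy as np

st1_times = [np.array([0.1, 0.9]), np.array([0.4])]  # unit 0, unit 1
st2_times = [np.array([1.2]), np.array([1.5, 1.8])]

newdata = [np.append(a, b) for a, b in zip(st1_times, st2_times)]
print(newdata[0])  # [0.1 0.9 1.2] (appended, not re-sorted)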

swap_cols(arr, frm, to)

Swap two columns of a numpy array in place (for a 1D array, two elements are swapped instead).

Parameters:

arr : ndarray
    Array to modify. Required.
frm : int
    Index of first column to swap. Required.
to : int
    Index of second column to swap. Required.

Returns:

None
    Modifies the array in place.

Examples:

>>> arr = np.array([[1, 2, 3], [4, 5, 6]])
>>> swap_cols(arr, 0, 2)
>>> arr
array([[3, 2, 1],
       [6, 5, 4]])
Source code in nelpy/utils.py
def swap_cols(arr, frm, to):
    """
    Swap two columns of a numpy array in place (for a 1D array, two
    elements are swapped instead).

    Parameters
    ----------
    arr : np.ndarray
        2D array to modify.
    frm : int
        Index of first column to swap.
    to : int
        Index of second column to swap.

    Returns
    -------
    None
        Modifies array in-place.

    Examples
    --------
    >>> arr = np.array([[1, 2, 3], [4, 5, 6]])
    >>> swap_cols(arr, 0, 2)
    >>> arr
    array([[3, 2, 1],
           [6, 5, 4]])
    """
    if arr.ndim > 1:
        arr[:, [frm, to]] = arr[:, [to, frm]]
    else:
        arr[frm], arr[to] = arr[to], arr[frm]

swap_rows(arr, frm, to)

Swap two rows of a numpy array in place (for a 1D array, two elements are swapped instead).

Parameters:

arr : ndarray
    Array to modify. Required.
frm : int
    Index of first row to swap. Required.
to : int
    Index of second row to swap. Required.

Returns:

None
    Modifies the array in place.
Examples:

>>> arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> swap_rows(arr, 0, 2)
>>> arr
array([[7, 8, 9],
       [4, 5, 6],
       [1, 2, 3]])
Source code in nelpy/utils.py
def swap_rows(arr, frm, to):
    """
    Swap two rows of a numpy array in place (for a 1D array, two
    elements are swapped instead).

    Parameters
    ----------
    arr : np.ndarray
        2D array to modify.
    frm : int
        Index of first row to swap.
    to : int
        Index of second row to swap.

    Returns
    -------
    None
        Modifies array in-place.

    Examples
    --------
    >>> arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> swap_rows(arr, 0, 2)
    >>> arr
    array([[7, 8, 9],
           [4, 5, 6],
           [1, 2, 3]])
    """
    if arr.ndim > 1:
        arr[[frm, to], :] = arr[[to, frm], :]
    else:
        arr[frm], arr[to] = arr[to], arr[frm]