Dataset columns (one record per function):

  repository_name            string, 5 to 67 chars
  func_path_in_repository    string, 4 to 234 chars
  func_name                  string, 0 to 314 chars
  whole_func_string          string, 52 to 3.87M chars
  language                   categorical, 6 distinct values
  func_code_string           string, 52 to 3.87M chars (duplicates whole_func_string in the records below)
  func_documentation_string  string, 1 to 47.2k chars (duplicates the docstring in the records below)
  func_code_url              string, 85 to 339 chars
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.is_unique
def is_unique(self):
    """True if no duplicate entries."""
    if self._is_unique is None:
        t = self.values[:-1] == self.values[1:]  # type: np.ndarray
        self._is_unique = ~np.any(t)
    return self._is_unique
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3404-L3409
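The adjacent-comparison trick above detects duplicates only because a SortedIndex is sorted, so equal values are always neighbours. A minimal standalone sketch of the same idea, in plain NumPy (not part of the dataset):

import numpy as np

values = np.array([3, 6, 6, 11])      # sorted, as SortedIndex guarantees
dups = values[:-1] == values[1:]      # compare each entry with its successor
print(~np.any(dups))                  # False, because 6 appears twice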
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.locate_key
def locate_key(self, key):
    """Get index location for the requested key.

    Parameters
    ----------
    key : object
        Value to locate.

    Returns
    -------
    loc : int or slice
        Location of `key` (will be slice if there are duplicate entries).

    Examples
    --------
    >>> import allel
    >>> idx = allel.SortedIndex([3, 6, 6, 11])
    >>> idx.locate_key(3)
    0
    >>> idx.locate_key(11)
    3
    >>> idx.locate_key(6)
    slice(1, 3, None)
    >>> try:
    ...     idx.locate_key(2)
    ... except KeyError as e:
    ...     print(e)
    ...
    2

    """
    left = bisect.bisect_left(self, key)
    right = bisect.bisect_right(self, key)
    diff = right - left
    if diff == 0:
        raise KeyError(key)
    elif diff == 1:
        return left
    else:
        return slice(left, right)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3429-L3470
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.locate_intersection
def locate_intersection(self, other):
    """Locate the intersection with another array.

    Parameters
    ----------
    other : array_like, int
        Array of values to intersect.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of intersection.
    loc_other : ndarray, bool
        Boolean array with location in `other` of intersection.

    Examples
    --------
    >>> import allel
    >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> idx2 = allel.SortedIndex([4, 6, 20, 39])
    >>> loc1, loc2 = idx1.locate_intersection(idx2)
    >>> loc1
    array([False,  True, False,  True, False])
    >>> loc2
    array([False,  True,  True, False])
    >>> idx1[loc1]
    <SortedIndex shape=(2,) dtype=int64>
    [6, 20]
    >>> idx2[loc2]
    <SortedIndex shape=(2,) dtype=int64>
    [6, 20]

    """
    # check inputs
    other = SortedIndex(other, copy=False)

    # find intersection
    assume_unique = self.is_unique and other.is_unique
    loc = np.in1d(self, other, assume_unique=assume_unique)
    loc_other = np.in1d(other, self, assume_unique=assume_unique)

    return loc, loc_other
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3472-L3515
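Both membership tests above go through np.in1d; passing assume_unique=True is safe here only because both indexes have been verified unique, and it lets NumPy skip an internal deduplication pass. A minimal sketch of the same two calls outside the class (np.isin is the modern equivalent of np.in1d):

import numpy as np

idx1 = np.array([3, 6, 11, 20, 35])
idx2 = np.array([4, 6, 20, 39])
loc = np.in1d(idx1, idx2, assume_unique=True)        # membership of idx1 in idx2
loc_other = np.in1d(idx2, idx1, assume_unique=True)  # and the reverse
print(loc)        # [False  True False  True False]
print(loc_other)  # [False  True  True False]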
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.locate_keys
def locate_keys(self, keys, strict=True):
    """Get index locations for the requested keys.

    Parameters
    ----------
    keys : array_like
        Array of keys to locate.
    strict : bool, optional
        If True, raise KeyError if any keys are not found in the index.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of values.

    Examples
    --------
    >>> import allel
    >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> idx2 = allel.SortedIndex([4, 6, 20, 39])
    >>> loc = idx1.locate_keys(idx2, strict=False)
    >>> loc
    array([False,  True, False,  True, False])
    >>> idx1[loc]
    <SortedIndex shape=(2,) dtype=int64>
    [6, 20]

    """
    # check inputs
    keys = SortedIndex(keys, copy=False)

    # find intersection
    loc, found = self.locate_intersection(keys)

    if strict and np.any(~found):
        raise KeyError(keys[~found])

    return loc
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3517-L3556
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.intersect
def intersect(self, other):
    """Intersect with `other` sorted index.

    Parameters
    ----------
    other : array_like, int
        Array of values to intersect with.

    Returns
    -------
    out : SortedIndex
        Values in common.

    Examples
    --------
    >>> import allel
    >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> idx2 = allel.SortedIndex([4, 6, 20, 39])
    >>> idx1.intersect(idx2)
    <SortedIndex shape=(2,) dtype=int64>
    [6, 20]

    """
    loc = self.locate_keys(other, strict=False)
    return self.compress(loc, axis=0)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3558-L3584
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.locate_range
def locate_range(self, start=None, stop=None):
    """Locate slice of index containing all entries within `start` and
    `stop` values **inclusive**.

    Parameters
    ----------
    start : int, optional
        Start value.
    stop : int, optional
        Stop value.

    Returns
    -------
    loc : slice
        Slice object.

    Examples
    --------
    >>> import allel
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> loc = idx.locate_range(4, 32)
    >>> loc
    slice(1, 4, None)
    >>> idx[loc]
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 20]

    """
    # locate start and stop indices
    if start is None:
        start_index = 0
    else:
        start_index = bisect.bisect_left(self, start)
    if stop is None:
        stop_index = len(self)
    else:
        stop_index = bisect.bisect_right(self, stop)

    if stop_index - start_index == 0:
        raise KeyError(start, stop)

    loc = slice(start_index, stop_index)
    return loc
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3586-L3630
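The inclusive semantics fall out of the bisect pairing: bisect_left finds the first entry >= start, while bisect_right finds one past the last entry <= stop. A standalone sketch of that arithmetic, using the same values as the docstring:

import bisect

values = [3, 6, 11, 20, 35]
start_index = bisect.bisect_left(values, 4)    # 1: first entry >= 4
stop_index = bisect.bisect_right(values, 32)   # 4: one past last entry <= 32
print(values[start_index:stop_index])          # [6, 11, 20]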
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.intersect_range
def intersect_range(self, start=None, stop=None):
    """Intersect with range defined by `start` and `stop` values
    **inclusive**.

    Parameters
    ----------
    start : int, optional
        Start value.
    stop : int, optional
        Stop value.

    Returns
    -------
    idx : SortedIndex

    Examples
    --------
    >>> import allel
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> idx.intersect_range(4, 32)
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 20]

    """
    try:
        loc = self.locate_range(start=start, stop=stop)
    except KeyError:
        return self.values[0:0]
    else:
        return self[loc]
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3632-L3663
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.locate_intersection_ranges
def locate_intersection_ranges(self, starts, stops):
    """Locate the intersection with a set of ranges.

    Parameters
    ----------
    starts : array_like, int
        Range start values.
    stops : array_like, int
        Range stop values.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of entries found.
    loc_ranges : ndarray, bool
        Boolean array with location of ranges containing one or more
        entries.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35],
    ...                    [100, 120]])
    >>> starts = ranges[:, 0]
    >>> stops = ranges[:, 1]
    >>> loc, loc_ranges = idx.locate_intersection_ranges(starts, stops)
    >>> loc
    array([False,  True,  True, False,  True])
    >>> loc_ranges
    array([False,  True, False,  True, False])
    >>> idx[loc]
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 35]
    >>> ranges[loc_ranges]
    array([[ 6, 17],
           [31, 35]])

    """
    # check inputs
    starts = asarray_ndim(starts, 1)
    stops = asarray_ndim(stops, 1)
    check_dim0_aligned(starts, stops)

    # find indices of start and stop values in idx
    start_indices = np.searchsorted(self, starts)
    stop_indices = np.searchsorted(self, stops, side='right')

    # find intervals overlapping at least one value
    loc_ranges = start_indices < stop_indices

    # find values within at least one interval
    # (note: the builtin bool, not np.bool, which is removed in modern NumPy)
    loc = np.zeros(self.shape, dtype=bool)
    for i, j in zip(start_indices[loc_ranges], stop_indices[loc_ranges]):
        loc[i:j] = True

    return loc, loc_ranges
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3665-L3724
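The vectorised interval test does for many ranges what bisect does for one: a range overlaps the index exactly when the insertion point of its start (left side) falls before the insertion point of its stop (right side). A standalone sketch with the docstring's values:

import numpy as np

idx = np.array([3, 6, 11, 20, 35])
starts = np.array([0, 6, 12, 31, 100])
stops = np.array([2, 17, 15, 35, 120])
start_indices = np.searchsorted(idx, starts)              # first entry >= start
stop_indices = np.searchsorted(idx, stops, side='right')  # one past last entry <= stop
print(start_indices < stop_indices)  # [False  True False  True False]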
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.locate_ranges
def locate_ranges(self, starts, stops, strict=True):
    """Locate items within the given ranges.

    Parameters
    ----------
    starts : array_like, int
        Range start values.
    stops : array_like, int
        Range stop values.
    strict : bool, optional
        If True, raise KeyError if any ranges contain no entries.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of entries found.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35],
    ...                    [100, 120]])
    >>> starts = ranges[:, 0]
    >>> stops = ranges[:, 1]
    >>> loc = idx.locate_ranges(starts, stops, strict=False)
    >>> loc
    array([False,  True,  True, False,  True])
    >>> idx[loc]
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 35]

    """
    loc, found = self.locate_intersection_ranges(starts, stops)

    if strict and np.any(~found):
        raise KeyError(starts[~found], stops[~found])

    return loc
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3726-L3767
cggh/scikit-allel
allel/model/ndarray.py
SortedIndex.intersect_ranges
def intersect_ranges(self, starts, stops):
    """Intersect with a set of ranges.

    Parameters
    ----------
    starts : array_like, int
        Range start values.
    stops : array_like, int
        Range stop values.

    Returns
    -------
    idx : SortedIndex

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35],
    ...                    [100, 120]])
    >>> starts = ranges[:, 0]
    >>> stops = ranges[:, 1]
    >>> idx.intersect_ranges(starts, stops)
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 35]

    """
    loc = self.locate_ranges(starts, stops, strict=False)
    return self.compress(loc, axis=0)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3769-L3800
cggh/scikit-allel
allel/model/ndarray.py
UniqueIndex.locate_intersection
def locate_intersection(self, other):
    """Locate the intersection with another array.

    Parameters
    ----------
    other : array_like
        Array to intersect.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of intersection.
    loc_other : ndarray, bool
        Boolean array with location in `other` of intersection.

    Examples
    --------
    >>> import allel
    >>> idx1 = allel.UniqueIndex(['A', 'C', 'B', 'F'], dtype=object)
    >>> idx2 = allel.UniqueIndex(['X', 'F', 'G', 'C', 'Z'], dtype=object)
    >>> loc1, loc2 = idx1.locate_intersection(idx2)
    >>> loc1
    array([False,  True, False,  True])
    >>> loc2
    array([False,  True, False,  True, False])
    >>> idx1[loc1]
    <UniqueIndex shape=(2,) dtype=object>
    ['C', 'F']
    >>> idx2[loc2]
    <UniqueIndex shape=(2,) dtype=object>
    ['F', 'C']

    """
    # check inputs
    other = UniqueIndex(other)

    # find intersection
    assume_unique = True
    loc = np.in1d(self, other, assume_unique=assume_unique)
    loc_other = np.in1d(other, self, assume_unique=assume_unique)

    return loc, loc_other
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3904-L3947
cggh/scikit-allel
allel/model/ndarray.py
UniqueIndex.locate_keys
def locate_keys(self, keys, strict=True):
    """Get index locations for the requested keys.

    Parameters
    ----------
    keys : array_like
        Array of keys to locate.
    strict : bool, optional
        If True, raise KeyError if any keys are not found in the index.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of keys.

    Examples
    --------
    >>> import allel
    >>> idx = allel.UniqueIndex(['A', 'C', 'B', 'F'])
    >>> idx.locate_keys(['F', 'C'])
    array([False,  True, False,  True])
    >>> idx.locate_keys(['X', 'F', 'G', 'C', 'Z'], strict=False)
    array([False,  True, False,  True])

    """
    # check inputs
    keys = UniqueIndex(keys)

    # find intersection
    loc, found = self.locate_intersection(keys)

    if strict and np.any(~found):
        raise KeyError(keys[~found])

    return loc
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3949-L3985
cggh/scikit-allel
allel/model/ndarray.py
SortedMultiIndex.locate_key
def locate_key(self, k1, k2=None):
    """
    Get index location for the requested key.

    Parameters
    ----------
    k1 : object
        Level 1 key.
    k2 : object, optional
        Level 2 key.

    Returns
    -------
    loc : int or slice
        Location of requested key (will be slice if there are duplicate
        entries).

    Examples
    --------
    >>> import allel
    >>> chrom = ['chr1', 'chr1', 'chr2', 'chr2', 'chr2', 'chr3']
    >>> pos = [1, 4, 2, 5, 5, 3]
    >>> idx = allel.SortedMultiIndex(chrom, pos)
    >>> idx.locate_key('chr1')
    slice(0, 2, None)
    >>> idx.locate_key('chr1', 4)
    1
    >>> idx.locate_key('chr2', 5)
    slice(3, 5, None)
    >>> try:
    ...     idx.locate_key('chr3', 4)
    ... except KeyError as e:
    ...     print(e)
    ...
    ('chr3', 4)

    """
    loc1 = self.l1.locate_key(k1)
    if k2 is None:
        return loc1
    if isinstance(loc1, slice):
        offset = loc1.start
        try:
            loc2 = SortedIndex(self.l2[loc1], copy=False).locate_key(k2)
        except KeyError:
            # reraise with more information
            raise KeyError(k1, k2)
        else:
            if isinstance(loc2, slice):
                loc = slice(offset + loc2.start, offset + loc2.stop)
            else:
                # assume singleton
                loc = offset + loc2
    else:
        # singleton match in l1
        v = self.l2[loc1]
        if v == k2:
            loc = loc1
        else:
            raise KeyError(k1, k2)
    return loc
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4102-L4164
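The level-2 lookup above runs in chromosome-local coordinates, so any slice it returns must be shifted by the start of the level-1 slice to address the full index. A toy sketch of that offset arithmetic (the values are hypothetical, chosen to match the 'chr2', 5 example):

loc1 = slice(2, 5)   # rows for the level 1 key, e.g. 'chr2'
loc2 = slice(1, 3)   # match within those rows, in local coordinates
offset = loc1.start
print(slice(offset + loc2.start, offset + loc2.stop))  # slice(3, 5, None)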
cggh/scikit-allel
allel/model/ndarray.py
SortedMultiIndex.locate_range
def locate_range(self, key, start=None, stop=None):
    """Locate slice of index containing all entries within the range
    `key`:`start`-`stop` **inclusive**.

    Parameters
    ----------
    key : object
        Level 1 key value.
    start : object, optional
        Level 2 start value.
    stop : object, optional
        Level 2 stop value.

    Returns
    -------
    loc : slice
        Slice object.

    Examples
    --------
    >>> import allel
    >>> chrom = ['chr1', 'chr1', 'chr2', 'chr2', 'chr2', 'chr3']
    >>> pos = [1, 4, 2, 5, 5, 3]
    >>> idx = allel.SortedMultiIndex(chrom, pos)
    >>> idx.locate_range('chr1')
    slice(0, 2, None)
    >>> idx.locate_range('chr1', 1, 4)
    slice(0, 2, None)
    >>> idx.locate_range('chr2', 3, 7)
    slice(3, 5, None)
    >>> try:
    ...     idx.locate_range('chr3', 4, 9)
    ... except KeyError as e:
    ...     print(e)
    ('chr3', 4, 9)

    """
    loc1 = self.l1.locate_key(key)
    if start is None and stop is None:
        loc = loc1
    elif isinstance(loc1, slice):
        offset = loc1.start
        idx = SortedIndex(self.l2[loc1], copy=False)
        try:
            loc2 = idx.locate_range(start, stop)
        except KeyError:
            raise KeyError(key, start, stop)
        else:
            loc = slice(offset + loc2.start, offset + loc2.stop)
    else:
        # singleton match in l1
        v = self.l2[loc1]
        if start <= v <= stop:
            loc = loc1
        else:
            raise KeyError(key, start, stop)
    # ensure slice is always returned
    if not isinstance(loc, slice):
        loc = slice(loc, loc + 1)
    return loc
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4166-L4227
cggh/scikit-allel
allel/model/ndarray.py
ChromPosIndex.locate_key
def locate_key(self, chrom, pos=None):
    """
    Get index location for the requested key.

    Parameters
    ----------
    chrom : object
        Chromosome or contig.
    pos : int, optional
        Position within chromosome or contig.

    Returns
    -------
    loc : int or slice
        Location of requested key (will be slice if there are duplicate
        entries).

    Examples
    --------
    >>> import allel
    >>> chrom = ['chr2', 'chr2', 'chr1', 'chr1', 'chr1', 'chr3']
    >>> pos = [1, 4, 2, 5, 5, 3]
    >>> idx = allel.ChromPosIndex(chrom, pos)
    >>> idx.locate_key('chr1')
    slice(2, 5, None)
    >>> idx.locate_key('chr2', 4)
    1
    >>> idx.locate_key('chr1', 5)
    slice(3, 5, None)
    >>> try:
    ...     idx.locate_key('chr3', 4)
    ... except KeyError as e:
    ...     print(e)
    ...
    ('chr3', 4)

    """
    if pos is None:
        # we just want the region for a chromosome
        if chrom in self.chrom_ranges:
            # return previously cached result
            return self.chrom_ranges[chrom]
        else:
            loc_chrom = np.nonzero(self.chrom == chrom)[0]
            if len(loc_chrom) == 0:
                raise KeyError(chrom)
            slice_chrom = slice(min(loc_chrom), max(loc_chrom) + 1)
            # cache the result
            self.chrom_ranges[chrom] = slice_chrom
            return slice_chrom
    else:
        slice_chrom = self.locate_key(chrom)
        pos_chrom = SortedIndex(self.pos[slice_chrom])
        try:
            idx_within_chrom = pos_chrom.locate_key(pos)
        except KeyError:
            raise KeyError(chrom, pos)
        if isinstance(idx_within_chrom, slice):
            return slice(slice_chrom.start + idx_within_chrom.start,
                         slice_chrom.start + idx_within_chrom.stop)
        else:
            return slice_chrom.start + idx_within_chrom
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4322-L4386
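Unlike SortedMultiIndex, ChromPosIndex does not require chromosomes to be sorted overall, only grouped into contiguous runs (note 'chr2' precedes 'chr1' in the docstring example); the chromosome slice is then recovered from the first and last matching row. A standalone sketch of that step:

import numpy as np

chrom = np.array(['chr2', 'chr2', 'chr1', 'chr1', 'chr1', 'chr3'])
loc_chrom = np.nonzero(chrom == 'chr1')[0]                    # array([2, 3, 4])
slice_chrom = slice(int(loc_chrom.min()), int(loc_chrom.max()) + 1)
print(slice_chrom)                                            # slice(2, 5, None)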
cggh/scikit-allel
allel/model/ndarray.py
ChromPosIndex.locate_range
def locate_range(self, chrom, start=None, stop=None):
    """Locate slice of index containing all entries within the range
    `chrom`:`start`-`stop` **inclusive**.

    Parameters
    ----------
    chrom : object
        Chromosome or contig.
    start : int, optional
        Position start value.
    stop : int, optional
        Position stop value.

    Returns
    -------
    loc : slice
        Slice object.

    Examples
    --------
    >>> import allel
    >>> chrom = ['chr2', 'chr2', 'chr1', 'chr1', 'chr1', 'chr3']
    >>> pos = [1, 4, 2, 5, 5, 3]
    >>> idx = allel.ChromPosIndex(chrom, pos)
    >>> idx.locate_range('chr1')
    slice(2, 5, None)
    >>> idx.locate_range('chr2', 1, 4)
    slice(0, 2, None)
    >>> idx.locate_range('chr1', 3, 7)
    slice(3, 5, None)
    >>> try:
    ...     idx.locate_range('chr3', 4, 9)
    ... except KeyError as e:
    ...     print(e)
    ('chr3', 4, 9)

    """
    slice_chrom = self.locate_key(chrom)
    if start is None and stop is None:
        return slice_chrom
    else:
        pos_chrom = SortedIndex(self.pos[slice_chrom])
        try:
            slice_within_chrom = pos_chrom.locate_range(start, stop)
        except KeyError:
            raise KeyError(chrom, start, stop)
        loc = slice(slice_chrom.start + slice_within_chrom.start,
                    slice_chrom.start + slice_within_chrom.stop)
        return loc
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4388-L4441
cggh/scikit-allel
allel/model/ndarray.py
VariantTable.set_index
def set_index(self, index):
    """Set or reset the index.

    Parameters
    ----------
    index : string or pair of strings, optional
        Names of columns to use for positional index, e.g., 'POS' if
        table contains a 'POS' column and records from a single
        chromosome/contig, or ('CHROM', 'POS') if table contains records
        from multiple chromosomes/contigs.

    """
    if index is None:
        pass
    elif isinstance(index, str):
        index = SortedIndex(self[index], copy=False)
    elif isinstance(index, (tuple, list)) and len(index) == 2:
        index = SortedMultiIndex(self[index[0]], self[index[1]],
                                 copy=False)
    else:
        raise ValueError('invalid index argument, expected string or '
                         'pair of strings, found %s' % repr(index))
    self.index = index
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4541-L4563
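A short usage sketch, assuming the VariantTable constructor accepts records plus a structured dtype as in the to_vcf example further down (the column values here are hypothetical):

import allel

records = [(b'chr1', 2), (b'chr1', 6), (b'chr2', 3)]
vt = allel.VariantTable(records, dtype=[('CHROM', 'S4'), ('POS', 'u4')])
vt.set_index(('CHROM', 'POS'))      # pair of names -> SortedMultiIndex
print(type(vt.index).__name__)      # SortedMultiIndex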
cggh/scikit-allel
allel/model/ndarray.py
VariantTable.query_position
def query_position(self, chrom=None, position=None):
    """Query the table, returning row or rows matching the given genomic
    position.

    Parameters
    ----------
    chrom : string, optional
        Chromosome/contig.
    position : int, optional
        Position (1-based).

    Returns
    -------
    result : row or VariantTable

    """
    if self.index is None:
        raise ValueError('no index has been set')
    if isinstance(self.index, SortedIndex):
        # ignore chrom
        loc = self.index.locate_key(position)
    else:
        loc = self.index.locate_key(chrom, position)
    return self[loc]
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4565-L4589
cggh/scikit-allel
allel/model/ndarray.py
VariantTable.query_region
def query_region(self, chrom=None, start=None, stop=None):
    """Query the table, returning row or rows within the given genomic
    region.

    Parameters
    ----------
    chrom : string, optional
        Chromosome/contig.
    start : int, optional
        Region start position (1-based).
    stop : int, optional
        Region stop position (1-based).

    Returns
    -------
    result : VariantTable

    """
    if self.index is None:
        raise ValueError('no index has been set')
    if isinstance(self.index, SortedIndex):
        # ignore chrom
        loc = self.index.locate_range(start, stop)
    else:
        loc = self.index.locate_range(chrom, start, stop)
    return self[loc]
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4591-L4616
cggh/scikit-allel
allel/model/ndarray.py
VariantTable.to_vcf
def to_vcf(self, path, rename=None, number=None, description=None,
           fill=None, write_header=True):
    r"""Write to a variant call format (VCF) file.

    Parameters
    ----------
    path : string
        File path.
    rename : dict, optional
        Rename these columns in the VCF.
    number : dict, optional
        Override the number specified in INFO headers.
    description : dict, optional
        Descriptions for the INFO and FILTER headers.
    fill : dict, optional
        Fill values used for missing data in the table.
    write_header : bool, optional
        If True write VCF header.

    Examples
    --------
    Setup a variant table to write out::

        >>> import allel
        >>> chrom = [b'chr1', b'chr1', b'chr2', b'chr2', b'chr3']
        >>> pos = [2, 6, 3, 8, 1]
        >>> ids = ['a', 'b', 'c', 'd', 'e']
        >>> ref = [b'A', b'C', b'T', b'G', b'N']
        >>> alt = [(b'T', b'.'),
        ...        (b'G', b'.'),
        ...        (b'A', b'C'),
        ...        (b'C', b'A'),
        ...        (b'X', b'.')]
        >>> qual = [1.2, 2.3, 3.4, 4.5, 5.6]
        >>> filter_qd = [True, True, True, False, False]
        >>> filter_dp = [True, False, True, False, False]
        >>> dp = [12, 23, 34, 45, 56]
        >>> qd = [12.3, 23.4, 34.5, 45.6, 56.7]
        >>> flg = [True, False, True, False, True]
        >>> ac = [(1, -1), (3, -1), (5, 6), (7, 8), (9, -1)]
        >>> xx = [(1.2, 2.3), (3.4, 4.5), (5.6, 6.7), (7.8, 8.9),
        ...       (9.0, 9.9)]
        >>> columns = [chrom, pos, ids, ref, alt, qual, filter_dp,
        ...            filter_qd, dp, qd, flg, ac, xx]
        >>> records = list(zip(*columns))
        >>> dtype = [('CHROM', 'S4'),
        ...          ('POS', 'u4'),
        ...          ('ID', 'S1'),
        ...          ('REF', 'S1'),
        ...          ('ALT', ('S1', 2)),
        ...          ('qual', 'f4'),
        ...          ('filter_dp', bool),
        ...          ('filter_qd', bool),
        ...          ('dp', int),
        ...          ('qd', float),
        ...          ('flg', bool),
        ...          ('ac', (int, 2)),
        ...          ('xx', (float, 2))]
        >>> vt = allel.VariantTable(records, dtype=dtype)

    Now write out to VCF and inspect the result::

        >>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'}
        >>> fill = {'ALT': b'.', 'ac': -1}
        >>> number = {'ac': 'A'}
        >>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'}
        >>> vt.to_vcf('example.vcf', rename=rename, fill=fill,
        ...           number=number, description=description)
        >>> print(open('example.vcf').read())
        ##fileformat=VCFv4.1
        ##fileDate=...
        ##source=...
        ##INFO=<ID=DP,Number=1,Type=Integer,Description="">
        ##INFO=<ID=QD,Number=1,Type=Float,Description="">
        ##INFO=<ID=ac,Number=A,Type=Integer,Description="Allele counts">
        ##INFO=<ID=flg,Number=0,Type=Flag,Description="">
        ##INFO=<ID=xx,Number=2,Type=Float,Description="">
        ##FILTER=<ID=QD,Description="">
        ##FILTER=<ID=dp,Description="Low depth">
        #CHROM POS ID REF ALT QUAL FILTER INFO
        chr1 2 a A T 1.2 QD;dp DP=12;QD=12.3;ac=1;flg;xx=...
        chr1 6 b C G 2.3 QD DP=23;QD=23.4;ac=3;xx=3.4,4.5
        chr2 3 c T A,C 3.4 QD;dp DP=34;QD=34.5;ac=5,6;flg;x...
        chr2 8 d G C,A 4.5 PASS DP=45;QD=45.6;ac=7,8;xx=7...
        chr3 1 e N X 5.6 PASS DP=56;QD=56.7;ac=9;flg;xx=...

    """
    write_vcf(path, callset=self, rename=rename, number=number,
              description=description, fill=fill,
              write_header=write_header)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4618-L4708
cggh/scikit-allel
allel/model/ndarray.py
FeatureTable.to_mask
def to_mask(self, size, start_name='start', stop_name='end'):
    """Construct a mask array where elements are True if they fall within
    features in the table.

    Parameters
    ----------
    size : int
        Size of chromosome/contig.
    start_name : string, optional
        Name of column with start coordinates.
    stop_name : string, optional
        Name of column with stop coordinates.

    Returns
    -------
    mask : ndarray, bool

    """
    m = np.zeros(size, dtype=bool)
    for start, stop in self[[start_name, stop_name]]:
        m[start - 1:stop] = True
    return m
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4734-L4757
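The m[start - 1:stop] assignment converts 1-based inclusive feature coordinates (GFF-style) into Python's 0-based half-open slices. A standalone sketch:

import numpy as np

m = np.zeros(10, dtype=bool)
start, stop = 3, 5            # 1-based, inclusive, as in a GFF3 feature
m[start - 1:stop] = True      # marks positions 3, 4 and 5 (indices 2, 3, 4)
print(np.nonzero(m)[0])       # [2 3 4]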
cggh/scikit-allel
allel/model/ndarray.py
FeatureTable.from_gff3
def from_gff3(path, attributes=None, region=None, score_fill=-1,
              phase_fill=-1, attributes_fill='.', dtype=None):
    """Read a feature table from a GFF3 format file.

    Parameters
    ----------
    path : string
        File path.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position
        sorted, bgzipped and tabix indexed. Tabix must also be installed
        and on the system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    dtype : numpy dtype, optional
        Manually specify a dtype.

    Returns
    -------
    ft : FeatureTable

    """
    a = gff3_to_recarray(path, attributes=attributes, region=region,
                         score_fill=score_fill, phase_fill=phase_fill,
                         attributes_fill=attributes_fill, dtype=dtype)
    if a is None:
        return None
    else:
        return FeatureTable(a, copy=False)
python
def from_gff3(path, attributes=None, region=None, score_fill=-1, phase_fill=-1, attributes_fill='.', dtype=None): """Read a feature table from a GFF3 format file. Parameters ---------- path : string File path. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. dtype : numpy dtype, optional Manually specify a dtype. Returns ------- ft : FeatureTable """ a = gff3_to_recarray(path, attributes=attributes, region=region, score_fill=score_fill, phase_fill=phase_fill, attributes_fill=attributes_fill, dtype=dtype) if a is None: return None else: return FeatureTable(a, copy=False)
Read a feature table from a GFF3 format file. Parameters ---------- path : string File path. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. dtype : numpy dtype, optional Manually specify a dtype. Returns ------- ft : FeatureTable
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4760-L4795
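A usage sketch under assumptions: the file path is hypothetical, and 'ID' and 'Parent' are common GFF3 attribute keys that may or may not be present in a given annotation file. Plain (non-bgzipped) files work when region is omitted:

import allel

# hypothetical path to a GFF3 annotation file
ft = allel.FeatureTable.from_gff3('data/annotations.gff3',
                                  attributes=['ID', 'Parent'])
if ft is not None:  # from_gff3 returns None when no features are parsed
    print(ft)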
cggh/scikit-allel
allel/stats/distance.py
pairwise_distance
def pairwise_distance(x, metric, chunked=False, blen=None):
    """Compute pairwise distance between individuals (e.g., samples or
    haplotypes).

    Parameters
    ----------
    x : array_like, shape (n, m, ...)
        Array of m observations (e.g., samples or haplotypes) in a space
        with n dimensions (e.g., variants). Note that the order of the first
        two dimensions is **swapped** compared to what is expected by
        scipy.spatial.distance.pdist.
    metric : string or function
        Distance metric. See documentation for the function
        :func:`scipy.spatial.distance.pdist` for a list of built-in
        distance metrics.
    chunked : bool, optional
        If True, use a block-wise implementation to avoid loading the
        entire input array into memory. This means that a distance matrix
        will be calculated for each block of the input array, and the
        results will be summed to produce the final output. For some
        distance metrics this will return a different result from the
        standard implementation.
    blen : int, optional
        Block length to use for chunked implementation.

    Returns
    -------
    dist : ndarray, shape (m * (m - 1) / 2,)
        Distance matrix in condensed form.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
    ...                          [[0, 1], [1, 1], [1, 2]],
    ...                          [[0, 2], [2, 2], [-1, -1]]])
    >>> d = allel.pairwise_distance(g.to_n_alt(), metric='cityblock')
    >>> d
    array([3., 4., 3.])
    >>> import scipy.spatial
    >>> scipy.spatial.distance.squareform(d)
    array([[0., 3., 4.],
           [3., 0., 3.],
           [4., 3., 0.]])

    """

    import scipy.spatial

    # check inputs
    if not hasattr(x, 'ndim'):
        x = np.asarray(x)
    if x.ndim < 2:
        raise ValueError('array with at least 2 dimensions expected')

    if x.ndim == 2:
        # use scipy to calculate distance, it's most efficient

        def f(b):

            # transpose as pdist expects (m, n) for m observations in an
            # n-dimensional space
            t = b.T

            # compute the distance matrix
            return scipy.spatial.distance.pdist(t, metric=metric)

    else:
        # use our own implementation, it handles multidimensional observations

        def f(b):
            return pdist(b, metric=metric)

    if chunked:
        # use block-wise implementation
        blen = get_blen_array(x, blen)
        dist = None
        for i in range(0, x.shape[0], blen):
            j = min(x.shape[0], i+blen)
            block = x[i:j]
            if dist is None:
                dist = f(block)
            else:
                dist += f(block)

    else:
        # standard implementation
        dist = f(x)

    return dist
python
def pairwise_distance(x, metric, chunked=False, blen=None):
    """Compute pairwise distance between individuals (e.g., samples or
    haplotypes).

    Parameters
    ----------
    x : array_like, shape (n, m, ...)
        Array of m observations (e.g., samples or haplotypes) in a space
        with n dimensions (e.g., variants). Note that the order of the first
        two dimensions is **swapped** compared to what is expected by
        scipy.spatial.distance.pdist.
    metric : string or function
        Distance metric. See documentation for the function
        :func:`scipy.spatial.distance.pdist` for a list of built-in
        distance metrics.
    chunked : bool, optional
        If True, use a block-wise implementation to avoid loading the
        entire input array into memory. This means that a distance matrix
        will be calculated for each block of the input array, and the
        results will be summed to produce the final output. For some
        distance metrics this will return a different result from the
        standard implementation.
    blen : int, optional
        Block length to use for chunked implementation.

    Returns
    -------
    dist : ndarray, shape (m * (m - 1) / 2,)
        Distance matrix in condensed form.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
    ...                          [[0, 1], [1, 1], [1, 2]],
    ...                          [[0, 2], [2, 2], [-1, -1]]])
    >>> d = allel.pairwise_distance(g.to_n_alt(), metric='cityblock')
    >>> d
    array([3., 4., 3.])
    >>> import scipy.spatial
    >>> scipy.spatial.distance.squareform(d)
    array([[0., 3., 4.],
           [3., 0., 3.],
           [4., 3., 0.]])

    """

    import scipy.spatial

    # check inputs
    if not hasattr(x, 'ndim'):
        x = np.asarray(x)
    if x.ndim < 2:
        raise ValueError('array with at least 2 dimensions expected')

    if x.ndim == 2:
        # use scipy to calculate distance, it's most efficient

        def f(b):

            # transpose as pdist expects (m, n) for m observations in an
            # n-dimensional space
            t = b.T

            # compute the distance matrix
            return scipy.spatial.distance.pdist(t, metric=metric)

    else:
        # use our own implementation, it handles multidimensional observations

        def f(b):
            return pdist(b, metric=metric)

    if chunked:
        # use block-wise implementation
        blen = get_blen_array(x, blen)
        dist = None
        for i in range(0, x.shape[0], blen):
            j = min(x.shape[0], i+blen)
            block = x[i:j]
            if dist is None:
                dist = f(block)
            else:
                dist += f(block)

    else:
        # standard implementation
        dist = f(x)

    return dist
Compute pairwise distance between individuals (e.g., samples or
haplotypes).

Parameters
----------
x : array_like, shape (n, m, ...)
    Array of m observations (e.g., samples or haplotypes) in a space
    with n dimensions (e.g., variants). Note that the order of the first
    two dimensions is **swapped** compared to what is expected by
    scipy.spatial.distance.pdist.
metric : string or function
    Distance metric. See documentation for the function
    :func:`scipy.spatial.distance.pdist` for a list of built-in
    distance metrics.
chunked : bool, optional
    If True, use a block-wise implementation to avoid loading the
    entire input array into memory. This means that a distance matrix
    will be calculated for each block of the input array, and the
    results will be summed to produce the final output. For some
    distance metrics this will return a different result from the
    standard implementation.
blen : int, optional
    Block length to use for chunked implementation.

Returns
-------
dist : ndarray, shape (m * (m - 1) / 2,)
    Distance matrix in condensed form.

Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
...                          [[0, 1], [1, 1], [1, 2]],
...                          [[0, 2], [2, 2], [-1, -1]]])
>>> d = allel.pairwise_distance(g.to_n_alt(), metric='cityblock')
>>> d
array([3., 4., 3.])
>>> import scipy.spatial
>>> scipy.spatial.distance.squareform(d)
array([[0., 3., 4.],
       [3., 0., 3.],
       [4., 3., 0.]])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L17-L106
cggh/scikit-allel
allel/stats/distance.py
pdist
def pdist(x, metric):
    """Alternative implementation of :func:`scipy.spatial.distance.pdist`
    which is slower but more flexible in that arrays with >2 dimensions can be
    passed, allowing for multidimensional observations, e.g., diploid
    genotype calls or allele counts.

    Parameters
    ----------
    x : array_like, shape (n, m, ...)
        Array of m observations (e.g., samples or haplotypes) in a space
        with n dimensions (e.g., variants). Note that the order of the first
        two dimensions is **swapped** compared to what is expected by
        scipy.spatial.distance.pdist.
    metric : string or function
        Distance metric. See documentation for the function
        :func:`scipy.spatial.distance.pdist` for a list of built-in
        distance metrics.

    Returns
    -------
    dist : ndarray
        Distance matrix in condensed form.

    """

    if isinstance(metric, str):
        import scipy.spatial
        if hasattr(scipy.spatial.distance, metric):
            metric = getattr(scipy.spatial.distance, metric)
        else:
            raise ValueError('metric name not found')

    m = x.shape[1]
    dist = list()
    for i, j in itertools.combinations(range(m), 2):
        a = x[:, i, ...]
        b = x[:, j, ...]
        d = metric(a, b)
        dist.append(d)
    return np.array(dist)
python
def pdist(x, metric):
    """Alternative implementation of :func:`scipy.spatial.distance.pdist`
    which is slower but more flexible in that arrays with >2 dimensions can be
    passed, allowing for multidimensional observations, e.g., diploid
    genotype calls or allele counts.

    Parameters
    ----------
    x : array_like, shape (n, m, ...)
        Array of m observations (e.g., samples or haplotypes) in a space
        with n dimensions (e.g., variants). Note that the order of the first
        two dimensions is **swapped** compared to what is expected by
        scipy.spatial.distance.pdist.
    metric : string or function
        Distance metric. See documentation for the function
        :func:`scipy.spatial.distance.pdist` for a list of built-in
        distance metrics.

    Returns
    -------
    dist : ndarray
        Distance matrix in condensed form.

    """

    if isinstance(metric, str):
        import scipy.spatial
        if hasattr(scipy.spatial.distance, metric):
            metric = getattr(scipy.spatial.distance, metric)
        else:
            raise ValueError('metric name not found')

    m = x.shape[1]
    dist = list()
    for i, j in itertools.combinations(range(m), 2):
        a = x[:, i, ...]
        b = x[:, j, ...]
        d = metric(a, b)
        dist.append(d)
    return np.array(dist)
Alternative implementation of :func:`scipy.spatial.distance.pdist`
which is slower but more flexible in that arrays with >2 dimensions can be
passed, allowing for multidimensional observations, e.g., diploid
genotype calls or allele counts.

Parameters
----------
x : array_like, shape (n, m, ...)
    Array of m observations (e.g., samples or haplotypes) in a space
    with n dimensions (e.g., variants). Note that the order of the first
    two dimensions is **swapped** compared to what is expected by
    scipy.spatial.distance.pdist.
metric : string or function
    Distance metric. See documentation for the function
    :func:`scipy.spatial.distance.pdist` for a list of built-in
    distance metrics.

Returns
-------
dist : ndarray
    Distance matrix in condensed form.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L109-L148
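On 2-D input the two implementations should agree, since pdist simply applies the named scipy metric to each pair of columns. A small check (note the transpose when calling scipy directly):

import numpy as np
import scipy.spatial
from allel.stats.distance import pdist

x = np.array([[0, 1, 2],
              [1, 1, 0],
              [2, 0, 1]])
d1 = pdist(x, metric='cityblock')
d2 = scipy.spatial.distance.pdist(x.T, metric='cityblock')
# observations are columns for allel's pdist, rows for scipy's
assert np.allclose(d1, d2)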
cggh/scikit-allel
allel/stats/distance.py
pairwise_dxy
def pairwise_dxy(pos, gac, start=None, stop=None, is_accessible=None):
    """Convenience function to calculate a pairwise distance matrix using
    nucleotide divergence (a.k.a. Dxy) as the distance metric.

    Parameters
    ----------
    pos : array_like, int, shape (n_variants,)
        Variant positions.
    gac : array_like, int, shape (n_variants, n_samples, n_alleles)
        Per-genotype allele counts.
    start : int, optional
        Start position of region to use.
    stop : int, optional
        Stop position of region to use.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    dist : ndarray
        Distance matrix in condensed form.

    See Also
    --------
    allel.model.ndarray.GenotypeArray.to_allele_counts

    """

    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    gac = asarray_ndim(gac, 3)
    # compute this once here, to avoid repeated evaluation within the loop
    gan = np.sum(gac, axis=2)
    m = gac.shape[1]
    dist = list()
    for i, j in itertools.combinations(range(m), 2):
        ac1 = gac[:, i, ...]
        an1 = gan[:, i]
        ac2 = gac[:, j, ...]
        an2 = gan[:, j]
        d = sequence_divergence(pos, ac1, ac2, an1=an1, an2=an2,
                                start=start, stop=stop,
                                is_accessible=is_accessible)
        dist.append(d)
    return np.array(dist)
python
def pairwise_dxy(pos, gac, start=None, stop=None, is_accessible=None):
    """Convenience function to calculate a pairwise distance matrix using
    nucleotide divergence (a.k.a. Dxy) as the distance metric.

    Parameters
    ----------
    pos : array_like, int, shape (n_variants,)
        Variant positions.
    gac : array_like, int, shape (n_variants, n_samples, n_alleles)
        Per-genotype allele counts.
    start : int, optional
        Start position of region to use.
    stop : int, optional
        Stop position of region to use.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    dist : ndarray
        Distance matrix in condensed form.

    See Also
    --------
    allel.model.ndarray.GenotypeArray.to_allele_counts

    """

    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    gac = asarray_ndim(gac, 3)
    # compute this once here, to avoid repeated evaluation within the loop
    gan = np.sum(gac, axis=2)
    m = gac.shape[1]
    dist = list()
    for i, j in itertools.combinations(range(m), 2):
        ac1 = gac[:, i, ...]
        an1 = gan[:, i]
        ac2 = gac[:, j, ...]
        an2 = gan[:, j]
        d = sequence_divergence(pos, ac1, ac2, an1=an1, an2=an2,
                                start=start, stop=stop,
                                is_accessible=is_accessible)
        dist.append(d)
    return np.array(dist)
Convenience function to calculate a pairwise distance matrix using
nucleotide divergence (a.k.a. Dxy) as the distance metric.

Parameters
----------
pos : array_like, int, shape (n_variants,)
    Variant positions.
gac : array_like, int, shape (n_variants, n_samples, n_alleles)
    Per-genotype allele counts.
start : int, optional
    Start position of region to use.
stop : int, optional
    Stop position of region to use.
is_accessible : array_like, bool, shape (len(contig),), optional
    Boolean array indicating accessibility status for all positions in
    the chromosome/contig.

Returns
-------
dist : ndarray
    Distance matrix in condensed form.

See Also
--------
allel.model.ndarray.GenotypeArray.to_allele_counts
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L151-L196
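A minimal sketch of the intended workflow, assuming GenotypeArray.to_allele_counts produces the (n_variants, n_samples, n_alleles) array this function expects (positions and genotypes are made up):

import allel
from allel.stats.distance import pairwise_dxy

g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
                         [[0, 1], [1, 1], [0, 0]]])
pos = [5, 10]
gac = g.to_allele_counts()
d = pairwise_dxy(pos, gac, start=1, stop=20)
# d is in condensed form: one Dxy value per sample pair, here 3 values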
cggh/scikit-allel
allel/stats/distance.py
pcoa
def pcoa(dist):
    """Perform principal coordinate analysis of a distance matrix, a.k.a.
    classical multi-dimensional scaling.

    Parameters
    ----------
    dist : array_like
        Distance matrix in condensed form.

    Returns
    -------
    coords : ndarray, shape (n_samples, n_dimensions)
        Transformed coordinates for the samples.
    explained_ratio : ndarray, shape (n_dimensions)
        Variance explained by each dimension.

    """
    import scipy.linalg

    # This implementation is based on the skbio.math.stats.ordination.PCoA
    # implementation, with some minor adjustments.

    # check inputs
    dist = ensure_square(dist)

    # perform scaling
    e_matrix = (dist ** 2) / -2
    row_means = np.mean(e_matrix, axis=1, keepdims=True)
    col_means = np.mean(e_matrix, axis=0, keepdims=True)
    matrix_mean = np.mean(e_matrix)
    f_matrix = e_matrix - row_means - col_means + matrix_mean
    eigvals, eigvecs = scipy.linalg.eigh(f_matrix)

    # deal with eigvals close to zero
    close_to_zero = np.isclose(eigvals, 0)
    eigvals[close_to_zero] = 0

    # sort descending
    idxs = eigvals.argsort()[::-1]
    eigvals = eigvals[idxs]
    eigvecs = eigvecs[:, idxs]

    # keep only positive eigenvalues
    keep = eigvals >= 0
    eigvecs = eigvecs[:, keep]
    eigvals = eigvals[keep]

    # compute coordinates
    coords = eigvecs * np.sqrt(eigvals)

    # compute ratio explained
    explained_ratio = eigvals / eigvals.sum()

    return coords, explained_ratio
python
def pcoa(dist):
    """Perform principal coordinate analysis of a distance matrix, a.k.a.
    classical multi-dimensional scaling.

    Parameters
    ----------
    dist : array_like
        Distance matrix in condensed form.

    Returns
    -------
    coords : ndarray, shape (n_samples, n_dimensions)
        Transformed coordinates for the samples.
    explained_ratio : ndarray, shape (n_dimensions)
        Variance explained by each dimension.

    """
    import scipy.linalg

    # This implementation is based on the skbio.math.stats.ordination.PCoA
    # implementation, with some minor adjustments.

    # check inputs
    dist = ensure_square(dist)

    # perform scaling
    e_matrix = (dist ** 2) / -2
    row_means = np.mean(e_matrix, axis=1, keepdims=True)
    col_means = np.mean(e_matrix, axis=0, keepdims=True)
    matrix_mean = np.mean(e_matrix)
    f_matrix = e_matrix - row_means - col_means + matrix_mean
    eigvals, eigvecs = scipy.linalg.eigh(f_matrix)

    # deal with eigvals close to zero
    close_to_zero = np.isclose(eigvals, 0)
    eigvals[close_to_zero] = 0

    # sort descending
    idxs = eigvals.argsort()[::-1]
    eigvals = eigvals[idxs]
    eigvecs = eigvecs[:, idxs]

    # keep only positive eigenvalues
    keep = eigvals >= 0
    eigvecs = eigvecs[:, keep]
    eigvals = eigvals[keep]

    # compute coordinates
    coords = eigvecs * np.sqrt(eigvals)

    # compute ratio explained
    explained_ratio = eigvals / eigvals.sum()

    return coords, explained_ratio
Perform principal coordinate analysis of a distance matrix, a.k.a.
classical multi-dimensional scaling.

Parameters
----------
dist : array_like
    Distance matrix in condensed form.

Returns
-------
coords : ndarray, shape (n_samples, n_dimensions)
    Transformed coordinates for the samples.
explained_ratio : ndarray, shape (n_dimensions)
    Variance explained by each dimension.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L199-L252
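A toy run on a three-sample condensed distance matrix; the two invariants asserted here follow directly from the code above (eigenvalue ratios are normalised to sum to one):

import numpy as np
from allel.stats.distance import pcoa

dist = np.array([3., 4., 3.])  # condensed distances for 3 samples
coords, explained_ratio = pcoa(dist)
assert coords.shape[0] == 3                    # one row per sample
assert np.isclose(explained_ratio.sum(), 1.0)  # ratios sum to one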
cggh/scikit-allel
allel/stats/distance.py
condensed_coords
def condensed_coords(i, j, n):
    """Transform square distance matrix coordinates to the corresponding
    index into a condensed, 1D form of the matrix.

    Parameters
    ----------
    i : int
        Row index.
    j : int
        Column index.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    ix : int

    """
    # guard conditions
    if i == j or i >= n or j >= n or i < 0 or j < 0:
        raise ValueError('invalid coordinates: %s, %s' % (i, j))

    # normalise order
    i, j = sorted([i, j])

    # calculate number of items in rows before this one (sum of arithmetic
    # progression)
    x = i * ((2 * n) - i - 1) / 2

    # add on previous items in current row
    ix = x + j - i - 1

    return int(ix)
python
def condensed_coords(i, j, n):
    """Transform square distance matrix coordinates to the corresponding
    index into a condensed, 1D form of the matrix.

    Parameters
    ----------
    i : int
        Row index.
    j : int
        Column index.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    ix : int

    """
    # guard conditions
    if i == j or i >= n or j >= n or i < 0 or j < 0:
        raise ValueError('invalid coordinates: %s, %s' % (i, j))

    # normalise order
    i, j = sorted([i, j])

    # calculate number of items in rows before this one (sum of arithmetic
    # progression)
    x = i * ((2 * n) - i - 1) / 2

    # add on previous items in current row
    ix = x + j - i - 1

    return int(ix)
Transform square distance matrix coordinates to the corresponding
index into a condensed, 1D form of the matrix.

Parameters
----------
i : int
    Row index.
j : int
    Column index.
n : int
    Size of the square matrix (length of first or second dimension).

Returns
-------
ix : int
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L255-L288
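The arithmetic-progression formula can be checked against scipy's condensed-form layout, which uses the same ordering:

import numpy as np
import scipy.spatial
from allel.stats.distance import condensed_coords

n = 5
dist = np.arange(n * (n - 1) // 2, dtype=float)
square = scipy.spatial.distance.squareform(dist)
# the condensed index recovers the same value as the square matrix,
# regardless of argument order
assert square[1, 3] == dist[condensed_coords(1, 3, n)]
assert square[3, 1] == dist[condensed_coords(3, 1, n)]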
cggh/scikit-allel
allel/stats/distance.py
condensed_coords_within
def condensed_coords_within(pop, n):
    """Return indices into a condensed distance matrix for all
    pairwise comparisons within the given population.

    Parameters
    ----------
    pop : array_like, int
        Indices of samples or haplotypes within the population.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    indices : ndarray, int

    """

    return [condensed_coords(i, j, n)
            for i, j in itertools.combinations(sorted(pop), 2)]
python
def condensed_coords_within(pop, n):
    """Return indices into a condensed distance matrix for all
    pairwise comparisons within the given population.

    Parameters
    ----------
    pop : array_like, int
        Indices of samples or haplotypes within the population.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    indices : ndarray, int

    """

    return [condensed_coords(i, j, n)
            for i, j in itertools.combinations(sorted(pop), 2)]
Return indices into a condensed distance matrix for all
pairwise comparisons within the given population.

Parameters
----------
pop : array_like, int
    Indices of samples or haplotypes within the population.
n : int
    Size of the square matrix (length of first or second dimension).

Returns
-------
indices : ndarray, int
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L291-L309
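For example, with four samples the condensed ordering is (0,1), (0,2), (0,3), (1,2), (1,3), (2,3):

from allel.stats.distance import condensed_coords_within

# pairwise comparisons among samples 0, 2 and 3 in a 4-sample matrix
ix = condensed_coords_within([0, 2, 3], 4)
print(ix)  # [1, 2, 5] -> pairs (0, 2), (0, 3), (2, 3)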
cggh/scikit-allel
allel/stats/distance.py
condensed_coords_between
def condensed_coords_between(pop1, pop2, n):
    """Return indices into a condensed distance matrix for all pairwise
    comparisons between two populations.

    Parameters
    ----------
    pop1 : array_like, int
        Indices of samples or haplotypes within the first population.
    pop2 : array_like, int
        Indices of samples or haplotypes within the second population.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    indices : ndarray, int

    """

    return [condensed_coords(i, j, n)
            for i, j in itertools.product(sorted(pop1), sorted(pop2))]
python
def condensed_coords_between(pop1, pop2, n):
    """Return indices into a condensed distance matrix for all pairwise
    comparisons between two populations.

    Parameters
    ----------
    pop1 : array_like, int
        Indices of samples or haplotypes within the first population.
    pop2 : array_like, int
        Indices of samples or haplotypes within the second population.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    indices : ndarray, int

    """

    return [condensed_coords(i, j, n)
            for i, j in itertools.product(sorted(pop1), sorted(pop2))]
Return indices into a condensed distance matrix for all pairwise
comparisons between two populations.

Parameters
----------
pop1 : array_like, int
    Indices of samples or haplotypes within the first population.
pop2 : array_like, int
    Indices of samples or haplotypes within the second population.
n : int
    Size of the square matrix (length of first or second dimension).

Returns
-------
indices : ndarray, int
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L312-L332
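Continuing the four-sample example, the between-population variant takes the Cartesian product of the two index sets (the populations must not overlap, since condensed_coords rejects i == j):

from allel.stats.distance import condensed_coords_between

# all comparisons between populations {0, 1} and {2, 3} in a 4-sample matrix
ix = condensed_coords_between([0, 1], [2, 3], 4)
print(ix)  # [1, 2, 3, 4] -> pairs (0, 2), (0, 3), (1, 2), (1, 3)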
cggh/scikit-allel
allel/stats/distance.py
plot_pairwise_distance
def plot_pairwise_distance(dist, labels=None, colorbar=True, ax=None,
                           imshow_kwargs=None):
    """Plot a pairwise distance matrix.

    Parameters
    ----------
    dist : array_like
        The distance matrix in condensed form.
    labels : sequence of strings, optional
        Sample labels for the axes.
    colorbar : bool, optional
        If True, add a colorbar to the current figure.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    imshow_kwargs : dict-like, optional
        Additional keyword arguments passed through to
        :func:`matplotlib.pyplot.imshow`.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn

    """

    import matplotlib.pyplot as plt

    # check inputs
    dist_square = ensure_square(dist)

    # set up axes
    if ax is None:
        # make a square figure
        x = plt.rcParams['figure.figsize'][0]
        fig, ax = plt.subplots(figsize=(x, x))
        fig.tight_layout()

    # setup imshow arguments
    if imshow_kwargs is None:
        imshow_kwargs = dict()
    imshow_kwargs.setdefault('interpolation', 'none')
    imshow_kwargs.setdefault('cmap', 'jet')
    imshow_kwargs.setdefault('vmin', np.min(dist))
    imshow_kwargs.setdefault('vmax', np.max(dist))

    # plot as image
    im = ax.imshow(dist_square, **imshow_kwargs)

    # tidy up
    if labels:
        ax.set_xticks(range(len(labels)))
        ax.set_yticks(range(len(labels)))
        ax.set_xticklabels(labels, rotation=90)
        ax.set_yticklabels(labels, rotation=0)
    else:
        ax.set_xticks([])
        ax.set_yticks([])
    if colorbar:
        plt.gcf().colorbar(im, shrink=.5)

    return ax
python
def plot_pairwise_distance(dist, labels=None, colorbar=True, ax=None,
                           imshow_kwargs=None):
    """Plot a pairwise distance matrix.

    Parameters
    ----------
    dist : array_like
        The distance matrix in condensed form.
    labels : sequence of strings, optional
        Sample labels for the axes.
    colorbar : bool, optional
        If True, add a colorbar to the current figure.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    imshow_kwargs : dict-like, optional
        Additional keyword arguments passed through to
        :func:`matplotlib.pyplot.imshow`.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn

    """

    import matplotlib.pyplot as plt

    # check inputs
    dist_square = ensure_square(dist)

    # set up axes
    if ax is None:
        # make a square figure
        x = plt.rcParams['figure.figsize'][0]
        fig, ax = plt.subplots(figsize=(x, x))
        fig.tight_layout()

    # setup imshow arguments
    if imshow_kwargs is None:
        imshow_kwargs = dict()
    imshow_kwargs.setdefault('interpolation', 'none')
    imshow_kwargs.setdefault('cmap', 'jet')
    imshow_kwargs.setdefault('vmin', np.min(dist))
    imshow_kwargs.setdefault('vmax', np.max(dist))

    # plot as image
    im = ax.imshow(dist_square, **imshow_kwargs)

    # tidy up
    if labels:
        ax.set_xticks(range(len(labels)))
        ax.set_yticks(range(len(labels)))
        ax.set_xticklabels(labels, rotation=90)
        ax.set_yticklabels(labels, rotation=0)
    else:
        ax.set_xticks([])
        ax.set_yticks([])
    if colorbar:
        plt.gcf().colorbar(im, shrink=.5)

    return ax
Plot a pairwise distance matrix.

Parameters
----------
dist : array_like
    The distance matrix in condensed form.
labels : sequence of strings, optional
    Sample labels for the axes.
colorbar : bool, optional
    If True, add a colorbar to the current figure.
ax : axes, optional
    The axes on which to draw. If not provided, a new figure will be
    created.
imshow_kwargs : dict-like, optional
    Additional keyword arguments passed through to
    :func:`matplotlib.pyplot.imshow`.

Returns
-------
ax : axes
    The axes on which the plot was drawn
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/distance.py#L335-L396
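A minimal plotting sketch; the Agg backend is selected explicitly so it also runs on headless systems, and the labels and output file name are made up:

import matplotlib
matplotlib.use('Agg')  # non-interactive backend for headless use
import numpy as np
from allel.stats.distance import plot_pairwise_distance

dist = np.array([3., 4., 3.])  # condensed distances for 3 samples
ax = plot_pairwise_distance(dist, labels=['A', 'B', 'C'])
ax.figure.savefig('pairwise_distance.png')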
cggh/scikit-allel
allel/stats/misc.py
jackknife
def jackknife(values, statistic):
    """Estimate standard error for `statistic` computed over `values` using
    the jackknife.

    Parameters
    ----------
    values : array_like or tuple of array_like
        Input array, or tuple of input arrays.
    statistic : function
        The statistic to compute.

    Returns
    -------
    m : float
        Mean of jackknife values.
    se : float
        Estimate of standard error.
    vj : ndarray
        Statistic values computed for each jackknife iteration.

    """

    if isinstance(values, tuple):
        # multiple input arrays
        n = len(values[0])
        masked_values = [np.ma.asarray(v) for v in values]
        for m in masked_values:
            assert m.ndim == 1, 'only 1D arrays supported'
            assert m.shape[0] == n, 'input arrays not of equal length'
            m.mask = np.zeros(m.shape, dtype=bool)
    else:
        n = len(values)
        masked_values = np.ma.asarray(values)
        assert masked_values.ndim == 1, 'only 1D arrays supported'
        masked_values.mask = np.zeros(masked_values.shape, dtype=bool)

    # values of the statistic calculated in each jackknife iteration
    vj = list()

    for i in range(n):
        if isinstance(values, tuple):
            # multiple input arrays
            for m in masked_values:
                m.mask[i] = True
            x = statistic(*masked_values)
            for m in masked_values:
                m.mask[i] = False
        else:
            masked_values.mask[i] = True
            x = statistic(masked_values)
            masked_values.mask[i] = False
        vj.append(x)

    # convert to array for convenience
    vj = np.array(vj)

    # compute mean of jackknife values
    m = vj.mean()

    # compute standard error
    sv = ((n - 1) / n) * np.sum((vj - m) ** 2)
    se = np.sqrt(sv)

    return m, se, vj
python
def jackknife(values, statistic):
    """Estimate standard error for `statistic` computed over `values` using
    the jackknife.

    Parameters
    ----------
    values : array_like or tuple of array_like
        Input array, or tuple of input arrays.
    statistic : function
        The statistic to compute.

    Returns
    -------
    m : float
        Mean of jackknife values.
    se : float
        Estimate of standard error.
    vj : ndarray
        Statistic values computed for each jackknife iteration.

    """

    if isinstance(values, tuple):
        # multiple input arrays
        n = len(values[0])
        masked_values = [np.ma.asarray(v) for v in values]
        for m in masked_values:
            assert m.ndim == 1, 'only 1D arrays supported'
            assert m.shape[0] == n, 'input arrays not of equal length'
            m.mask = np.zeros(m.shape, dtype=bool)
    else:
        n = len(values)
        masked_values = np.ma.asarray(values)
        assert masked_values.ndim == 1, 'only 1D arrays supported'
        masked_values.mask = np.zeros(masked_values.shape, dtype=bool)

    # values of the statistic calculated in each jackknife iteration
    vj = list()

    for i in range(n):
        if isinstance(values, tuple):
            # multiple input arrays
            for m in masked_values:
                m.mask[i] = True
            x = statistic(*masked_values)
            for m in masked_values:
                m.mask[i] = False
        else:
            masked_values.mask[i] = True
            x = statistic(masked_values)
            masked_values.mask[i] = False
        vj.append(x)

    # convert to array for convenience
    vj = np.array(vj)

    # compute mean of jackknife values
    m = vj.mean()

    # compute standard error
    sv = ((n - 1) / n) * np.sum((vj - m) ** 2)
    se = np.sqrt(sv)

    return m, se, vj
Estimate standard error for `statistic` computed over `values` using
the jackknife.

Parameters
----------
values : array_like or tuple of array_like
    Input array, or tuple of input arrays.
statistic : function
    The statistic to compute.

Returns
-------
m : float
    Mean of jackknife values.
se : float
    Estimate of standard error.
vj : ndarray
    Statistic values computed for each jackknife iteration.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/misc.py#L15-L82
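For the sample mean, the jackknife standard error reduces to the familiar analytic formula std(values, ddof=1) / sqrt(n), which makes a convenient sanity check:

import numpy as np
from allel.stats.misc import jackknife

values = np.array([2., 4., 4., 4., 5., 5., 7., 9.])
# np.mean works as the statistic here because it dispatches to the
# masked-array mean, honouring the leave-one-out mask
m, se, vj = jackknife(values, statistic=np.mean)
assert np.isclose(se, values.std(ddof=1) / np.sqrt(len(values)))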
cggh/scikit-allel
allel/stats/misc.py
plot_variant_locator
def plot_variant_locator(pos, step=None, ax=None, start=None,
                         stop=None, flip=False, line_kwargs=None):
    """
    Plot lines indicating the physical genome location of variants from a
    single chromosome/contig. By default the top x axis is in variant index
    space, and the bottom x axis is in genome position space.

    Parameters
    ----------
    pos : array_like
        A sorted 1-dimensional array of genomic positions from a single
        chromosome/contig.
    step : int, optional
        Plot a line for every `step` variants.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    start : int, optional
        The start position for the region to draw.
    stop : int, optional
        The stop position for the region to draw.
    flip : bool, optional
        Flip the plot upside down.
    line_kwargs : dict-like
        Additional keyword arguments passed through to `plt.Line2D`.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn

    """

    import matplotlib.pyplot as plt

    # check inputs
    pos = SortedIndex(pos, copy=False)

    # set up axes
    if ax is None:
        x = plt.rcParams['figure.figsize'][0]
        y = x / 7
        fig, ax = plt.subplots(figsize=(x, y))
        fig.tight_layout()

    # determine x axis limits
    if start is None:
        start = np.min(pos)
    if stop is None:
        stop = np.max(pos)
    loc = pos.locate_range(start, stop)
    pos = pos[loc]
    if step is None:
        step = len(pos) // 100
    ax.set_xlim(start, stop)

    # plot the lines
    if line_kwargs is None:
        line_kwargs = dict()
    # line_kwargs.setdefault('linewidth', .5)
    n_variants = len(pos)
    for i, p in enumerate(pos[::step]):
        xfrom = p
        xto = (
            start +
            ((i * step / n_variants) *
             (stop-start))
        )
        line = plt.Line2D([xfrom, xto], [0, 1], **line_kwargs)
        ax.add_line(line)

    # invert?
    if flip:
        ax.invert_yaxis()
        ax.xaxis.tick_top()
    else:
        ax.xaxis.tick_bottom()

    # tidy up
    ax.set_yticks([])
    ax.xaxis.set_tick_params(direction='out')
    for spine in 'left', 'right':
        ax.spines[spine].set_visible(False)

    return ax
python
def plot_variant_locator(pos, step=None, ax=None, start=None,
                         stop=None, flip=False, line_kwargs=None):
    """
    Plot lines indicating the physical genome location of variants from a
    single chromosome/contig. By default the top x axis is in variant index
    space, and the bottom x axis is in genome position space.

    Parameters
    ----------
    pos : array_like
        A sorted 1-dimensional array of genomic positions from a single
        chromosome/contig.
    step : int, optional
        Plot a line for every `step` variants.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    start : int, optional
        The start position for the region to draw.
    stop : int, optional
        The stop position for the region to draw.
    flip : bool, optional
        Flip the plot upside down.
    line_kwargs : dict-like
        Additional keyword arguments passed through to `plt.Line2D`.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn

    """

    import matplotlib.pyplot as plt

    # check inputs
    pos = SortedIndex(pos, copy=False)

    # set up axes
    if ax is None:
        x = plt.rcParams['figure.figsize'][0]
        y = x / 7
        fig, ax = plt.subplots(figsize=(x, y))
        fig.tight_layout()

    # determine x axis limits
    if start is None:
        start = np.min(pos)
    if stop is None:
        stop = np.max(pos)
    loc = pos.locate_range(start, stop)
    pos = pos[loc]
    if step is None:
        step = len(pos) // 100
    ax.set_xlim(start, stop)

    # plot the lines
    if line_kwargs is None:
        line_kwargs = dict()
    # line_kwargs.setdefault('linewidth', .5)
    n_variants = len(pos)
    for i, p in enumerate(pos[::step]):
        xfrom = p
        xto = (
            start +
            ((i * step / n_variants) *
             (stop-start))
        )
        line = plt.Line2D([xfrom, xto], [0, 1], **line_kwargs)
        ax.add_line(line)

    # invert?
    if flip:
        ax.invert_yaxis()
        ax.xaxis.tick_top()
    else:
        ax.xaxis.tick_bottom()

    # tidy up
    ax.set_yticks([])
    ax.xaxis.set_tick_params(direction='out')
    for spine in 'left', 'right':
        ax.spines[spine].set_visible(False)

    return ax
Plot lines indicating the physical genome location of variants from a
single chromosome/contig. By default the top x axis is in variant index
space, and the bottom x axis is in genome position space.

Parameters
----------
pos : array_like
    A sorted 1-dimensional array of genomic positions from a single
    chromosome/contig.
step : int, optional
    Plot a line for every `step` variants.
ax : axes, optional
    The axes on which to draw. If not provided, a new figure will be
    created.
start : int, optional
    The start position for the region to draw.
stop : int, optional
    The stop position for the region to draw.
flip : bool, optional
    Flip the plot upside down.
line_kwargs : dict-like
    Additional keyword arguments passed through to `plt.Line2D`.

Returns
-------
ax : axes
    The axes on which the plot was drawn
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/misc.py#L85-L171
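A usage sketch with simulated positions, again forcing a non-interactive backend; the position range, counts and file name are made up:

import matplotlib
matplotlib.use('Agg')  # non-interactive backend for headless use
import numpy as np
from allel.stats.misc import plot_variant_locator

# hypothetical sorted, unique positions on a single contig
pos = np.sort(np.random.choice(1000000, size=5000, replace=False))
ax = plot_variant_locator(pos, step=100)
ax.figure.savefig('variant_locator.png')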
cggh/scikit-allel
allel/stats/misc.py
tabulate_state_transitions
def tabulate_state_transitions(x, states, pos=None):
    """Construct a dataframe where each row provides information about a
    state transition.

    Parameters
    ----------
    x : array_like, int
        1-dimensional array of state values.
    states : set
        Set of states of interest. Any state value not in this set will be
        ignored.
    pos : array_like, int, optional
        Array of positions corresponding to values in `x`.

    Returns
    -------
    df : DataFrame

    Notes
    -----
    The resulting dataframe includes one row at the start representing the
    first state observation and one row at the end representing the last
    state observation.

    Examples
    --------
    >>> import allel
    >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1]
    >>> df = allel.tabulate_state_transitions(x, states={1, 2})
    >>> df
       lstate  rstate  lidx  ridx
    0      -1       1    -1     0
    1       1       2     4     5
    2       2       1     8     9
    3       1      -1    10    -1
    >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31]
    >>> df = allel.tabulate_state_transitions(x, states={1, 2}, pos=pos)
    >>> df
       lstate  rstate  lidx  ridx  lpos  rpos
    0      -1       1    -1     0    -1     2
    1       1       2     4     5    10    14
    2       2       1     8     9    28    30
    3       1      -1    10    -1    31    -1

    """

    # check inputs
    x = asarray_ndim(x, 1)
    check_integer_dtype(x)
    x = memoryview_safe(x)

    # find state transitions
    switch_points, transitions, _ = state_transitions(x, states)

    # start to build a dataframe
    items = [('lstate', transitions[:, 0]),
             ('rstate', transitions[:, 1]),
             ('lidx', switch_points[:, 0]),
             ('ridx', switch_points[:, 1])]

    # deal with optional positions
    if pos is not None:
        pos = asarray_ndim(pos, 1)
        check_dim0_aligned(x, pos)
        check_integer_dtype(pos)

        # find switch positions
        switch_positions = np.take(pos, switch_points)
        # deal with boundary transitions
        switch_positions[0, 0] = -1
        switch_positions[-1, 1] = -1

        # add columns into dataframe
        items += [('lpos', switch_positions[:, 0]),
                  ('rpos', switch_positions[:, 1])]

    import pandas
    return pandas.DataFrame.from_dict(OrderedDict(items))
python
def tabulate_state_transitions(x, states, pos=None):
    """Construct a dataframe where each row provides information about a
    state transition.

    Parameters
    ----------
    x : array_like, int
        1-dimensional array of state values.
    states : set
        Set of states of interest. Any state value not in this set will be
        ignored.
    pos : array_like, int, optional
        Array of positions corresponding to values in `x`.

    Returns
    -------
    df : DataFrame

    Notes
    -----
    The resulting dataframe includes one row at the start representing the
    first state observation and one row at the end representing the last
    state observation.

    Examples
    --------
    >>> import allel
    >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1]
    >>> df = allel.tabulate_state_transitions(x, states={1, 2})
    >>> df
       lstate  rstate  lidx  ridx
    0      -1       1    -1     0
    1       1       2     4     5
    2       2       1     8     9
    3       1      -1    10    -1
    >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31]
    >>> df = allel.tabulate_state_transitions(x, states={1, 2}, pos=pos)
    >>> df
       lstate  rstate  lidx  ridx  lpos  rpos
    0      -1       1    -1     0    -1     2
    1       1       2     4     5    10    14
    2       2       1     8     9    28    30
    3       1      -1    10    -1    31    -1

    """

    # check inputs
    x = asarray_ndim(x, 1)
    check_integer_dtype(x)
    x = memoryview_safe(x)

    # find state transitions
    switch_points, transitions, _ = state_transitions(x, states)

    # start to build a dataframe
    items = [('lstate', transitions[:, 0]),
             ('rstate', transitions[:, 1]),
             ('lidx', switch_points[:, 0]),
             ('ridx', switch_points[:, 1])]

    # deal with optional positions
    if pos is not None:
        pos = asarray_ndim(pos, 1)
        check_dim0_aligned(x, pos)
        check_integer_dtype(pos)

        # find switch positions
        switch_positions = np.take(pos, switch_points)
        # deal with boundary transitions
        switch_positions[0, 0] = -1
        switch_positions[-1, 1] = -1

        # add columns into dataframe
        items += [('lpos', switch_positions[:, 0]),
                  ('rpos', switch_positions[:, 1])]

    import pandas
    return pandas.DataFrame.from_dict(OrderedDict(items))
Construct a dataframe where each row provides information about a
state transition.

Parameters
----------
x : array_like, int
    1-dimensional array of state values.
states : set
    Set of states of interest. Any state value not in this set will be
    ignored.
pos : array_like, int, optional
    Array of positions corresponding to values in `x`.

Returns
-------
df : DataFrame

Notes
-----
The resulting dataframe includes one row at the start representing the
first state observation and one row at the end representing the last
state observation.

Examples
--------
>>> import allel
>>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1]
>>> df = allel.tabulate_state_transitions(x, states={1, 2})
>>> df
   lstate  rstate  lidx  ridx
0      -1       1    -1     0
1       1       2     4     5
2       2       1     8     9
3       1      -1    10    -1
>>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31]
>>> df = allel.tabulate_state_transitions(x, states={1, 2}, pos=pos)
>>> df
   lstate  rstate  lidx  ridx  lpos  rpos
0      -1       1    -1     0    -1     2
1       1       2     4     5    10    14
2       2       1     8     9    28    30
3       1      -1    10    -1    31    -1
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/misc.py#L174-L248
cggh/scikit-allel
allel/stats/misc.py
tabulate_state_blocks
def tabulate_state_blocks(x, states, pos=None):
    """Construct a dataframe where each row provides information about
    continuous state blocks.

    Parameters
    ----------
    x : array_like, int
        1-dimensional array of state values.
    states : set
        Set of states of interest. Any state value not in this set will be
        ignored.
    pos : array_like, int, optional
        Array of positions corresponding to values in `x`.

    Returns
    -------
    df : DataFrame

    Examples
    --------
    >>> import allel
    >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1]
    >>> df = allel.tabulate_state_blocks(x, states={1, 2})
    >>> df
       state  support  start_lidx  ...  size_min  size_max  is_marginal
    0      1        4          -1  ...         5        -1         True
    1      2        3           4  ...         4         4        False
    2      1        2           8  ...         2        -1         True
    [3 rows x 9 columns]
    >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31]
    >>> df = allel.tabulate_state_blocks(x, states={1, 2}, pos=pos)
    >>> df
       state  support  start_lidx  ...  stop_rpos  length_min  length_max
    0      1        4          -1  ...         14           9          -1
    1      2        3           4  ...         30          15          19
    2      1        2           8  ...         -1           2          -1
    [3 rows x 15 columns]

    """

    # check inputs
    x = asarray_ndim(x, 1)
    check_integer_dtype(x)
    x = memoryview_safe(x)

    # find state transitions
    switch_points, transitions, observations = state_transitions(x, states)

    # setup some helpers
    t = transitions[1:, 0]
    o = observations[1:]
    s1 = switch_points[:-1]
    s2 = switch_points[1:]
    is_marginal = (s1[:, 0] < 0) | (s2[:, 1] < 0)
    size_min = s2[:, 0] - s1[:, 1] + 1
    size_max = s2[:, 1] - s1[:, 0] - 1
    size_max[is_marginal] = -1

    # start to build a dataframe
    items = [
        ('state', t),
        ('support', o),
        ('start_lidx', s1[:, 0]),
        ('start_ridx', s1[:, 1]),
        ('stop_lidx', s2[:, 0]),
        ('stop_ridx', s2[:, 1]),
        ('size_min', size_min),
        ('size_max', size_max),
        ('is_marginal', is_marginal)
    ]

    # deal with optional positions
    if pos is not None:
        pos = asarray_ndim(pos, 1)
        check_dim0_aligned(x, pos)
        check_integer_dtype(pos)

        # obtain switch positions
        switch_positions = np.take(pos, switch_points)
        # deal with boundary transitions
        switch_positions[0, 0] = -1
        switch_positions[-1, 1] = -1

        # setup helpers
        p1 = switch_positions[:-1]
        p2 = switch_positions[1:]
        length_min = p2[:, 0] - p1[:, 1] + 1
        length_max = p2[:, 1] - p1[:, 0] - 1
        length_max[is_marginal] = -1

        items += [
            ('start_lpos', p1[:, 0]),
            ('start_rpos', p1[:, 1]),
            ('stop_lpos', p2[:, 0]),
            ('stop_rpos', p2[:, 1]),
            ('length_min', length_min),
            ('length_max', length_max),
        ]

    import pandas
    return pandas.DataFrame.from_dict(OrderedDict(items))
python
def tabulate_state_blocks(x, states, pos=None):
    """Construct a dataframe where each row provides information about
    continuous state blocks.

    Parameters
    ----------
    x : array_like, int
        1-dimensional array of state values.
    states : set
        Set of states of interest. Any state value not in this set will be
        ignored.
    pos : array_like, int, optional
        Array of positions corresponding to values in `x`.

    Returns
    -------
    df : DataFrame

    Examples
    --------
    >>> import allel
    >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1]
    >>> df = allel.tabulate_state_blocks(x, states={1, 2})
    >>> df
       state  support  start_lidx  ...  size_min  size_max  is_marginal
    0      1        4          -1  ...         5        -1         True
    1      2        3           4  ...         4         4        False
    2      1        2           8  ...         2        -1         True
    [3 rows x 9 columns]
    >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31]
    >>> df = allel.tabulate_state_blocks(x, states={1, 2}, pos=pos)
    >>> df
       state  support  start_lidx  ...  stop_rpos  length_min  length_max
    0      1        4          -1  ...         14           9          -1
    1      2        3           4  ...         30          15          19
    2      1        2           8  ...         -1           2          -1
    [3 rows x 15 columns]

    """

    # check inputs
    x = asarray_ndim(x, 1)
    check_integer_dtype(x)
    x = memoryview_safe(x)

    # find state transitions
    switch_points, transitions, observations = state_transitions(x, states)

    # setup some helpers
    t = transitions[1:, 0]
    o = observations[1:]
    s1 = switch_points[:-1]
    s2 = switch_points[1:]
    is_marginal = (s1[:, 0] < 0) | (s2[:, 1] < 0)
    size_min = s2[:, 0] - s1[:, 1] + 1
    size_max = s2[:, 1] - s1[:, 0] - 1
    size_max[is_marginal] = -1

    # start to build a dataframe
    items = [
        ('state', t),
        ('support', o),
        ('start_lidx', s1[:, 0]),
        ('start_ridx', s1[:, 1]),
        ('stop_lidx', s2[:, 0]),
        ('stop_ridx', s2[:, 1]),
        ('size_min', size_min),
        ('size_max', size_max),
        ('is_marginal', is_marginal)
    ]

    # deal with optional positions
    if pos is not None:
        pos = asarray_ndim(pos, 1)
        check_dim0_aligned(x, pos)
        check_integer_dtype(pos)

        # obtain switch positions
        switch_positions = np.take(pos, switch_points)
        # deal with boundary transitions
        switch_positions[0, 0] = -1
        switch_positions[-1, 1] = -1

        # setup helpers
        p1 = switch_positions[:-1]
        p2 = switch_positions[1:]
        length_min = p2[:, 0] - p1[:, 1] + 1
        length_max = p2[:, 1] - p1[:, 0] - 1
        length_max[is_marginal] = -1

        items += [
            ('start_lpos', p1[:, 0]),
            ('start_rpos', p1[:, 1]),
            ('stop_lpos', p2[:, 0]),
            ('stop_rpos', p2[:, 1]),
            ('length_min', length_min),
            ('length_max', length_max),
        ]

    import pandas
    return pandas.DataFrame.from_dict(OrderedDict(items))
Construct a dataframe where each row provides information about
continuous state blocks.

Parameters
----------
x : array_like, int
    1-dimensional array of state values.
states : set
    Set of states of interest. Any state value not in this set will be
    ignored.
pos : array_like, int, optional
    Array of positions corresponding to values in `x`.

Returns
-------
df : DataFrame

Examples
--------
>>> import allel
>>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1]
>>> df = allel.tabulate_state_blocks(x, states={1, 2})
>>> df
   state  support  start_lidx  ...  size_min  size_max  is_marginal
0      1        4          -1  ...         5        -1         True
1      2        3           4  ...         4         4        False
2      1        2           8  ...         2        -1         True
[3 rows x 9 columns]
>>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31]
>>> df = allel.tabulate_state_blocks(x, states={1, 2}, pos=pos)
>>> df
   state  support  start_lidx  ...  stop_rpos  length_min  length_max
0      1        4          -1  ...         14           9          -1
1      2        3           4  ...         30          15          19
2      1        2           8  ...         -1           2          -1
[3 rows x 15 columns]
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/misc.py#L251-L349
cggh/scikit-allel
allel/io/vcf_write.py
write_vcf
def write_vcf(path, callset, rename=None, number=None, description=None,
              fill=None, write_header=True):
    """Preliminary support for writing a VCF file. Currently does not support
    sample data. Needs further work."""

    names, callset = normalize_callset(callset)
    with open(path, 'w') as vcf_file:
        if write_header:
            write_vcf_header(vcf_file, names, callset=callset, rename=rename,
                             number=number, description=description)
        write_vcf_data(vcf_file, names, callset=callset, rename=rename,
                       fill=fill)
python
def write_vcf(path, callset, rename=None, number=None, description=None,
              fill=None, write_header=True):
    """Preliminary support for writing a VCF file. Currently does not support
    sample data. Needs further work."""

    names, callset = normalize_callset(callset)
    with open(path, 'w') as vcf_file:
        if write_header:
            write_vcf_header(vcf_file, names, callset=callset, rename=rename,
                             number=number, description=description)
        write_vcf_data(vcf_file, names, callset=callset, rename=rename,
                       fill=fill)
Preliminary support for writing a VCF file. Currently does not support
sample data. Needs further work.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_write.py#L50-L61
cggh/scikit-allel
allel/util.py
asarray_ndim
def asarray_ndim(a, *ndims, **kwargs):
    """Ensure numpy array.

    Parameters
    ----------
    a : array_like
    *ndims : int, optional
        Allowed values for number of dimensions.
    **kwargs
        Passed through to :func:`numpy.array`.

    Returns
    -------
    a : numpy.ndarray

    """
    allow_none = kwargs.pop('allow_none', False)
    kwargs.setdefault('copy', False)
    if a is None and allow_none:
        return None
    a = np.array(a, **kwargs)
    if a.ndim not in ndims:
        if len(ndims) > 1:
            expect_str = 'one of %s' % str(ndims)
        else:
            # noinspection PyUnresolvedReferences
            expect_str = '%s' % ndims[0]
        raise TypeError('bad number of dimensions: expected %s; found %s' %
                        (expect_str, a.ndim))
    return a
python
def asarray_ndim(a, *ndims, **kwargs):
    """Ensure numpy array.

    Parameters
    ----------
    a : array_like
    *ndims : int, optional
        Allowed values for number of dimensions.
    **kwargs
        Passed through to :func:`numpy.array`.

    Returns
    -------
    a : numpy.ndarray

    """
    allow_none = kwargs.pop('allow_none', False)
    kwargs.setdefault('copy', False)
    if a is None and allow_none:
        return None
    a = np.array(a, **kwargs)
    if a.ndim not in ndims:
        if len(ndims) > 1:
            expect_str = 'one of %s' % str(ndims)
        else:
            # noinspection PyUnresolvedReferences
            expect_str = '%s' % ndims[0]
        raise TypeError('bad number of dimensions: expected %s; found %s' %
                        (expect_str, a.ndim))
    return a
Ensure numpy array.

Parameters
----------
a : array_like
*ndims : int, optional
    Allowed values for number of dimensions.
**kwargs
    Passed through to :func:`numpy.array`.

Returns
-------
a : numpy.ndarray
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/util.py#L32-L61
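The behaviour is easiest to see on a couple of concrete calls:

import numpy as np
from allel.util import asarray_ndim

a = asarray_ndim([1, 2, 3], 1)                 # accepted: 1 dimension
b = asarray_ndim(None, 1, 2, allow_none=True)  # returns None
try:
    asarray_ndim([[1, 2], [3, 4]], 1)
except TypeError as e:
    print(e)  # bad number of dimensions: expected 1; found 2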
cggh/scikit-allel
allel/util.py
hdf5_cache
def hdf5_cache(filepath=None, parent=None, group=None, names=None,
               typed=False, hashed_key=False, **h5dcreate_kwargs):
    """HDF5 cache decorator.

    Parameters
    ----------
    filepath : string, optional
        Path to HDF5 file. If None a temporary file name will be used.
    parent : string, optional
        Path to group within HDF5 file to use as parent. If None the root
        group will be used.
    group : string, optional
        Path to group within HDF5 file, relative to parent, to use as
        container for cached data. If None the name of the wrapped function
        will be used.
    names : sequence of strings, optional
        Name(s) of dataset(s). If None, default names will be 'f00', 'f01',
        etc.
    typed : bool, optional
        If True, arguments of different types will be cached separately.
        For example, f(3.0) and f(3) will be treated as distinct calls with
        distinct results.
    hashed_key : bool, optional
        If False (default) the key will not be hashed, which makes for
        readable cache group names. If True the key will be hashed, however
        note that on Python >= 3.3 the hash value will not be the same
        between sessions unless the environment variable PYTHONHASHSEED has
        been set to the same value.

    Returns
    -------
    decorator : function

    Examples
    --------
    Without any arguments, will cache using a temporary HDF5 file::

        >>> import allel
        >>> @allel.util.hdf5_cache()
        ... def foo(n):
        ...     print('executing foo')
        ...     return np.arange(n)
        ...
        >>> foo(3)
        executing foo
        array([0, 1, 2])
        >>> foo(3)
        array([0, 1, 2])
        >>> foo.cache_filepath # doctest: +SKIP
        '/tmp/tmp_jwtwgjz'

    Supports multiple return values, including scalars, e.g.::

        >>> @allel.util.hdf5_cache()
        ... def bar(n):
        ...     print('executing bar')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> bar(3)
        executing bar
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> bar(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)

    Names can also be specified for the datasets, e.g.::

        >>> @allel.util.hdf5_cache(names=['z', 'x', 'y'])
        ... def baz(n):
        ...     print('executing baz')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> baz(3)
        executing baz
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> baz(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)

    """

    # initialise HDF5 file path
    if filepath is None:
        import tempfile
        filepath = tempfile.mktemp(prefix='scikit_allel_', suffix='.h5')
        atexit.register(os.remove, filepath)

    # initialise defaults for dataset creation
    h5dcreate_kwargs.setdefault('chunks', True)

    def decorator(user_function):

        # setup the name for the cache container group
        if group is None:
            container = user_function.__name__
        else:
            container = group

        def wrapper(*args, **kwargs):

            # load from cache or not
            no_cache = kwargs.pop('no_cache', False)

            # compute a key from the function arguments
            key = _make_key(args, kwargs, typed)
            if hashed_key:
                key = str(hash(key))
            else:
                key = str(key).replace('/', '__slash__')

            return _hdf5_cache_act(filepath, parent, container, key, names,
                                   no_cache, user_function, args, kwargs,
                                   h5dcreate_kwargs)

        wrapper.cache_filepath = filepath
        return update_wrapper(wrapper, user_function)

    return decorator
python
def hdf5_cache(filepath=None, parent=None, group=None, names=None,
               typed=False, hashed_key=False, **h5dcreate_kwargs):
    """HDF5 cache decorator.

    Parameters
    ----------
    filepath : string, optional
        Path to HDF5 file. If None a temporary file name will be used.
    parent : string, optional
        Path to group within HDF5 file to use as parent. If None the root
        group will be used.
    group : string, optional
        Path to group within HDF5 file, relative to parent, to use as
        container for cached data. If None the name of the wrapped function
        will be used.
    names : sequence of strings, optional
        Name(s) of dataset(s). If None, default names will be 'f00', 'f01',
        etc.
    typed : bool, optional
        If True, arguments of different types will be cached separately.
        For example, f(3.0) and f(3) will be treated as distinct calls with
        distinct results.
    hashed_key : bool, optional
        If False (default) the key will not be hashed, which makes for
        readable cache group names. If True the key will be hashed, however
        note that on Python >= 3.3 the hash value will not be the same
        between sessions unless the environment variable PYTHONHASHSEED has
        been set to the same value.

    Returns
    -------
    decorator : function

    Examples
    --------
    Without any arguments, will cache using a temporary HDF5 file::

        >>> import allel
        >>> @allel.util.hdf5_cache()
        ... def foo(n):
        ...     print('executing foo')
        ...     return np.arange(n)
        ...
        >>> foo(3)
        executing foo
        array([0, 1, 2])
        >>> foo(3)
        array([0, 1, 2])
        >>> foo.cache_filepath # doctest: +SKIP
        '/tmp/tmp_jwtwgjz'

    Supports multiple return values, including scalars, e.g.::

        >>> @allel.util.hdf5_cache()
        ... def bar(n):
        ...     print('executing bar')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> bar(3)
        executing bar
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> bar(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)

    Names can also be specified for the datasets, e.g.::

        >>> @allel.util.hdf5_cache(names=['z', 'x', 'y'])
        ... def baz(n):
        ...     print('executing baz')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> baz(3)
        executing baz
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> baz(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)

    """

    # initialise HDF5 file path
    if filepath is None:
        import tempfile
        filepath = tempfile.mktemp(prefix='scikit_allel_', suffix='.h5')
        atexit.register(os.remove, filepath)

    # initialise defaults for dataset creation
    h5dcreate_kwargs.setdefault('chunks', True)

    def decorator(user_function):

        # setup the name for the cache container group
        if group is None:
            container = user_function.__name__
        else:
            container = group

        def wrapper(*args, **kwargs):

            # load from cache or not
            no_cache = kwargs.pop('no_cache', False)

            # compute a key from the function arguments
            key = _make_key(args, kwargs, typed)
            if hashed_key:
                key = str(hash(key))
            else:
                key = str(key).replace('/', '__slash__')

            return _hdf5_cache_act(filepath, parent, container, key, names,
                                   no_cache, user_function, args, kwargs,
                                   h5dcreate_kwargs)

        wrapper.cache_filepath = filepath
        return update_wrapper(wrapper, user_function)

    return decorator
HDF5 cache decorator.

Parameters
----------
filepath : string, optional
    Path to HDF5 file. If None a temporary file name will be used.
parent : string, optional
    Path to group within HDF5 file to use as parent. If None the root
    group will be used.
group : string, optional
    Path to group within HDF5 file, relative to parent, to use as
    container for cached data. If None the name of the wrapped function
    will be used.
names : sequence of strings, optional
    Name(s) of dataset(s). If None, default names will be 'f00', 'f01',
    etc.
typed : bool, optional
    If True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.
hashed_key : bool, optional
    If False (default) the key will not be hashed, which makes for
    readable cache group names. If True the key will be hashed, however
    note that on Python >= 3.3 the hash value will not be the same
    between sessions unless the environment variable PYTHONHASHSEED has
    been set to the same value.

Returns
-------
decorator : function

Examples
--------
Without any arguments, will cache using a temporary HDF5 file::

    >>> import allel
    >>> @allel.util.hdf5_cache()
    ... def foo(n):
    ...     print('executing foo')
    ...     return np.arange(n)
    ...
    >>> foo(3)
    executing foo
    array([0, 1, 2])
    >>> foo(3)
    array([0, 1, 2])
    >>> foo.cache_filepath # doctest: +SKIP
    '/tmp/tmp_jwtwgjz'

Supports multiple return values, including scalars, e.g.::

    >>> @allel.util.hdf5_cache()
    ... def bar(n):
    ...     print('executing bar')
    ...     a = np.arange(n)
    ...     return a, a**2, n**2
    ...
    >>> bar(3)
    executing bar
    (array([0, 1, 2]), array([0, 1, 4]), 9)
    >>> bar(3)
    (array([0, 1, 2]), array([0, 1, 4]), 9)

Names can also be specified for the datasets, e.g.::

    >>> @allel.util.hdf5_cache(names=['z', 'x', 'y'])
    ... def baz(n):
    ...     print('executing baz')
    ...     a = np.arange(n)
    ...     return a, a**2, n**2
    ...
    >>> baz(3)
    executing baz
    (array([0, 1, 2]), array([0, 1, 4]), 9)
    >>> baz(3)
    (array([0, 1, 2]), array([0, 1, 4]), 9)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/util.py#L283-L401
cggh/scikit-allel
allel/stats/decomposition.py
pca
def pca(gn, n_components=10, copy=True, scaler='patterson', ploidy=2): """Perform principal components analysis of genotype data, via singular value decomposition. Parameters ---------- gn : array_like, float, shape (n_variants, n_samples) Genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). n_components : int, optional Number of components to keep. copy : bool, optional If False, data passed to fit are overwritten. scaler : {'patterson', 'standard', None} Scaling method; 'patterson' applies the method of Patterson et al 2006; 'standard' scales to unit variance; None centers the data only. ploidy : int, optional Sample ploidy, only relevant if 'patterson' scaler is used. Returns ------- coords : ndarray, float, shape (n_samples, n_components) Transformed coordinates for the samples. model : GenotypePCA Model instance containing the variance ratio explained and the stored components (a.k.a., loadings). Can be used to project further data into the same principal components space via the transform() method. Notes ----- Genotype data should be filtered prior to using this function to remove variants in linkage disequilibrium. See Also -------- randomized_pca, allel.stats.ld.locate_unlinked """ # set up the model model = GenotypePCA(n_components, copy=copy, scaler=scaler, ploidy=ploidy) # fit the model and project the input data onto the new dimensions coords = model.fit_transform(gn) return coords, model
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/decomposition.py#L11-L60
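A minimal usage sketch for the pca function above, not taken from the source; it assumes genotype calls held in an allel.GenotypeArray, whose to_n_alt() method yields the (n_variants, n_samples) matrix of alternate allele counts that pca expects. The simulated data and variable names are illustrative only.

import numpy as np
import allel

# simulate a small diploid dataset: 1000 variants x 20 samples
rng = np.random.default_rng(42)
g = allel.GenotypeArray(rng.integers(0, 2, size=(1000, 20, 2)))

# recode genotype calls as the number of alternate alleles per call
gn = g.to_n_alt()

# project samples onto the first two principal components
coords, model = allel.pca(gn, n_components=2, scaler='patterson')
print(coords.shape)                     # (20, 2)
print(model.explained_variance_ratio_)  # variance explained per component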
cggh/scikit-allel
allel/stats/decomposition.py
randomized_pca
def randomized_pca(gn, n_components=10, copy=True, iterated_power=3,
                   random_state=None, scaler='patterson', ploidy=2):
    """Perform principal components analysis of genotype data, via an
    approximate truncated singular value decomposition using randomization
    to speed up the computation.

    Parameters
    ----------
    gn : array_like, float, shape (n_variants, n_samples)
        Genotypes at biallelic variants, coded as the number of alternate
        alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt).
    n_components : int, optional
        Number of components to keep.
    copy : bool, optional
        If False, data passed to fit are overwritten.
    iterated_power : int, optional
        Number of iterations for the power method.
    random_state : int or RandomState instance or None (default)
        Pseudo Random Number generator seed control. If None, use the
        numpy.random singleton.
    scaler : {'patterson', 'standard', None}
        Scaling method; 'patterson' applies the method of Patterson et al
        2006; 'standard' scales to unit variance; None centers the data only.
    ploidy : int, optional
        Sample ploidy, only relevant if 'patterson' scaler is used.

    Returns
    -------
    coords : ndarray, float, shape (n_samples, n_components)
        Transformed coordinates for the samples.
    model : GenotypeRandomizedPCA
        Model instance containing the variance ratio explained and the stored
        components (a.k.a., loadings). Can be used to project further data
        into the same principal components space via the transform() method.

    Notes
    -----
    Genotype data should be filtered prior to using this function to remove
    variants in linkage disequilibrium.

    Based on the :class:`sklearn.decomposition.RandomizedPCA` implementation.

    See Also
    --------
    pca, allel.stats.ld.locate_unlinked

    """

    # set up the model
    model = GenotypeRandomizedPCA(n_components, copy=copy,
                                  iterated_power=iterated_power,
                                  random_state=random_state, scaler=scaler,
                                  ploidy=ploidy)

    # fit the model and project the input data onto the new dimensions
    coords = model.fit_transform(gn)

    return coords, model
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/decomposition.py#L126-L187
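The randomized variant is called the same way; a sketch, reusing the gn matrix from the previous example and fixing the seed for reproducibility. Note this path relies on sklearn's RandomizedPCA, which has been removed from recent scikit-learn releases, so it may only work with older scikit-learn versions.

coords, model = allel.randomized_pca(gn, n_components=2, iterated_power=5,
                                     random_state=42, scaler='patterson')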
cggh/scikit-allel
allel/stats/admixture.py
h_hat
def h_hat(ac):
    """Unbiased estimator for h, where 2*h is the heterozygosity
    of the population.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, 2)
        Allele counts array for a single population.

    Returns
    -------
    h_hat : ndarray, float, shape (n_variants,)

    Notes
    -----
    Used in Patterson (2012) for calculation of various statistics.

    """

    # check inputs
    ac = asarray_ndim(ac, 2)
    assert ac.shape[1] == 2, 'only biallelic variants supported'

    # compute allele number
    an = ac.sum(axis=1)

    # compute estimator
    x = (ac[:, 0] * ac[:, 1]) / (an * (an - 1))

    return x
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/admixture.py#L14-L43
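h_hat looks like a module-level helper rather than a documented top-level export, so this sketch imports it directly from allel.stats.admixture (path taken from the record above); the tiny genotype array is made up for illustration.

import allel
from allel.stats.admixture import h_hat

g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
                         [[0, 1], [0, 1], [0, 0]]])
ac = g.count_alleles(max_allele=1)  # biallelic counts, shape (n_variants, 2)
print(h_hat(ac))  # e.g. first variant: 3*3 / (6*5) = 0.3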
cggh/scikit-allel
allel/stats/admixture.py
patterson_f2
def patterson_f2(aca, acb):
    """Unbiased estimator for F2(A, B), the branch length between populations
    A and B.

    Parameters
    ----------
    aca : array_like, int, shape (n_variants, 2)
        Allele counts for population A.
    acb : array_like, int, shape (n_variants, 2)
        Allele counts for population B.

    Returns
    -------
    f2 : ndarray, float, shape (n_variants,)

    Notes
    -----
    See Patterson (2012), Appendix A.

    """

    # check inputs
    aca = AlleleCountsArray(aca, copy=False)
    assert aca.shape[1] == 2, 'only biallelic variants supported'
    acb = AlleleCountsArray(acb, copy=False)
    assert acb.shape[1] == 2, 'only biallelic variants supported'
    check_dim0_aligned(aca, acb)

    # compute allele numbers
    sa = aca.sum(axis=1)
    sb = acb.sum(axis=1)

    # compute heterozygosities
    ha = h_hat(aca)
    hb = h_hat(acb)

    # compute sample frequencies for the alternate allele
    a = aca.to_frequencies()[:, 1]
    b = acb.to_frequencies()[:, 1]

    # compute estimator
    x = ((a - b) ** 2) - (ha / sa) - (hb / sb)

    return x
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/admixture.py#L46-L89
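A sketch of per-variant f2 estimates, assuming patterson_f2 is re-exported at the allel top level (it lives in allel/stats/admixture.py) and using simulated allele counts:

import numpy as np
import allel

rng = np.random.default_rng(1)
ga = allel.GenotypeArray(rng.integers(0, 2, size=(1000, 20, 2)))
gb = allel.GenotypeArray(rng.integers(0, 2, size=(1000, 20, 2)))
aca = ga.count_alleles(max_allele=1)
acb = gb.count_alleles(max_allele=1)

f2 = allel.patterson_f2(aca, acb)  # one estimate per variant
print(np.nanmean(f2))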
cggh/scikit-allel
allel/stats/admixture.py
patterson_f3
def patterson_f3(acc, aca, acb):
    """Unbiased estimator for F3(C; A, B), the three-population test for
    admixture in population C.

    Parameters
    ----------
    acc : array_like, int, shape (n_variants, 2)
        Allele counts for the test population (C).
    aca : array_like, int, shape (n_variants, 2)
        Allele counts for the first source population (A).
    acb : array_like, int, shape (n_variants, 2)
        Allele counts for the second source population (B).

    Returns
    -------
    T : ndarray, float, shape (n_variants,)
        Un-normalized f3 estimates per variant.
    B : ndarray, float, shape (n_variants,)
        Estimates for heterozygosity in population C.

    Notes
    -----
    See Patterson (2012), main text and Appendix A.

    For un-normalized f3 statistics, ignore the `B` return value.

    To compute the f3* statistic, which is normalized by heterozygosity in
    population C to remove numerical dependence on the allele frequency
    spectrum, compute ``np.sum(T) / np.sum(B)``.

    """

    # check inputs
    aca = AlleleCountsArray(aca, copy=False)
    assert aca.shape[1] == 2, 'only biallelic variants supported'
    acb = AlleleCountsArray(acb, copy=False)
    assert acb.shape[1] == 2, 'only biallelic variants supported'
    acc = AlleleCountsArray(acc, copy=False)
    assert acc.shape[1] == 2, 'only biallelic variants supported'
    check_dim0_aligned(aca, acb, acc)

    # compute allele number and heterozygosity in test population
    sc = acc.sum(axis=1)
    hc = h_hat(acc)

    # compute sample frequencies for the alternate allele
    a = aca.to_frequencies()[:, 1]
    b = acb.to_frequencies()[:, 1]
    c = acc.to_frequencies()[:, 1]

    # compute estimator
    T = ((c - a) * (c - b)) - (hc / sc)
    B = 2 * hc

    return T, B
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/admixture.py#L93-L147
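Following the note in the docstring, the normalized f3* statistic is the ratio of summed numerators to summed denominators; a sketch, assuming allele counts acc, aca, acb prepared as in the previous example (and allel.patterson_f3 as the top-level name):

T, B = allel.patterson_f3(acc, aca, acb)
f3_star = np.nansum(T) / np.nansum(B)  # nansum guards against missing data
print(f3_star)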
cggh/scikit-allel
allel/stats/admixture.py
patterson_d
def patterson_d(aca, acb, acc, acd):
    """Unbiased estimator for D(A, B; C, D), the normalised four-population
    test for admixture between (A or B) and (C or D), also known as the
    "ABBA BABA" test.

    Parameters
    ----------
    aca : array_like, int, shape (n_variants, 2)
        Allele counts for population A.
    acb : array_like, int, shape (n_variants, 2)
        Allele counts for population B.
    acc : array_like, int, shape (n_variants, 2)
        Allele counts for population C.
    acd : array_like, int, shape (n_variants, 2)
        Allele counts for population D.

    Returns
    -------
    num : ndarray, float, shape (n_variants,)
        Numerator (un-normalised f4 estimates).
    den : ndarray, float, shape (n_variants,)
        Denominator.

    Notes
    -----
    See Patterson (2012), main text and Appendix A.

    For un-normalized f4 statistics, ignore the `den` return value.

    """

    # check inputs
    aca = AlleleCountsArray(aca, copy=False)
    assert aca.shape[1] == 2, 'only biallelic variants supported'
    acb = AlleleCountsArray(acb, copy=False)
    assert acb.shape[1] == 2, 'only biallelic variants supported'
    acc = AlleleCountsArray(acc, copy=False)
    assert acc.shape[1] == 2, 'only biallelic variants supported'
    acd = AlleleCountsArray(acd, copy=False)
    assert acd.shape[1] == 2, 'only biallelic variants supported'
    check_dim0_aligned(aca, acb, acc, acd)

    # compute sample frequencies for the alternate allele
    a = aca.to_frequencies()[:, 1]
    b = acb.to_frequencies()[:, 1]
    c = acc.to_frequencies()[:, 1]
    d = acd.to_frequencies()[:, 1]

    # compute estimator
    num = (a - b) * (c - d)
    den = (a + b - (2 * a * b)) * (c + d - (2 * c * d))

    return num, den
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/admixture.py#L150-L202
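The genome-wide D statistic follows the same ratio-of-sums pattern; a sketch, assuming four allele counts arrays aca, acb, acc, acd aligned on variants:

num, den = allel.patterson_d(aca, acb, acc, acd)
d = np.nansum(num) / np.nansum(den)
print(d)  # expected ~0 under the null of no admixture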
cggh/scikit-allel
allel/stats/admixture.py
moving_patterson_f3
def moving_patterson_f3(acc, aca, acb, size, start=0, stop=None, step=None,
                        normed=True):
    """Estimate F3(C; A, B) in moving windows.

    Parameters
    ----------
    acc : array_like, int, shape (n_variants, 2)
        Allele counts for the test population (C).
    aca : array_like, int, shape (n_variants, 2)
        Allele counts for the first source population (A).
    acb : array_like, int, shape (n_variants, 2)
        Allele counts for the second source population (B).
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.
    normed : bool, optional
        If False, use un-normalised f3 values.

    Returns
    -------
    f3 : ndarray, float, shape (n_windows,)
        Estimated value of the statistic in each window.

    """

    # calculate per-variant values
    T, B = patterson_f3(acc, aca, acb)

    # calculate value of statistic within each block
    if normed:
        T_bsum = moving_statistic(T, statistic=np.nansum, size=size,
                                  start=start, stop=stop, step=step)
        B_bsum = moving_statistic(B, statistic=np.nansum, size=size,
                                  start=start, stop=stop, step=step)
        f3 = T_bsum / B_bsum
    else:
        f3 = moving_statistic(T, statistic=np.nanmean, size=size,
                              start=start, stop=stop, step=step)

    return f3
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/admixture.py#L206-L252
cggh/scikit-allel
allel/stats/admixture.py
moving_patterson_d
def moving_patterson_d(aca, acb, acc, acd, size, start=0, stop=None,
                       step=None):
    """Estimate D(A, B; C, D) in moving windows.

    Parameters
    ----------
    aca : array_like, int, shape (n_variants, 2)
        Allele counts for population A.
    acb : array_like, int, shape (n_variants, 2)
        Allele counts for population B.
    acc : array_like, int, shape (n_variants, 2)
        Allele counts for population C.
    acd : array_like, int, shape (n_variants, 2)
        Allele counts for population D.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.

    Returns
    -------
    d : ndarray, float, shape (n_windows,)
        Estimated value of the statistic in each window.

    """

    # calculate per-variant values
    num, den = patterson_d(aca, acb, acc, acd)

    # N.B., nans can occur if any of the populations have completely missing
    # genotype calls at a variant (i.e., allele number is zero). Here we
    # assume that is rare enough to be negligible.

    # compute the numerator and denominator within each window
    num_sum = moving_statistic(num, statistic=np.nansum, size=size,
                               start=start, stop=stop, step=step)
    den_sum = moving_statistic(den, statistic=np.nansum, size=size,
                               start=start, stop=stop, step=step)

    # calculate the statistic values in each block
    d = num_sum / den_sum

    return d
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/admixture.py#L255-L302
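A sketch of windowed D values over non-overlapping 100-variant windows, with the same four allele counts arrays as above:

d_windows = allel.moving_patterson_d(aca, acb, acc, acd, size=100)
print(d_windows.shape)  # one value per window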
cggh/scikit-allel
allel/stats/admixture.py
average_patterson_f3
def average_patterson_f3(acc, aca, acb, blen, normed=True):
    """Estimate F3(C; A, B) and standard error using the block-jackknife.

    Parameters
    ----------
    acc : array_like, int, shape (n_variants, 2)
        Allele counts for the test population (C).
    aca : array_like, int, shape (n_variants, 2)
        Allele counts for the first source population (A).
    acb : array_like, int, shape (n_variants, 2)
        Allele counts for the second source population (B).
    blen : int
        Block size (number of variants).
    normed : bool, optional
        If False, use un-normalised f3 values.

    Returns
    -------
    f3 : float
        Estimated value of the statistic using all data.
    se : float
        Estimated standard error.
    z : float
        Z-score (number of standard errors from zero).
    vb : ndarray, float, shape (n_blocks,)
        Value of the statistic in each block.
    vj : ndarray, float, shape (n_blocks,)
        Values of the statistic from block-jackknife resampling.

    Notes
    -----
    See Patterson (2012), main text and Appendix A.

    See Also
    --------
    allel.stats.admixture.patterson_f3

    """

    # calculate per-variant values
    T, B = patterson_f3(acc, aca, acb)

    # N.B., nans can occur if any of the populations have completely missing
    # genotype calls at a variant (i.e., allele number is zero). Here we
    # assume that is rare enough to be negligible.

    # calculate overall value of statistic
    if normed:
        f3 = np.nansum(T) / np.nansum(B)
    else:
        f3 = np.nanmean(T)

    # calculate value of statistic within each block
    if normed:
        T_bsum = moving_statistic(T, statistic=np.nansum, size=blen)
        B_bsum = moving_statistic(B, statistic=np.nansum, size=blen)
        vb = T_bsum / B_bsum
        _, se, vj = jackknife((T_bsum, B_bsum),
                              statistic=lambda t, b: np.sum(t) / np.sum(b))
    else:
        vb = moving_statistic(T, statistic=np.nanmean, size=blen)
        _, se, vj = jackknife(vb, statistic=np.mean)

    # compute Z score
    z = f3 / se

    return f3, se, z, vb, vj
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/admixture.py#L306-L373
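A sketch of the block-jackknife wrapper, assuming the top-level name allel.average_patterson_f3 and 100-variant blocks; a strongly negative z would conventionally be read as evidence of admixture in C:

f3, se, z, vb, vj = allel.average_patterson_f3(acc, aca, acb, blen=100)
print(f3, se, z)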
cggh/scikit-allel
allel/stats/admixture.py
average_patterson_d
def average_patterson_d(aca, acb, acc, acd, blen):
    """Estimate D(A, B; C, D) and standard error using the block-jackknife.

    Parameters
    ----------
    aca : array_like, int, shape (n_variants, 2)
        Allele counts for population A.
    acb : array_like, int, shape (n_variants, 2)
        Allele counts for population B.
    acc : array_like, int, shape (n_variants, 2)
        Allele counts for population C.
    acd : array_like, int, shape (n_variants, 2)
        Allele counts for population D.
    blen : int
        Block size (number of variants).

    Returns
    -------
    d : float
        Estimated value of the statistic using all data.
    se : float
        Estimated standard error.
    z : float
        Z-score (number of standard errors from zero).
    vb : ndarray, float, shape (n_blocks,)
        Value of the statistic in each block.
    vj : ndarray, float, shape (n_blocks,)
        Values of the statistic from block-jackknife resampling.

    Notes
    -----
    See Patterson (2012), main text and Appendix A.

    See Also
    --------
    allel.stats.admixture.patterson_d

    """

    # calculate per-variant values
    num, den = patterson_d(aca, acb, acc, acd)

    # N.B., nans can occur if any of the populations have completely missing
    # genotype calls at a variant (i.e., allele number is zero). Here we
    # assume that is rare enough to be negligible.

    # calculate overall estimate
    d_avg = np.nansum(num) / np.nansum(den)

    # compute the numerator and denominator within each block
    num_bsum = moving_statistic(num, statistic=np.nansum, size=blen)
    den_bsum = moving_statistic(den, statistic=np.nansum, size=blen)

    # calculate the statistic values in each block
    vb = num_bsum / den_bsum

    # estimate standard error
    _, se, vj = jackknife((num_bsum, den_bsum),
                          statistic=lambda n, d: np.sum(n) / np.sum(d))

    # compute Z score
    z = d_avg / se

    return d_avg, se, z, vb, vj
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/admixture.py#L376-L439
cggh/scikit-allel
allel/model/dask.py
get_chunks
def get_chunks(data, chunks=None):
    """Try to guess a reasonable chunk shape to use for block-wise
    algorithms operating over `data`."""

    if chunks is None:

        if hasattr(data, 'chunklen') and hasattr(data, 'shape'):
            # bcolz carray, chunk first dimension only
            return (data.chunklen,) + data.shape[1:]

        elif hasattr(data, 'chunks') and hasattr(data, 'shape') and \
                len(data.chunks) == len(data.shape):
            # h5py dataset or zarr array
            return data.chunks

        else:
            # fall back to something simple, ~4Mb chunks of first dimension
            row = np.asarray(data[0])
            chunklen = max(1, (2**22) // row.nbytes)
            if row.shape:
                chunks = (chunklen,) + row.shape
            else:
                chunks = (chunklen,)
            return chunks

    else:

        return chunks
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/dask.py#L47-L74
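get_chunks appears to be an internal helper; a sketch of its fallback branch, importing from the module path in the record above and passing a plain ndarray (which has neither a chunklen nor a chunks attribute):

import numpy as np
from allel.model.dask import get_chunks

data = np.zeros((100000, 100), dtype='i1')
print(get_chunks(data))  # -> (41943, 100), i.e. ~4MB chunks along dim 0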
cggh/scikit-allel
allel/io/gff.py
iter_gff3
def iter_gff3(path, attributes=None, region=None, score_fill=-1,
              phase_fill=-1, attributes_fill='.', tabix='tabix'):
    """Iterate over records in a GFF3 file.

    Parameters
    ----------
    path : string
        Path to input file.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position sorted,
        bgzipped and tabix indexed. Tabix must also be installed and on the
        system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    tabix : string
        Tabix command.

    Returns
    -------
    Iterator

    """

    # prepare fill values for attributes
    if attributes is not None:
        attributes = list(attributes)
        if isinstance(attributes_fill, (list, tuple)):
            if len(attributes) != len(attributes_fill):
                raise ValueError('number of fills does not match attributes')
        else:
            attributes_fill = [attributes_fill] * len(attributes)

    # open input stream
    if region is not None:
        cmd = [tabix, path, region]
        buffer = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
    elif path.endswith('.gz') or path.endswith('.bgz'):
        buffer = gzip.open(path, mode='rb')
    else:
        buffer = open(path, mode='rb')

    try:
        for line in buffer:
            # N.B., use startswith() rather than indexing, because under
            # Python 3 indexing a bytes object yields an int, not bytes,
            # so a comparison like line[0] == b'>' would always be False
            if line.startswith(b'>'):
                # assume begin embedded FASTA
                return
            if line.startswith(b'#'):
                # skip comment lines
                continue
            vals = line.split(b'\t')
            if len(vals) == 9:
                # unpack for processing
                fseqid, fsource, ftype, fstart, fend, fscore, fstrand, \
                    fphase, fattrs = vals
                # convert numerics
                fstart = int(fstart)
                fend = int(fend)
                if fscore == b'.':
                    fscore = score_fill
                else:
                    fscore = float(fscore)
                if fphase == b'.':
                    fphase = phase_fill
                else:
                    fphase = int(fphase)
                if not PY2:
                    fseqid = str(fseqid, 'ascii')
                    fsource = str(fsource, 'ascii')
                    ftype = str(ftype, 'ascii')
                    fstrand = str(fstrand, 'ascii')
                    fattrs = str(fattrs, 'ascii')
                rec = (fseqid, fsource, ftype, fstart, fend, fscore,
                       fstrand, fphase)
                if attributes is not None:
                    dattrs = gff3_parse_attributes(fattrs)
                    vattrs = tuple(
                        dattrs.get(k, f)
                        for k, f in zip(attributes, attributes_fill)
                    )
                    rec += vattrs
                yield rec
    finally:
        buffer.close()
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/gff.py#L31-L118
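A sketch of streaming records from a hypothetical bgzipped annotation file (the path and attribute names are illustrative; allel.iter_gff3 is assumed to be the public name):

import allel

it = allel.iter_gff3('data/annotations.gff3.gz', attributes=['ID', 'Parent'])
for seqid, source, ftype, start, end, score, strand, phase, fid, parent in it:
    if ftype == 'gene':
        print(fid, seqid, start, end)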
cggh/scikit-allel
allel/io/gff.py
gff3_to_recarray
def gff3_to_recarray(path, attributes=None, region=None, score_fill=-1,
                     phase_fill=-1, attributes_fill='.', tabix='tabix',
                     dtype=None):
    """Load data from a GFF3 into a NumPy recarray.

    Parameters
    ----------
    path : string
        Path to input file.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position sorted,
        bgzipped and tabix indexed. Tabix must also be installed and on the
        system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    tabix : string, optional
        Tabix command.
    dtype : dtype, optional
        Override dtype.

    Returns
    -------
    np.recarray

    """

    # read records
    recs = list(iter_gff3(path, attributes=attributes, region=region,
                          score_fill=score_fill, phase_fill=phase_fill,
                          attributes_fill=attributes_fill, tabix=tabix))

    if not recs:
        return None

    # determine dtype
    if dtype is None:
        dtype = [('seqid', object),
                 ('source', object),
                 ('type', object),
                 ('start', int),
                 ('end', int),
                 ('score', float),
                 ('strand', object),
                 ('phase', int)]
        if attributes:
            for n in attributes:
                dtype.append((n, object))

    a = np.rec.fromrecords(recs, dtype=dtype)
    return a
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/gff.py#L124-L178
cggh/scikit-allel
allel/io/gff.py
gff3_to_dataframe
def gff3_to_dataframe(path, attributes=None, region=None, score_fill=-1,
                      phase_fill=-1, attributes_fill='.', tabix='tabix',
                      **kwargs):
    """Load data from a GFF3 into a pandas DataFrame.

    Parameters
    ----------
    path : string
        Path to input file.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position sorted,
        bgzipped and tabix indexed. Tabix must also be installed and on the
        system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    tabix : string, optional
        Tabix command.

    Returns
    -------
    pandas.DataFrame

    """

    import pandas

    # read records
    recs = list(iter_gff3(path, attributes=attributes, region=region,
                          score_fill=score_fill, phase_fill=phase_fill,
                          attributes_fill=attributes_fill, tabix=tabix))

    # load into pandas
    columns = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand',
               'phase']
    if attributes:
        columns += list(attributes)
    df = pandas.DataFrame.from_records(recs, columns=columns, **kwargs)
    return df
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/gff.py#L181-L223
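A sketch loading the same hypothetical file into pandas; extra keyword arguments are forwarded to pandas.DataFrame.from_records:

df = allel.gff3_to_dataframe('data/annotations.gff3.gz',
                             attributes=['ID', 'Parent'],
                             attributes_fill='')
genes = df[df['type'] == 'gene']
print(genes[['seqid', 'start', 'end', 'ID']].head())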
cggh/scikit-allel
allel/stats/selection.py
ehh_decay
def ehh_decay(h, truncate=False):
    """Compute the decay of extended haplotype homozygosity (EHH)
    moving away from the first variant.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    truncate : bool, optional
        If True, the return array will exclude trailing zeros.

    Returns
    -------
    ehh : ndarray, float, shape (n_variants,)
        EHH at successive variants from the first variant.

    """

    # check inputs
    # N.B., ensure int8 so we can use cython optimisation
    h = HaplotypeArray(np.asarray(h), copy=False)
    if h.min() < 0:
        raise NotImplementedError('missing calls are not supported')

    # initialise
    n_variants = h.n_variants  # number of rows, i.e., variants
    n_haplotypes = h.n_haplotypes  # number of columns, i.e., haplotypes
    n_pairs = (n_haplotypes * (n_haplotypes - 1)) // 2

    # compute the shared prefix length between all pairs of haplotypes
    spl = pairwise_shared_prefix_lengths(memoryview_safe(np.asarray(h)))

    # compute EHH by counting the number of shared prefixes extending beyond
    # each variant
    minlength = None if truncate else n_variants + 1
    b = np.bincount(spl, minlength=minlength)
    c = np.cumsum(b[::-1])[:-1]
    ehh = (c / n_pairs)[::-1]

    return ehh
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L20-L59
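A toy sketch on a hand-written haplotype array; int8 dtype is used because, per the comment in the code, the function relies on a cython routine:

import allel

h = allel.HaplotypeArray([[0, 0, 0, 1],
                          [0, 0, 1, 1],
                          [0, 1, 1, 1]], dtype='i1')
print(allel.ehh_decay(h))  # EHH at each variant moving away from the first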
cggh/scikit-allel
allel/stats/selection.py
voight_painting
def voight_painting(h):
    """Paint haplotypes, assigning a unique integer to each shared haplotype
    prefix.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.

    Returns
    -------
    painting : ndarray, int, shape (n_variants, n_haplotypes)
        Painting array.
    indices : ndarray, int, shape (n_haplotypes,)
        Haplotype indices after sorting by prefix.

    """

    # check inputs
    # N.B., ensure int8 so we can use cython optimisation
    h = HaplotypeArray(np.asarray(h), copy=False)
    if h.max() > 1:
        raise NotImplementedError('only biallelic variants are supported')
    if h.min() < 0:
        raise NotImplementedError('missing calls are not supported')

    # sort by prefix
    indices = h.prefix_argsort()
    h = np.take(h, indices, axis=1)

    # paint
    painting = paint_shared_prefixes(memoryview_safe(np.asarray(h)))

    return painting, indices
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L62-L95
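A small sketch of voight_painting on a toy biallelic array, again assuming top-level exposure in the allel namespace; the exact integers in the painting depend on the internal prefix numbering, so only the shape is asserted here:

import numpy as np
import allel

h = np.array([[0, 0, 0, 1, 1],
              [0, 1, 1, 0, 1],
              [1, 1, 0, 0, 0]], dtype='i1')

painting, indices = allel.voight_painting(h)
assert painting.shape == h.shape  # one integer per (variant, haplotype)
print(painting)   # distinct positive integers label shared prefix groups
print(indices)    # column order after sorting haplotypes by prefix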
cggh/scikit-allel
allel/stats/selection.py
plot_voight_painting
def plot_voight_painting(painting, palette='colorblind', flank='right', ax=None, height_factor=0.01): """Plot a painting of shared haplotype prefixes. Parameters ---------- painting : array_like, int, shape (n_variants, n_haplotypes) Painting array. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. palette : string, optional A Seaborn palette name. flank : {'right', 'left'}, optional If left, painting will be reversed along first axis. height_factor : float, optional If no axes provided, determine height of figure by multiplying height of painting array by this number. Returns ------- ax : axes """ import seaborn as sns from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt if flank == 'left': painting = painting[::-1] n_colors = painting.max() palette = sns.color_palette(palette, n_colors) # use white for singleton haplotypes cmap = ListedColormap(['white'] + palette) # setup axes if ax is None: w = plt.rcParams['figure.figsize'][0] h = height_factor*painting.shape[1] fig, ax = plt.subplots(figsize=(w, h)) sns.despine(ax=ax, bottom=True, left=True) ax.pcolormesh(painting.T, cmap=cmap) ax.set_xticks([]) ax.set_yticks([]) ax.set_xlim(0, painting.shape[0]) ax.set_ylim(0, painting.shape[1]) return ax
python
def plot_voight_painting(painting, palette='colorblind', flank='right', ax=None, height_factor=0.01): """Plot a painting of shared haplotype prefixes. Parameters ---------- painting : array_like, int, shape (n_variants, n_haplotypes) Painting array. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. palette : string, optional A Seaborn palette name. flank : {'right', 'left'}, optional If left, painting will be reversed along first axis. height_factor : float, optional If no axes provided, determine height of figure by multiplying height of painting array by this number. Returns ------- ax : axes """ import seaborn as sns from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt if flank == 'left': painting = painting[::-1] n_colors = painting.max() palette = sns.color_palette(palette, n_colors) # use white for singleton haplotypes cmap = ListedColormap(['white'] + palette) # setup axes if ax is None: w = plt.rcParams['figure.figsize'][0] h = height_factor*painting.shape[1] fig, ax = plt.subplots(figsize=(w, h)) sns.despine(ax=ax, bottom=True, left=True) ax.pcolormesh(painting.T, cmap=cmap) ax.set_xticks([]) ax.set_yticks([]) ax.set_xlim(0, painting.shape[0]) ax.set_ylim(0, painting.shape[1]) return ax
Plot a painting of shared haplotype prefixes. Parameters ---------- painting : array_like, int, shape (n_variants, n_haplotypes) Painting array. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. palette : string, optional A Seaborn palette name. flank : {'right', 'left'}, optional If left, painting will be reversed along first axis. height_factor : float, optional If no axes provided, determine height of figure by multiplying height of painting array by this number. Returns ------- ax : axes
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L98-L148
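A brief plotting sketch with simulated data, purely illustrative; it assumes matplotlib and seaborn are installed and that plot_voight_painting is exposed at the top-level allel namespace:

import numpy as np
import allel

# simulate an unstructured haplotype array just to exercise the plot
h = np.random.RandomState(42).randint(0, 2, size=(100, 30)).astype('i1')
painting, _ = allel.voight_painting(h)
ax = allel.plot_voight_painting(painting, palette='colorblind', flank='right')
ax.figure.savefig('voight_painting.png', dpi=150)  # or plt.show() interactively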
cggh/scikit-allel
allel/stats/selection.py
fig_voight_painting
def fig_voight_painting(h, index=None, palette='colorblind', height_factor=0.01, fig=None): """Make a figure of shared haplotype prefixes for both left and right flanks, centred on some variant of choice. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. index : int, optional Index of the variant within the haplotype array to centre on. If not provided, the middle variant will be used. palette : string, optional A Seaborn palette name. height_factor : float, optional If no axes provided, determine height of figure by multiplying height of painting array by this number. fig : figure The figure on which to draw. If not provided, a new figure will be created. Returns ------- fig : figure Notes ----- N.B., the ordering of haplotypes on the left and right flanks will be different. This means that haplotypes on the right flank **will not** correspond to haplotypes on the left flank at the same vertical position. """ import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec import seaborn as sns # check inputs h = asarray_ndim(h, 2) if index is None: # use midpoint index = h.shape[0] // 2 # divide data into two flanks hl = h[:index+1][::-1] hr = h[index:] # paint both flanks pl, il = voight_painting(hl) pr, ir = voight_painting(hr) # compute ehh decay for both flanks el = ehh_decay(hl, truncate=False) er = ehh_decay(hr, truncate=False) # setup figure # fixed height for EHH decay subplot h_ehh = plt.rcParams['figure.figsize'][1] // 3 # add height for paintings h_painting = height_factor*h.shape[1] if fig is None: w = plt.rcParams['figure.figsize'][0] h = h_ehh + h_painting fig = plt.figure(figsize=(w, h)) # setup gridspec gs = GridSpec(2, 2, width_ratios=[hl.shape[0], hr.shape[0]], height_ratios=[h_painting, h_ehh]) # plot paintings ax = fig.add_subplot(gs[0, 0]) sns.despine(ax=ax, left=True, bottom=True) plot_voight_painting(pl, palette=palette, flank='left', ax=ax) ax = fig.add_subplot(gs[0, 1]) sns.despine(ax=ax, left=True, bottom=True) plot_voight_painting(pr, palette=palette, flank='right', ax=ax) # plot ehh ax = fig.add_subplot(gs[1, 0]) sns.despine(ax=ax, offset=3) x = np.arange(el.shape[0]) y = el ax.fill_between(x, 0, y) ax.set_ylim(0, 1) ax.set_yticks([0, 1]) ax.set_ylabel('EHH') ax.invert_xaxis() ax = fig.add_subplot(gs[1, 1]) sns.despine(ax=ax, left=True, right=False, offset=3) ax.yaxis.tick_right() ax.set_ylim(0, 1) ax.set_yticks([0, 1]) x = np.arange(er.shape[0]) y = er ax.fill_between(x, 0, y) # tidy up fig.tight_layout() return fig
python
def fig_voight_painting(h, index=None, palette='colorblind', height_factor=0.01, fig=None): """Make a figure of shared haplotype prefixes for both left and right flanks, centred on some variant of choice. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. index : int, optional Index of the variant within the haplotype array to centre on. If not provided, the middle variant will be used. palette : string, optional A Seaborn palette name. height_factor : float, optional If no axes provided, determine height of figure by multiplying height of painting array by this number. fig : figure The figure on which to draw. If not provided, a new figure will be created. Returns ------- fig : figure Notes ----- N.B., the ordering of haplotypes on the left and right flanks will be different. This means that haplotypes on the right flank **will not** correspond to haplotypes on the left flank at the same vertical position. """ import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec import seaborn as sns # check inputs h = asarray_ndim(h, 2) if index is None: # use midpoint index = h.shape[0] // 2 # divide data into two flanks hl = h[:index+1][::-1] hr = h[index:] # paint both flanks pl, il = voight_painting(hl) pr, ir = voight_painting(hr) # compute ehh decay for both flanks el = ehh_decay(hl, truncate=False) er = ehh_decay(hr, truncate=False) # setup figure # fixed height for EHH decay subplot h_ehh = plt.rcParams['figure.figsize'][1] // 3 # add height for paintings h_painting = height_factor*h.shape[1] if fig is None: w = plt.rcParams['figure.figsize'][0] h = h_ehh + h_painting fig = plt.figure(figsize=(w, h)) # setup gridspec gs = GridSpec(2, 2, width_ratios=[hl.shape[0], hr.shape[0]], height_ratios=[h_painting, h_ehh]) # plot paintings ax = fig.add_subplot(gs[0, 0]) sns.despine(ax=ax, left=True, bottom=True) plot_voight_painting(pl, palette=palette, flank='left', ax=ax) ax = fig.add_subplot(gs[0, 1]) sns.despine(ax=ax, left=True, bottom=True) plot_voight_painting(pr, palette=palette, flank='right', ax=ax) # plot ehh ax = fig.add_subplot(gs[1, 0]) sns.despine(ax=ax, offset=3) x = np.arange(el.shape[0]) y = el ax.fill_between(x, 0, y) ax.set_ylim(0, 1) ax.set_yticks([0, 1]) ax.set_ylabel('EHH') ax.invert_xaxis() ax = fig.add_subplot(gs[1, 1]) sns.despine(ax=ax, left=True, right=False, offset=3) ax.yaxis.tick_right() ax.set_ylim(0, 1) ax.set_yticks([0, 1]) x = np.arange(er.shape[0]) y = er ax.fill_between(x, 0, y) # tidy up fig.tight_layout() return fig
Make a figure of shared haplotype prefixes for both left and right flanks, centred on some variant of choice. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. index : int, optional Index of the variant within the haplotype array to centre on. If not provided, the middle variant will be used. palette : string, optional A Seaborn palette name. height_factor : float, optional If no axes provided, determine height of figure by multiplying height of painting array by this number. fig : figure The figure on which to draw. If not provided, a new figure will be created. Returns ------- fig : figure Notes ----- N.B., the ordering of haplotypes on the left and right flanks will be different. This means that haplotypes on the right flank **will not** correspond to haplotypes on the left flank at the same vertical position.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L151-L251
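A usage sketch for the combined two-flank figure, under the same assumptions as the previous example (matplotlib and seaborn installed, top-level allel exposure); data are simulated and purely illustrative:

import numpy as np
import allel

h = np.random.RandomState(1).randint(0, 2, size=(200, 40)).astype('i1')
fig = allel.fig_voight_painting(h, index=100)  # centre on variant 100
fig.savefig('fig_voight_painting.png', dpi=150)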
cggh/scikit-allel
allel/stats/selection.py
compute_ihh_gaps
def compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible): """Compute spacing between variants for integrating haplotype homozygosity. Parameters ---------- pos : array_like, int, shape (n_variants,) Variant positions (physical distance). map_pos : array_like, float, shape (n_variants,) Variant positions (genetic map distance). gap_scale : int, optional Rescale distance between variants if gap is larger than this value. max_gap : int, optional Do not report scores if EHH spans a gap larger than this number of base pairs. is_accessible : array_like, bool, optional Genome accessibility array. If provided, distance between variants will be computed as the number of accessible bases between them. Returns ------- gaps : ndarray, float, shape (n_variants - 1,) """ # check inputs if map_pos is None: # integrate over physical distance map_pos = pos else: map_pos = asarray_ndim(map_pos, 1) check_dim0_aligned(pos, map_pos) # compute physical gaps physical_gaps = np.diff(pos) # compute genetic gaps gaps = np.diff(map_pos).astype('f8') if is_accessible is not None: # compute accessible gaps is_accessible = asarray_ndim(is_accessible, 1) assert is_accessible.shape[0] > pos[-1], \ 'accessibility array too short' accessible_gaps = np.zeros_like(physical_gaps) for i in range(1, len(pos)): # N.B., expect pos is 1-based n_access = np.count_nonzero(is_accessible[pos[i-1]-1:pos[i]-1]) accessible_gaps[i-1] = n_access # adjust using accessibility scaling = accessible_gaps / physical_gaps gaps = gaps * scaling elif gap_scale is not None and gap_scale > 0: scaling = np.ones(gaps.shape, dtype='f8') loc_scale = physical_gaps > gap_scale scaling[loc_scale] = gap_scale / physical_gaps[loc_scale] gaps = gaps * scaling if max_gap is not None and max_gap > 0: # deal with very large gaps gaps[physical_gaps > max_gap] = -1 return gaps
python
def compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible): """Compute spacing between variants for integrating haplotype homozygosity. Parameters ---------- pos : array_like, int, shape (n_variants,) Variant positions (physical distance). map_pos : array_like, float, shape (n_variants,) Variant positions (genetic map distance). gap_scale : int, optional Rescale distance between variants if gap is larger than this value. max_gap : int, optional Do not report scores if EHH spans a gap larger than this number of base pairs. is_accessible : array_like, bool, optional Genome accessibility array. If provided, distance between variants will be computed as the number of accessible bases between them. Returns ------- gaps : ndarray, float, shape (n_variants - 1,) """ # check inputs if map_pos is None: # integrate over physical distance map_pos = pos else: map_pos = asarray_ndim(map_pos, 1) check_dim0_aligned(pos, map_pos) # compute physical gaps physical_gaps = np.diff(pos) # compute genetic gaps gaps = np.diff(map_pos).astype('f8') if is_accessible is not None: # compute accessible gaps is_accessible = asarray_ndim(is_accessible, 1) assert is_accessible.shape[0] > pos[-1], \ 'accessibility array too short' accessible_gaps = np.zeros_like(physical_gaps) for i in range(1, len(pos)): # N.B., expect pos is 1-based n_access = np.count_nonzero(is_accessible[pos[i-1]-1:pos[i]-1]) accessible_gaps[i-1] = n_access # adjust using accessibility scaling = accessible_gaps / physical_gaps gaps = gaps * scaling elif gap_scale is not None and gap_scale > 0: scaling = np.ones(gaps.shape, dtype='f8') loc_scale = physical_gaps > gap_scale scaling[loc_scale] = gap_scale / physical_gaps[loc_scale] gaps = gaps * scaling if max_gap is not None and max_gap > 0: # deal with very large gaps gaps[physical_gaps > max_gap] = -1 return gaps
Compute spacing between variants for integrating haplotype homozygosity. Parameters ---------- pos : array_like, int, shape (n_variants,) Variant positions (physical distance). map_pos : array_like, float, shape (n_variants,) Variant positions (genetic map distance). gap_scale : int, optional Rescale distance between variants if gap is larger than this value. max_gap : int, optional Do not report scores if EHH spans a gap larger than this number of base pairs. is_accessible : array_like, bool, optional Genome accessibility array. If provided, distance between variants will be computed as the number of accessible bases between them. Returns ------- gaps : ndarray, float, shape (n_variants - 1,)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L255-L322
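The scaling rules above can be checked on a toy example. A sketch importing from the module path given in this record; the expected values follow from the gap_scale and max_gap branches in the function body:

import numpy as np
from allel.stats.selection import compute_ihh_gaps

pos = np.array([100, 200, 100200, 400200])
gaps = compute_ihh_gaps(pos, map_pos=None, gap_scale=20000,
                        max_gap=200000, is_accessible=None)
# physical gaps are [100, 100000, 300000]; the 100 kb gap is rescaled by
# 20000/100000 to 20000; the 300 kb gap is also rescaled but exceeds
# max_gap, so it is flagged with -1: expect [100.0, 20000.0, -1.0]
print(gaps)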
cggh/scikit-allel
allel/stats/selection.py
ihs
def ihs(h, pos, map_pos=None, min_ehh=0.05, min_maf=0.05, include_edges=False,
        gap_scale=20000, max_gap=200000, is_accessible=None, use_threads=True):
    """Compute the unstandardized integrated haplotype score (IHS) for each
    variant, comparing integrated haplotype homozygosity between the
    reference (0) and alternate (1) alleles.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    pos : array_like, int, shape (n_variants,)
        Variant positions (physical distance).
    map_pos : array_like, float, shape (n_variants,)
        Variant positions (genetic map distance).
    min_ehh: float, optional
        Minimum EHH beyond which to truncate integrated haplotype
        homozygosity calculation.
    min_maf : float, optional
        Do not compute integrated haplotype homozygosity for variants with
        minor allele frequency below this value.
    include_edges : bool, optional
        If True, report scores even if EHH does not decay below `min_ehh`
        before reaching the edge of the data.
    gap_scale : int, optional
        Rescale distance between variants if gap is larger than this value.
    max_gap : int, optional
        Do not report scores if EHH spans a gap larger than this number of
        base pairs.
    is_accessible : array_like, bool, optional
        Genome accessibility array. If provided, distance between variants
        will be computed as the number of accessible bases between them.
    use_threads : bool, optional
        If True use multiple threads to compute.

    Returns
    -------
    score : ndarray, float, shape (n_variants,)
        Unstandardized IHS scores.

    Notes
    -----
    This function will calculate IHS for all variants. To exclude variants
    below a given minor allele frequency, filter the input haplotype array
    before passing to this function.

    This function computes IHS comparing the reference and alternate
    alleles. These can be polarised by switching the sign for any variant
    where the reference allele is derived.

    This function returns NaN for any IHS calculations where haplotype
    homozygosity does not decay below `min_ehh` before reaching the first
    or last variant. To disable this behaviour, set `include_edges` to
    True.

    Note that the unstandardized score is returned. Usually these scores
    are then standardized in different allele frequency bins.

    See Also
    --------
    standardize_by_allele_count

    """

    # check inputs
    h = asarray_ndim(h, 2)
    check_integer_dtype(h)
    pos = asarray_ndim(pos, 1)
    check_dim0_aligned(h, pos)
    h = memoryview_safe(h)
    pos = memoryview_safe(pos)

    # compute gaps between variants for integration
    gaps = compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible)

    # setup kwargs
    kwargs = dict(min_ehh=min_ehh, min_maf=min_maf, include_edges=include_edges)

    if use_threads and multiprocessing.cpu_count() > 1:
        # run with threads

        # create pool
        pool = ThreadPool(2)

        # scan forward
        result_fwd = pool.apply_async(ihh01_scan, (h, gaps), kwargs)

        # scan backward
        result_rev = pool.apply_async(ihh01_scan, (h[::-1], gaps[::-1]), kwargs)

        # wait for both to finish
        pool.close()
        pool.join()

        # obtain results
        ihh0_fwd, ihh1_fwd = result_fwd.get()
        ihh0_rev, ihh1_rev = result_rev.get()

        # cleanup
        pool.terminate()

    else:
        # run without threads

        # scan forward
        ihh0_fwd, ihh1_fwd = ihh01_scan(h, gaps, **kwargs)

        # scan backward
        ihh0_rev, ihh1_rev = ihh01_scan(h[::-1], gaps[::-1], **kwargs)

    # handle reverse scan
    ihh0_rev = ihh0_rev[::-1]
    ihh1_rev = ihh1_rev[::-1]

    # compute unstandardized score
    ihh0 = ihh0_fwd + ihh0_rev
    ihh1 = ihh1_fwd + ihh1_rev
    score = np.log(ihh1 / ihh0)

    return score
python
def ihs(h, pos, map_pos=None, min_ehh=0.05, min_maf=0.05, include_edges=False,
        gap_scale=20000, max_gap=200000, is_accessible=None, use_threads=True):
    """Compute the unstandardized integrated haplotype score (IHS) for each
    variant, comparing integrated haplotype homozygosity between the
    reference (0) and alternate (1) alleles.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    pos : array_like, int, shape (n_variants,)
        Variant positions (physical distance).
    map_pos : array_like, float, shape (n_variants,)
        Variant positions (genetic map distance).
    min_ehh: float, optional
        Minimum EHH beyond which to truncate integrated haplotype
        homozygosity calculation.
    min_maf : float, optional
        Do not compute integrated haplotype homozygosity for variants with
        minor allele frequency below this value.
    include_edges : bool, optional
        If True, report scores even if EHH does not decay below `min_ehh`
        before reaching the edge of the data.
    gap_scale : int, optional
        Rescale distance between variants if gap is larger than this value.
    max_gap : int, optional
        Do not report scores if EHH spans a gap larger than this number of
        base pairs.
    is_accessible : array_like, bool, optional
        Genome accessibility array. If provided, distance between variants
        will be computed as the number of accessible bases between them.
    use_threads : bool, optional
        If True use multiple threads to compute.

    Returns
    -------
    score : ndarray, float, shape (n_variants,)
        Unstandardized IHS scores.

    Notes
    -----
    This function will calculate IHS for all variants. To exclude variants
    below a given minor allele frequency, filter the input haplotype array
    before passing to this function.

    This function computes IHS comparing the reference and alternate
    alleles. These can be polarised by switching the sign for any variant
    where the reference allele is derived.

    This function returns NaN for any IHS calculations where haplotype
    homozygosity does not decay below `min_ehh` before reaching the first
    or last variant. To disable this behaviour, set `include_edges` to
    True.

    Note that the unstandardized score is returned. Usually these scores
    are then standardized in different allele frequency bins.

    See Also
    --------
    standardize_by_allele_count

    """

    # check inputs
    h = asarray_ndim(h, 2)
    check_integer_dtype(h)
    pos = asarray_ndim(pos, 1)
    check_dim0_aligned(h, pos)
    h = memoryview_safe(h)
    pos = memoryview_safe(pos)

    # compute gaps between variants for integration
    gaps = compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible)

    # setup kwargs
    kwargs = dict(min_ehh=min_ehh, min_maf=min_maf, include_edges=include_edges)

    if use_threads and multiprocessing.cpu_count() > 1:
        # run with threads

        # create pool
        pool = ThreadPool(2)

        # scan forward
        result_fwd = pool.apply_async(ihh01_scan, (h, gaps), kwargs)

        # scan backward
        result_rev = pool.apply_async(ihh01_scan, (h[::-1], gaps[::-1]), kwargs)

        # wait for both to finish
        pool.close()
        pool.join()

        # obtain results
        ihh0_fwd, ihh1_fwd = result_fwd.get()
        ihh0_rev, ihh1_rev = result_rev.get()

        # cleanup
        pool.terminate()

    else:
        # run without threads

        # scan forward
        ihh0_fwd, ihh1_fwd = ihh01_scan(h, gaps, **kwargs)

        # scan backward
        ihh0_rev, ihh1_rev = ihh01_scan(h[::-1], gaps[::-1], **kwargs)

    # handle reverse scan
    ihh0_rev = ihh0_rev[::-1]
    ihh1_rev = ihh1_rev[::-1]

    # compute unstandardized score
    ihh0 = ihh0_fwd + ihh0_rev
    ihh1 = ihh1_fwd + ihh1_rev
    score = np.log(ihh1 / ihh0)

    return score
Compute the unstandardized integrated haplotype score (IHS) for each
variant, comparing integrated haplotype homozygosity between the
reference (0) and alternate (1) alleles.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
pos : array_like, int, shape (n_variants,)
    Variant positions (physical distance).
map_pos : array_like, float, shape (n_variants,)
    Variant positions (genetic map distance).
min_ehh: float, optional
    Minimum EHH beyond which to truncate integrated haplotype
    homozygosity calculation.
min_maf : float, optional
    Do not compute integrated haplotype homozygosity for variants with
    minor allele frequency below this value.
include_edges : bool, optional
    If True, report scores even if EHH does not decay below `min_ehh`
    before reaching the edge of the data.
gap_scale : int, optional
    Rescale distance between variants if gap is larger than this value.
max_gap : int, optional
    Do not report scores if EHH spans a gap larger than this number of
    base pairs.
is_accessible : array_like, bool, optional
    Genome accessibility array. If provided, distance between variants
    will be computed as the number of accessible bases between them.
use_threads : bool, optional
    If True use multiple threads to compute.

Returns
-------
score : ndarray, float, shape (n_variants,)
    Unstandardized IHS scores.

Notes
-----
This function will calculate IHS for all variants. To exclude variants
below a given minor allele frequency, filter the input haplotype array
before passing to this function.

This function computes IHS comparing the reference and alternate alleles.
These can be polarised by switching the sign for any variant where the
reference allele is derived.

This function returns NaN for any IHS calculations where haplotype
homozygosity does not decay below `min_ehh` before reaching the first or
last variant. To disable this behaviour, set `include_edges` to True.

Note that the unstandardized score is returned. Usually these scores are
then standardized in different allele frequency bins.

See Also
--------
standardize_by_allele_count
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L325-L443
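A toy end-to-end sketch for ihs, assuming top-level allel exposure; with so few variants most scores would be NaN under the default edge handling, hence include_edges=True, and the values are illustrative only:

import numpy as np
import allel

h = np.array([[0, 0, 0, 1, 1, 1, 1, 1],
              [0, 0, 1, 0, 1, 1, 0, 1],
              [1, 0, 1, 1, 0, 1, 0, 0],
              [0, 1, 0, 1, 0, 1, 1, 0]], dtype='i1')
pos = np.array([100, 2000, 35000, 50000])  # ascending physical positions

score = allel.ihs(h, pos, include_edges=True, use_threads=False)
# unstandardized scores; standardize afterwards, e.g. with
# allel.standardize_by_allele_count as suggested in the docstring
print(score)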
cggh/scikit-allel
allel/stats/selection.py
xpehh
def xpehh(h1, h2, pos, map_pos=None, min_ehh=0.05, include_edges=False, gap_scale=20000, max_gap=200000, is_accessible=None, use_threads=True): """Compute the unstandardized cross-population extended haplotype homozygosity score (XPEHH) for each variant. Parameters ---------- h1 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the first population. h2 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the second population. pos : array_like, int, shape (n_variants,) Variant positions on physical or genetic map. map_pos : array_like, float, shape (n_variants,) Variant positions (genetic map distance). min_ehh: float, optional Minimum EHH beyond which to truncate integrated haplotype homozygosity calculation. include_edges : bool, optional If True, report scores even if EHH does not decay below `min_ehh` before reaching the edge of the data. gap_scale : int, optional Rescale distance between variants if gap is larger than this value. max_gap : int, optional Do not report scores if EHH spans a gap larger than this number of base pairs. is_accessible : array_like, bool, optional Genome accessibility array. If provided, distance between variants will be computed as the number of accessible bases between them. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Unstandardized XPEHH scores. Notes ----- This function will calculate XPEHH for all variants. To exclude variants below a given minor allele frequency, filter the input haplotype arrays before passing to this function. This function returns NaN for any EHH calculations where haplotype homozygosity does not decay below `min_ehh` before reaching the first or last variant. To disable this behaviour, set `include_edges` to True. Note that the unstandardized score is returned. Usually these scores are then standardized genome-wide. Haplotype arrays from the two populations may have different numbers of haplotypes. 
See Also -------- standardize """ # check inputs h1 = asarray_ndim(h1, 2) check_integer_dtype(h1) h2 = asarray_ndim(h2, 2) check_integer_dtype(h2) pos = asarray_ndim(pos, 1) check_dim0_aligned(h1, h2, pos) h1 = memoryview_safe(h1) h2 = memoryview_safe(h2) pos = memoryview_safe(pos) # compute gaps between variants for integration gaps = compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible) # setup kwargs kwargs = dict(min_ehh=min_ehh, include_edges=include_edges) if use_threads and multiprocessing.cpu_count() > 1: # use multiple threads # setup threadpool pool = ThreadPool(min(4, multiprocessing.cpu_count())) # scan forward res1_fwd = pool.apply_async(ihh_scan, (h1, gaps), kwargs) res2_fwd = pool.apply_async(ihh_scan, (h2, gaps), kwargs) # scan backward res1_rev = pool.apply_async(ihh_scan, (h1[::-1], gaps[::-1]), kwargs) res2_rev = pool.apply_async(ihh_scan, (h2[::-1], gaps[::-1]), kwargs) # wait for both to finish pool.close() pool.join() # obtain results ihh1_fwd = res1_fwd.get() ihh2_fwd = res2_fwd.get() ihh1_rev = res1_rev.get() ihh2_rev = res2_rev.get() # cleanup pool.terminate() else: # compute without threads # scan forward ihh1_fwd = ihh_scan(h1, gaps, **kwargs) ihh2_fwd = ihh_scan(h2, gaps, **kwargs) # scan backward ihh1_rev = ihh_scan(h1[::-1], gaps[::-1], **kwargs) ihh2_rev = ihh_scan(h2[::-1], gaps[::-1], **kwargs) # handle reverse scans ihh1_rev = ihh1_rev[::-1] ihh2_rev = ihh2_rev[::-1] # compute unstandardized score ihh1 = ihh1_fwd + ihh1_rev ihh2 = ihh2_fwd + ihh2_rev score = np.log(ihh1 / ihh2) return score
python
def xpehh(h1, h2, pos, map_pos=None, min_ehh=0.05, include_edges=False, gap_scale=20000, max_gap=200000, is_accessible=None, use_threads=True): """Compute the unstandardized cross-population extended haplotype homozygosity score (XPEHH) for each variant. Parameters ---------- h1 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the first population. h2 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the second population. pos : array_like, int, shape (n_variants,) Variant positions on physical or genetic map. map_pos : array_like, float, shape (n_variants,) Variant positions (genetic map distance). min_ehh: float, optional Minimum EHH beyond which to truncate integrated haplotype homozygosity calculation. include_edges : bool, optional If True, report scores even if EHH does not decay below `min_ehh` before reaching the edge of the data. gap_scale : int, optional Rescale distance between variants if gap is larger than this value. max_gap : int, optional Do not report scores if EHH spans a gap larger than this number of base pairs. is_accessible : array_like, bool, optional Genome accessibility array. If provided, distance between variants will be computed as the number of accessible bases between them. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Unstandardized XPEHH scores. Notes ----- This function will calculate XPEHH for all variants. To exclude variants below a given minor allele frequency, filter the input haplotype arrays before passing to this function. This function returns NaN for any EHH calculations where haplotype homozygosity does not decay below `min_ehh` before reaching the first or last variant. To disable this behaviour, set `include_edges` to True. Note that the unstandardized score is returned. Usually these scores are then standardized genome-wide. Haplotype arrays from the two populations may have different numbers of haplotypes. 
See Also -------- standardize """ # check inputs h1 = asarray_ndim(h1, 2) check_integer_dtype(h1) h2 = asarray_ndim(h2, 2) check_integer_dtype(h2) pos = asarray_ndim(pos, 1) check_dim0_aligned(h1, h2, pos) h1 = memoryview_safe(h1) h2 = memoryview_safe(h2) pos = memoryview_safe(pos) # compute gaps between variants for integration gaps = compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible) # setup kwargs kwargs = dict(min_ehh=min_ehh, include_edges=include_edges) if use_threads and multiprocessing.cpu_count() > 1: # use multiple threads # setup threadpool pool = ThreadPool(min(4, multiprocessing.cpu_count())) # scan forward res1_fwd = pool.apply_async(ihh_scan, (h1, gaps), kwargs) res2_fwd = pool.apply_async(ihh_scan, (h2, gaps), kwargs) # scan backward res1_rev = pool.apply_async(ihh_scan, (h1[::-1], gaps[::-1]), kwargs) res2_rev = pool.apply_async(ihh_scan, (h2[::-1], gaps[::-1]), kwargs) # wait for both to finish pool.close() pool.join() # obtain results ihh1_fwd = res1_fwd.get() ihh2_fwd = res2_fwd.get() ihh1_rev = res1_rev.get() ihh2_rev = res2_rev.get() # cleanup pool.terminate() else: # compute without threads # scan forward ihh1_fwd = ihh_scan(h1, gaps, **kwargs) ihh2_fwd = ihh_scan(h2, gaps, **kwargs) # scan backward ihh1_rev = ihh_scan(h1[::-1], gaps[::-1], **kwargs) ihh2_rev = ihh_scan(h2[::-1], gaps[::-1], **kwargs) # handle reverse scans ihh1_rev = ihh1_rev[::-1] ihh2_rev = ihh2_rev[::-1] # compute unstandardized score ihh1 = ihh1_fwd + ihh1_rev ihh2 = ihh2_fwd + ihh2_rev score = np.log(ihh1 / ihh2) return score
Compute the unstandardized cross-population extended haplotype homozygosity score (XPEHH) for each variant. Parameters ---------- h1 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the first population. h2 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the second population. pos : array_like, int, shape (n_variants,) Variant positions on physical or genetic map. map_pos : array_like, float, shape (n_variants,) Variant positions (genetic map distance). min_ehh: float, optional Minimum EHH beyond which to truncate integrated haplotype homozygosity calculation. include_edges : bool, optional If True, report scores even if EHH does not decay below `min_ehh` before reaching the edge of the data. gap_scale : int, optional Rescale distance between variants if gap is larger than this value. max_gap : int, optional Do not report scores if EHH spans a gap larger than this number of base pairs. is_accessible : array_like, bool, optional Genome accessibility array. If provided, distance between variants will be computed as the number of accessible bases between them. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Unstandardized XPEHH scores. Notes ----- This function will calculate XPEHH for all variants. To exclude variants below a given minor allele frequency, filter the input haplotype arrays before passing to this function. This function returns NaN for any EHH calculations where haplotype homozygosity does not decay below `min_ehh` before reaching the first or last variant. To disable this behaviour, set `include_edges` to True. Note that the unstandardized score is returned. Usually these scores are then standardized genome-wide. Haplotype arrays from the two populations may have different numbers of haplotypes. See Also -------- standardize
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L446-L571
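A corresponding sketch for xpehh with simulated populations of different sizes, assuming top-level allel exposure; the output is unstandardized and illustrative only:

import numpy as np
import allel

rng = np.random.RandomState(7)
h1 = rng.randint(0, 2, size=(100, 20)).astype('i1')  # population 1
h2 = rng.randint(0, 2, size=(100, 16)).astype('i1')  # population 2
pos = np.arange(1, 101) * 100  # evenly spaced 1-based positions

score = allel.xpehh(h1, h2, pos, include_edges=True, use_threads=False)
# log(ihh1 / ihh2) per variant; positive values indicate longer
# haplotype homozygosity in the first population
print(score)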
cggh/scikit-allel
allel/stats/selection.py
nsl
def nsl(h, use_threads=True): """Compute the unstandardized number of segregating sites by length (nSl) for each variant, comparing the reference and alternate alleles, after Ferrer-Admetlla et al. (2014). Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Notes ----- This function will calculate nSl for all variants. To exclude variants below a given minor allele frequency, filter the input haplotype array before passing to this function. This function computes nSl by comparing the reference and alternate alleles. These can be polarised by switching the sign for any variant where the reference allele is derived. This function does nothing about nSl calculations where haplotype homozygosity extends up to the first or last variant. There may be edge effects. Note that the unstandardized score is returned. Usually these scores are then standardized in different allele frequency bins. See Also -------- standardize_by_allele_count """ # check inputs h = asarray_ndim(h, 2) check_integer_dtype(h) h = memoryview_safe(h) # # check there are no invariant sites # ac = h.count_alleles() # assert np.all(ac.is_segregating()), 'please remove non-segregating sites' if use_threads and multiprocessing.cpu_count() > 1: # create pool pool = ThreadPool(2) # scan forward result_fwd = pool.apply_async(nsl01_scan, args=(h,)) # scan backward result_rev = pool.apply_async(nsl01_scan, args=(h[::-1],)) # wait for both to finish pool.close() pool.join() # obtain results nsl0_fwd, nsl1_fwd = result_fwd.get() nsl0_rev, nsl1_rev = result_rev.get() else: # scan forward nsl0_fwd, nsl1_fwd = nsl01_scan(h) # scan backward nsl0_rev, nsl1_rev = nsl01_scan(h[::-1]) # handle backwards nsl0_rev = nsl0_rev[::-1] nsl1_rev = nsl1_rev[::-1] # compute unstandardized score nsl0 = nsl0_fwd + nsl0_rev nsl1 = nsl1_fwd + nsl1_rev score = np.log(nsl1 / nsl0) return score
python
def nsl(h, use_threads=True): """Compute the unstandardized number of segregating sites by length (nSl) for each variant, comparing the reference and alternate alleles, after Ferrer-Admetlla et al. (2014). Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Notes ----- This function will calculate nSl for all variants. To exclude variants below a given minor allele frequency, filter the input haplotype array before passing to this function. This function computes nSl by comparing the reference and alternate alleles. These can be polarised by switching the sign for any variant where the reference allele is derived. This function does nothing about nSl calculations where haplotype homozygosity extends up to the first or last variant. There may be edge effects. Note that the unstandardized score is returned. Usually these scores are then standardized in different allele frequency bins. See Also -------- standardize_by_allele_count """ # check inputs h = asarray_ndim(h, 2) check_integer_dtype(h) h = memoryview_safe(h) # # check there are no invariant sites # ac = h.count_alleles() # assert np.all(ac.is_segregating()), 'please remove non-segregating sites' if use_threads and multiprocessing.cpu_count() > 1: # create pool pool = ThreadPool(2) # scan forward result_fwd = pool.apply_async(nsl01_scan, args=(h,)) # scan backward result_rev = pool.apply_async(nsl01_scan, args=(h[::-1],)) # wait for both to finish pool.close() pool.join() # obtain results nsl0_fwd, nsl1_fwd = result_fwd.get() nsl0_rev, nsl1_rev = result_rev.get() else: # scan forward nsl0_fwd, nsl1_fwd = nsl01_scan(h) # scan backward nsl0_rev, nsl1_rev = nsl01_scan(h[::-1]) # handle backwards nsl0_rev = nsl0_rev[::-1] nsl1_rev = nsl1_rev[::-1] # compute unstandardized score nsl0 = nsl0_fwd + nsl0_rev nsl1 = nsl1_fwd + nsl1_rev score = np.log(nsl1 / nsl0) return score
Compute the unstandardized number of segregating sites by length (nSl) for each variant, comparing the reference and alternate alleles, after Ferrer-Admetlla et al. (2014). Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Notes ----- This function will calculate nSl for all variants. To exclude variants below a given minor allele frequency, filter the input haplotype array before passing to this function. This function computes nSl by comparing the reference and alternate alleles. These can be polarised by switching the sign for any variant where the reference allele is derived. This function does nothing about nSl calculations where haplotype homozygosity extends up to the first or last variant. There may be edge effects. Note that the unstandardized score is returned. Usually these scores are then standardized in different allele frequency bins. See Also -------- standardize_by_allele_count
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L574-L658
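A usage sketch for nsl on simulated data, assuming top-level allel exposure; note that nSl needs no positions or genetic map, which is its main practical difference from ihs:

import numpy as np
import allel

h = np.random.RandomState(3).randint(0, 2, size=(100, 20)).astype('i1')
score = allel.nsl(h, use_threads=False)
# unstandardized nSl; usually standardized in allele frequency bins,
# e.g. with allel.standardize_by_allele_count
print(score)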
cggh/scikit-allel
allel/stats/selection.py
xpnsl
def xpnsl(h1, h2, use_threads=True): """Cross-population version of the NSL statistic. Parameters ---------- h1 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the first population. h2 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the second population. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Unstandardized XPNSL scores. """ # check inputs h1 = asarray_ndim(h1, 2) check_integer_dtype(h1) h2 = asarray_ndim(h2, 2) check_integer_dtype(h2) check_dim0_aligned(h1, h2) h1 = memoryview_safe(h1) h2 = memoryview_safe(h2) if use_threads and multiprocessing.cpu_count() > 1: # use multiple threads # setup threadpool pool = ThreadPool(min(4, multiprocessing.cpu_count())) # scan forward res1_fwd = pool.apply_async(nsl_scan, args=(h1,)) res2_fwd = pool.apply_async(nsl_scan, args=(h2,)) # scan backward res1_rev = pool.apply_async(nsl_scan, args=(h1[::-1],)) res2_rev = pool.apply_async(nsl_scan, args=(h2[::-1],)) # wait for both to finish pool.close() pool.join() # obtain results nsl1_fwd = res1_fwd.get() nsl2_fwd = res2_fwd.get() nsl1_rev = res1_rev.get() nsl2_rev = res2_rev.get() # cleanup pool.terminate() else: # compute without threads # scan forward nsl1_fwd = nsl_scan(h1) nsl2_fwd = nsl_scan(h2) # scan backward nsl1_rev = nsl_scan(h1[::-1]) nsl2_rev = nsl_scan(h2[::-1]) # handle reverse scans nsl1_rev = nsl1_rev[::-1] nsl2_rev = nsl2_rev[::-1] # compute unstandardized score nsl1 = nsl1_fwd + nsl1_rev nsl2 = nsl2_fwd + nsl2_rev score = np.log(nsl1 / nsl2) return score
python
def xpnsl(h1, h2, use_threads=True): """Cross-population version of the NSL statistic. Parameters ---------- h1 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the first population. h2 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the second population. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Unstandardized XPNSL scores. """ # check inputs h1 = asarray_ndim(h1, 2) check_integer_dtype(h1) h2 = asarray_ndim(h2, 2) check_integer_dtype(h2) check_dim0_aligned(h1, h2) h1 = memoryview_safe(h1) h2 = memoryview_safe(h2) if use_threads and multiprocessing.cpu_count() > 1: # use multiple threads # setup threadpool pool = ThreadPool(min(4, multiprocessing.cpu_count())) # scan forward res1_fwd = pool.apply_async(nsl_scan, args=(h1,)) res2_fwd = pool.apply_async(nsl_scan, args=(h2,)) # scan backward res1_rev = pool.apply_async(nsl_scan, args=(h1[::-1],)) res2_rev = pool.apply_async(nsl_scan, args=(h2[::-1],)) # wait for both to finish pool.close() pool.join() # obtain results nsl1_fwd = res1_fwd.get() nsl2_fwd = res2_fwd.get() nsl1_rev = res1_rev.get() nsl2_rev = res2_rev.get() # cleanup pool.terminate() else: # compute without threads # scan forward nsl1_fwd = nsl_scan(h1) nsl2_fwd = nsl_scan(h2) # scan backward nsl1_rev = nsl_scan(h1[::-1]) nsl2_rev = nsl_scan(h2[::-1]) # handle reverse scans nsl1_rev = nsl1_rev[::-1] nsl2_rev = nsl2_rev[::-1] # compute unstandardized score nsl1 = nsl1_fwd + nsl1_rev nsl2 = nsl2_fwd + nsl2_rev score = np.log(nsl1 / nsl2) return score
Cross-population version of the NSL statistic. Parameters ---------- h1 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the first population. h2 : array_like, int, shape (n_variants, n_haplotypes) Haplotype array for the second population. use_threads : bool, optional If True use multiple threads to compute. Returns ------- score : ndarray, float, shape (n_variants,) Unstandardized XPNSL scores.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L661-L736
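A matching sketch for xpnsl, under the same assumptions as the nsl example above:

import numpy as np
import allel

rng = np.random.RandomState(11)
h1 = rng.randint(0, 2, size=(100, 20)).astype('i1')
h2 = rng.randint(0, 2, size=(100, 20)).astype('i1')
score = allel.xpnsl(h1, h2, use_threads=False)
print(score)  # log(nsl1 / nsl2) per variant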
cggh/scikit-allel
allel/stats/selection.py
haplotype_diversity
def haplotype_diversity(h): """Estimate haplotype diversity. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. Returns ------- hd : float Haplotype diversity. """ # check inputs h = HaplotypeArray(h, copy=False) # number of haplotypes n = h.n_haplotypes # compute haplotype frequencies f = h.distinct_frequencies() # estimate haplotype diversity hd = (1 - np.sum(f**2)) * n / (n - 1) return hd
python
def haplotype_diversity(h): """Estimate haplotype diversity. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. Returns ------- hd : float Haplotype diversity. """ # check inputs h = HaplotypeArray(h, copy=False) # number of haplotypes n = h.n_haplotypes # compute haplotype frequencies f = h.distinct_frequencies() # estimate haplotype diversity hd = (1 - np.sum(f**2)) * n / (n - 1) return hd
Estimate haplotype diversity. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. Returns ------- hd : float Haplotype diversity.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L739-L766
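A worked sketch for haplotype_diversity, assuming top-level allel exposure; the expected value follows directly from the estimator (1 - sum(f**2)) * n / (n - 1):

import numpy as np
import allel

# two variants, four haplotypes, all four columns distinct
h = np.array([[0, 0, 1, 1],
              [0, 1, 0, 1]], dtype='i1')

hd = allel.haplotype_diversity(h)
# distinct frequencies are [0.25, 0.25, 0.25, 0.25], so
# hd = (1 - 4 * 0.25**2) * 4 / 3 = 1.0
print(hd)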
cggh/scikit-allel
allel/stats/selection.py
moving_haplotype_diversity
def moving_haplotype_diversity(h, size, start=0, stop=None, step=None): """Estimate haplotype diversity in moving windows. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- hd : ndarray, float, shape (n_windows,) Haplotype diversity. """ hd = moving_statistic(values=h, statistic=haplotype_diversity, size=size, start=start, stop=stop, step=step) return hd
python
def moving_haplotype_diversity(h, size, start=0, stop=None, step=None): """Estimate haplotype diversity in moving windows. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- hd : ndarray, float, shape (n_windows,) Haplotype diversity. """ hd = moving_statistic(values=h, statistic=haplotype_diversity, size=size, start=start, stop=stop, step=step) return hd
Estimate haplotype diversity in moving windows. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- hd : ndarray, float, shape (n_windows,) Haplotype diversity.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L769-L795
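A short sketch of the moving-window wrapper, under the same assumptions:

import numpy as np
import allel

h = np.random.RandomState(5).randint(0, 2, size=(100, 10)).astype('i1')
hd = allel.moving_haplotype_diversity(h, size=20)
# default step equals size, so 100 variants give 5 non-overlapping
# windows and hd has shape (5,)
print(hd)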
cggh/scikit-allel
allel/stats/selection.py
garud_h
def garud_h(h): """Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures of soft sweeps, as defined in Garud et al. (2015). Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. Returns ------- h1 : float H1 statistic (sum of squares of haplotype frequencies). h12 : float H12 statistic (sum of squares of haplotype frequencies, combining the two most common haplotypes into a single frequency). h123 : float H123 statistic (sum of squares of haplotype frequencies, combining the three most common haplotypes into a single frequency). h2_h1 : float H2/H1 statistic, indicating the "softness" of a sweep. """ # check inputs h = HaplotypeArray(h, copy=False) # compute haplotype frequencies f = h.distinct_frequencies() # compute H1 h1 = np.sum(f**2) # compute H12 h12 = np.sum(f[:2])**2 + np.sum(f[2:]**2) # compute H123 h123 = np.sum(f[:3])**2 + np.sum(f[3:]**2) # compute H2/H1 h2 = h1 - f[0]**2 h2_h1 = h2 / h1 return h1, h12, h123, h2_h1
python
def garud_h(h): """Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures of soft sweeps, as defined in Garud et al. (2015). Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. Returns ------- h1 : float H1 statistic (sum of squares of haplotype frequencies). h12 : float H12 statistic (sum of squares of haplotype frequencies, combining the two most common haplotypes into a single frequency). h123 : float H123 statistic (sum of squares of haplotype frequencies, combining the three most common haplotypes into a single frequency). h2_h1 : float H2/H1 statistic, indicating the "softness" of a sweep. """ # check inputs h = HaplotypeArray(h, copy=False) # compute haplotype frequencies f = h.distinct_frequencies() # compute H1 h1 = np.sum(f**2) # compute H12 h12 = np.sum(f[:2])**2 + np.sum(f[2:]**2) # compute H123 h123 = np.sum(f[:3])**2 + np.sum(f[3:]**2) # compute H2/H1 h2 = h1 - f[0]**2 h2_h1 = h2 / h1 return h1, h12, h123, h2_h1
Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures of soft sweeps, as defined in Garud et al. (2015). Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. Returns ------- h1 : float H1 statistic (sum of squares of haplotype frequencies). h12 : float H12 statistic (sum of squares of haplotype frequencies, combining the two most common haplotypes into a single frequency). h123 : float H123 statistic (sum of squares of haplotype frequencies, combining the three most common haplotypes into a single frequency). h2_h1 : float H2/H1 statistic, indicating the "softness" of a sweep.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L798-L841
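A worked sketch for garud_h, assuming top-level allel exposure; the expected values are computed by hand from the definitions in the function body:

import numpy as np
import allel

# four haplotypes: one duplicated haplotype plus two singletons
h = np.array([[0, 0, 1, 1],
              [0, 0, 0, 1],
              [1, 1, 0, 0]], dtype='i1')

h1, h12, h123, h2_h1 = allel.garud_h(h)
# distinct frequencies are [0.5, 0.25, 0.25]:
#   H1    = 0.5**2 + 0.25**2 + 0.25**2 = 0.375
#   H12   = (0.5 + 0.25)**2 + 0.25**2  = 0.625
#   H123  = (0.5 + 0.25 + 0.25)**2     = 1.0
#   H2/H1 = (0.375 - 0.5**2) / 0.375   = 0.333...
print(h1, h12, h123, h2_h1)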
cggh/scikit-allel
allel/stats/selection.py
moving_garud_h
def moving_garud_h(h, size, start=0, stop=None, step=None):
    """Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
    of soft sweeps, as defined in Garud et al. (2015), in moving windows.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.

    Returns
    -------
    h1 : ndarray, float, shape (n_windows,)
        H1 statistics (sum of squares of haplotype frequencies).
    h12 : ndarray, float, shape (n_windows,)
        H12 statistics (sum of squares of haplotype frequencies, combining
        the two most common haplotypes into a single frequency).
    h123 : ndarray, float, shape (n_windows,)
        H123 statistics (sum of squares of haplotype frequencies, combining
        the three most common haplotypes into a single frequency).
    h2_h1 : ndarray, float, shape (n_windows,)
        H2/H1 statistics, indicating the "softness" of a sweep.

    """

    gh = moving_statistic(values=h, statistic=garud_h, size=size, start=start,
                          stop=stop, step=step)

    h1 = gh[:, 0]
    h12 = gh[:, 1]
    h123 = gh[:, 2]
    h2_h1 = gh[:, 3]

    return h1, h12, h123, h2_h1
python
def moving_garud_h(h, size, start=0, stop=None, step=None):
    """Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
    of soft sweeps, as defined in Garud et al. (2015), in moving windows.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.

    Returns
    -------
    h1 : ndarray, float, shape (n_windows,)
        H1 statistics (sum of squares of haplotype frequencies).
    h12 : ndarray, float, shape (n_windows,)
        H12 statistics (sum of squares of haplotype frequencies, combining
        the two most common haplotypes into a single frequency).
    h123 : ndarray, float, shape (n_windows,)
        H123 statistics (sum of squares of haplotype frequencies, combining
        the three most common haplotypes into a single frequency).
    h2_h1 : ndarray, float, shape (n_windows,)
        H2/H1 statistics, indicating the "softness" of a sweep.

    """

    gh = moving_statistic(values=h, statistic=garud_h, size=size, start=start,
                          stop=stop, step=step)

    h1 = gh[:, 0]
    h12 = gh[:, 1]
    h123 = gh[:, 2]
    h2_h1 = gh[:, 3]

    return h1, h12, h123, h2_h1
Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
of soft sweeps, as defined in Garud et al. (2015), in moving windows.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
step : int, optional
    The number of variants between start positions of windows. If not
    given, defaults to the window size, i.e., non-overlapping windows.

Returns
-------
h1 : ndarray, float, shape (n_windows,)
    H1 statistics (sum of squares of haplotype frequencies).
h12 : ndarray, float, shape (n_windows,)
    H12 statistics (sum of squares of haplotype frequencies, combining
    the two most common haplotypes into a single frequency).
h123 : ndarray, float, shape (n_windows,)
    H123 statistics (sum of squares of haplotype frequencies, combining
    the three most common haplotypes into a single frequency).
h2_h1 : ndarray, float, shape (n_windows,)
    H2/H1 statistics, indicating the "softness" of a sweep.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L844-L885
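And the moving-window variant, under the same assumptions as above:

import numpy as np
import allel

h = np.random.RandomState(9).randint(0, 2, size=(100, 10)).astype('i1')
h1, h12, h123, h2_h1 = allel.moving_garud_h(h, size=20)
# one value per 20-variant window, so each output has shape (5,)
print(h12)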
cggh/scikit-allel
allel/stats/selection.py
plot_haplotype_frequencies
def plot_haplotype_frequencies(h, palette='Paired', singleton_color='w', ax=None): """Plot haplotype frequencies. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. palette : string, optional A Seaborn palette name. singleton_color : string, optional Color to paint singleton haplotypes. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes """ import matplotlib.pyplot as plt import seaborn as sns # check inputs h = HaplotypeArray(h, copy=False) # setup figure if ax is None: width = plt.rcParams['figure.figsize'][0] height = width / 10 fig, ax = plt.subplots(figsize=(width, height)) sns.despine(ax=ax, left=True) # count distinct haplotypes hc = h.distinct_counts() # setup palette n_colors = np.count_nonzero(hc > 1) palette = sns.color_palette(palette, n_colors) # paint frequencies x1 = 0 for i, c in enumerate(hc): x2 = x1 + c if c > 1: color = palette[i] else: color = singleton_color ax.axvspan(x1, x2, color=color) x1 = x2 # tidy up ax.set_xlim(0, h.shape[1]) ax.set_yticks([]) return ax
python
def plot_haplotype_frequencies(h, palette='Paired', singleton_color='w', ax=None): """Plot haplotype frequencies. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. palette : string, optional A Seaborn palette name. singleton_color : string, optional Color to paint singleton haplotypes. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes """ import matplotlib.pyplot as plt import seaborn as sns # check inputs h = HaplotypeArray(h, copy=False) # setup figure if ax is None: width = plt.rcParams['figure.figsize'][0] height = width / 10 fig, ax = plt.subplots(figsize=(width, height)) sns.despine(ax=ax, left=True) # count distinct haplotypes hc = h.distinct_counts() # setup palette n_colors = np.count_nonzero(hc > 1) palette = sns.color_palette(palette, n_colors) # paint frequencies x1 = 0 for i, c in enumerate(hc): x2 = x1 + c if c > 1: color = palette[i] else: color = singleton_color ax.axvspan(x1, x2, color=color) x1 = x2 # tidy up ax.set_xlim(0, h.shape[1]) ax.set_yticks([]) return ax
Plot haplotype frequencies. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. palette : string, optional A Seaborn palette name. singleton_color : string, optional Color to paint singleton haplotypes. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L888-L945
cggh/scikit-allel
allel/stats/selection.py
moving_hfs_rank
def moving_hfs_rank(h, size, start=0, stop=None): """Helper function for plotting haplotype frequencies in moving windows. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. Returns ------- hr : ndarray, int, shape (n_windows, n_haplotypes) Haplotype rank array. """ # determine windows windows = np.asarray(list(index_windows(h, size=size, start=start, stop=stop, step=None))) # setup output hr = np.zeros((windows.shape[0], h.shape[1]), dtype='i4') # iterate over windows for i, (window_start, window_stop) in enumerate(windows): # extract haplotypes for the current window hw = h[window_start:window_stop] # count haplotypes hc = hw.distinct_counts() # ensure sorted descending hc.sort() hc = hc[::-1] # compute ranks for non-singleton haplotypes cp = 0 for j, c in enumerate(hc): if c > 1: hr[i, cp:cp+c] = j+1 cp += c return hr
python
def moving_hfs_rank(h, size, start=0, stop=None): """Helper function for plotting haplotype frequencies in moving windows. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. Returns ------- hr : ndarray, int, shape (n_windows, n_haplotypes) Haplotype rank array. """ # determine windows windows = np.asarray(list(index_windows(h, size=size, start=start, stop=stop, step=None))) # setup output hr = np.zeros((windows.shape[0], h.shape[1]), dtype='i4') # iterate over windows for i, (window_start, window_stop) in enumerate(windows): # extract haplotypes for the current window hw = h[window_start:window_stop] # count haplotypes hc = hw.distinct_counts() # ensure sorted descending hc.sort() hc = hc[::-1] # compute ranks for non-singleton haplotypes cp = 0 for j, c in enumerate(hc): if c > 1: hr[i, cp:cp+c] = j+1 cp += c return hr
Helper function for plotting haplotype frequencies in moving windows. Parameters ---------- h : array_like, int, shape (n_variants, n_haplotypes) Haplotype array. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. Returns ------- hr : ndarray, int, shape (n_windows, n_haplotypes) Haplotype rank array.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L948-L996
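A small worked example of the rank encoding, assuming the helper can be imported from allel.stats.selection (it is an internal helper rather than documented public API):

import allel
from allel.stats.selection import moving_hfs_rank  # internal helper

h = allel.HaplotypeArray([[0, 0, 1, 1],
                          [0, 0, 1, 1],
                          [0, 0, 0, 1]])

# one window spanning all 3 variants: the window's haplotypes are sorted
# by frequency, the two copies of the commonest haplotype get rank 1, and
# the two singletons keep rank 0
hr = moving_hfs_rank(h, size=3)
print(hr)  # expected: [[1 1 0 0]]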
cggh/scikit-allel
allel/stats/selection.py
plot_moving_haplotype_frequencies
def plot_moving_haplotype_frequencies(pos, h, size, start=0, stop=None,
                                      n=None, palette='Paired',
                                      singleton_color='w', ax=None):
    """Plot haplotype frequencies in moving windows over the genome.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    n : int, optional
        Color only the `n` most frequent haplotypes (by default, all
        non-singleton haplotypes are colored).
    palette : string, optional
        A Seaborn palette name.
    singleton_color : string, optional
        Color to paint singleton haplotypes.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    ax : axes

    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import seaborn as sns

    # setup figure
    if ax is None:
        fig, ax = plt.subplots()

    # compute haplotype frequencies
    # N.B., here we use a haplotype rank data structure to enable the use of
    # pcolormesh() which is a lot faster than any other type of plotting
    # function
    hr = moving_hfs_rank(h, size=size, start=start, stop=stop)

    # truncate to n most common haplotypes
    if n:
        hr[hr > n] = 0

    # compute window start and stop positions
    windows = moving_statistic(pos, statistic=lambda v: (v[0], v[-1]),
                               size=size, start=start, stop=stop)

    # create color map
    colors = [singleton_color] + sns.color_palette(palette, n_colors=hr.max())
    cmap = mpl.colors.ListedColormap(colors)

    # draw colors
    x = np.append(windows[:, 0], windows[-1, -1])
    y = np.arange(h.shape[1]+1)
    ax.pcolormesh(x, y, hr.T, cmap=cmap)

    # tidy up
    ax.set_xlim(windows[0, 0], windows[-1, -1])
    ax.set_ylim(0, h.shape[1])
    ax.set_ylabel('haplotype count')
    ax.set_xlabel('position (bp)')

    return ax
python
def plot_moving_haplotype_frequencies(pos, h, size, start=0, stop=None,
                                      n=None, palette='Paired',
                                      singleton_color='w', ax=None):
    """Plot haplotype frequencies in moving windows over the genome.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    n : int, optional
        Color only the `n` most frequent haplotypes (by default, all
        non-singleton haplotypes are colored).
    palette : string, optional
        A Seaborn palette name.
    singleton_color : string, optional
        Color to paint singleton haplotypes.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    ax : axes

    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import seaborn as sns

    # setup figure
    if ax is None:
        fig, ax = plt.subplots()

    # compute haplotype frequencies
    # N.B., here we use a haplotype rank data structure to enable the use of
    # pcolormesh() which is a lot faster than any other type of plotting
    # function
    hr = moving_hfs_rank(h, size=size, start=start, stop=stop)

    # truncate to n most common haplotypes
    if n:
        hr[hr > n] = 0

    # compute window start and stop positions
    windows = moving_statistic(pos, statistic=lambda v: (v[0], v[-1]),
                               size=size, start=start, stop=stop)

    # create color map
    colors = [singleton_color] + sns.color_palette(palette, n_colors=hr.max())
    cmap = mpl.colors.ListedColormap(colors)

    # draw colors
    x = np.append(windows[:, 0], windows[-1, -1])
    y = np.arange(h.shape[1]+1)
    ax.pcolormesh(x, y, hr.T, cmap=cmap)

    # tidy up
    ax.set_xlim(windows[0, 0], windows[-1, -1])
    ax.set_ylim(0, h.shape[1])
    ax.set_ylabel('haplotype count')
    ax.set_xlabel('position (bp)')

    return ax
Plot haplotype frequencies in moving windows over the genome.

Parameters
----------
pos : array_like, int, shape (n_items,)
    Variant positions, using 1-based coordinates, in ascending order.
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
n : int, optional
    Color only the `n` most frequent haplotypes (by default, all
    non-singleton haplotypes are colored).
palette : string, optional
    A Seaborn palette name.
singleton_color : string, optional
    Color to paint singleton haplotypes.
ax : axes, optional
    The axes on which to draw. If not provided, a new figure will be
    created.

Returns
-------
ax : axes
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L999-L1070
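A usage sketch with synthetic data, assuming plot_moving_haplotype_frequencies is exported at the top level of allel. The haplotypes are built from four repeated templates so that windows actually contain shared (non-singleton) haplotypes; purely random haplotypes would be almost all singletons and the plot would stay blank.

import numpy as np
import allel

rng = np.random.RandomState(0)
templates = rng.randint(0, 2, size=(500, 4))          # 4 distinct haplotypes
h = allel.HaplotypeArray(templates[:, rng.randint(0, 4, size=20)])
pos = np.sort(rng.choice(100000, size=500, replace=False) + 1)

# 100-variant windows; color only the 3 most frequent haplotypes per window
ax = allel.plot_moving_haplotype_frequencies(pos, h, size=100, n=3)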
cggh/scikit-allel
allel/stats/selection.py
moving_delta_tajima_d
def moving_delta_tajima_d(ac1, ac2, size, start=0, stop=None, step=None):
    """Compute the difference in Tajima's D between two populations in
    moving windows.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.

    Returns
    -------
    delta_d : ndarray, float, shape (n_windows,)
        Standardized delta Tajima's D.

    See Also
    --------
    allel.stats.diversity.moving_tajima_d

    """
    d1 = moving_tajima_d(ac1, size=size, start=start, stop=stop, step=step)
    d2 = moving_tajima_d(ac2, size=size, start=start, stop=stop, step=step)
    delta = d1 - d2
    delta_z = (delta - np.mean(delta)) / np.std(delta)
    return delta_z
python
def moving_delta_tajima_d(ac1, ac2, size, start=0, stop=None, step=None):
    """Compute the difference in Tajima's D between two populations in
    moving windows.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.

    Returns
    -------
    delta_d : ndarray, float, shape (n_windows,)
        Standardized delta Tajima's D.

    See Also
    --------
    allel.stats.diversity.moving_tajima_d

    """
    d1 = moving_tajima_d(ac1, size=size, start=start, stop=stop, step=step)
    d2 = moving_tajima_d(ac2, size=size, start=start, stop=stop, step=step)
    delta = d1 - d2
    delta_z = (delta - np.mean(delta)) / np.std(delta)
    return delta_z
Compute the difference in Tajima's D between two populations in moving
windows.

Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the second population.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
step : int, optional
    The number of variants between start positions of windows. If not
    given, defaults to the window size, i.e., non-overlapping windows.

Returns
-------
delta_d : ndarray, float, shape (n_windows,)
    Standardized delta Tajima's D.

See Also
--------
allel.stats.diversity.moving_tajima_d
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L1073-L1108
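A sketch of a typical call, assuming moving_delta_tajima_d is exported at the top level of allel; the genotypes are random and purely illustrative, to show the shapes involved:

import numpy as np
import allel

rng = np.random.RandomState(1)
g = allel.GenotypeArray(rng.randint(0, 2, size=(1000, 20, 2)))
ac1 = g[:, :10].count_alleles()   # population 1: first 10 samples
ac2 = g[:, 10:].count_alleles()   # population 2: last 10 samples

# standardized difference in Tajima's D over non-overlapping 100-variant windows
delta_z = allel.moving_delta_tajima_d(ac1, ac2, size=100)
print(delta_z.shape)  # expected: (10,)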
cggh/scikit-allel
allel/stats/selection.py
make_similar_sized_bins
def make_similar_sized_bins(x, n):
    """Utility function to create a set of bins over the range of values in
    `x` such that each bin contains roughly the same number of values.

    Parameters
    ----------
    x : array_like
        The values to be binned.
    n : int
        The number of bins to create.

    Returns
    -------
    bins : ndarray
        An array of bin edges.

    Notes
    -----
    The actual number of bins returned may be less than `n` if `x` contains
    integer values and any single value is represented more than len(x)//n
    times.

    """
    # copy and sort the array
    y = np.array(x).flatten()
    y.sort()

    # setup bins
    bins = [y[0]]

    # determine step size
    step = len(y) // n

    # add bin edges
    for i in range(step, len(y), step):

        # get value at this index
        v = y[i]

        # only add bin edge if larger than previous
        if v > bins[-1]:
            bins.append(v)

    # fix last bin edge
    bins[-1] = y[-1]

    return np.array(bins)
python
def make_similar_sized_bins(x, n):
    """Utility function to create a set of bins over the range of values in
    `x` such that each bin contains roughly the same number of values.

    Parameters
    ----------
    x : array_like
        The values to be binned.
    n : int
        The number of bins to create.

    Returns
    -------
    bins : ndarray
        An array of bin edges.

    Notes
    -----
    The actual number of bins returned may be less than `n` if `x` contains
    integer values and any single value is represented more than len(x)//n
    times.

    """
    # copy and sort the array
    y = np.array(x).flatten()
    y.sort()

    # setup bins
    bins = [y[0]]

    # determine step size
    step = len(y) // n

    # add bin edges
    for i in range(step, len(y), step):

        # get value at this index
        v = y[i]

        # only add bin edge if larger than previous
        if v > bins[-1]:
            bins.append(v)

    # fix last bin edge
    bins[-1] = y[-1]

    return np.array(bins)
Utility function to create a set of bins over the range of values in `x`
such that each bin contains roughly the same number of values.

Parameters
----------
x : array_like
    The values to be binned.
n : int
    The number of bins to create.

Returns
-------
bins : ndarray
    An array of bin edges.

Notes
-----
The actual number of bins returned may be less than `n` if `x` contains
integer values and any single value is represented more than len(x)//n
times.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L1111-L1157
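A small worked example, assuming the helper can be imported from allel.stats.selection (it is an internal utility):

import numpy as np
from allel.stats.selection import make_similar_sized_bins  # internal helper

x = np.arange(1, 9)   # values 1..8
bins = make_similar_sized_bins(x, n=4)
print(bins)  # expected: [1 3 5 8]
# len(x) // n gives a step of 2, so only 4 edges (3 bins) come back even
# though n=4 was requested, and the final edge is snapped to the maximum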
cggh/scikit-allel
allel/stats/selection.py
standardize
def standardize(score):
    """Centre and scale to unit variance."""
    score = asarray_ndim(score, 1)
    return (score - np.nanmean(score)) / np.nanstd(score)
python
def standardize(score):
    """Centre and scale to unit variance."""
    score = asarray_ndim(score, 1)
    return (score - np.nanmean(score)) / np.nanstd(score)
Centre and scale to unit variance.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L1160-L1163
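A quick check of the behavior with missing values, assuming the helper can be imported from allel.stats.selection:

import numpy as np
from allel.stats.selection import standardize  # internal helper

score = np.array([1.0, 2.0, 3.0, np.nan])
z = standardize(score)
# nanmean is 2.0 and nanstd is ~0.8165, so z is approximately
# [-1.22, 0.0, 1.22, nan]; NaN entries propagate unchanged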
cggh/scikit-allel
allel/stats/selection.py
standardize_by_allele_count
def standardize_by_allele_count(score, aac, bins=None, n_bins=None,
                                diagnostics=True):
    """Standardize `score` within allele frequency bins.

    Parameters
    ----------
    score : array_like, float
        The score to be standardized, e.g., IHS or NSL.
    aac : array_like, int
        An array of alternate allele counts.
    bins : array_like, int, optional
        Allele count bins, overrides `n_bins`.
    n_bins : int, optional
        Number of allele count bins to use.
    diagnostics : bool, optional
        If True, plot some diagnostic information about the
        standardization.

    Returns
    -------
    score_standardized : ndarray, float
        Standardized scores.
    bins : ndarray, int
        Allele count bins used for standardization.

    """
    from scipy.stats import binned_statistic

    # check inputs
    score = asarray_ndim(score, 1)
    aac = asarray_ndim(aac, 1)
    check_dim0_aligned(score, aac)

    # remove nans
    nonan = ~np.isnan(score)
    score_nonan = score[nonan]
    aac_nonan = aac[nonan]

    if bins is None:
        # make our own similar sized bins

        # how many bins to make?
        if n_bins is None:
            # something vaguely reasonable
            n_bins = np.max(aac) // 2

        # make bins
        bins = make_similar_sized_bins(aac_nonan, n_bins)

    else:
        # user-provided bins
        bins = asarray_ndim(bins, 1)

    mean_score, _, _ = binned_statistic(aac_nonan, score_nonan,
                                        statistic=np.mean,
                                        bins=bins)
    std_score, _, _ = binned_statistic(aac_nonan, score_nonan,
                                       statistic=np.std,
                                       bins=bins)

    if diagnostics:
        import matplotlib.pyplot as plt
        x = (bins[:-1] + bins[1:]) / 2
        plt.figure()
        plt.fill_between(x, mean_score - std_score, mean_score + std_score,
                         alpha=.5, label='std')
        plt.plot(x, mean_score, marker='o', label='mean')
        plt.grid(axis='y')
        plt.xlabel('Alternate allele count')
        plt.ylabel('Unstandardized score')
        plt.title('Standardization diagnostics')
        plt.legend()

    # apply standardization
    score_standardized = np.empty_like(score)
    for i in range(len(bins) - 1):
        x1 = bins[i]
        x2 = bins[i + 1]
        if i == 0:
            # first bin
            loc = (aac < x2)
        elif i == len(bins) - 2:
            # last bin
            loc = (aac >= x1)
        else:
            # middle bins
            loc = (aac >= x1) & (aac < x2)
        m = mean_score[i]
        s = std_score[i]
        score_standardized[loc] = (score[loc] - m) / s

    return score_standardized, bins
python
def standardize_by_allele_count(score, aac, bins=None, n_bins=None,
                                diagnostics=True):
    """Standardize `score` within allele frequency bins.

    Parameters
    ----------
    score : array_like, float
        The score to be standardized, e.g., IHS or NSL.
    aac : array_like, int
        An array of alternate allele counts.
    bins : array_like, int, optional
        Allele count bins, overrides `n_bins`.
    n_bins : int, optional
        Number of allele count bins to use.
    diagnostics : bool, optional
        If True, plot some diagnostic information about the
        standardization.

    Returns
    -------
    score_standardized : ndarray, float
        Standardized scores.
    bins : ndarray, int
        Allele count bins used for standardization.

    """
    from scipy.stats import binned_statistic

    # check inputs
    score = asarray_ndim(score, 1)
    aac = asarray_ndim(aac, 1)
    check_dim0_aligned(score, aac)

    # remove nans
    nonan = ~np.isnan(score)
    score_nonan = score[nonan]
    aac_nonan = aac[nonan]

    if bins is None:
        # make our own similar sized bins

        # how many bins to make?
        if n_bins is None:
            # something vaguely reasonable
            n_bins = np.max(aac) // 2

        # make bins
        bins = make_similar_sized_bins(aac_nonan, n_bins)

    else:
        # user-provided bins
        bins = asarray_ndim(bins, 1)

    mean_score, _, _ = binned_statistic(aac_nonan, score_nonan,
                                        statistic=np.mean,
                                        bins=bins)
    std_score, _, _ = binned_statistic(aac_nonan, score_nonan,
                                       statistic=np.std,
                                       bins=bins)

    if diagnostics:
        import matplotlib.pyplot as plt
        x = (bins[:-1] + bins[1:]) / 2
        plt.figure()
        plt.fill_between(x, mean_score - std_score, mean_score + std_score,
                         alpha=.5, label='std')
        plt.plot(x, mean_score, marker='o', label='mean')
        plt.grid(axis='y')
        plt.xlabel('Alternate allele count')
        plt.ylabel('Unstandardized score')
        plt.title('Standardization diagnostics')
        plt.legend()

    # apply standardization
    score_standardized = np.empty_like(score)
    for i in range(len(bins) - 1):
        x1 = bins[i]
        x2 = bins[i + 1]
        if i == 0:
            # first bin
            loc = (aac < x2)
        elif i == len(bins) - 2:
            # last bin
            loc = (aac >= x1)
        else:
            # middle bins
            loc = (aac >= x1) & (aac < x2)
        m = mean_score[i]
        s = std_score[i]
        score_standardized[loc] = (score[loc] - m) / s

    return score_standardized, bins
Standardize `score` within allele frequency bins.

Parameters
----------
score : array_like, float
    The score to be standardized, e.g., IHS or NSL.
aac : array_like, int
    An array of alternate allele counts.
bins : array_like, int, optional
    Allele count bins, overrides `n_bins`.
n_bins : int, optional
    Number of allele count bins to use.
diagnostics : bool, optional
    If True, plot some diagnostic information about the standardization.

Returns
-------
score_standardized : ndarray, float
    Standardized scores.
bins : ndarray, int
    Allele count bins used for standardization.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L1166-L1260
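A usage sketch with synthetic scores, assuming standardize_by_allele_count is exported at the top level of allel. The frequency-correlated drift baked into the raw scores mimics what unstandardized IHS tends to look like:

import numpy as np
import allel

rng = np.random.RandomState(42)
aac = rng.randint(1, 40, size=5000)             # alternate allele counts
score = rng.normal(loc=0.05 * aac, scale=1.0)   # frequency-correlated scores

score_std, bins = allel.standardize_by_allele_count(
    score, aac, n_bins=20, diagnostics=False
)
# within each allele-count bin, score_std now has mean ~0 and std ~1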
cggh/scikit-allel
allel/stats/selection.py
pbs
def pbs(ac1, ac2, ac3, window_size, window_start=0, window_stop=None,
        window_step=None, normed=True):
    """Compute the population branching statistic (PBS) which performs a
    comparison of allele frequencies between three populations to detect
    genome regions that are unusually differentiated in one population
    relative to the other two populations.

    Parameters
    ----------
    ac1 : array_like, int
        Allele counts from the first population.
    ac2 : array_like, int
        Allele counts from the second population.
    ac3 : array_like, int
        Allele counts from the third population.
    window_size : int
        The window size (number of variants) within which to compute PBS
        values.
    window_start : int, optional
        The variant index at which to start windowed calculations.
    window_stop : int, optional
        The variant index at which to stop windowed calculations.
    window_step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.
    normed : bool, optional
        If True (default), use the normalised version of PBS, also known as
        PBSn1 [2]_. Otherwise, use the PBS statistic as originally defined
        in [1]_.

    Returns
    -------
    pbs : ndarray, float
        Windowed PBS values.

    Notes
    -----
    The F:sub:`ST` calculations use Hudson's estimator.

    References
    ----------
    .. [1] Yi et al., "Sequencing of Fifty Human Exomes Reveals Adaptation
        to High Altitude", Science, 329(5987): 75–78, 2 July 2010.
    .. [2] Malaspinas et al., "A genomic history of Aboriginal Australia",
        Nature, volume 538, pages 207–214, 13 October 2016.

    """

    # normalise and check inputs
    ac1 = AlleleCountsArray(ac1)
    ac2 = AlleleCountsArray(ac2)
    ac3 = AlleleCountsArray(ac3)
    check_dim0_aligned(ac1, ac2, ac3)

    # compute fst
    fst12 = moving_hudson_fst(ac1, ac2, size=window_size, start=window_start,
                              stop=window_stop, step=window_step)
    fst13 = moving_hudson_fst(ac1, ac3, size=window_size, start=window_start,
                              stop=window_stop, step=window_step)
    fst23 = moving_hudson_fst(ac2, ac3, size=window_size, start=window_start,
                              stop=window_stop, step=window_step)

    # clip fst values to avoid infinite if fst is 1
    for x in fst12, fst13, fst23:
        np.clip(x, a_min=0, a_max=0.99999, out=x)

    # compute fst transform
    t12 = -np.log(1 - fst12)
    t13 = -np.log(1 - fst13)
    t23 = -np.log(1 - fst23)

    # compute pbs
    ret = (t12 + t13 - t23) / 2

    if normed:
        # compute pbs normalising constant
        norm = 1 + (t12 + t13 + t23) / 2
        ret = ret / norm

    return ret
python
def pbs(ac1, ac2, ac3, window_size, window_start=0, window_stop=None,
        window_step=None, normed=True):
    """Compute the population branching statistic (PBS) which performs a
    comparison of allele frequencies between three populations to detect
    genome regions that are unusually differentiated in one population
    relative to the other two populations.

    Parameters
    ----------
    ac1 : array_like, int
        Allele counts from the first population.
    ac2 : array_like, int
        Allele counts from the second population.
    ac3 : array_like, int
        Allele counts from the third population.
    window_size : int
        The window size (number of variants) within which to compute PBS
        values.
    window_start : int, optional
        The variant index at which to start windowed calculations.
    window_stop : int, optional
        The variant index at which to stop windowed calculations.
    window_step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.
    normed : bool, optional
        If True (default), use the normalised version of PBS, also known as
        PBSn1 [2]_. Otherwise, use the PBS statistic as originally defined
        in [1]_.

    Returns
    -------
    pbs : ndarray, float
        Windowed PBS values.

    Notes
    -----
    The F:sub:`ST` calculations use Hudson's estimator.

    References
    ----------
    .. [1] Yi et al., "Sequencing of Fifty Human Exomes Reveals Adaptation
        to High Altitude", Science, 329(5987): 75–78, 2 July 2010.
    .. [2] Malaspinas et al., "A genomic history of Aboriginal Australia",
        Nature, volume 538, pages 207–214, 13 October 2016.

    """

    # normalise and check inputs
    ac1 = AlleleCountsArray(ac1)
    ac2 = AlleleCountsArray(ac2)
    ac3 = AlleleCountsArray(ac3)
    check_dim0_aligned(ac1, ac2, ac3)

    # compute fst
    fst12 = moving_hudson_fst(ac1, ac2, size=window_size, start=window_start,
                              stop=window_stop, step=window_step)
    fst13 = moving_hudson_fst(ac1, ac3, size=window_size, start=window_start,
                              stop=window_stop, step=window_step)
    fst23 = moving_hudson_fst(ac2, ac3, size=window_size, start=window_start,
                              stop=window_stop, step=window_step)

    # clip fst values to avoid infinite if fst is 1
    for x in fst12, fst13, fst23:
        np.clip(x, a_min=0, a_max=0.99999, out=x)

    # compute fst transform
    t12 = -np.log(1 - fst12)
    t13 = -np.log(1 - fst13)
    t23 = -np.log(1 - fst23)

    # compute pbs
    ret = (t12 + t13 - t23) / 2

    if normed:
        # compute pbs normalising constant
        norm = 1 + (t12 + t13 + t23) / 2
        ret = ret / norm

    return ret
Compute the population branching statistic (PBS) which performs a
comparison of allele frequencies between three populations to detect
genome regions that are unusually differentiated in one population
relative to the other two populations.

Parameters
----------
ac1 : array_like, int
    Allele counts from the first population.
ac2 : array_like, int
    Allele counts from the second population.
ac3 : array_like, int
    Allele counts from the third population.
window_size : int
    The window size (number of variants) within which to compute PBS
    values.
window_start : int, optional
    The variant index at which to start windowed calculations.
window_stop : int, optional
    The variant index at which to stop windowed calculations.
window_step : int, optional
    The number of variants between start positions of windows. If not
    given, defaults to the window size, i.e., non-overlapping windows.
normed : bool, optional
    If True (default), use the normalised version of PBS, also known as
    PBSn1 [2]_. Otherwise, use the PBS statistic as originally defined in
    [1]_.

Returns
-------
pbs : ndarray, float
    Windowed PBS values.

Notes
-----
The F:sub:`ST` calculations use Hudson's estimator.

References
----------
.. [1] Yi et al., "Sequencing of Fifty Human Exomes Reveals Adaptation to
    High Altitude", Science, 329(5987): 75–78, 2 July 2010.
.. [2] Malaspinas et al., "A genomic history of Aboriginal Australia",
    Nature, volume 538, pages 207–214, 13 October 2016.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/selection.py#L1263-L1339
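A sketch of a typical call, assuming pbs is exported at the top level of allel; the allele counts are derived from random genotypes purely to show the shapes involved:

import numpy as np
import allel

rng = np.random.RandomState(7)
g = allel.GenotypeArray(rng.randint(0, 2, size=(1000, 30, 2)))
ac1 = g[:, :10].count_alleles()
ac2 = g[:, 10:20].count_alleles()
ac3 = g[:, 20:].count_alleles()

# one PBSn1 value per non-overlapping 100-variant window
pbs_vals = allel.pbs(ac1, ac2, ac3, window_size=100, normed=True)
print(pbs_vals.shape)  # expected: (10,)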
cggh/scikit-allel
allel/stats/window.py
moving_statistic
def moving_statistic(values, statistic, size, start=0, stop=None, step=None,
                     **kwargs):
    """Calculate a statistic in a moving window over `values`.

    Parameters
    ----------
    values : array_like
        The data to summarise.
    statistic : function
        The statistic to compute within each window.
    size : int
        The window size (number of values).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    kwargs
        Additional keyword arguments are passed through to the `statistic`
        function.

    Returns
    -------
    out : ndarray, shape (n_windows,)

    Examples
    --------
    >>> import numpy as np
    >>> import allel
    >>> values = [2, 5, 8, 16]
    >>> allel.moving_statistic(values, np.sum, size=2)
    array([ 7, 24])

    >>> allel.moving_statistic(values, np.sum, size=2, step=1)
    array([ 7, 13, 24])

    """
    windows = index_windows(values, size, start, stop, step)

    # setup output
    out = np.array([statistic(values[i:j], **kwargs) for i, j in windows])

    return out
python
def moving_statistic(values, statistic, size, start=0, stop=None, step=None,
                     **kwargs):
    """Calculate a statistic in a moving window over `values`.

    Parameters
    ----------
    values : array_like
        The data to summarise.
    statistic : function
        The statistic to compute within each window.
    size : int
        The window size (number of values).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    kwargs
        Additional keyword arguments are passed through to the `statistic`
        function.

    Returns
    -------
    out : ndarray, shape (n_windows,)

    Examples
    --------
    >>> import numpy as np
    >>> import allel
    >>> values = [2, 5, 8, 16]
    >>> allel.moving_statistic(values, np.sum, size=2)
    array([ 7, 24])

    >>> allel.moving_statistic(values, np.sum, size=2, step=1)
    array([ 7, 13, 24])

    """
    windows = index_windows(values, size, start, stop, step)

    # setup output
    out = np.array([statistic(values[i:j], **kwargs) for i, j in windows])

    return out
Calculate a statistic in a moving window over `values`.

Parameters
----------
values : array_like
    The data to summarise.
statistic : function
    The statistic to compute within each window.
size : int
    The window size (number of values).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
step : int, optional
    The distance between start positions of windows. If not given,
    defaults to the window size, i.e., non-overlapping windows.
kwargs
    Additional keyword arguments are passed through to the `statistic`
    function.

Returns
-------
out : ndarray, shape (n_windows,)

Examples
--------
>>> import numpy as np
>>> import allel
>>> values = [2, 5, 8, 16]
>>> allel.moving_statistic(values, np.sum, size=2)
array([ 7, 24])

>>> allel.moving_statistic(values, np.sum, size=2, step=1)
array([ 7, 13, 24])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/window.py#L12-L57
cggh/scikit-allel
allel/stats/window.py
index_windows
def index_windows(values, size, start, stop, step):
    """Convenience function to construct windows for the
    :func:`moving_statistic` function.

    """

    # determine step
    if stop is None:
        stop = len(values)
    if step is None:
        # non-overlapping
        step = size

    # iterate over windows
    for window_start in range(start, stop, step):

        window_stop = window_start + size
        if window_stop > stop:
            # ensure all windows are equal sized
            return

        yield (window_start, window_stop)
python
def index_windows(values, size, start, stop, step):
    """Convenience function to construct windows for the
    :func:`moving_statistic` function.

    """

    # determine step
    if stop is None:
        stop = len(values)
    if step is None:
        # non-overlapping
        step = size

    # iterate over windows
    for window_start in range(start, stop, step):

        window_stop = window_start + size
        if window_stop > stop:
            # ensure all windows are equal sized
            return

        yield (window_start, window_stop)
Convenience function to construct windows for the
:func:`moving_statistic` function.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/window.py#L75-L96
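A worked example of the window generator, assuming it can be imported from allel.stats.window (it is an internal helper):

import numpy as np
from allel.stats.window import index_windows  # internal helper

values = np.arange(10)
# half-overlapping windows of 4 items; the would-be window (8, 12) is
# dropped because it cannot be completely filled
print(list(index_windows(values, size=4, start=0, stop=None, step=2)))
# expected: [(0, 4), (2, 6), (4, 8), (6, 10)]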
cggh/scikit-allel
allel/stats/window.py
position_windows
def position_windows(pos, size, start, stop, step):
    """Convenience function to construct windows for the
    :func:`windowed_statistic` and :func:`windowed_count` functions.

    """
    last = False

    # determine start and stop positions
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]
    if step is None:
        # non-overlapping
        step = size

    windows = []
    for window_start in range(start, stop, step):

        # determine window stop
        window_stop = window_start + size
        if window_stop >= stop:
            # last window
            window_stop = stop
            last = True
        else:
            window_stop -= 1

        windows.append([window_start, window_stop])

        if last:
            break

    return np.asarray(windows)
python
def position_windows(pos, size, start, stop, step):
    """Convenience function to construct windows for the
    :func:`windowed_statistic` and :func:`windowed_count` functions.

    """
    last = False

    # determine start and stop positions
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]
    if step is None:
        # non-overlapping
        step = size

    windows = []
    for window_start in range(start, stop, step):

        # determine window stop
        window_stop = window_start + size
        if window_stop >= stop:
            # last window
            window_stop = stop
            last = True
        else:
            window_stop -= 1

        windows.append([window_start, window_stop])

        if last:
            break

    return np.asarray(windows)
Convenience function to construct windows for the
:func:`windowed_statistic` and :func:`windowed_count` functions.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/window.py#L99-L132
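A worked example matching the doctest data used elsewhere in this module, assuming the helper can be imported from allel.stats.window:

from allel.stats.window import position_windows  # internal helper

pos = [1, 7, 12, 15, 28]
windows = position_windows(pos, size=10, start=None, stop=None, step=None)
# expected (note the inclusive stops and the truncated final window):
# [[ 1 10]
#  [11 20]
#  [21 28]]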
cggh/scikit-allel
allel/stats/window.py
window_locations
def window_locations(pos, windows):
    """Locate indices in `pos` corresponding to the start and stop
    positions of `windows`.

    """
    start_locs = np.searchsorted(pos, windows[:, 0])
    stop_locs = np.searchsorted(pos, windows[:, 1], side='right')
    locs = np.column_stack((start_locs, stop_locs))
    return locs
python
def window_locations(pos, windows):
    """Locate indices in `pos` corresponding to the start and stop
    positions of `windows`.

    """
    start_locs = np.searchsorted(pos, windows[:, 0])
    stop_locs = np.searchsorted(pos, windows[:, 1], side='right')
    locs = np.column_stack((start_locs, stop_locs))
    return locs
Locate indices in `pos` corresponding to the start and stop positions of
`windows`.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/window.py#L135-L143
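Continuing the same example, assuming the helper can be imported from allel.stats.window:

import numpy as np
from allel.stats.window import window_locations  # internal helper

pos = np.array([1, 7, 12, 15, 28])
windows = np.array([[1, 10], [11, 20], [21, 28]])
locs = window_locations(pos, windows)
# expected: [[0 2], [2 4], [4 5]], i.e. pos[0:2] fall in the first window,
# pos[2:4] in the second, and pos[4:5] in the third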
cggh/scikit-allel
allel/stats/window.py
windowed_count
def windowed_count(pos, size=None, start=None, stop=None, step=None,
                   windows=None):
    """Count the number of items in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        The item positions in ascending order, using 1-based coordinates.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.

    Returns
    -------
    counts : ndarray, int, shape (n_windows,)
        The number of items in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.

    Notes
    -----
    The window stop positions are included within a window.

    The final window will be truncated to the specified stop position, and
    so may be smaller than the other windows.

    Examples
    --------
    Non-overlapping windows::

        >>> import allel
        >>> pos = [1, 7, 12, 15, 28]
        >>> counts, windows = allel.windowed_count(pos, size=10)
        >>> counts
        array([2, 2, 1])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 28]])

    Half-overlapping windows::

        >>> counts, windows = allel.windowed_count(pos, size=10, step=5)
        >>> counts
        array([2, 3, 2, 0, 1])
        >>> windows
        array([[ 1, 10],
               [ 6, 15],
               [11, 20],
               [16, 25],
               [21, 28]])

    """

    # assume sorted positions
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)

    # setup windows
    if windows is None:
        windows = position_windows(pos, size, start, stop, step)
    else:
        windows = asarray_ndim(windows, 2)

    # find window locations
    locs = window_locations(pos, windows)

    # count number of items in each window
    counts = np.diff(locs, axis=1).reshape(-1)

    return counts, windows
python
def windowed_count(pos, size=None, start=None, stop=None, step=None,
                   windows=None):
    """Count the number of items in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        The item positions in ascending order, using 1-based coordinates.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.

    Returns
    -------
    counts : ndarray, int, shape (n_windows,)
        The number of items in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.

    Notes
    -----
    The window stop positions are included within a window.

    The final window will be truncated to the specified stop position, and
    so may be smaller than the other windows.

    Examples
    --------
    Non-overlapping windows::

        >>> import allel
        >>> pos = [1, 7, 12, 15, 28]
        >>> counts, windows = allel.windowed_count(pos, size=10)
        >>> counts
        array([2, 2, 1])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 28]])

    Half-overlapping windows::

        >>> counts, windows = allel.windowed_count(pos, size=10, step=5)
        >>> counts
        array([2, 3, 2, 0, 1])
        >>> windows
        array([[ 1, 10],
               [ 6, 15],
               [11, 20],
               [16, 25],
               [21, 28]])

    """

    # assume sorted positions
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)

    # setup windows
    if windows is None:
        windows = position_windows(pos, size, start, stop, step)
    else:
        windows = asarray_ndim(windows, 2)

    # find window locations
    locs = window_locations(pos, windows)

    # count number of items in each window
    counts = np.diff(locs, axis=1).reshape(-1)

    return counts, windows
Count the number of items in windows over a single chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    The item positions in ascending order, using 1-based coordinates.
size : int, optional
    The window size (number of bases).
start : int, optional
    The position at which to start (1-based).
stop : int, optional
    The position at which to stop (1-based).
step : int, optional
    The distance between start positions of windows. If not given,
    defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
    Manually specify the windows to use as a sequence of (window_start,
    window_stop) positions, using 1-based coordinates. Overrides the
    size/start/stop/step parameters.

Returns
-------
counts : ndarray, int, shape (n_windows,)
    The number of items in each window.
windows : ndarray, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop)
    positions, using 1-based coordinates.

Notes
-----
The window stop positions are included within a window.

The final window will be truncated to the specified stop position, and so
may be smaller than the other windows.

Examples
--------
Non-overlapping windows::

    >>> import allel
    >>> pos = [1, 7, 12, 15, 28]
    >>> counts, windows = allel.windowed_count(pos, size=10)
    >>> counts
    array([2, 2, 1])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 28]])

Half-overlapping windows::

    >>> counts, windows = allel.windowed_count(pos, size=10, step=5)
    >>> counts
    array([2, 3, 2, 0, 1])
    >>> windows
    array([[ 1, 10],
           [ 6, 15],
           [11, 20],
           [16, 25],
           [21, 28]])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/window.py#L146-L231
cggh/scikit-allel
allel/stats/window.py
windowed_statistic
def windowed_statistic(pos, values, statistic, size=None, start=None,
                       stop=None, step=None, windows=None, fill=np.nan):
    """Calculate a statistic from items in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        The item positions in ascending order, using 1-based coordinates.
    values : array_like, int, shape (n_items,)
        The values to summarise. May also be a tuple of values arrays,
        in which case each array will be sliced and passed through to the
        statistic function as separate arguments.
    statistic : function
        The statistic to compute.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    fill : object, optional
        The value to use where a window is empty, i.e., contains no items.

    Returns
    -------
    out : ndarray, shape (n_windows,)
        The value of the statistic for each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        The number of items in each window.

    Notes
    -----
    The window stop positions are included within a window.

    The final window will be truncated to the specified stop position, and
    so may be smaller than the other windows.

    Examples
    --------
    Count non-zero (i.e., True) items in non-overlapping windows::

        >>> import numpy as np
        >>> import allel
        >>> pos = [1, 7, 12, 15, 28]
        >>> values = [True, False, True, False, False]
        >>> nnz, windows, counts = allel.windowed_statistic(
        ...     pos, values, statistic=np.count_nonzero, size=10
        ... )
        >>> nnz
        array([1, 1, 0])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 28]])
        >>> counts
        array([2, 2, 1])

    Compute a sum over items in half-overlapping windows::

        >>> values = [3, 4, 2, 6, 9]
        >>> x, windows, counts = allel.windowed_statistic(
        ...     pos, values, statistic=np.sum, size=10, step=5, fill=0
        ... )
        >>> x
        array([ 7, 12,  8,  0,  9])
        >>> windows
        array([[ 1, 10],
               [ 6, 15],
               [11, 20],
               [16, 25],
               [21, 28]])
        >>> counts
        array([2, 3, 2, 0, 1])

    """

    # assume sorted positions
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)

    # check lengths are equal
    if isinstance(values, tuple):
        # assume multiple values arrays
        check_equal_length(pos, *values)
    else:
        # assume a single values array
        check_equal_length(pos, values)

    # setup windows
    if windows is None:
        windows = position_windows(pos, size, start, stop, step)
    else:
        windows = asarray_ndim(windows, 2)

    # find window locations
    locs = window_locations(pos, windows)

    # setup outputs
    out = []
    counts = []

    # iterate over windows
    for start_idx, stop_idx in locs:

        # calculate number of values in window
        n = stop_idx - start_idx

        if n == 0:
            # window is empty
            s = fill

        else:

            if isinstance(values, tuple):
                # assume multiple values arrays
                wv = [v[start_idx:stop_idx] for v in values]
                s = statistic(*wv)

            else:
                # assume a single values array
                wv = values[start_idx:stop_idx]
                s = statistic(wv)

        # store outputs
        out.append(s)
        counts.append(n)

    # convert to arrays for output
    return np.asarray(out), windows, np.asarray(counts)
python
def windowed_statistic(pos, values, statistic, size=None, start=None,
                       stop=None, step=None, windows=None, fill=np.nan):
    """Calculate a statistic from items in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        The item positions in ascending order, using 1-based coordinates.
    values : array_like, int, shape (n_items,)
        The values to summarise. May also be a tuple of values arrays,
        in which case each array will be sliced and passed through to the
        statistic function as separate arguments.
    statistic : function
        The statistic to compute.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    fill : object, optional
        The value to use where a window is empty, i.e., contains no items.

    Returns
    -------
    out : ndarray, shape (n_windows,)
        The value of the statistic for each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        The number of items in each window.

    Notes
    -----
    The window stop positions are included within a window.

    The final window will be truncated to the specified stop position, and
    so may be smaller than the other windows.

    Examples
    --------
    Count non-zero (i.e., True) items in non-overlapping windows::

        >>> import numpy as np
        >>> import allel
        >>> pos = [1, 7, 12, 15, 28]
        >>> values = [True, False, True, False, False]
        >>> nnz, windows, counts = allel.windowed_statistic(
        ...     pos, values, statistic=np.count_nonzero, size=10
        ... )
        >>> nnz
        array([1, 1, 0])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 28]])
        >>> counts
        array([2, 2, 1])

    Compute a sum over items in half-overlapping windows::

        >>> values = [3, 4, 2, 6, 9]
        >>> x, windows, counts = allel.windowed_statistic(
        ...     pos, values, statistic=np.sum, size=10, step=5, fill=0
        ... )
        >>> x
        array([ 7, 12,  8,  0,  9])
        >>> windows
        array([[ 1, 10],
               [ 6, 15],
               [11, 20],
               [16, 25],
               [21, 28]])
        >>> counts
        array([2, 3, 2, 0, 1])

    """

    # assume sorted positions
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)

    # check lengths are equal
    if isinstance(values, tuple):
        # assume multiple values arrays
        check_equal_length(pos, *values)
    else:
        # assume a single values array
        check_equal_length(pos, values)

    # setup windows
    if windows is None:
        windows = position_windows(pos, size, start, stop, step)
    else:
        windows = asarray_ndim(windows, 2)

    # find window locations
    locs = window_locations(pos, windows)

    # setup outputs
    out = []
    counts = []

    # iterate over windows
    for start_idx, stop_idx in locs:

        # calculate number of values in window
        n = stop_idx - start_idx

        if n == 0:
            # window is empty
            s = fill

        else:

            if isinstance(values, tuple):
                # assume multiple values arrays
                wv = [v[start_idx:stop_idx] for v in values]
                s = statistic(*wv)

            else:
                # assume a single values array
                wv = values[start_idx:stop_idx]
                s = statistic(wv)

        # store outputs
        out.append(s)
        counts.append(n)

    # convert to arrays for output
    return np.asarray(out), windows, np.asarray(counts)
Calculate a statistic from items in windows over a single
chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    The item positions in ascending order, using 1-based coordinates.
values : array_like, int, shape (n_items,)
    The values to summarise. May also be a tuple of values arrays,
    in which case each array will be sliced and passed through to the
    statistic function as separate arguments.
statistic : function
    The statistic to compute.
size : int, optional
    The window size (number of bases).
start : int, optional
    The position at which to start (1-based).
stop : int, optional
    The position at which to stop (1-based).
step : int, optional
    The distance between start positions of windows. If not given,
    defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
    Manually specify the windows to use as a sequence of (window_start,
    window_stop) positions, using 1-based coordinates. Overrides the
    size/start/stop/step parameters.
fill : object, optional
    The value to use where a window is empty, i.e., contains no items.

Returns
-------
out : ndarray, shape (n_windows,)
    The value of the statistic for each window.
windows : ndarray, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop)
    positions, using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
    The number of items in each window.

Notes
-----
The window stop positions are included within a window.

The final window will be truncated to the specified stop position, and so
may be smaller than the other windows.

Examples
--------
Count non-zero (i.e., True) items in non-overlapping windows::

    >>> import numpy as np
    >>> import allel
    >>> pos = [1, 7, 12, 15, 28]
    >>> values = [True, False, True, False, False]
    >>> nnz, windows, counts = allel.windowed_statistic(
    ...     pos, values, statistic=np.count_nonzero, size=10
    ... )
    >>> nnz
    array([1, 1, 0])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 28]])
    >>> counts
    array([2, 2, 1])

Compute a sum over items in half-overlapping windows::

    >>> values = [3, 4, 2, 6, 9]
    >>> x, windows, counts = allel.windowed_statistic(
    ...     pos, values, statistic=np.sum, size=10, step=5, fill=0
    ... )
    >>> x
    array([ 7, 12,  8,  0,  9])
    >>> windows
    array([[ 1, 10],
           [ 6, 15],
           [11, 20],
           [16, 25],
           [21, 28]])
    >>> counts
    array([2, 3, 2, 0, 1])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/window.py#L234-L376
cggh/scikit-allel
allel/stats/window.py
per_base
def per_base(x, windows, is_accessible=None, fill=np.nan):
    """Calculate the per-base value of a windowed statistic.

    Parameters
    ----------
    x : array_like, shape (n_windows,)
        The statistic to average per-base.
    windows : array_like, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions using 1-based coordinates.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        Use this value where there are no accessible bases in a window.

    Returns
    -------
    y : ndarray, float, shape (n_windows,)
        The input array divided by the number of (accessible) bases in
        each window.
    n_bases : ndarray, int, shape (n_windows,)
        The number of (accessible) bases in each window.

    """

    # calculate window sizes
    if is_accessible is None:
        # N.B., window stops are included
        n_bases = np.diff(windows, axis=1).reshape(-1) + 1
    else:
        n_bases = np.array([np.count_nonzero(is_accessible[i-1:j])
                            for i, j in windows])

    # deal with multidimensional x
    if x.ndim == 1:
        pass
    elif x.ndim == 2:
        n_bases = n_bases[:, None]
    else:
        raise NotImplementedError('only arrays of 1 or 2 dimensions supported')

    # calculate density per-base
    with ignore_invalid():
        y = np.where(n_bases > 0, x / n_bases, fill)

    # restore to 1-dimensional
    if n_bases.ndim > 1:
        n_bases = n_bases.reshape(-1)

    return y, n_bases
python
def per_base(x, windows, is_accessible=None, fill=np.nan):
    """Calculate the per-base value of a windowed statistic.

    Parameters
    ----------
    x : array_like, shape (n_windows,)
        The statistic to average per-base.
    windows : array_like, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions using 1-based coordinates.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        Use this value where there are no accessible bases in a window.

    Returns
    -------
    y : ndarray, float, shape (n_windows,)
        The input array divided by the number of (accessible) bases in
        each window.
    n_bases : ndarray, int, shape (n_windows,)
        The number of (accessible) bases in each window.

    """

    # calculate window sizes
    if is_accessible is None:
        # N.B., window stops are included
        n_bases = np.diff(windows, axis=1).reshape(-1) + 1
    else:
        n_bases = np.array([np.count_nonzero(is_accessible[i-1:j])
                            for i, j in windows])

    # deal with multidimensional x
    if x.ndim == 1:
        pass
    elif x.ndim == 2:
        n_bases = n_bases[:, None]
    else:
        raise NotImplementedError('only arrays of 1 or 2 dimensions supported')

    # calculate density per-base
    with ignore_invalid():
        y = np.where(n_bases > 0, x / n_bases, fill)

    # restore to 1-dimensional
    if n_bases.ndim > 1:
        n_bases = n_bases.reshape(-1)

    return y, n_bases
Calculate the per-base value of a windowed statistic.

Parameters
----------
x : array_like, shape (n_windows,)
    The statistic to average per-base.
windows : array_like, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop)
    positions using 1-based coordinates.
is_accessible : array_like, bool, shape (len(contig),), optional
    Boolean array indicating accessibility status for all positions in
    the chromosome/contig.
fill : object, optional
    Use this value where there are no accessible bases in a window.

Returns
-------
y : ndarray, float, shape (n_windows,)
    The input array divided by the number of (accessible) bases in each
    window.
n_bases : ndarray, int, shape (n_windows,)
    The number of (accessible) bases in each window.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/window.py#L379-L431
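A short example chaining windowed_count with per_base, assuming both are exported at the top level of allel:

import allel

pos = [1, 7, 12, 15, 28]
counts, windows = allel.windowed_count(pos, size=10)
y, n_bases = allel.per_base(counts, windows)
print(n_bases)  # expected: [10 10 8] (window stops are inclusive)
print(y)        # expected: [0.2 0.2 0.125]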
cggh/scikit-allel
allel/stats/window.py
equally_accessible_windows
def equally_accessible_windows(is_accessible, size, start=0, stop=None,
                               step=None):
    """Create windows each containing the same number of accessible bases.

    Parameters
    ----------
    is_accessible : array_like, bool, shape (n_bases,)
        Array defining accessible status of all bases on a
        contig/chromosome.
    size : int
        Window size (number of accessible bases).
    start : int, optional
        The genome position at which to start.
    stop : int, optional
        The genome position at which to stop.
    step : int, optional
        The number of accessible sites between start positions of windows.
        If not given, defaults to the window size, i.e., non-overlapping
        windows. Use half the window size to get half-overlapping windows.

    Returns
    -------
    windows : ndarray, int, shape (n_windows, 2)
        Window start/stop positions (1-based).

    """
    pos_accessible, = np.nonzero(is_accessible)
    pos_accessible += 1  # convert to 1-based coordinates

    # N.B., need some care in handling start and stop positions, these are
    # genomic positions at which to start and stop the windows
    if start:
        pos_accessible = pos_accessible[pos_accessible >= start]
    if stop:
        pos_accessible = pos_accessible[pos_accessible <= stop]

    # now construct moving windows
    windows = moving_statistic(pos_accessible, lambda v: [v[0], v[-1]],
                               size=size, step=step)

    return windows
python
def equally_accessible_windows(is_accessible, size, start=0, stop=None,
                               step=None):
    """Create windows each containing the same number of accessible bases.

    Parameters
    ----------
    is_accessible : array_like, bool, shape (n_bases,)
        Array defining accessible status of all bases on a
        contig/chromosome.
    size : int
        Window size (number of accessible bases).
    start : int, optional
        The genome position at which to start.
    stop : int, optional
        The genome position at which to stop.
    step : int, optional
        The number of accessible sites between start positions of windows.
        If not given, defaults to the window size, i.e., non-overlapping
        windows. Use half the window size to get half-overlapping windows.

    Returns
    -------
    windows : ndarray, int, shape (n_windows, 2)
        Window start/stop positions (1-based).

    """
    pos_accessible, = np.nonzero(is_accessible)
    pos_accessible += 1  # convert to 1-based coordinates

    # N.B., need some care in handling start and stop positions, these are
    # genomic positions at which to start and stop the windows
    if start:
        pos_accessible = pos_accessible[pos_accessible >= start]
    if stop:
        pos_accessible = pos_accessible[pos_accessible <= stop]

    # now construct moving windows
    windows = moving_statistic(pos_accessible, lambda v: [v[0], v[-1]],
                               size=size, step=step)

    return windows
Create windows each containing the same number of accessible bases.

Parameters
----------
is_accessible : array_like, bool, shape (n_bases,)
    Array defining accessible status of all bases on a contig/chromosome.
size : int
    Window size (number of accessible bases).
start : int, optional
    The genome position at which to start.
stop : int, optional
    The genome position at which to stop.
step : int, optional
    The number of accessible sites between start positions of windows. If
    not given, defaults to the window size, i.e., non-overlapping windows.
    Use half the window size to get half-overlapping windows.

Returns
-------
windows : ndarray, int, shape (n_windows, 2)
    Window start/stop positions (1-based).
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/window.py#L434-L472
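A worked example, assuming equally_accessible_windows is exported at the top level of allel:

import numpy as np
import allel

is_accessible = np.ones(20, dtype=bool)
is_accessible[[4, 5, 10]] = False   # bases 5, 6 and 11 are inaccessible

# windows of 5 accessible bases each; a trailing partial window is dropped
windows = allel.equally_accessible_windows(is_accessible, size=5)
# expected (1-based, stops inclusive):
# [[ 1  7]
#  [ 8 13]
#  [14 18]]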
bartTC/django-attachments
attachments/templatetags/attachments_tags.py
attachment_form
def attachment_form(context, obj):
    """
    Renders an "upload attachment" form.

    The user must have the ``attachments.add_attachment`` permission to add
    attachments.
    """
    if context['user'].has_perm('attachments.add_attachment'):
        return {
            'form': AttachmentForm(),
            'form_url': add_url_for_obj(obj),
            'next': context.request.build_absolute_uri(),
        }
    else:
        return {'form': None}
python
def attachment_form(context, obj):
    """
    Renders an "upload attachment" form.

    The user must have the ``attachments.add_attachment`` permission to add
    attachments.
    """
    if context['user'].has_perm('attachments.add_attachment'):
        return {
            'form': AttachmentForm(),
            'form_url': add_url_for_obj(obj),
            'next': context.request.build_absolute_uri(),
        }
    else:
        return {'form': None}
Renders an "upload attachment" form.

The user must have the ``attachments.add_attachment`` permission to add
attachments.
https://github.com/bartTC/django-attachments/blob/012b7168f9342e07683a54ceab57696e0072962e/attachments/templatetags/attachments_tags.py#L12-L26
bartTC/django-attachments
attachments/templatetags/attachments_tags.py
attachment_delete_link
def attachment_delete_link(context, attachment):
    """
    Renders an HTML link to the delete view of the given attachment.
    Returns no content if the request user has no permission to delete
    attachments.

    The user can delete an attachment if they either have the
    ``attachments.delete_attachment`` permission and are the creator of the
    attachment, or have the ``attachments.delete_foreign_attachments``
    permission, which allows them to delete any attachment.
    """
    if context['user'].has_perm('attachments.delete_foreign_attachments') or (
        context['user'] == attachment.creator
        and context['user'].has_perm('attachments.delete_attachment')
    ):
        return {
            'next': context.request.build_absolute_uri(),
            'delete_url': reverse(
                'attachments:delete', kwargs={'attachment_pk': attachment.pk}
            ),
        }
    return {'delete_url': None}
python
def attachment_delete_link(context, attachment):
    """
    Renders an HTML link to the delete view of the given attachment.
    Returns no content if the request user has no permission to delete
    attachments.

    The user can delete an attachment if they either have the
    ``attachments.delete_attachment`` permission and are the creator of the
    attachment, or have the ``attachments.delete_foreign_attachments``
    permission, which allows them to delete any attachment.
    """
    if context['user'].has_perm('attachments.delete_foreign_attachments') or (
        context['user'] == attachment.creator
        and context['user'].has_perm('attachments.delete_attachment')
    ):
        return {
            'next': context.request.build_absolute_uri(),
            'delete_url': reverse(
                'attachments:delete', kwargs={'attachment_pk': attachment.pk}
            ),
        }
    return {'delete_url': None}
Renders an HTML link to the delete view of the given attachment. Returns
no content if the request user has no permission to delete attachments.

The user can delete an attachment if they either have the
``attachments.delete_attachment`` permission and are the creator of the
attachment, or have the ``attachments.delete_foreign_attachments``
permission, which allows them to delete any attachment.
https://github.com/bartTC/django-attachments/blob/012b7168f9342e07683a54ceab57696e0072962e/attachments/templatetags/attachments_tags.py#L30-L50
bartTC/django-attachments
attachments/models.py
attachment_upload
def attachment_upload(instance, filename):
    """Stores the attachment in a "per app/model/primary key" folder."""
    return 'attachments/{app}_{model}/{pk}/{filename}'.format(
        app=instance.content_object._meta.app_label,
        model=instance.content_object._meta.object_name.lower(),
        pk=instance.content_object.pk,
        filename=filename,
    )
python
def attachment_upload(instance, filename):
    """Stores the attachment in a "per app/model/primary key" folder."""
    return 'attachments/{app}_{model}/{pk}/{filename}'.format(
        app=instance.content_object._meta.app_label,
        model=instance.content_object._meta.object_name.lower(),
        pk=instance.content_object.pk,
        filename=filename,
    )
Stores the attachment in a "per app/model/primary key" folder.
https://github.com/bartTC/django-attachments/blob/012b7168f9342e07683a54ceab57696e0072962e/attachments/models.py#L13-L20
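Because the function only reads a few attributes off the related object, its effect can be illustrated with hypothetical stand-ins (a pretend blog.Post with primary key 7). Note that importing attachments.models requires a configured Django project:

from attachments.models import attachment_upload

class _Meta:
    app_label = 'blog'       # hypothetical app
    object_name = 'Post'     # hypothetical model

class _Post:
    _meta = _Meta()
    pk = 7

class _Attachment:
    content_object = _Post()

print(attachment_upload(_Attachment(), 'report.pdf'))
# expected: attachments/blog_post/7/report.pdf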
csurfer/pyheat
pyheat/pyheat.py
PyHeat.show_heatmap
def show_heatmap(self, blocking=True, output_file=None, enable_scroll=False):
        """Method to actually display the heatmap created.

        @param blocking: When set to False, the plot window is shown
            without blocking.
        @param output_file: If not None the heatmap image is output to this
            file. Supported formats: (eps, pdf, pgf, png, ps, raw, rgba,
            svg, svgz)
        @param enable_scroll: Flag used to add a scroll bar to scroll long
            files.
        """
        if output_file is None:
            if enable_scroll:
                # Add a new axes which will be used as scroll bar.
                axpos = plt.axes([0.12, 0.1, 0.625, 0.03])
                spos = Slider(axpos, "Scroll", 10, len(self.pyfile.lines))

                def update(val):
                    """Method to update position when slider is moved."""
                    pos = spos.val
                    self.ax.axis([0, 1, pos, pos - 10])
                    self.fig.canvas.draw_idle()

                spos.on_changed(update)
            plt.show(block=blocking)
        else:
            plt.savefig(output_file)
python
def show_heatmap(self, blocking=True, output_file=None, enable_scroll=False):
        """Method to actually display the heatmap created.

        @param blocking: When set to False, the plot window is shown
            without blocking.
        @param output_file: If not None the heatmap image is output to this
            file. Supported formats: (eps, pdf, pgf, png, ps, raw, rgba,
            svg, svgz)
        @param enable_scroll: Flag used to add a scroll bar to scroll long
            files.
        """
        if output_file is None:
            if enable_scroll:
                # Add a new axes which will be used as scroll bar.
                axpos = plt.axes([0.12, 0.1, 0.625, 0.03])
                spos = Slider(axpos, "Scroll", 10, len(self.pyfile.lines))

                def update(val):
                    """Method to update position when slider is moved."""
                    pos = spos.val
                    self.ax.axis([0, 1, pos, pos - 10])
                    self.fig.canvas.draw_idle()

                spos.on_changed(update)
            plt.show(block=blocking)
        else:
            plt.savefig(output_file)
Method to actually display the heatmap created.

@param blocking: When set to False, shows the plot without blocking.
@param output_file: If not None, the heatmap image is written to this
    file. Supported formats: (eps, pdf, pgf, png, ps, raw, rgba, svg, svgz)
@param enable_scroll: Flag used to add a scroll bar for scrolling long files.
https://github.com/csurfer/pyheat/blob/cc0ee3721aea70a1da4918957500131aa7077afe/pyheat/pyheat.py#L53-L77
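A short usage sketch of the class these methods belong to, assuming PyHeat is importable from the package root and slow_script.py is a hypothetical file to profile:

from pyheat import PyHeat

ph = PyHeat('slow_script.py')                # hypothetical target file
ph.create_heatmap()                          # profile the file and build the plot
ph.show_heatmap(output_file='heatmap.png')   # write the image instead of displaying it
ph.close_heatmap()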
csurfer/pyheat
pyheat/pyheat.py
PyHeat.__profile_file
def __profile_file(self):
        """Method used to profile the given file line by line."""
        self.line_profiler = pprofile.Profile()
        self.line_profiler.runfile(
            open(self.pyfile.path, "r"), {}, self.pyfile.path
        )
python
def __profile_file(self):
        """Method used to profile the given file line by line."""
        self.line_profiler = pprofile.Profile()
        self.line_profiler.runfile(
            open(self.pyfile.path, "r"), {}, self.pyfile.path
        )
Method used to profile the given file line by line.
https://github.com/csurfer/pyheat/blob/cc0ee3721aea70a1da4918957500131aa7077afe/pyheat/pyheat.py#L83-L88
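For context, a minimal sketch of pprofile used on its own, under the assumption that Profile.runcall and print_stats behave as in the versions of pprofile I know; verify against its documentation:

import pprofile

def busy():
    total = 0
    for i in range(100000):
        total += i * i
    return total

prof = pprofile.Profile()
prof.runcall(busy)   # deterministically profile one callable, line by line
prof.print_stats()   # dump per-line hit counts and timings to stdout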
csurfer/pyheat
pyheat/pyheat.py
PyHeat.__get_line_profile_data
def __get_line_profile_data(self):
        """Method to procure line profiles.

        @return: Line profiles if the file has been profiled, else an empty
            dictionary.
        """
        if self.line_profiler is None:
            return {}
        # the [0] is because pprofile.Profile.file_dict stores the line_dict
        # in a list so that it can be modified in a thread-safe way
        # see https://github.com/vpelletier/pprofile/blob/da3d60a1b59a061a0e2113bf768b7cb4bf002ccb/pprofile.py#L398
        return self.line_profiler.file_dict[self.pyfile.path][0].line_dict
python
def __get_line_profile_data(self):
        """Method to procure line profiles.

        @return: Line profiles if the file has been profiled, else an empty
            dictionary.
        """
        if self.line_profiler is None:
            return {}
        # the [0] is because pprofile.Profile.file_dict stores the line_dict
        # in a list so that it can be modified in a thread-safe way
        # see https://github.com/vpelletier/pprofile/blob/da3d60a1b59a061a0e2113bf768b7cb4bf002ccb/pprofile.py#L398
        return self.line_profiler.file_dict[self.pyfile.path][0].line_dict
Method to procure line profiles.

@return: Line profiles if the file has been profiled, else an empty
    dictionary.
https://github.com/csurfer/pyheat/blob/cc0ee3721aea70a1da4918957500131aa7077afe/pyheat/pyheat.py#L90-L102
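Judging only from how the next method consumes this mapping, the returned line_dict appears to map line numbers to per-invocation (hits, time) pairs; the literal values below are invented to illustrate that inferred shape, not taken from pprofile's documentation:

# line number -> {invocation key: (hit_count, total_time_in_seconds)}
line_profiles = {
    12: {('caller_a', 3): (5, 0.0021)},
    13: {('caller_a', 3): (5, 0.489), ('caller_b', 7): (2, 0.110)},
}

# Summing per-invocation times reproduces the aggregation used below.
print(sum(t for _, t in line_profiles[13].values()))  # ~0.599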
csurfer/pyheat
pyheat/pyheat.py
PyHeat.__fetch_heatmap_data_from_profile
def __fetch_heatmap_data_from_profile(self):
        """Method to create heatmap data from profile information."""
        # Read lines from file.
        with open(self.pyfile.path, "r") as file_to_read:
            for line in file_to_read:
                # Remove return char from the end of the line and add a
                # space in the beginning for better visibility.
                self.pyfile.lines.append(" " + line.strip("\n"))

        # Total number of lines in file.
        self.pyfile.length = len(self.pyfile.lines)

        # Fetch line profiles.
        line_profiles = self.__get_line_profile_data()

        # Creating an array of data points. As the profile keys are 1 indexed
        # we should range from 1 to line_count + 1 and not 0 to line_count.
        arr = []
        for line_num in range(1, self.pyfile.length + 1):
            if line_num in line_profiles:
                # line_profiles[i] will have multiple entries if line i is
                # invoked from multiple places in the code. Here we sum over
                # each invocation to get the total time spent on that line.
                line_times = [
                    ltime for _, ltime in line_profiles[line_num].values()
                ]
                arr.append([sum(line_times)])
            else:
                arr.append([0.0])

        # Create nd-array from list of data points.
        self.pyfile.data = np.array(arr)
python
def __fetch_heatmap_data_from_profile(self):
        """Method to create heatmap data from profile information."""
        # Read lines from file.
        with open(self.pyfile.path, "r") as file_to_read:
            for line in file_to_read:
                # Remove return char from the end of the line and add a
                # space in the beginning for better visibility.
                self.pyfile.lines.append(" " + line.strip("\n"))

        # Total number of lines in file.
        self.pyfile.length = len(self.pyfile.lines)

        # Fetch line profiles.
        line_profiles = self.__get_line_profile_data()

        # Creating an array of data points. As the profile keys are 1 indexed
        # we should range from 1 to line_count + 1 and not 0 to line_count.
        arr = []
        for line_num in range(1, self.pyfile.length + 1):
            if line_num in line_profiles:
                # line_profiles[i] will have multiple entries if line i is
                # invoked from multiple places in the code. Here we sum over
                # each invocation to get the total time spent on that line.
                line_times = [
                    ltime for _, ltime in line_profiles[line_num].values()
                ]
                arr.append([sum(line_times)])
            else:
                arr.append([0.0])

        # Create nd-array from list of data points.
        self.pyfile.data = np.array(arr)
Method to create heatmap data from profile information.
https://github.com/csurfer/pyheat/blob/cc0ee3721aea70a1da4918957500131aa7077afe/pyheat/pyheat.py#L104-L135
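A tiny self-contained sketch of the array this builds, assuming a three-line file where only line 2 has profile data:

import numpy as np

line_profiles = {2: {('main', 1): (1, 0.25)}}
arr = []
for line_num in range(1, 4):  # profile keys are 1-indexed
    if line_num in line_profiles:
        arr.append([sum(t for _, t in line_profiles[line_num].values())])
    else:
        arr.append([0.0])

data = np.array(arr)
print(data.shape)  # (3, 1) -- one column, one row per source line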
csurfer/pyheat
pyheat/pyheat.py
PyHeat.__create_heatmap_plot
def __create_heatmap_plot(self):
        """Method to actually create the heatmap from profile stats."""
        # Define the heatmap plot.
        height = len(self.pyfile.lines) / 3
        width = max(map(lambda x: len(x), self.pyfile.lines)) / 8
        self.fig, self.ax = plt.subplots(figsize=(width, height))

        # Set second sub plot to occupy bottom 20%
        plt.subplots_adjust(bottom=0.20)

        # Heat scale orange to red
        heatmap = self.ax.pcolor(self.pyfile.data, cmap="OrRd")

        # X Axis
        # Remove X axis.
        self.ax.xaxis.set_visible(False)

        # Y Axis
        # Create labels for y-axis ticks
        row_labels = range(1, self.pyfile.length + 1)
        # Set y-tick labels.
        self.ax.set_yticklabels(row_labels, minor=False)
        # Put y-axis major ticks at the middle of each cell.
        self.ax.set_yticks(
            np.arange(self.pyfile.data.shape[0]) + 0.5, minor=False
        )
        # Invert y-axis to have top-down line numbers
        self.ax.invert_yaxis()

        # Plot definitions
        # Set plot y-axis label.
        plt.ylabel("Line Number")

        # Annotate each cell with lines in file in order.
        max_time_spent_on_a_line = max(self.pyfile.data)
        for i, line in enumerate(self.pyfile.lines):
            # In order to ensure easy readability of the code, we need to
            # invert colour of text display for darker colours which
            # correspond to higher amount of time spent on the line.
            if self.pyfile.data[i] >= 0.7 * max_time_spent_on_a_line:
                color = (1.0, 1.0, 1.0)  # White text
            else:
                color = (0.0, 0.0, 0.0)  # Black text
            plt.text(
                0.0,
                i + 0.5,
                line,
                ha="left",
                va="center",
                color=color,
                clip_on=True,
            )

        # Define legend
        cbar = plt.colorbar(heatmap)
        cbar.set_label("# of seconds")
python
def __create_heatmap_plot(self):
        """Method to actually create the heatmap from profile stats."""
        # Define the heatmap plot.
        height = len(self.pyfile.lines) / 3
        width = max(map(lambda x: len(x), self.pyfile.lines)) / 8
        self.fig, self.ax = plt.subplots(figsize=(width, height))

        # Set second sub plot to occupy bottom 20%
        plt.subplots_adjust(bottom=0.20)

        # Heat scale orange to red
        heatmap = self.ax.pcolor(self.pyfile.data, cmap="OrRd")

        # X Axis
        # Remove X axis.
        self.ax.xaxis.set_visible(False)

        # Y Axis
        # Create labels for y-axis ticks
        row_labels = range(1, self.pyfile.length + 1)
        # Set y-tick labels.
        self.ax.set_yticklabels(row_labels, minor=False)
        # Put y-axis major ticks at the middle of each cell.
        self.ax.set_yticks(
            np.arange(self.pyfile.data.shape[0]) + 0.5, minor=False
        )
        # Invert y-axis to have top-down line numbers
        self.ax.invert_yaxis()

        # Plot definitions
        # Set plot y-axis label.
        plt.ylabel("Line Number")

        # Annotate each cell with lines in file in order.
        max_time_spent_on_a_line = max(self.pyfile.data)
        for i, line in enumerate(self.pyfile.lines):
            # In order to ensure easy readability of the code, we need to
            # invert colour of text display for darker colours which
            # correspond to higher amount of time spent on the line.
            if self.pyfile.data[i] >= 0.7 * max_time_spent_on_a_line:
                color = (1.0, 1.0, 1.0)  # White text
            else:
                color = (0.0, 0.0, 0.0)  # Black text
            plt.text(
                0.0,
                i + 0.5,
                line,
                ha="left",
                va="center",
                color=color,
                clip_on=True,
            )

        # Define legend
        cbar = plt.colorbar(heatmap)
        cbar.set_label("# of seconds")
Method to actually create the heatmap from profile stats.
https://github.com/csurfer/pyheat/blob/cc0ee3721aea70a1da4918957500131aa7077afe/pyheat/pyheat.py#L137-L189
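A stripped-down, self-contained sketch of the same pcolor technique (a one-column heatmap annotated with source text), independent of the PyHeat class; the data and lines are invented:

import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
import numpy as np

data = np.array([[0.0], [0.6], [0.1]])       # one column, one row per line
lines = [" x = 1", " y = slow(x)", " z = 2"]

fig, ax = plt.subplots()
hm = ax.pcolor(data, cmap="OrRd")
ax.xaxis.set_visible(False)
ax.set_yticks(np.arange(data.shape[0]) + 0.5)
ax.set_yticklabels(range(1, len(lines) + 1))
ax.invert_yaxis()
for i, line in enumerate(lines):
    ax.text(0.0, i + 0.5, line, ha="left", va="center", clip_on=True)
fig.colorbar(hm).set_label("# of seconds")
fig.savefig("mini_heatmap.png")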
csurfer/pyheat
pyheat/commandline.py
main
def main():
    """Starting point for the program execution."""
    # Create command line parser.
    parser = argparse.ArgumentParser()
    # Adding command line arguments.
    parser.add_argument("-o", "--out", help="Output file", default=None)
    parser.add_argument(
        "pyfile", help="Python file to be profiled", default=None
    )
    # Parse command line arguments.
    arguments = parser.parse_args()

    if arguments.pyfile is not None:
        # Core functionality.
        pyheat = PyHeat(arguments.pyfile)
        pyheat.create_heatmap()
        pyheat.show_heatmap(output_file=arguments.out, enable_scroll=True)
        pyheat.close_heatmap()
    else:
        # Print command help
        parser.print_help()
python
def main():
    """Starting point for the program execution."""
    # Create command line parser.
    parser = argparse.ArgumentParser()
    # Adding command line arguments.
    parser.add_argument("-o", "--out", help="Output file", default=None)
    parser.add_argument(
        "pyfile", help="Python file to be profiled", default=None
    )
    # Parse command line arguments.
    arguments = parser.parse_args()

    if arguments.pyfile is not None:
        # Core functionality.
        pyheat = PyHeat(arguments.pyfile)
        pyheat.create_heatmap()
        pyheat.show_heatmap(output_file=arguments.out, enable_scroll=True)
        pyheat.close_heatmap()
    else:
        # Print command help
        parser.print_help()
Starting point for the program execution.
https://github.com/csurfer/pyheat/blob/cc0ee3721aea70a1da4918957500131aa7077afe/pyheat/commandline.py#L31-L50
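The argument handling reduces to a two-option parser; a self-contained sketch of the same setup parsing a hypothetical invocation:

import argparse

parser = argparse.ArgumentParser(prog="pyheat")
parser.add_argument("-o", "--out", help="Output file", default=None)
parser.add_argument("pyfile", help="Python file to be profiled")

args = parser.parse_args(["-o", "out.png", "slow_script.py"])
print(args.pyfile, args.out)  # slow_script.py out.png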
limist/py-moneyed
moneyed/classes.py
Money.round
def round(self, ndigits=0):
        """
        Rounds the amount using the current ``Decimal`` rounding algorithm.
        """
        if ndigits is None:
            ndigits = 0
        return self.__class__(
            amount=self.amount.quantize(Decimal('1e' + str(-ndigits))),
            currency=self.currency)
python
def round(self, ndigits=0):
        """
        Rounds the amount using the current ``Decimal`` rounding algorithm.
        """
        if ndigits is None:
            ndigits = 0
        return self.__class__(
            amount=self.amount.quantize(Decimal('1e' + str(-ndigits))),
            currency=self.currency)
Rounds the amount using the current ``Decimal`` rounding algorithm.
https://github.com/limist/py-moneyed/blob/1822e9f77edc6608b429e54c8831b873af9a4de6/moneyed/classes.py#L158-L166
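The quantize call carries the whole rounding step; a self-contained sketch of the same exponent trick with plain Decimal values (rounding follows the current context, ROUND_HALF_EVEN by default):

from decimal import Decimal

amount = Decimal('123.4567')

# '1e-2' quantizes to two decimal places, '1e0' to whole units --
# the same exponent string Money.round builds from -ndigits.
print(amount.quantize(Decimal('1e-2')))  # 123.46
print(amount.quantize(Decimal('1e0')))   # 123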
klen/muffin
muffin/manage.py
run
def run():
    """CLI endpoint."""
    sys.path.insert(0, os.getcwd())
    logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler()])

    parser = argparse.ArgumentParser(description="Manage Application", add_help=False)
    parser.add_argument('app', metavar='app', type=str, help='Application module path')
    parser.add_argument('--config', type=str, help='Path to configuration.')
    parser.add_argument('--version', action="version", version=__version__)
    args_, subargs_ = parser.parse_known_args(sys.argv[1:])
    if args_.config:
        os.environ[CONFIGURATION_ENVIRON_VARIABLE] = args_.config

    from gunicorn.util import import_app
    app_uri = args_.app
    if ':' not in app_uri:
        app_uri += ':app'
    try:
        app = import_app(app_uri)
        app.uri = app_uri
        app.logger.info('Application is loaded: %s' % app.name)
    except Exception as exc:
        logging.exception(exc)
        raise sys.exit(1)

    app.manage(*subargs_, prog='muffin %s' % args_.app)
python
def run():
    """CLI endpoint."""
    sys.path.insert(0, os.getcwd())
    logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler()])

    parser = argparse.ArgumentParser(description="Manage Application", add_help=False)
    parser.add_argument('app', metavar='app', type=str, help='Application module path')
    parser.add_argument('--config', type=str, help='Path to configuration.')
    parser.add_argument('--version', action="version", version=__version__)
    args_, subargs_ = parser.parse_known_args(sys.argv[1:])
    if args_.config:
        os.environ[CONFIGURATION_ENVIRON_VARIABLE] = args_.config

    from gunicorn.util import import_app
    app_uri = args_.app
    if ':' not in app_uri:
        app_uri += ':app'
    try:
        app = import_app(app_uri)
        app.uri = app_uri
        app.logger.info('Application is loaded: %s' % app.name)
    except Exception as exc:
        logging.exception(exc)
        raise sys.exit(1)

    app.manage(*subargs_, prog='muffin %s' % args_.app)
CLI endpoint.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/manage.py#L250-L278
klen/muffin
muffin/manage.py
Manager.command
def command(self, init=False):
        """Define CLI command."""
        def wrapper(func):
            header = '\n'.join(
                [s for s in (func.__doc__ or '').split('\n')
                 if not s.strip().startswith(':')])
            parser = self.parsers.add_parser(func.__name__, description=header)
            args, vargs, kw, defs, kwargs, kwdefs, anns = inspect.getfullargspec(func)
            defs = defs or []
            kwargs_ = dict(zip(args[-len(defs):], defs))
            docs = dict(PARAM_RE.findall(func.__doc__ or ""))

            def process_arg(name, *, value=..., **opts):
                argname = name.replace('_', '-').lower()
                arghelp = docs.get(vargs, '')

                if value is ...:
                    return parser.add_argument(argname, help=arghelp, **opts)

                if isinstance(value, bool):
                    if value:
                        return parser.add_argument(
                            "--no-" + argname, dest=name, action="store_false",
                            help="Disable %s" % (arghelp or name).lower())

                    return parser.add_argument(
                        "--" + argname, dest=name, action="store_true",
                        help="Enable %s" % (arghelp or name).lower())

                if isinstance(value, list):
                    return parser.add_argument(
                        "--" + argname, action="append", default=value,
                        help=arghelp)

                return parser.add_argument(
                    "--" + argname, type=anns.get(name, type(value)),
                    default=value, help=arghelp + ' [%s]' % repr(value))

            if vargs:
                process_arg('*', nargs="*", metavar=vargs)

            for name, value in (kwdefs or {}).items():
                process_arg(name, value=value)

            for name in args:
                process_arg(name, value=kwargs_.get(name, ...))

            self.handlers[func.__name__] = func
            func.parser = parser
            return func

        if callable(init):
            init.__init__ = True
            return wrapper(init)

        def decorator(func):
            func.__init__ = bool(init)
            return wrapper(func)

        return decorator
python
def command(self, init=False):
        """Define CLI command."""
        def wrapper(func):
            header = '\n'.join(
                [s for s in (func.__doc__ or '').split('\n')
                 if not s.strip().startswith(':')])
            parser = self.parsers.add_parser(func.__name__, description=header)
            args, vargs, kw, defs, kwargs, kwdefs, anns = inspect.getfullargspec(func)
            defs = defs or []
            kwargs_ = dict(zip(args[-len(defs):], defs))
            docs = dict(PARAM_RE.findall(func.__doc__ or ""))

            def process_arg(name, *, value=..., **opts):
                argname = name.replace('_', '-').lower()
                arghelp = docs.get(vargs, '')

                if value is ...:
                    return parser.add_argument(argname, help=arghelp, **opts)

                if isinstance(value, bool):
                    if value:
                        return parser.add_argument(
                            "--no-" + argname, dest=name, action="store_false",
                            help="Disable %s" % (arghelp or name).lower())

                    return parser.add_argument(
                        "--" + argname, dest=name, action="store_true",
                        help="Enable %s" % (arghelp or name).lower())

                if isinstance(value, list):
                    return parser.add_argument(
                        "--" + argname, action="append", default=value,
                        help=arghelp)

                return parser.add_argument(
                    "--" + argname, type=anns.get(name, type(value)),
                    default=value, help=arghelp + ' [%s]' % repr(value))

            if vargs:
                process_arg('*', nargs="*", metavar=vargs)

            for name, value in (kwdefs or {}).items():
                process_arg(name, value=value)

            for name in args:
                process_arg(name, value=kwargs_.get(name, ...))

            self.handlers[func.__name__] = func
            func.parser = parser
            return func

        if callable(init):
            init.__init__ = True
            return wrapper(init)

        def decorator(func):
            func.__init__ = bool(init)
            return wrapper(func)

        return decorator
Define CLI command.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/manage.py#L145-L202
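The decorator leans on inspect.getfullargspec; a small standalone sketch of what that call returns for a hypothetical command function:

import inspect

def deploy(target, force=False, tags=None, *, workers=2):
    """Deploy the app."""

spec = inspect.getfullargspec(deploy)
print(spec.args)            # ['target', 'force', 'tags']
print(spec.defaults)        # (False, None)
print(spec.kwonlydefaults)  # {'workers': 2}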
klen/muffin
muffin/urls.py
routes_register
def routes_register(app, handler, *paths, methods=None, router=None, name=None):
    """Register routes."""
    if router is None:
        router = app.router
    handler = to_coroutine(handler)

    resources = []

    for path in paths:

        # Register any exception to app
        if isinstance(path, type) and issubclass(path, BaseException):
            app._error_handlers[path] = handler
            continue

        # Ensure that names are unique
        name = str(name or '')
        rname, rnum = name, 2
        while rname in router:
            rname = "%s%d" % (name, rnum)
            rnum += 1

        path = parse(path)
        if isinstance(path, RETYPE):
            resource = RawReResource(path, name=rname)
            router.register_resource(resource)

        else:
            resource = router.add_resource(path, name=rname)

        for method in methods or [METH_ANY]:
            method = method.upper()
            resource.add_route(method, handler)

        resources.append(resource)

    return resources
python
def routes_register(app, handler, *paths, methods=None, router=None, name=None):
    """Register routes."""
    if router is None:
        router = app.router
    handler = to_coroutine(handler)

    resources = []

    for path in paths:

        # Register any exception to app
        if isinstance(path, type) and issubclass(path, BaseException):
            app._error_handlers[path] = handler
            continue

        # Ensure that names are unique
        name = str(name or '')
        rname, rnum = name, 2
        while rname in router:
            rname = "%s%d" % (name, rnum)
            rnum += 1

        path = parse(path)
        if isinstance(path, RETYPE):
            resource = RawReResource(path, name=rname)
            router.register_resource(resource)

        else:
            resource = router.add_resource(path, name=rname)

        for method in methods or [METH_ANY]:
            method = method.upper()
            resource.add_route(method, handler)

        resources.append(resource)

    return resources
Register routes.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/urls.py#L95-L132
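The name-uniquing loop is the non-obvious part; a tiny sketch with a plain set standing in for the router's registered names:

existing = {'users', 'users2'}

name = 'users'
rname, rnum = name, 2
while rname in existing:
    rname = "%s%d" % (name, rnum)
    rnum += 1
print(rname)  # users3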
klen/muffin
muffin/urls.py
parse
def parse(path):
    """Parse URL path and convert it to regexp if needed."""
    parsed = re.sre_parse.parse(path)
    for case, _ in parsed:
        if case not in (re.sre_parse.LITERAL, re.sre_parse.ANY):
            break
    else:
        return path

    path = path.strip('^$')

    def parse_(match):
        [part] = match.groups()
        match = DYNR_RE.match(part)
        params = match.groupdict()
        return '(?P<%s>%s)' % (params['var'], params['re'] or '[^{}/]+')

    return re.compile('^%s$' % DYNS_RE.sub(parse_, path))
python
def parse(path):
    """Parse URL path and convert it to regexp if needed."""
    parsed = re.sre_parse.parse(path)
    for case, _ in parsed:
        if case not in (re.sre_parse.LITERAL, re.sre_parse.ANY):
            break
    else:
        return path

    path = path.strip('^$')

    def parse_(match):
        [part] = match.groups()
        match = DYNR_RE.match(part)
        params = match.groupdict()
        return '(?P<%s>%s)' % (params['var'], params['re'] or '[^{}/]+')

    return re.compile('^%s$' % DYNS_RE.sub(parse_, path))
Parse URL path and convert it to regexp if needed.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/urls.py#L135-L152
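DYNS_RE and DYNR_RE are module-level patterns not shown in this record; the sketch below reimplements the same {var:regex} -> named-group conversion with hypothetical stand-ins for them:

import re

# Hypothetical stand-ins for muffin's DYNS_RE / DYNR_RE.
DYNS_RE = re.compile(r'\{([^{}]+)\}')  # find {...} chunks in the path
DYNR_RE = re.compile(r'^(?P<var>[a-zA-Z_]\w*)(?::(?P<re>.+))?$')

def to_regex(path):
    def repl(match):
        params = DYNR_RE.match(match.group(1)).groupdict()
        return '(?P<%s>%s)' % (params['var'], params['re'] or '[^{}/]+')
    return re.compile('^%s$' % DYNS_RE.sub(repl, path))

pattern = to_regex(r'/users/{id:\d+}/posts/{slug}')
m = pattern.match('/users/42/posts/hello-world')
print(m.groupdict())  # {'id': '42', 'slug': 'hello-world'}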
klen/muffin
muffin/urls.py
RawReResource.url_for
def url_for(self, *subgroups, **groups):
        """Build URL."""
        parsed = re.sre_parse.parse(self._pattern.pattern)
        subgroups = {n: str(v) for n, v in enumerate(subgroups, 1)}
        groups_ = dict(parsed.pattern.groupdict)
        subgroups.update({
            groups_[k0]: str(v0)
            for k0, v0 in groups.items()
            if k0 in groups_
        })
        path = ''.join(str(val) for val in Traverser(parsed, subgroups))
        return URL.build(path=path, encoded=True)
python
def url_for(self, *subgroups, **groups):
        """Build URL."""
        parsed = re.sre_parse.parse(self._pattern.pattern)
        subgroups = {n: str(v) for n, v in enumerate(subgroups, 1)}
        groups_ = dict(parsed.pattern.groupdict)
        subgroups.update({
            groups_[k0]: str(v0)
            for k0, v0 in groups.items()
            if k0 in groups_
        })
        path = ''.join(str(val) for val in Traverser(parsed, subgroups))
        return URL.build(path=path, encoded=True)
Build URL.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/urls.py#L38-L49
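A small sketch of the sre_parse machinery this relies on; note sre_parse is a private stdlib module whose layout shifts between versions (on Python >= 3.8 the parser state lives on .state, and the quoted parsed.pattern is the pre-3.8 spelling of the same object):

import sre_parse  # private module; emits a DeprecationWarning on 3.11+

parsed = sre_parse.parse(r'/users/(?P<id>\d+)')
print(dict(parsed.state.groupdict))  # {'id': 1} -- group name -> number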
klen/muffin
muffin/urls.py
Traverser.state_not_literal
def state_not_literal(self, value):
        """Parse not literal."""
        value = negate = chr(value)
        while value == negate:
            value = choice(self.literals)
        yield value
python
def state_not_literal(self, value):
        """Parse not literal."""
        value = negate = chr(value)
        while value == negate:
            value = choice(self.literals)
        yield value
Parse not literal.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/urls.py#L185-L190
klen/muffin
muffin/urls.py
Traverser.state_max_repeat
def state_max_repeat(self, value):
        """Parse repeatable parts."""
        min_, max_, value = value
        value = [val for val in Traverser(value, self.groups)]
        if not min_ and max_:
            for val in value:
                if isinstance(val, required):
                    min_ = 1
                    break

        for val in value * min_:
            yield val
python
def state_max_repeat(self, value):
        """Parse repeatable parts."""
        min_, max_, value = value
        value = [val for val in Traverser(value, self.groups)]
        if not min_ and max_:
            for val in value:
                if isinstance(val, required):
                    min_ = 1
                    break

        for val in value * min_:
            yield val
Parse repeatable parts.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/urls.py#L192-L203
klen/muffin
muffin/urls.py
Traverser.state_in
def state_in(self, value):
        """Parse ranges."""
        value = [val for val in Traverser(value, self.groups)]
        if not value or not value[0]:
            for val in self.literals - set(value):
                return (yield val)

        yield value[0]
python
def state_in(self, value):
        """Parse ranges."""
        value = [val for val in Traverser(value, self.groups)]
        if not value or not value[0]:
            for val in self.literals - set(value):
                return (yield val)

        yield value[0]
Parse ranges.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/urls.py#L207-L213
klen/muffin
muffin/urls.py
Traverser.state_category
def state_category(value):
        """Parse categories."""
        if value == re.sre_parse.CATEGORY_DIGIT:
            return (yield '0')

        if value == re.sre_parse.CATEGORY_WORD:
            return (yield 'x')
python
def state_category(value):
        """Parse categories."""
        if value == re.sre_parse.CATEGORY_DIGIT:
            return (yield '0')

        if value == re.sre_parse.CATEGORY_WORD:
            return (yield 'x')
Parse categories.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/urls.py#L218-L224
klen/muffin
muffin/urls.py
Traverser.state_subpattern
def state_subpattern(self, value):
        """Parse subpatterns."""
        num, *_, parsed = value
        if num in self.groups:
            return (yield required(self.groups[num]))

        yield from Traverser(parsed, groups=self.groups)
python
def state_subpattern(self, value):
        """Parse subpatterns."""
        num, *_, parsed = value
        if num in self.groups:
            return (yield required(self.groups[num]))

        yield from Traverser(parsed, groups=self.groups)
Parse subpatterns.
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/urls.py#L226-L232
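To make the Traverser states above concrete, a hedged sketch that walks sre_parse tokens and emits one string the pattern would match; it handles only the opcodes this example needs, where the real Traverser covers many more:

import sre_parse  # private stdlib module; DeprecationWarning on 3.11+

def emit(parsed, groups):
    """Yield one fragment per token, so ''.join() matches the pattern."""
    for opcode, value in parsed:
        name = opcode.name  # e.g. 'LITERAL', 'SUBPATTERN'
        if name == 'LITERAL':
            yield chr(value)
        elif name == 'SUBPATTERN':
            num, *_, inner = value
            if num in groups:
                yield groups[num]  # substitute the supplied group value
            else:
                yield from emit(inner, groups)
        elif name == 'MAX_REPEAT':
            min_, _max, inner = value
            for _i in range(min_):  # repeat only the required minimum
                yield from emit(inner, groups)

parsed = sre_parse.parse(r'/users/(?P<id>\d+)/posts')
print(''.join(emit(parsed, {1: '42'})))  # /users/42/posts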