Dataset columns (name: type, observed range):

repository_name: string, lengths 7 to 55
func_path_in_repository: string, lengths 4 to 223
func_name: string, lengths 1 to 134
whole_func_string: string, lengths 75 to 104k
language: string, 1 distinct value
func_code_string: string, lengths 75 to 104k
func_code_tokens: list, lengths 19 to 28.4k
func_documentation_string: string, lengths 1 to 46.9k
func_documentation_tokens: list, lengths 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, lengths 87 to 315

Rows (fields appear in the column order above):
nwilming/ocupy
ocupy/simulator.py
makeHist
def makeHist(x_val, y_val, fit=spline_base.fit2d,
             bins=[np.linspace(-36.5, 36.5, 74), np.linspace(-180, 180, 361)]):
    """
    Constructs a (fitted) histogram of the given data.

    Parameters:
        x_val : array
            The data to be histogrammed along the x-axis.
        y_val : array
            The data to be histogrammed along the y-axis.
        fit : function or None, optional
            The function to use in order to fit the data.
            If no fit should be applied, set to None.
        bins : tuple of arrays
            The bin edges to be used in the histogram.
            (First value: y-axis, Second value: x-axis)
    """
    y_val = y_val[~np.isnan(y_val)]
    x_val = x_val[~np.isnan(x_val)]
    samples = list(zip(y_val, x_val))
    K, xedges, yedges = np.histogram2d(y_val, x_val, bins=bins)
    if fit is None:
        return K / K.sum()
    # Check if the given fit argument is callable
    elif hasattr(fit, '__call__'):
        H = fit(np.array(samples), bins[0], bins[1], p_est=K)[0]
        return H / H.sum()
    else:
        raise TypeError("Not a valid argument, insert spline function or None")
python
def makeHist(x_val, y_val, fit=spline_base.fit2d,
             bins=[np.linspace(-36.5, 36.5, 74), np.linspace(-180, 180, 361)]):
    """
    Constructs a (fitted) histogram of the given data.

    Parameters:
        x_val : array
            The data to be histogrammed along the x-axis.
        y_val : array
            The data to be histogrammed along the y-axis.
        fit : function or None, optional
            The function to use in order to fit the data.
            If no fit should be applied, set to None.
        bins : tuple of arrays
            The bin edges to be used in the histogram.
            (First value: y-axis, Second value: x-axis)
    """
    y_val = y_val[~np.isnan(y_val)]
    x_val = x_val[~np.isnan(x_val)]
    samples = list(zip(y_val, x_val))
    K, xedges, yedges = np.histogram2d(y_val, x_val, bins=bins)
    if fit is None:
        return K / K.sum()
    # Check if the given fit argument is callable
    elif hasattr(fit, '__call__'):
        H = fit(np.array(samples), bins[0], bins[1], p_est=K)[0]
        return H / H.sum()
    else:
        raise TypeError("Not a valid argument, insert spline function or None")
[ "def", "makeHist", "(", "x_val", ",", "y_val", ",", "fit", "=", "spline_base", ".", "fit2d", ",", "bins", "=", "[", "np", ".", "linspace", "(", "-", "36.5", ",", "36.5", ",", "74", ")", ",", "np", ".", "linspace", "(", "-", "180", ",", "180", ",", "361", ")", "]", ")", ":", "y_val", "=", "y_val", "[", "~", "np", ".", "isnan", "(", "y_val", ")", "]", "x_val", "=", "x_val", "[", "~", "np", ".", "isnan", "(", "x_val", ")", "]", "samples", "=", "list", "(", "zip", "(", "y_val", ",", "x_val", ")", ")", "K", ",", "xedges", ",", "yedges", "=", "np", ".", "histogram2d", "(", "y_val", ",", "x_val", ",", "bins", "=", "bins", ")", "if", "(", "fit", "is", "None", ")", ":", "return", "K", "/", "K", ".", "sum", "(", ")", "# Check if given attr is a function", "elif", "hasattr", "(", "fit", ",", "'__call__'", ")", ":", "H", "=", "fit", "(", "np", ".", "array", "(", "samples", ")", ",", "bins", "[", "0", "]", ",", "bins", "[", "1", "]", ",", "p_est", "=", "K", ")", "[", "0", "]", "return", "H", "/", "H", ".", "sum", "(", ")", "else", ":", "raise", "TypeError", "(", "\"Not a valid argument, insert spline function or None\"", ")" ]
Constructs a (fitted) histogram of the given data. Parameters: x_val : array The data to be histogrammed along the x-axis. y_val : array The data to be histogrammed along the y-axis. fit : function or None, optional The function to use in order to fit the data. If no fit should be applied, set to None bins : tuple of arrays, giving the bin edges to be used in the histogram. (First value: y-axis, Second value: x-axis)
[ "Constructs", "a", "(", "fitted", ")", "histogram", "of", "the", "given", "data", ".", "Parameters", ":", "x_val", ":", "array", "The", "data", "to", "be", "histogrammed", "along", "the", "x", "-", "axis", ".", "y_val", ":", "array", "The", "data", "to", "be", "histogrammed", "along", "the", "y", "-", "axis", ".", "fit", ":", "function", "or", "None", "optional", "The", "function", "to", "use", "in", "order", "to", "fit", "the", "data", ".", "If", "no", "fit", "should", "be", "applied", "set", "to", "None", "bins", ":", "touple", "of", "arrays", "giving", "the", "bin", "edges", "to", "be", "used", "in", "the", "histogram", ".", "(", "First", "value", ":", "y", "-", "axis", "Second", "value", ":", "x", "-", "axis", ")" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L374-L405
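For illustration (not part of the dataset row above): a minimal sketch of makeHist's fit=None branch using only numpy, with made-up sample values; the spline fit (spline_base.fit2d) is not reproduced here.

    import numpy as np

    # Sketch of the fit=None path: bin (length, angle) samples on the same
    # grid makeHist uses, then normalize counts into a probability mass.
    rng = np.random.default_rng(0)
    y_val = rng.uniform(0, 36, 500)        # hypothetical saccade lengths (deg)
    x_val = rng.uniform(-180, 180, 500)    # hypothetical saccade angles (deg)
    bins = [np.linspace(-36.5, 36.5, 74), np.linspace(-180, 180, 361)]
    K, _, _ = np.histogram2d(y_val, x_val, bins=bins)
    P = K / K.sum()
    print(P.shape, P.sum())                # (73, 360) ~1.0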
nwilming/ocupy
ocupy/simulator.py
firstSacDist
def firstSacDist(fm):
    """
    Computes the distribution of angle and length combinations that
    were made as first saccades.

    Parameters:
        fm : ocupy.fixmat
            The fixation data to be analysed
    """
    ang, leng, ad, ld = anglendiff(fm, return_abs=True)
    y_arg = leng[0][np.roll(fm.fix == min(fm.fix), 1)]/fm.pixels_per_degree
    x_arg = reshift(ang[0][np.roll(fm.fix == min(fm.fix), 1)])
    bins = [list(range(int(ceil(np.nanmax(y_arg)))+1)),
            np.linspace(-180, 180, 361)]
    return makeHist(x_arg, y_arg, fit=None, bins=bins)
python
def firstSacDist(fm):
    """
    Computes the distribution of angle and length combinations that
    were made as first saccades.

    Parameters:
        fm : ocupy.fixmat
            The fixation data to be analysed
    """
    ang, leng, ad, ld = anglendiff(fm, return_abs=True)
    y_arg = leng[0][np.roll(fm.fix == min(fm.fix), 1)]/fm.pixels_per_degree
    x_arg = reshift(ang[0][np.roll(fm.fix == min(fm.fix), 1)])
    bins = [list(range(int(ceil(np.nanmax(y_arg)))+1)),
            np.linspace(-180, 180, 361)]
    return makeHist(x_arg, y_arg, fit=None, bins=bins)
[ "def", "firstSacDist", "(", "fm", ")", ":", "ang", ",", "leng", ",", "ad", ",", "ld", "=", "anglendiff", "(", "fm", ",", "return_abs", "=", "True", ")", "y_arg", "=", "leng", "[", "0", "]", "[", "np", ".", "roll", "(", "fm", ".", "fix", "==", "min", "(", "fm", ".", "fix", ")", ",", "1", ")", "]", "/", "fm", ".", "pixels_per_degree", "x_arg", "=", "reshift", "(", "ang", "[", "0", "]", "[", "np", ".", "roll", "(", "fm", ".", "fix", "==", "min", "(", "fm", ".", "fix", ")", ",", "1", ")", "]", ")", "bins", "=", "[", "list", "(", "range", "(", "int", "(", "ceil", "(", "np", ".", "nanmax", "(", "y_arg", ")", ")", ")", "+", "1", ")", ")", ",", "np", ".", "linspace", "(", "-", "180", ",", "180", ",", "361", ")", "]", "return", "makeHist", "(", "x_arg", ",", "y_arg", ",", "fit", "=", "None", ",", "bins", "=", "bins", ")" ]
Computes the distribution of angle and length combinations that were made as first saccades Parameters: fm : ocupy.fixmat The fixation data to be analysed
[ "Computes", "the", "distribution", "of", "angle", "and", "length", "combinations", "that", "were", "made", "as", "first", "saccades", "Parameters", ":", "fm", ":", "ocupy", ".", "fixmat", "The", "fixation", "data", "to", "be", "analysed" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L509-L523
nwilming/ocupy
ocupy/simulator.py
trajLenDist
def trajLenDist(fm):
    """
    Computes the distribution of trajectory lengths, i.e. the number
    of saccades that were made as a part of one trajectory.

    Parameters:
        fm : ocupy.fixmat
            The fixation data to be analysed
    """
    trajLen = np.roll(fm.fix, 1)[fm.fix == min(fm.fix)]
    val, borders = np.histogram(trajLen,
                                bins=np.linspace(-0.5, max(trajLen)+0.5,
                                                 max(trajLen)+2))
    cumsum = np.cumsum(val.astype(float) / val.sum())
    return cumsum, borders
python
def trajLenDist(fm):
    """
    Computes the distribution of trajectory lengths, i.e. the number
    of saccades that were made as a part of one trajectory.

    Parameters:
        fm : ocupy.fixmat
            The fixation data to be analysed
    """
    trajLen = np.roll(fm.fix, 1)[fm.fix == min(fm.fix)]
    val, borders = np.histogram(trajLen,
                                bins=np.linspace(-0.5, max(trajLen)+0.5,
                                                 max(trajLen)+2))
    cumsum = np.cumsum(val.astype(float) / val.sum())
    return cumsum, borders
[ "def", "trajLenDist", "(", "fm", ")", ":", "trajLen", "=", "np", ".", "roll", "(", "fm", ".", "fix", ",", "1", ")", "[", "fm", ".", "fix", "==", "min", "(", "fm", ".", "fix", ")", "]", "val", ",", "borders", "=", "np", ".", "histogram", "(", "trajLen", ",", "bins", "=", "np", ".", "linspace", "(", "-", "0.5", ",", "max", "(", "trajLen", ")", "+", "0.5", ",", "max", "(", "trajLen", ")", "+", "2", ")", ")", "cumsum", "=", "np", ".", "cumsum", "(", "val", ".", "astype", "(", "float", ")", "/", "val", ".", "sum", "(", ")", ")", "return", "cumsum", ",", "borders" ]
Computes the distribution of trajectory lengths, i.e. the number of saccades that were made as a part of one trajectory Parameters: fm : ocupy.fixmat The fixation data to be analysed
[ "Computes", "the", "distribution", "of", "trajectory", "lengths", "i", ".", "e", ".", "the", "number", "of", "saccades", "that", "were", "made", "as", "a", "part", "of", "one", "trajectory", "Parameters", ":", "fm", ":", "ocupy", ".", "fixmat", "The", "fixation", "data", "to", "be", "analysed" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L525-L539
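An aside on the binning used above, with made-up values: edges at -0.5, 0.5, ..., max+0.5 give one histogram bin per integer trajectory length, and the cumulative sum turns the normalized counts into a CDF for later inverse-transform sampling.

    import numpy as np

    trajLen = np.array([3, 5, 3, 8, 5, 5])     # hypothetical trajectory lengths
    m = trajLen.max()
    # One bin per integer 0..m, centred on the integers
    val, borders = np.histogram(trajLen, bins=np.linspace(-0.5, m + 0.5, m + 2))
    cdf = np.cumsum(val.astype(float) / val.sum())
    print(borders[:4])    # [-0.5  0.5  1.5  2.5]
    print(cdf[-1])        # ~1.0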
nwilming/ocupy
ocupy/simulator.py
reshift
def reshift(I):
    """
    Transforms the given number element into a range of [-180, 180],
    which covers all possible angle differences. This method reshifts
    larger or smaller numbers that might be the output of other angular
    calculations into that range by adding or subtracting 360,
    respectively.
    To make sure that angular data ranges between -180 and 180 in order
    to be properly histogrammed, apply this method first.

    Parameters:
        I : array or list or int or float
            Number or numbers that shall be reshifted.
            Farell, Ludwig, Ellis, and Gilchrist

    Returns:
        numpy.ndarray : Reshifted number or numbers as array
    """
    # Output -180 to +180
    if type(I) == list:
        I = np.array(I)
    return ((I-180) % 360) - 180
python
def reshift(I):
    """
    Transforms the given number element into a range of [-180, 180],
    which covers all possible angle differences. This method reshifts
    larger or smaller numbers that might be the output of other angular
    calculations into that range by adding or subtracting 360,
    respectively.
    To make sure that angular data ranges between -180 and 180 in order
    to be properly histogrammed, apply this method first.

    Parameters:
        I : array or list or int or float
            Number or numbers that shall be reshifted.
            Farell, Ludwig, Ellis, and Gilchrist

    Returns:
        numpy.ndarray : Reshifted number or numbers as array
    """
    # Output -180 to +180
    if type(I) == list:
        I = np.array(I)
    return ((I-180) % 360) - 180
[ "def", "reshift", "(", "I", ")", ":", "# Output -180 to +180", "if", "type", "(", "I", ")", "==", "list", ":", "I", "=", "np", ".", "array", "(", "I", ")", "return", "(", "(", "I", "-", "180", ")", "%", "360", ")", "-", "180" ]
Transforms the given number element into a range of [-180, 180], which covers all possible angle differences. This method reshifts larger or smaller numbers that might be the output of other angular calculations into that range by adding or subtracting 360, respectively. To make sure that angular data ranges between -180 and 180 in order to be properly histogrammed, apply this method first. Parameters: I : array or list or int or float Number or numbers that shall be reshifted. Farell, Ludwig, Ellis, and Gilchrist Returns: numpy.ndarray : Reshifted number or numbers as array
[ "Transforms", "the", "given", "number", "element", "into", "a", "range", "of", "[", "-", "180", "180", "]", "which", "covers", "all", "possible", "angle", "differences", ".", "This", "method", "reshifts", "larger", "or", "smaller", "numbers", "that", "might", "be", "the", "output", "of", "other", "angular", "calculations", "into", "that", "range", "by", "adding", "or", "subtracting", "360", "respectively", ".", "To", "make", "sure", "that", "angular", "data", "ranges", "between", "-", "180", "and", "180", "in", "order", "to", "be", "properly", "histogrammed", "apply", "this", "method", "first", ".", "Parameters", ":", "I", ":", "array", "or", "list", "or", "int", "or", "float", "Number", "or", "numbers", "that", "shall", "be", "reshifted", ".", "Farell", "Ludwig", "Ellis", "and", "Gilchrist", "Returns", ":", "numpy", ".", "ndarray", ":", "Reshifted", "number", "or", "numbers", "as", "array" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L541-L562
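A quick numeric check of the wrapping identity reshift relies on, ((I - 180) % 360) - 180, which maps any angle into [-180, 180) (note that +180 itself lands on -180):

    # Spot checks of the wrapping formula used by reshift
    for angle in [270, -190, 540, 180, 0]:
        print(angle, '->', ((angle - 180) % 360) - 180)
    # 270 -> -90, -190 -> 170, 540 -> -180, 180 -> -180, 0 -> 0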
nwilming/ocupy
ocupy/simulator.py
FixGen.initializeData
def initializeData(self, fit=None, full_H1=None, max_length=40, in_deg=True):
    """
    Prepares the data to be replicated. Calculates the second-order
    length and angle dependencies between saccades and stores them in a
    fitted histogram.

    Parameters:
        fit : function, optional
            The method to use for fitting the histogram
        full_H1 : twodimensional numpy.ndarray, optional
            Where applicable, the distribution of angle and length
            differences to replicate with dimensions [73, 361]
    """
    a, l, ad, ld = anglendiff(self.fm, roll=1, return_abs=True)
    if in_deg:
        self.fm.pixels_per_degree = 1

    samples = np.zeros([3, len(l[0])])
    samples[0] = l[0]/self.fm.pixels_per_degree
    samples[1] = np.roll(l[0]/self.fm.pixels_per_degree, -1)
    samples[2] = np.roll(reshift(ad[0]), -1)
    z = np.any(np.isnan(samples), axis=0)
    samples = samples[:, ~np.isnan(samples).any(0)]

    if full_H1 is None:
        self.full_H1 = []
        for i in range(1, int(ceil(max_length+1))):
            idx = np.logical_and(samples[0] <= i, samples[0] > i-1)
            if idx.any():
                self.full_H1.append(makeHist(samples[2][idx], samples[1][idx],
                                             fit=fit,
                                             bins=[np.linspace(0, max_length-1, max_length),
                                                   np.linspace(-180, 180, 361)]))
                # Sometimes if there's only one sample present there seems to
                # occur a problem with histogram calculation and the hist is
                # filled with nans. In this case, dismiss the hist.
                if np.isnan(self.full_H1[-1]).any():
                    self.full_H1[-1] = np.array([])
                self.nosamples.append(len(samples[2][idx]))
            else:
                self.full_H1.append(np.array([]))
                self.nosamples.append(0)
    else:
        self.full_H1 = full_H1

    self.firstLenAng_cumsum, self.firstLenAng_shape = (
        compute_cumsum(firstSacDist(self.fm)))
    self.probability_cumsum = []
    for i in range(len(self.full_H1)):
        if self.full_H1[i] == []:
            self.probability_cumsum.append(np.array([]))
        else:
            self.probability_cumsum.append(np.cumsum(self.full_H1[i].flat))
    self.trajLen_cumsum, self.trajLen_borders = trajLenDist(self.fm)

    min_distance = 1/np.array([min((np.unique(self.probability_cumsum[i])
                                    - np.roll(np.unique(self.probability_cumsum[i]), 1))[1:])
                               for i in range(len(self.probability_cumsum))])
    # Set a minimal resolution
    min_distance[min_distance < 10] = 10
    self.linind = {}
    for i in range(len(self.probability_cumsum)):
        self.linind['self.probability_cumsum '+repr(i)] = \
            np.linspace(0, 1, min_distance[i])[0:-1]
    for elem in [self.firstLenAng_cumsum, self.trajLen_cumsum]:
        self.linind[elem] = np.linspace(0, 1,
            1/min((np.unique(elem) - np.roll(np.unique(elem), 1))[1:]))[0:-1]
python
def initializeData(self, fit=None, full_H1=None, max_length=40, in_deg=True):
    """
    Prepares the data to be replicated. Calculates the second-order
    length and angle dependencies between saccades and stores them in a
    fitted histogram.

    Parameters:
        fit : function, optional
            The method to use for fitting the histogram
        full_H1 : twodimensional numpy.ndarray, optional
            Where applicable, the distribution of angle and length
            differences to replicate with dimensions [73, 361]
    """
    a, l, ad, ld = anglendiff(self.fm, roll=1, return_abs=True)
    if in_deg:
        self.fm.pixels_per_degree = 1

    samples = np.zeros([3, len(l[0])])
    samples[0] = l[0]/self.fm.pixels_per_degree
    samples[1] = np.roll(l[0]/self.fm.pixels_per_degree, -1)
    samples[2] = np.roll(reshift(ad[0]), -1)
    z = np.any(np.isnan(samples), axis=0)
    samples = samples[:, ~np.isnan(samples).any(0)]

    if full_H1 is None:
        self.full_H1 = []
        for i in range(1, int(ceil(max_length+1))):
            idx = np.logical_and(samples[0] <= i, samples[0] > i-1)
            if idx.any():
                self.full_H1.append(makeHist(samples[2][idx], samples[1][idx],
                                             fit=fit,
                                             bins=[np.linspace(0, max_length-1, max_length),
                                                   np.linspace(-180, 180, 361)]))
                # Sometimes if there's only one sample present there seems to
                # occur a problem with histogram calculation and the hist is
                # filled with nans. In this case, dismiss the hist.
                if np.isnan(self.full_H1[-1]).any():
                    self.full_H1[-1] = np.array([])
                self.nosamples.append(len(samples[2][idx]))
            else:
                self.full_H1.append(np.array([]))
                self.nosamples.append(0)
    else:
        self.full_H1 = full_H1

    self.firstLenAng_cumsum, self.firstLenAng_shape = (
        compute_cumsum(firstSacDist(self.fm)))
    self.probability_cumsum = []
    for i in range(len(self.full_H1)):
        if self.full_H1[i] == []:
            self.probability_cumsum.append(np.array([]))
        else:
            self.probability_cumsum.append(np.cumsum(self.full_H1[i].flat))
    self.trajLen_cumsum, self.trajLen_borders = trajLenDist(self.fm)

    min_distance = 1/np.array([min((np.unique(self.probability_cumsum[i])
                                    - np.roll(np.unique(self.probability_cumsum[i]), 1))[1:])
                               for i in range(len(self.probability_cumsum))])
    # Set a minimal resolution
    min_distance[min_distance < 10] = 10
    self.linind = {}
    for i in range(len(self.probability_cumsum)):
        self.linind['self.probability_cumsum '+repr(i)] = \
            np.linspace(0, 1, min_distance[i])[0:-1]
    for elem in [self.firstLenAng_cumsum, self.trajLen_cumsum]:
        self.linind[elem] = np.linspace(0, 1,
            1/min((np.unique(elem) - np.roll(np.unique(elem), 1))[1:]))[0:-1]
[ "def", "initializeData", "(", "self", ",", "fit", "=", "None", ",", "full_H1", "=", "None", ",", "max_length", "=", "40", ",", "in_deg", "=", "True", ")", ":", "a", ",", "l", ",", "ad", ",", "ld", "=", "anglendiff", "(", "self", ".", "fm", ",", "roll", "=", "1", ",", "return_abs", "=", "True", ")", "if", "in_deg", ":", "self", ".", "fm", ".", "pixels_per_degree", "=", "1", "samples", "=", "np", ".", "zeros", "(", "[", "3", ",", "len", "(", "l", "[", "0", "]", ")", "]", ")", "samples", "[", "0", "]", "=", "l", "[", "0", "]", "/", "self", ".", "fm", ".", "pixels_per_degree", "samples", "[", "1", "]", "=", "np", ".", "roll", "(", "l", "[", "0", "]", "/", "self", ".", "fm", ".", "pixels_per_degree", ",", "-", "1", ")", "samples", "[", "2", "]", "=", "np", ".", "roll", "(", "reshift", "(", "ad", "[", "0", "]", ")", ",", "-", "1", ")", "z", "=", "np", ".", "any", "(", "np", ".", "isnan", "(", "samples", ")", ",", "axis", "=", "0", ")", "samples", "=", "samples", "[", ":", ",", "~", "np", ".", "isnan", "(", "samples", ")", ".", "any", "(", "0", ")", "]", "if", "full_H1", "is", "None", ":", "self", ".", "full_H1", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "int", "(", "ceil", "(", "max_length", "+", "1", ")", ")", ")", ":", "idx", "=", "np", ".", "logical_and", "(", "samples", "[", "0", "]", "<=", "i", ",", "samples", "[", "0", "]", ">", "i", "-", "1", ")", "if", "idx", ".", "any", "(", ")", ":", "self", ".", "full_H1", ".", "append", "(", "makeHist", "(", "samples", "[", "2", "]", "[", "idx", "]", ",", "samples", "[", "1", "]", "[", "idx", "]", ",", "fit", "=", "fit", ",", "bins", "=", "[", "np", ".", "linspace", "(", "0", ",", "max_length", "-", "1", ",", "max_length", ")", ",", "np", ".", "linspace", "(", "-", "180", ",", "180", ",", "361", ")", "]", ")", ")", "# Sometimes if there's only one sample present there seems to occur a problem", "# with histogram calculation and the hist is filled with nans. 
In this case, dismiss", "# the hist.", "if", "np", ".", "isnan", "(", "self", ".", "full_H1", "[", "-", "1", "]", ")", ".", "any", "(", ")", ":", "self", ".", "full_H1", "[", "-", "1", "]", "=", "np", ".", "array", "(", "[", "]", ")", "self", ".", "nosamples", ".", "append", "(", "len", "(", "samples", "[", "2", "]", "[", "idx", "]", ")", ")", "else", ":", "self", ".", "full_H1", ".", "append", "(", "np", ".", "array", "(", "[", "]", ")", ")", "self", ".", "nosamples", ".", "append", "(", "0", ")", "else", ":", "self", ".", "full_H1", "=", "full_H1", "self", ".", "firstLenAng_cumsum", ",", "self", ".", "firstLenAng_shape", "=", "(", "compute_cumsum", "(", "firstSacDist", "(", "self", ".", "fm", ")", ")", ")", "self", ".", "probability_cumsum", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "full_H1", ")", ")", ":", "if", "self", ".", "full_H1", "[", "i", "]", "==", "[", "]", ":", "self", ".", "probability_cumsum", ".", "append", "(", "np", ".", "array", "(", "[", "]", ")", ")", "else", ":", "self", ".", "probability_cumsum", ".", "append", "(", "np", ".", "cumsum", "(", "self", ".", "full_H1", "[", "i", "]", ".", "flat", ")", ")", "self", ".", "trajLen_cumsum", ",", "self", ".", "trajLen_borders", "=", "trajLenDist", "(", "self", ".", "fm", ")", "min_distance", "=", "1", "/", "np", ".", "array", "(", "[", "min", "(", "(", "np", ".", "unique", "(", "self", ".", "probability_cumsum", "[", "i", "]", ")", "-", "np", ".", "roll", "(", "np", ".", "unique", "(", "self", ".", "probability_cumsum", "[", "i", "]", ")", ",", "1", ")", ")", "[", "1", ":", "]", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "probability_cumsum", ")", ")", "]", ")", "# Set a minimal resolution", "min_distance", "[", "min_distance", "<", "10", "]", "=", "10", "self", ".", "linind", "=", "{", "}", "for", "i", "in", "range", "(", "len", "(", "self", ".", "probability_cumsum", ")", ")", ":", "self", ".", "linind", "[", "'self.probability_cumsum '", "+", "repr", "(", "i", ")", "]", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "min_distance", "[", "i", "]", ")", "[", "0", ":", "-", "1", "]", "for", "elem", "in", "[", "self", ".", "firstLenAng_cumsum", ",", "self", ".", "trajLen_cumsum", "]", ":", "self", ".", "linind", "[", "elem", "]", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "1", "/", "min", "(", "(", "np", ".", "unique", "(", "(", "elem", ")", ")", "-", "np", ".", "roll", "(", "np", ".", "unique", "(", "(", "elem", ")", ")", ",", "1", ")", ")", "[", "1", ":", "]", ")", ")", "[", "0", ":", "-", "1", "]" ]
Prepares the data to be replicated. Calculates the second-order length and angle dependencies between saccades and stores them in a fitted histogram. Parameters: fit : function, optional The method to use for fitting the histogram full_H1 : twodimensional numpy.ndarray, optional Where applicable, the distribution of angle and length differences to replicate with dimensions [73,361]
[ "Prepares", "the", "data", "to", "be", "replicated", ".", "Calculates", "the", "second", "-", "order", "length", "and", "angle", "dependencies", "between", "saccades", "and", "stores", "them", "in", "a", "fitted", "histogram", ".", "Parameters", ":", "fit", ":", "function", "optional", "The", "method", "to", "use", "for", "fitting", "the", "histogram", "full_H1", ":", "twodimensional", "numpy", ".", "ndarray", "optional", "Where", "applicable", "the", "distribution", "of", "angle", "and", "length", "differences", "to", "replicate", "with", "dimensions", "[", "73", "361", "]" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L90-L157
nwilming/ocupy
ocupy/simulator.py
FixGen._calc_xy
def _calc_xy(self, xxx_todo_changeme, angle, length):
    """
    Calculates the coordinates after a specific saccade was made.

    Parameters:
        (x, y) : tuple of floats or ints
            The coordinates before the saccade was made
        angle : float or int
            The angle that the next saccade encloses with the
            horizontal display border
        length : float or int
            The length of the next saccade
    """
    (x, y) = xxx_todo_changeme
    return (x + (cos(radians(angle)) * length),
            y + (sin(radians(angle)) * length))
python
def _calc_xy(self, xxx_todo_changeme, angle, length):
    """
    Calculates the coordinates after a specific saccade was made.

    Parameters:
        (x, y) : tuple of floats or ints
            The coordinates before the saccade was made
        angle : float or int
            The angle that the next saccade encloses with the
            horizontal display border
        length : float or int
            The length of the next saccade
    """
    (x, y) = xxx_todo_changeme
    return (x + (cos(radians(angle)) * length),
            y + (sin(radians(angle)) * length))
[ "def", "_calc_xy", "(", "self", ",", "xxx_todo_changeme", ",", "angle", ",", "length", ")", ":", "(", "x", ",", "y", ")", "=", "xxx_todo_changeme", "return", "(", "x", "+", "(", "cos", "(", "radians", "(", "angle", ")", ")", "*", "length", ")", ",", "y", "+", "(", "sin", "(", "radians", "(", "angle", ")", ")", "*", "length", ")", ")" ]
Calculates the coordinates after a specific saccade was made. Parameters: (x,y) : tuple of floats or ints The coordinates before the saccade was made angle : float or int The angle that the next saccade encloses with the horizontal display border length: float or int The length of the next saccade
[ "Calculates", "the", "coordinates", "after", "a", "specific", "saccade", "was", "made", ".", "Parameters", ":", "(", "x", "y", ")", ":", "tuple", "of", "floats", "or", "ints", "The", "coordinates", "before", "the", "saccade", "was", "made", "angle", ":", "float", "or", "int", "The", "angle", "that", "the", "next", "saccade", "encloses", "with", "the", "horizontal", "display", "border", "length", ":", "float", "or", "int", "The", "length", "of", "the", "next", "saccade" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L159-L174
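_calc_xy is plain polar-to-Cartesian displacement; a standalone check with made-up coordinates:

    from math import cos, sin, radians

    x, y = 100.0, 50.0
    angle, length = 30.0, 10.0          # degrees and pixels (hypothetical)
    nx = x + cos(radians(angle)) * length
    ny = y + sin(radians(angle)) * length
    print(round(nx, 2), round(ny, 2))   # 108.66 55.0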
nwilming/ocupy
ocupy/simulator.py
FixGen._draw
def _draw(self, prev_angle=None, prev_length=None):
    """
    Draws a new length- and angle-difference pair and calculates
    length and angle absolutes matching the last saccade drawn.

    Parameters:
        prev_angle : float, optional
            The last angle that was drawn in the current trajectory
        prev_length : float, optional
            The last length that was drawn in the current trajectory

    Note: Either both prev_angle and prev_length have to be given
    or none; if only one parameter is given, it will be neglected.
    """
    if (prev_angle is None) or (prev_length is None):
        (length, angle) = np.unravel_index(
            self.drawFrom('self.firstLenAng_cumsum',
                          self.getrand('self.firstLenAng_cumsum')),
            self.firstLenAng_shape)
        angle = angle - ((self.firstLenAng_shape[1]-1)/2)
        angle += 0.5
        length += 0.5
        length *= self.fm.pixels_per_degree
    else:
        ind = int(floor(prev_length/self.fm.pixels_per_degree))
        while ind >= len(self.probability_cumsum):
            ind -= 1
        while not (self.probability_cumsum[ind]).any():
            ind -= 1
        J, I = np.unravel_index(
            self.drawFrom('self.probability_cumsum '+repr(ind),
                          self.getrand('self.probability_cumsum '+repr(ind))),
            self.full_H1[ind].shape)
        angle = reshift((I - self.full_H1[ind].shape[1]/2) + prev_angle)
        angle += 0.5
        length = J + 0.5
        length *= self.fm.pixels_per_degree
    return angle, length
python
def _draw(self, prev_angle=None, prev_length=None):
    """
    Draws a new length- and angle-difference pair and calculates
    length and angle absolutes matching the last saccade drawn.

    Parameters:
        prev_angle : float, optional
            The last angle that was drawn in the current trajectory
        prev_length : float, optional
            The last length that was drawn in the current trajectory

    Note: Either both prev_angle and prev_length have to be given
    or none; if only one parameter is given, it will be neglected.
    """
    if (prev_angle is None) or (prev_length is None):
        (length, angle) = np.unravel_index(
            self.drawFrom('self.firstLenAng_cumsum',
                          self.getrand('self.firstLenAng_cumsum')),
            self.firstLenAng_shape)
        angle = angle - ((self.firstLenAng_shape[1]-1)/2)
        angle += 0.5
        length += 0.5
        length *= self.fm.pixels_per_degree
    else:
        ind = int(floor(prev_length/self.fm.pixels_per_degree))
        while ind >= len(self.probability_cumsum):
            ind -= 1
        while not (self.probability_cumsum[ind]).any():
            ind -= 1
        J, I = np.unravel_index(
            self.drawFrom('self.probability_cumsum '+repr(ind),
                          self.getrand('self.probability_cumsum '+repr(ind))),
            self.full_H1[ind].shape)
        angle = reshift((I - self.full_H1[ind].shape[1]/2) + prev_angle)
        angle += 0.5
        length = J + 0.5
        length *= self.fm.pixels_per_degree
    return angle, length
[ "def", "_draw", "(", "self", ",", "prev_angle", "=", "None", ",", "prev_length", "=", "None", ")", ":", "if", "(", "prev_angle", "is", "None", ")", "or", "(", "prev_length", "is", "None", ")", ":", "(", "length", ",", "angle", ")", "=", "np", ".", "unravel_index", "(", "self", ".", "drawFrom", "(", "'self.firstLenAng_cumsum'", ",", "self", ".", "getrand", "(", "'self.firstLenAng_cumsum'", ")", ")", ",", "self", ".", "firstLenAng_shape", ")", "angle", "=", "angle", "-", "(", "(", "self", ".", "firstLenAng_shape", "[", "1", "]", "-", "1", ")", "/", "2", ")", "angle", "+=", "0.5", "length", "+=", "0.5", "length", "*=", "self", ".", "fm", ".", "pixels_per_degree", "else", ":", "ind", "=", "int", "(", "floor", "(", "prev_length", "/", "self", ".", "fm", ".", "pixels_per_degree", ")", ")", "while", "ind", ">=", "len", "(", "self", ".", "probability_cumsum", ")", ":", "ind", "-=", "1", "while", "not", "(", "self", ".", "probability_cumsum", "[", "ind", "]", ")", ".", "any", "(", ")", ":", "ind", "-=", "1", "J", ",", "I", "=", "np", ".", "unravel_index", "(", "self", ".", "drawFrom", "(", "'self.probability_cumsum '", "+", "repr", "(", "ind", ")", ",", "self", ".", "getrand", "(", "'self.probability_cumsum '", "+", "repr", "(", "ind", ")", ")", ")", ",", "self", ".", "full_H1", "[", "ind", "]", ".", "shape", ")", "angle", "=", "reshift", "(", "(", "I", "-", "self", ".", "full_H1", "[", "ind", "]", ".", "shape", "[", "1", "]", "/", "2", ")", "+", "prev_angle", ")", "angle", "+=", "0.5", "length", "=", "J", "+", "0.5", "length", "*=", "self", ".", "fm", ".", "pixels_per_degree", "return", "angle", ",", "length" ]
Draws a new length- and angle-difference pair and calculates length and angle absolutes matching the last saccade drawn. Parameters: prev_angle : float, optional The last angle that was drawn in the current trajectory prev_length : float, optional The last length that was drawn in the current trajectory Note: Either both prev_angle and prev_length have to be given or none; if only one parameter is given, it will be neglected.
[ "Draws", "a", "new", "length", "-", "and", "angle", "-", "difference", "pair", "and", "calculates", "length", "and", "angle", "absolutes", "matching", "the", "last", "saccade", "drawn", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L176-L212
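The indexing step at the heart of _draw, sketched on a toy histogram: a draw on the flattened cumulative sum is mapped back to 2D (length, angle) bin coordinates with np.unravel_index; the +0.5 offsets in _draw then move from bin edge to bin centre.

    import numpy as np

    H = np.array([[0.0, 0.2],
                  [0.5, 0.3]])          # toy 2x2 probability histogram
    cdf = np.cumsum(H.flat)             # [0.  0.2 0.7 1. ]
    r = 0.6                             # pretend random draw
    flat_idx = np.nonzero(cdf >= r)[0][0]
    J, I = np.unravel_index(flat_idx, H.shape)
    print(flat_idx, (J, I))             # 2 (1, 0)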
nwilming/ocupy
ocupy/simulator.py
FixGen.sample_many
def sample_many(self, num_samples=2000):
    """
    Generates a given number of trajectories, using the method sample().
    Returns a fixmat with the generated data.

    Parameters:
        num_samples : int, optional
            The number of trajectories that shall be generated.
    """
    x = []
    y = []
    fix = []
    sample = []
    # XXX: Delete ProgressBar
    pbar = ProgressBar(widgets=[Percentage(), Bar()],
                       maxval=num_samples).start()
    for s in range(0, num_samples):
        for i, (xs, ys) in enumerate(self.sample()):
            x.append(xs)
            y.append(ys)
            fix.append(i+1)
            sample.append(s)
        pbar.update(s+1)
    fields = {'fix': np.array(fix), 'y': np.array(y), 'x': np.array(x)}
    param = {'pixels_per_degree': self.fm.pixels_per_degree}
    out = fixmat.VectorFixmatFactory(fields, param)
    return out
python
def sample_many(self, num_samples=2000):
    """
    Generates a given number of trajectories, using the method sample().
    Returns a fixmat with the generated data.

    Parameters:
        num_samples : int, optional
            The number of trajectories that shall be generated.
    """
    x = []
    y = []
    fix = []
    sample = []
    # XXX: Delete ProgressBar
    pbar = ProgressBar(widgets=[Percentage(), Bar()],
                       maxval=num_samples).start()
    for s in range(0, num_samples):
        for i, (xs, ys) in enumerate(self.sample()):
            x.append(xs)
            y.append(ys)
            fix.append(i+1)
            sample.append(s)
        pbar.update(s+1)
    fields = {'fix': np.array(fix), 'y': np.array(y), 'x': np.array(x)}
    param = {'pixels_per_degree': self.fm.pixels_per_degree}
    out = fixmat.VectorFixmatFactory(fields, param)
    return out
[ "def", "sample_many", "(", "self", ",", "num_samples", "=", "2000", ")", ":", "x", "=", "[", "]", "y", "=", "[", "]", "fix", "=", "[", "]", "sample", "=", "[", "]", "# XXX: Delete ProgressBar", "pbar", "=", "ProgressBar", "(", "widgets", "=", "[", "Percentage", "(", ")", ",", "Bar", "(", ")", "]", ",", "maxval", "=", "num_samples", ")", ".", "start", "(", ")", "for", "s", "in", "range", "(", "0", ",", "num_samples", ")", ":", "for", "i", ",", "(", "xs", ",", "ys", ")", "in", "enumerate", "(", "self", ".", "sample", "(", ")", ")", ":", "x", ".", "append", "(", "xs", ")", "y", ".", "append", "(", "ys", ")", "fix", ".", "append", "(", "i", "+", "1", ")", "sample", ".", "append", "(", "s", ")", "pbar", ".", "update", "(", "s", "+", "1", ")", "fields", "=", "{", "'fix'", ":", "np", ".", "array", "(", "fix", ")", ",", "'y'", ":", "np", ".", "array", "(", "y", ")", ",", "'x'", ":", "np", ".", "array", "(", "x", ")", "}", "param", "=", "{", "'pixels_per_degree'", ":", "self", ".", "fm", ".", "pixels_per_degree", "}", "out", "=", "fixmat", ".", "VectorFixmatFactory", "(", "fields", ",", "param", ")", "return", "out" ]
Generates a given number of trajectories, using the method sample(). Returns a fixmat with the generated data. Parameters: num_samples : int, optional The number of trajectories that shall be generated.
[ "Generates", "a", "given", "number", "of", "trajectories", "using", "the", "method", "sample", "()", ".", "Returns", "a", "fixmat", "with", "the", "generated", "data", ".", "Parameters", ":", "num_samples", ":", "int", "optional", "The", "number", "of", "trajectories", "that", "shall", "be", "generated", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L241-L269
nwilming/ocupy
ocupy/simulator.py
FixGen.sample
def sample(self):
    """
    Draws a trajectory length, first coordinates, lengths, angles and
    length-angle-difference pairs according to the empirical distribution.
    Each call creates one complete trajectory.
    """
    lenghts = []
    angles = []
    coordinates = []
    fix = []
    sample_size = int(round(self.trajLen_borders[self.drawFrom(
        'self.trajLen_cumsum', self.getrand('self.trajLen_cumsum'))]))
    coordinates.append([0, 0])
    fix.append(1)
    while len(coordinates) < sample_size:
        if len(lenghts) == 0 and len(angles) == 0:
            angle, length = self._draw(self)
        else:
            angle, length = self._draw(prev_angle=angles[-1],
                                       prev_length=lenghts[-1])
        x, y = self._calc_xy(coordinates[-1], angle, length)
        coordinates.append([x, y])
        lenghts.append(length)
        angles.append(angle)
        fix.append(fix[-1]+1)
    return coordinates
python
def sample(self):
    """
    Draws a trajectory length, first coordinates, lengths, angles and
    length-angle-difference pairs according to the empirical distribution.
    Each call creates one complete trajectory.
    """
    lenghts = []
    angles = []
    coordinates = []
    fix = []
    sample_size = int(round(self.trajLen_borders[self.drawFrom(
        'self.trajLen_cumsum', self.getrand('self.trajLen_cumsum'))]))
    coordinates.append([0, 0])
    fix.append(1)
    while len(coordinates) < sample_size:
        if len(lenghts) == 0 and len(angles) == 0:
            angle, length = self._draw(self)
        else:
            angle, length = self._draw(prev_angle=angles[-1],
                                       prev_length=lenghts[-1])
        x, y = self._calc_xy(coordinates[-1], angle, length)
        coordinates.append([x, y])
        lenghts.append(length)
        angles.append(angle)
        fix.append(fix[-1]+1)
    return coordinates
[ "def", "sample", "(", "self", ")", ":", "lenghts", "=", "[", "]", "angles", "=", "[", "]", "coordinates", "=", "[", "]", "fix", "=", "[", "]", "sample_size", "=", "int", "(", "round", "(", "self", ".", "trajLen_borders", "[", "self", ".", "drawFrom", "(", "'self.trajLen_cumsum'", ",", "self", ".", "getrand", "(", "'self.trajLen_cumsum'", ")", ")", "]", ")", ")", "coordinates", ".", "append", "(", "[", "0", ",", "0", "]", ")", "fix", ".", "append", "(", "1", ")", "while", "len", "(", "coordinates", ")", "<", "sample_size", ":", "if", "len", "(", "lenghts", ")", "==", "0", "and", "len", "(", "angles", ")", "==", "0", ":", "angle", ",", "length", "=", "self", ".", "_draw", "(", "self", ")", "else", ":", "angle", ",", "length", "=", "self", ".", "_draw", "(", "prev_angle", "=", "angles", "[", "-", "1", "]", ",", "prev_length", "=", "lenghts", "[", "-", "1", "]", ")", "x", ",", "y", "=", "self", ".", "_calc_xy", "(", "coordinates", "[", "-", "1", "]", ",", "angle", ",", "length", ")", "coordinates", ".", "append", "(", "[", "x", ",", "y", "]", ")", "lenghts", ".", "append", "(", "length", ")", "angles", ".", "append", "(", "angle", ")", "fix", ".", "append", "(", "fix", "[", "-", "1", "]", "+", "1", ")", "return", "coordinates" ]
Draws a trajectory length, first coordinates, lengths, angles and length-angle-difference pairs according to the empirical distribution. Each call creates one complete trajectory.
[ "Draws", "a", "trajectory", "length", "first", "coordinates", "lengths", "angles", "and", "length", "-", "angle", "-", "difference", "pairs", "according", "to", "the", "empirical", "distribution", ".", "Each", "call", "creates", "one", "complete", "trajectory", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L271-L299
nwilming/ocupy
ocupy/simulator.py
FixGen.drawFrom
def drawFrom(self, cumsum, r):
    """
    Draws a value from a cumulative sum.

    Parameters:
        cumsum : array
            Cumulative sum from which shall be drawn.

    Returns:
        int : Index of the cumulative sum element drawn.
    """
    a = cumsum.rsplit()
    if len(a) > 1:
        b = eval(a[0])[int(a[1])]
    else:
        b = eval(a[0])
    return np.nonzero(b >= r)[0][0]
python
def drawFrom(self, cumsum, r):
    """
    Draws a value from a cumulative sum.

    Parameters:
        cumsum : array
            Cumulative sum from which shall be drawn.

    Returns:
        int : Index of the cumulative sum element drawn.
    """
    a = cumsum.rsplit()
    if len(a) > 1:
        b = eval(a[0])[int(a[1])]
    else:
        b = eval(a[0])
    return np.nonzero(b >= r)[0][0]
[ "def", "drawFrom", "(", "self", ",", "cumsum", ",", "r", ")", ":", "a", "=", "cumsum", ".", "rsplit", "(", ")", "if", "len", "(", "a", ")", ">", "1", ":", "b", "=", "eval", "(", "a", "[", "0", "]", ")", "[", "int", "(", "a", "[", "1", "]", ")", "]", "else", ":", "b", "=", "eval", "(", "a", "[", "0", "]", ")", "return", "np", ".", "nonzero", "(", "b", ">=", "r", ")", "[", "0", "]", "[", "0", "]" ]
Draws a value from a cumulative sum. Parameters: cumsum : array Cumulative sum from which shall be drawn. Returns: int : Index of the cumulative sum element drawn.
[ "Draws", "a", "value", "from", "a", "cumulative", "sum", ".", "Parameters", ":", "cumsum", ":", "array", "Cumulative", "sum", "from", "which", "shall", "be", "drawn", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L305-L322
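drawFrom resolves the cumsum name via eval and then performs inverse transform sampling; the sampling step itself, minus the name lookup, reduces to the following (CDF values are made up):

    import numpy as np

    cumsum = np.array([0.1, 0.4, 0.8, 1.0])   # hypothetical CDF
    rng = np.random.default_rng(1)
    r = rng.random()
    # First index whose cumulative probability reaches r
    idx = np.nonzero(cumsum >= r)[0][0]
    print(round(r, 3), '-> index', idx)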
nwilming/ocupy
ocupy/fixmat.py
load
def load(path):
    """
    Load fixmat at path.

    Parameters:
        path : string
            Absolute path of the file to load from.
    """
    f = h5py.File(path, 'r')
    if 'Fixmat' in f:
        fm_group = f['Fixmat']
    else:
        fm_group = f['Datamat']
    fields = {}
    params = {}
    for field, value in list(fm_group.items()):
        fields[field] = np.array(value)
    for param, value in list(fm_group.attrs.items()):
        params[param] = value
    f.close()
    return VectorFixmatFactory(fields, params)
python
def load(path):
    """
    Load fixmat at path.

    Parameters:
        path : string
            Absolute path of the file to load from.
    """
    f = h5py.File(path, 'r')
    if 'Fixmat' in f:
        fm_group = f['Fixmat']
    else:
        fm_group = f['Datamat']
    fields = {}
    params = {}
    for field, value in list(fm_group.items()):
        fields[field] = np.array(value)
    for param, value in list(fm_group.attrs.items()):
        params[param] = value
    f.close()
    return VectorFixmatFactory(fields, params)
[ "def", "load", "(", "path", ")", ":", "f", "=", "h5py", ".", "File", "(", "path", ",", "'r'", ")", "if", "'Fixmat'", "in", "f", ":", "fm_group", "=", "f", "[", "'Fixmat'", "]", "else", ":", "fm_group", "=", "f", "[", "'Datamat'", "]", "fields", "=", "{", "}", "params", "=", "{", "}", "for", "field", ",", "value", "in", "list", "(", "fm_group", ".", "items", "(", ")", ")", ":", "fields", "[", "field", "]", "=", "np", ".", "array", "(", "value", ")", "for", "param", ",", "value", "in", "list", "(", "fm_group", ".", "attrs", ".", "items", "(", ")", ")", ":", "params", "[", "param", "]", "=", "value", "f", ".", "close", "(", ")", "return", "VectorFixmatFactory", "(", "fields", ",", "params", ")" ]
Load fixmat at path. Parameters: path : string Absolute path of the file to load from.
[ "Load", "fixmat", "at", "path", ".", "Parameters", ":", "path", ":", "string", "Absolute", "path", "of", "the", "file", "to", "load", "from", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/fixmat.py#L140-L160
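A sketch of the HDF5 layout load() appears to expect: a 'Fixmat' (or 'Datamat') group whose datasets become per-fixation fields and whose attributes become parameters. The file name and values below are hypothetical.

    import h5py
    import numpy as np

    # Write a minimal file in the assumed layout
    with h5py.File('demo_fixmat.h5', 'w') as f:
        g = f.create_group('Fixmat')
        g.create_dataset('x', data=np.array([10.0, 20.0]))
        g.attrs['pixels_per_degree'] = 36

    # Read it back the way load() does (datasets -> fields, attrs -> params)
    with h5py.File('demo_fixmat.h5', 'r') as f:
        g = f['Fixmat'] if 'Fixmat' in f else f['Datamat']
        fields = {k: np.array(v) for k, v in g.items()}
        params = dict(g.attrs.items())
    print(fields['x'], params['pixels_per_degree'])   # [10. 20.] 36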
nwilming/ocupy
ocupy/fixmat.py
compute_fdm
def compute_fdm(fixmat, fwhm=2, scale_factor=1):
    """
    Computes a fixation density map for the calling fixmat.

    Creates a map the size of the image fixations were recorded on.
    Every pixel contains the frequency of fixations for this image.
    The fixation map is smoothed by convolution with a Gaussian kernel
    to approximate the area with highest processing (usually 2 deg.
    visual angle).

    Note: The function does not check whether the fixmat contains
    fixations from different images as it might be desirable to compute
    fdms over fixations from more than one image.

    Parameters:
        fwhm : float
            the full width at half maximum of the Gaussian kernel used
            for convolution of the fixation frequency map.
        scale_factor : float
            scale factor for the resulting fdm. Default is 1. Scale_factor
            must be a float specifying the fraction of the current size.

    Returns:
        fdm : numpy.array
            a numpy.array of size fixmat.image_size containing the
            fixation probability for every location on the image.
    """
    # image category must exist (>-1) and image_size must be non-empty
    assert (len(fixmat.image_size) == 2 and (fixmat.image_size[0] > 0) and
            (fixmat.image_size[1] > 0)), 'The image_size is either 0, or not 2D'
    # check whether fixmat contains fixations
    if fixmat._num_fix == 0 or len(fixmat.x) == 0 or len(fixmat.y) == 0:
        raise RuntimeError('There are no fixations in the fixmat.')
    assert not scale_factor <= 0, "scale_factor has to be > 0"

    # this specifies left edges of the histogram bins, i.e. fixations between
    # ]0 binedge[0]] are included. --> fixations are ceiled
    e_y = np.arange(0, np.round(scale_factor*fixmat.image_size[0]+1))
    e_x = np.arange(0, np.round(scale_factor*fixmat.image_size[1]+1))
    samples = np.array(list(zip((scale_factor*fixmat.y),
                                (scale_factor*fixmat.x))))
    (hist, _) = np.histogramdd(samples, (e_y, e_x))
    kernel_sigma = fwhm * fixmat.pixels_per_degree * scale_factor
    kernel_sigma = kernel_sigma / (2 * (2 * np.log(2)) ** .5)
    fdm = gaussian_filter(hist, kernel_sigma, order=0, mode='constant')
    return fdm / fdm.sum()
python
def compute_fdm(fixmat, fwhm=2, scale_factor=1):
    """
    Computes a fixation density map for the calling fixmat.

    Creates a map the size of the image fixations were recorded on.
    Every pixel contains the frequency of fixations for this image.
    The fixation map is smoothed by convolution with a Gaussian kernel
    to approximate the area with highest processing (usually 2 deg.
    visual angle).

    Note: The function does not check whether the fixmat contains
    fixations from different images as it might be desirable to compute
    fdms over fixations from more than one image.

    Parameters:
        fwhm : float
            the full width at half maximum of the Gaussian kernel used
            for convolution of the fixation frequency map.
        scale_factor : float
            scale factor for the resulting fdm. Default is 1. Scale_factor
            must be a float specifying the fraction of the current size.

    Returns:
        fdm : numpy.array
            a numpy.array of size fixmat.image_size containing the
            fixation probability for every location on the image.
    """
    # image category must exist (>-1) and image_size must be non-empty
    assert (len(fixmat.image_size) == 2 and (fixmat.image_size[0] > 0) and
            (fixmat.image_size[1] > 0)), 'The image_size is either 0, or not 2D'
    # check whether fixmat contains fixations
    if fixmat._num_fix == 0 or len(fixmat.x) == 0 or len(fixmat.y) == 0:
        raise RuntimeError('There are no fixations in the fixmat.')
    assert not scale_factor <= 0, "scale_factor has to be > 0"

    # this specifies left edges of the histogram bins, i.e. fixations between
    # ]0 binedge[0]] are included. --> fixations are ceiled
    e_y = np.arange(0, np.round(scale_factor*fixmat.image_size[0]+1))
    e_x = np.arange(0, np.round(scale_factor*fixmat.image_size[1]+1))
    samples = np.array(list(zip((scale_factor*fixmat.y),
                                (scale_factor*fixmat.x))))
    (hist, _) = np.histogramdd(samples, (e_y, e_x))
    kernel_sigma = fwhm * fixmat.pixels_per_degree * scale_factor
    kernel_sigma = kernel_sigma / (2 * (2 * np.log(2)) ** .5)
    fdm = gaussian_filter(hist, kernel_sigma, order=0, mode='constant')
    return fdm / fdm.sum()
[ "def", "compute_fdm", "(", "fixmat", ",", "fwhm", "=", "2", ",", "scale_factor", "=", "1", ")", ":", "# image category must exist (>-1) and image_size must be non-empty", "assert", "(", "len", "(", "fixmat", ".", "image_size", ")", "==", "2", "and", "(", "fixmat", ".", "image_size", "[", "0", "]", ">", "0", ")", "and", "(", "fixmat", ".", "image_size", "[", "1", "]", ">", "0", ")", ")", ",", "'The image_size is either 0, or not 2D'", "# check whether fixmat contains fixations", "if", "fixmat", ".", "_num_fix", "==", "0", "or", "len", "(", "fixmat", ".", "x", ")", "==", "0", "or", "len", "(", "fixmat", ".", "y", ")", "==", "0", ":", "raise", "RuntimeError", "(", "'There are no fixations in the fixmat.'", ")", "assert", "not", "scale_factor", "<=", "0", ",", "\"scale_factor has to be > 0\"", "# this specifies left edges of the histogram bins, i.e. fixations between", "# ]0 binedge[0]] are included. --> fixations are ceiled", "e_y", "=", "np", ".", "arange", "(", "0", ",", "np", ".", "round", "(", "scale_factor", "*", "fixmat", ".", "image_size", "[", "0", "]", "+", "1", ")", ")", "e_x", "=", "np", ".", "arange", "(", "0", ",", "np", ".", "round", "(", "scale_factor", "*", "fixmat", ".", "image_size", "[", "1", "]", "+", "1", ")", ")", "samples", "=", "np", ".", "array", "(", "list", "(", "zip", "(", "(", "scale_factor", "*", "fixmat", ".", "y", ")", ",", "(", "scale_factor", "*", "fixmat", ".", "x", ")", ")", ")", ")", "(", "hist", ",", "_", ")", "=", "np", ".", "histogramdd", "(", "samples", ",", "(", "e_y", ",", "e_x", ")", ")", "kernel_sigma", "=", "fwhm", "*", "fixmat", ".", "pixels_per_degree", "*", "scale_factor", "kernel_sigma", "=", "kernel_sigma", "/", "(", "2", "*", "(", "2", "*", "np", ".", "log", "(", "2", ")", ")", "**", ".5", ")", "fdm", "=", "gaussian_filter", "(", "hist", ",", "kernel_sigma", ",", "order", "=", "0", ",", "mode", "=", "'constant'", ")", "return", "fdm", "/", "fdm", ".", "sum", "(", ")" ]
Computes a fixation density map for the calling fixmat. Creates a map the size of the image fixations were recorded on. Every pixel contains the frequency of fixations for this image. The fixation map is smoothed by convolution with a Gaussian kernel to approximate the area with highest processing (usually 2 deg. visual angle). Note: The function does not check whether the fixmat contains fixations from different images as it might be desirable to compute fdms over fixations from more than one image. Parameters: fwhm : float the full width at half maximum of the Gaussian kernel used for convolution of the fixation frequency map. scale_factor : float scale factor for the resulting fdm. Default is 1. Scale_factor must be a float specifying the fraction of the current size. Returns: fdm : numpy.array a numpy.array of size fixmat.image_size containing the fixation probability for every location on the image.
[ "Computes", "a", "fixation", "density", "map", "for", "the", "calling", "fixmat", ".", "Creates", "a", "map", "the", "size", "of", "the", "image", "fixations", "were", "recorded", "on", ".", "Every", "pixel", "contains", "the", "frequency", "of", "fixations", "for", "this", "image", ".", "The", "fixation", "map", "is", "smoothed", "by", "convolution", "with", "a", "Gaussian", "kernel", "to", "approximate", "the", "area", "with", "highest", "processing", "(", "usually", "2", "deg", ".", "visual", "angle", ")", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/fixmat.py#L163-L207
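The FWHM-to-sigma conversion in compute_fdm follows the standard relation sigma = FWHM / (2 * sqrt(2 * ln 2)); a standalone sketch of the smoothing step with made-up parameters (using scipy.ndimage.gaussian_filter, which the function above calls):

    import numpy as np
    from scipy.ndimage import gaussian_filter

    fwhm, pixels_per_degree, scale_factor = 2.0, 36.0, 1.0   # hypothetical
    sigma = fwhm * pixels_per_degree * scale_factor / (2 * (2 * np.log(2)) ** 0.5)
    hist = np.zeros((240, 320))
    hist[120, 160] = 5                     # five fixations at one pixel
    fdm = gaussian_filter(hist, sigma, order=0, mode='constant')
    fdm = fdm / fdm.sum()                  # renormalize to a probability map
    print(round(sigma, 2), round(fdm.sum(), 6))   # 30.58 1.0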
nwilming/ocupy
ocupy/fixmat.py
relative_bias
def relative_bias(fm, scale_factor=1, estimator=None):
    """
    Computes the relative bias, i.e. the distribution of saccade angles
    and amplitudes.

    Parameters:
        fm : DataMat
            The fixation data to use
        scale_factor : double

    Returns:
        2D probability distribution of saccade angles and amplitudes.
    """
    assert 'fix' in fm.fieldnames(), "Can not work without fixation numbers"
    excl = fm.fix - np.roll(fm.fix, 1) != 1

    # Now calculate the direction where the NEXT fixation goes to
    diff_x = (np.roll(fm.x, 1) - fm.x)[~excl]
    diff_y = (np.roll(fm.y, 1) - fm.y)[~excl]

    # Make a histogram of diff values
    # this specifies left edges of the histogram bins, i.e. fixations between
    # ]0 binedge[0]] are included. --> fixations are ceiled
    ylim = np.round(scale_factor * fm.image_size[0])
    xlim = np.round(scale_factor * fm.image_size[1])
    x_steps = np.ceil(2*xlim) + 1
    if x_steps % 2 != 0:
        x_steps += 1
    y_steps = np.ceil(2*ylim) + 1
    if y_steps % 2 != 0:
        y_steps += 1
    e_x = np.linspace(-xlim, xlim, x_steps)
    e_y = np.linspace(-ylim, ylim, y_steps)
    #e_y = np.arange(-ylim, ylim+1)
    #e_x = np.arange(-xlim, xlim+1)

    samples = np.array(list(zip((scale_factor * diff_y),
                                (scale_factor * diff_x))))
    if estimator == None:
        (hist, _) = np.histogramdd(samples, (e_y, e_x))
    else:
        hist = estimator(samples, e_y, e_x)
    return hist
python
def relative_bias(fm, scale_factor=1, estimator=None):
    """
    Computes the relative bias, i.e. the distribution of saccade angles
    and amplitudes.

    Parameters:
        fm : DataMat
            The fixation data to use
        scale_factor : double

    Returns:
        2D probability distribution of saccade angles and amplitudes.
    """
    assert 'fix' in fm.fieldnames(), "Can not work without fixation numbers"
    excl = fm.fix - np.roll(fm.fix, 1) != 1

    # Now calculate the direction where the NEXT fixation goes to
    diff_x = (np.roll(fm.x, 1) - fm.x)[~excl]
    diff_y = (np.roll(fm.y, 1) - fm.y)[~excl]

    # Make a histogram of diff values
    # this specifies left edges of the histogram bins, i.e. fixations between
    # ]0 binedge[0]] are included. --> fixations are ceiled
    ylim = np.round(scale_factor * fm.image_size[0])
    xlim = np.round(scale_factor * fm.image_size[1])
    x_steps = np.ceil(2*xlim) + 1
    if x_steps % 2 != 0:
        x_steps += 1
    y_steps = np.ceil(2*ylim) + 1
    if y_steps % 2 != 0:
        y_steps += 1
    e_x = np.linspace(-xlim, xlim, x_steps)
    e_y = np.linspace(-ylim, ylim, y_steps)
    #e_y = np.arange(-ylim, ylim+1)
    #e_x = np.arange(-xlim, xlim+1)

    samples = np.array(list(zip((scale_factor * diff_y),
                                (scale_factor * diff_x))))
    if estimator == None:
        (hist, _) = np.histogramdd(samples, (e_y, e_x))
    else:
        hist = estimator(samples, e_y, e_x)
    return hist
[ "def", "relative_bias", "(", "fm", ",", "scale_factor", "=", "1", ",", "estimator", "=", "None", ")", ":", "assert", "'fix'", "in", "fm", ".", "fieldnames", "(", ")", ",", "\"Can not work without fixation numbers\"", "excl", "=", "fm", ".", "fix", "-", "np", ".", "roll", "(", "fm", ".", "fix", ",", "1", ")", "!=", "1", "# Now calculate the direction where the NEXT fixation goes to", "diff_x", "=", "(", "np", ".", "roll", "(", "fm", ".", "x", ",", "1", ")", "-", "fm", ".", "x", ")", "[", "~", "excl", "]", "diff_y", "=", "(", "np", ".", "roll", "(", "fm", ".", "y", ",", "1", ")", "-", "fm", ".", "y", ")", "[", "~", "excl", "]", "# Make a histogram of diff values", "# this specifies left edges of the histogram bins, i.e. fixations between", "# ]0 binedge[0]] are included. --> fixations are ceiled", "ylim", "=", "np", ".", "round", "(", "scale_factor", "*", "fm", ".", "image_size", "[", "0", "]", ")", "xlim", "=", "np", ".", "round", "(", "scale_factor", "*", "fm", ".", "image_size", "[", "1", "]", ")", "x_steps", "=", "np", ".", "ceil", "(", "2", "*", "xlim", ")", "+", "1", "if", "x_steps", "%", "2", "!=", "0", ":", "x_steps", "+=", "1", "y_steps", "=", "np", ".", "ceil", "(", "2", "*", "ylim", ")", "+", "1", "if", "y_steps", "%", "2", "!=", "0", ":", "y_steps", "+=", "1", "e_x", "=", "np", ".", "linspace", "(", "-", "xlim", ",", "xlim", ",", "x_steps", ")", "e_y", "=", "np", ".", "linspace", "(", "-", "ylim", ",", "ylim", ",", "y_steps", ")", "#e_y = np.arange(-ylim, ylim+1)", "#e_x = np.arange(-xlim, xlim+1)", "samples", "=", "np", ".", "array", "(", "list", "(", "zip", "(", "(", "scale_factor", "*", "diff_y", ")", ",", "(", "scale_factor", "*", "diff_x", ")", ")", ")", ")", "if", "estimator", "==", "None", ":", "(", "hist", ",", "_", ")", "=", "np", ".", "histogramdd", "(", "samples", ",", "(", "e_y", ",", "e_x", ")", ")", "else", ":", "hist", "=", "estimator", "(", "samples", ",", "e_y", ",", "e_x", ")", "return", "hist" ]
Computes the relative bias, i.e. the distribution of saccade angles and amplitudes. Parameters: fm : DataMat The fixation data to use scale_factor : double Returns: 2D probability distribution of saccade angles and amplitudes.
[ "Computes", "the", "relative", "bias", "i", ".", "e", ".", "the", "distribution", "of", "saccade", "angles", "and", "amplitudes", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/fixmat.py#L209-L249
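The exclusion mask at the top of relative_bias deserves a note: fixation numbers restart at the beginning of every trajectory, so positions where fix does not increase by exactly 1 mark trajectory boundaries and are dropped before differencing. A toy check with made-up values:

    import numpy as np

    fix = np.array([1, 2, 3, 1, 2])         # two hypothetical trajectories
    x = np.array([0., 3., 4., 10., 12.])
    excl = fix - np.roll(fix, 1) != 1
    diff_x = (np.roll(x, 1) - x)[~excl]
    print(excl)      # [ True False False  True False]
    print(diff_x)    # [-3. -1. -2.]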
nwilming/ocupy
ocupy/fixmat.py
DirectoryFixmatFactory
def DirectoryFixmatFactory(directory, categories=None, glob_str='*.mat',
                           var_name='fixmat'):
    """
    Concatenates all fixmats in dir and returns the resulting single fixmat.

    Parameters:
        directory : string
            Path from which the fixmats should be loaded
        categories : instance of stimuli.Categories, optional
            If given, the resulting fixmat provides direct access
            to the data in the categories object.
        glob_str : string
            A regular expression that defines which mat files are picked up.
        var_name : string
            The variable to load from the mat file.

    Returns:
        f_all : instance of FixMat
            Contains all fixmats that were found in given directory
    """
    files = glob(join(directory, glob_str))
    if len(files) == 0:
        raise ValueError("Could not find any fixmats in " +
                         join(directory, glob_str))
    f_all = FixmatFactory(files.pop(), categories, var_name)
    for fname in files:
        f_current = FixmatFactory(fname, categories, var_name)
        f_all.join(f_current)
    return f_all
python
def DirectoryFixmatFactory(directory, categories=None, glob_str='*.mat',
                           var_name='fixmat'):
    """
    Concatenates all fixmats in dir and returns the resulting single fixmat.

    Parameters:
        directory : string
            Path from which the fixmats should be loaded
        categories : instance of stimuli.Categories, optional
            If given, the resulting fixmat provides direct access
            to the data in the categories object.
        glob_str : string
            A regular expression that defines which mat files are picked up.
        var_name : string
            The variable to load from the mat file.

    Returns:
        f_all : instance of FixMat
            Contains all fixmats that were found in given directory
    """
    files = glob(join(directory, glob_str))
    if len(files) == 0:
        raise ValueError("Could not find any fixmats in " +
                         join(directory, glob_str))
    f_all = FixmatFactory(files.pop(), categories, var_name)
    for fname in files:
        f_current = FixmatFactory(fname, categories, var_name)
        f_all.join(f_current)
    return f_all
[ "def", "DirectoryFixmatFactory", "(", "directory", ",", "categories", "=", "None", ",", "glob_str", "=", "'*.mat'", ",", "var_name", "=", "'fixmat'", ")", ":", "files", "=", "glob", "(", "join", "(", "directory", ",", "glob_str", ")", ")", "if", "len", "(", "files", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Could not find any fixmats in \"", "+", "join", "(", "directory", ",", "glob_str", ")", ")", "f_all", "=", "FixmatFactory", "(", "files", ".", "pop", "(", ")", ",", "categories", ",", "var_name", ")", "for", "fname", "in", "files", ":", "f_current", "=", "FixmatFactory", "(", "fname", ",", "categories", ",", "var_name", ")", "f_all", ".", "join", "(", "f_current", ")", "return", "f_all" ]
Concatenates all fixmats in dir and returns the resulting single fixmat. Parameters: directory : string Path from which the fixmats should be loaded categories : instance of stimuli.Categories, optional If given, the resulting fixmat provides direct access to the data in the categories object. glob_str : string A regular expression that defines which mat files are picked up. var_name : string The variable to load from the mat file. Returns: f_all : instance of FixMat Contains all fixmats that were found in given directory
[ "Concatenates", "all", "fixmats", "in", "dir", "and", "returns", "the", "resulting", "single", "fixmat", ".", "Parameters", ":", "directory", ":", "string", "Path", "from", "which", "the", "fixmats", "should", "be", "loaded", "categories", ":", "instance", "of", "stimuli", ".", "Categories", "optional", "If", "given", "the", "resulting", "fixmat", "provides", "direct", "access", "to", "the", "data", "in", "the", "categories", "object", ".", "glob_str", ":", "string", "A", "regular", "expression", "that", "defines", "which", "mat", "files", "are", "picked", "up", ".", "var_name", ":", "string", "The", "variable", "to", "load", "from", "the", "mat", "file", ".", "Returns", ":", "f_all", ":", "instance", "of", "FixMat", "Contains", "all", "fixmats", "that", "were", "found", "in", "given", "directory" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/fixmat.py#L252-L280
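A minimal usage sketch for `DirectoryFixmatFactory`; the directory path is a placeholder, not part of the source:

```python
from ocupy import fixmat

# Concatenate every *.mat file in the folder into one FixMat.
# glob_str defaults to '*.mat'; a missing or empty folder raises ValueError.
fm = fixmat.DirectoryFixmatFactory('/data/experiment/fixmats')
print(fm.fieldnames())  # fields shared by all concatenated fixmats
```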
nwilming/ocupy
ocupy/fixmat.py
FixmatFactory
def FixmatFactory(fixmatfile, categories = None, var_name = 'fixmat', field_name='x'): """ Loads a single fixmat (fixmatfile). Parameters: fixmatfile : string The matlab fixmat that should be loaded. categories : instance of stimuli.Categories, optional Links data in categories to data in fixmat. """ try: data = loadmat(fixmatfile, struct_as_record = False) keys = list(data.keys()) data = data[var_name][0][0] except KeyError: raise RuntimeError('%s is not a field of the matlab structure. Possible'+ 'Keys are %s'%str(keys)) num_fix = data.__getattribute__(field_name).size # Get a list with fieldnames and a list with parameters fields = {} parameters = {} for field in data._fieldnames: if data.__getattribute__(field).size == num_fix: fields[field] = data.__getattribute__(field) else: parameters[field] = data.__getattribute__(field)[0].tolist() if len(parameters[field]) == 1: parameters[field] = parameters[field][0] # Generate FixMat fixmat = FixMat(categories = categories) fixmat._fields = list(fields.keys()) for (field, value) in list(fields.items()): fixmat.__dict__[field] = value.reshape(-1,) fixmat._parameters = parameters fixmat._subjects = None for (field, value) in list(parameters.items()): fixmat.__dict__[field] = value fixmat._num_fix = num_fix return fixmat
python
def FixmatFactory(fixmatfile, categories = None, var_name = 'fixmat', field_name='x'): """ Loads a single fixmat (fixmatfile). Parameters: fixmatfile : string The matlab fixmat that should be loaded. categories : instance of stimuli.Categories, optional Links data in categories to data in fixmat. """ try: data = loadmat(fixmatfile, struct_as_record = False) keys = list(data.keys()) data = data[var_name][0][0] except KeyError: raise RuntimeError('%s is not a field of the matlab structure. Possible'+ 'Keys are %s'%str(keys)) num_fix = data.__getattribute__(field_name).size # Get a list with fieldnames and a list with parameters fields = {} parameters = {} for field in data._fieldnames: if data.__getattribute__(field).size == num_fix: fields[field] = data.__getattribute__(field) else: parameters[field] = data.__getattribute__(field)[0].tolist() if len(parameters[field]) == 1: parameters[field] = parameters[field][0] # Generate FixMat fixmat = FixMat(categories = categories) fixmat._fields = list(fields.keys()) for (field, value) in list(fields.items()): fixmat.__dict__[field] = value.reshape(-1,) fixmat._parameters = parameters fixmat._subjects = None for (field, value) in list(parameters.items()): fixmat.__dict__[field] = value fixmat._num_fix = num_fix return fixmat
[ "def", "FixmatFactory", "(", "fixmatfile", ",", "categories", "=", "None", ",", "var_name", "=", "'fixmat'", ",", "field_name", "=", "'x'", ")", ":", "try", ":", "data", "=", "loadmat", "(", "fixmatfile", ",", "struct_as_record", "=", "False", ")", "keys", "=", "list", "(", "data", ".", "keys", "(", ")", ")", "data", "=", "data", "[", "var_name", "]", "[", "0", "]", "[", "0", "]", "except", "KeyError", ":", "raise", "RuntimeError", "(", "'%s is not a field of the matlab structure. Possible'", "+", "'Keys are %s'", "%", "str", "(", "keys", ")", ")", "num_fix", "=", "data", ".", "__getattribute__", "(", "field_name", ")", ".", "size", "# Get a list with fieldnames and a list with parameters", "fields", "=", "{", "}", "parameters", "=", "{", "}", "for", "field", "in", "data", ".", "_fieldnames", ":", "if", "data", ".", "__getattribute__", "(", "field", ")", ".", "size", "==", "num_fix", ":", "fields", "[", "field", "]", "=", "data", ".", "__getattribute__", "(", "field", ")", "else", ":", "parameters", "[", "field", "]", "=", "data", ".", "__getattribute__", "(", "field", ")", "[", "0", "]", ".", "tolist", "(", ")", "if", "len", "(", "parameters", "[", "field", "]", ")", "==", "1", ":", "parameters", "[", "field", "]", "=", "parameters", "[", "field", "]", "[", "0", "]", "# Generate FixMat", "fixmat", "=", "FixMat", "(", "categories", "=", "categories", ")", "fixmat", ".", "_fields", "=", "list", "(", "fields", ".", "keys", "(", ")", ")", "for", "(", "field", ",", "value", ")", "in", "list", "(", "fields", ".", "items", "(", ")", ")", ":", "fixmat", ".", "__dict__", "[", "field", "]", "=", "value", ".", "reshape", "(", "-", "1", ",", ")", "fixmat", ".", "_parameters", "=", "parameters", "fixmat", ".", "_subjects", "=", "None", "for", "(", "field", ",", "value", ")", "in", "list", "(", "parameters", ".", "items", "(", ")", ")", ":", "fixmat", ".", "__dict__", "[", "field", "]", "=", "value", "fixmat", ".", "_num_fix", "=", "num_fix", "return", "fixmat" ]
Loads a single fixmat (fixmatfile). Parameters: fixmatfile : string The matlab fixmat that should be loaded. categories : instance of stimuli.Categories, optional Links data in categories to data in fixmat.
[ "Loads", "a", "single", "fixmat", "(", "fixmatfile", ")", ".", "Parameters", ":", "fixmatfile", ":", "string", "The", "matlab", "fixmat", "that", "should", "be", "loaded", ".", "categories", ":", "instance", "of", "stimuli", ".", "Categories", "optional", "Links", "data", "in", "categories", "to", "data", "in", "fixmat", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/fixmat.py#L283-L325
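Worth flagging in `FixmatFactory`: since `%` binds tighter than `+`, the `RuntimeError` message is built as `'...Possible' + ('Keys are %s' % str(keys))`, leaving the leading `%s` unfilled and dropping the value (presumably `var_name`) it was meant to show. A hedged sketch of how that branch was likely intended:

```python
try:
    data = loadmat(fixmatfile, struct_as_record=False)
    keys = list(data.keys())
    data = data[var_name][0][0]
except KeyError:
    # Format both placeholders explicitly; the original applied '%' to the
    # second string literal only, so the first '%s' stayed unfilled.
    raise RuntimeError('%s is not a field of the matlab structure. '
                       'Possible keys are %s' % (var_name, str(keys)))
```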
nwilming/ocupy
ocupy/fixmat.py
FixMat.add_feature_values
def add_feature_values(self, features): """ Adds feature values of feature 'feature' to all fixations in the calling fixmat. For fixations out of the image boundaries, NaNs are returned. The function generates a new attribute field named with the string in features that contains an np.array listing feature values for every fixation in the fixmat. .. note:: The calling fixmat must have been constructed with an stimuli.Categories object Parameters: features : string list of feature names for which feature values are extracted. """ if not 'x' in self.fieldnames(): raise RuntimeError("""add_feature_values expects to find (x,y) locations in self.x and self.y. But self.x does not exist""") if not self._categories: raise RuntimeError( '''"%s" does not exist as a fieldname and the fixmat does not have a Categories object (no features available. The fixmat has these fields: %s''' \ %(features, str(self._fields))) for feature in features: # initialize new field with NaNs feat_vals = np.zeros([len(self.x)]) * np.nan for (cat_mat, imgs) in self.by_cat(): for img in np.unique(cat_mat.filenumber).astype(int): fmap = imgs[img][feature] on_image = (self.x >= 0) & (self.x <= self.image_size[1]) on_image = on_image & (self.y >= 0) & (self.y <= self.image_size[0]) idx = (self.category == imgs.category) & \ (self.filenumber == img) & \ (on_image.astype('bool')) feat_vals[idx] = fmap[self.y[idx].astype('int'), self.x[idx].astype('int')] # setattr(self, feature, feat_vals) self.add_field(feature, feat_vals)
python
def add_feature_values(self, features): """ Adds feature values of feature 'feature' to all fixations in the calling fixmat. For fixations out of the image boundaries, NaNs are returned. The function generates a new attribute field named with the string in features that contains an np.array listing feature values for every fixation in the fixmat. .. note:: The calling fixmat must have been constructed with an stimuli.Categories object Parameters: features : string list of feature names for which feature values are extracted. """ if not 'x' in self.fieldnames(): raise RuntimeError("""add_feature_values expects to find (x,y) locations in self.x and self.y. But self.x does not exist""") if not self._categories: raise RuntimeError( '''"%s" does not exist as a fieldname and the fixmat does not have a Categories object (no features available. The fixmat has these fields: %s''' \ %(features, str(self._fields))) for feature in features: # initialize new field with NaNs feat_vals = np.zeros([len(self.x)]) * np.nan for (cat_mat, imgs) in self.by_cat(): for img in np.unique(cat_mat.filenumber).astype(int): fmap = imgs[img][feature] on_image = (self.x >= 0) & (self.x <= self.image_size[1]) on_image = on_image & (self.y >= 0) & (self.y <= self.image_size[0]) idx = (self.category == imgs.category) & \ (self.filenumber == img) & \ (on_image.astype('bool')) feat_vals[idx] = fmap[self.y[idx].astype('int'), self.x[idx].astype('int')] # setattr(self, feature, feat_vals) self.add_field(feature, feat_vals)
[ "def", "add_feature_values", "(", "self", ",", "features", ")", ":", "if", "not", "'x'", "in", "self", ".", "fieldnames", "(", ")", ":", "raise", "RuntimeError", "(", "\"\"\"add_feature_values expects to find\n (x,y) locations in self.x and self.y. But self.x does not exist\"\"\"", ")", "if", "not", "self", ".", "_categories", ":", "raise", "RuntimeError", "(", "'''\"%s\" does not exist as a fieldname and the\n fixmat does not have a Categories object (no features \n available. The fixmat has these fields: %s'''", "%", "(", "features", ",", "str", "(", "self", ".", "_fields", ")", ")", ")", "for", "feature", "in", "features", ":", "# initialize new field with NaNs", "feat_vals", "=", "np", ".", "zeros", "(", "[", "len", "(", "self", ".", "x", ")", "]", ")", "*", "np", ".", "nan", "for", "(", "cat_mat", ",", "imgs", ")", "in", "self", ".", "by_cat", "(", ")", ":", "for", "img", "in", "np", ".", "unique", "(", "cat_mat", ".", "filenumber", ")", ".", "astype", "(", "int", ")", ":", "fmap", "=", "imgs", "[", "img", "]", "[", "feature", "]", "on_image", "=", "(", "self", ".", "x", ">=", "0", ")", "&", "(", "self", ".", "x", "<=", "self", ".", "image_size", "[", "1", "]", ")", "on_image", "=", "on_image", "&", "(", "self", ".", "y", ">=", "0", ")", "&", "(", "self", ".", "y", "<=", "self", ".", "image_size", "[", "0", "]", ")", "idx", "=", "(", "self", ".", "category", "==", "imgs", ".", "category", ")", "&", "(", "self", ".", "filenumber", "==", "img", ")", "&", "(", "on_image", ".", "astype", "(", "'bool'", ")", ")", "feat_vals", "[", "idx", "]", "=", "fmap", "[", "self", ".", "y", "[", "idx", "]", ".", "astype", "(", "'int'", ")", ",", "self", ".", "x", "[", "idx", "]", ".", "astype", "(", "'int'", ")", "]", "# setattr(self, feature, feat_vals)", "self", ".", "add_field", "(", "feature", ",", "feat_vals", ")" ]
Adds feature values of each feature in 'features' to all fixations in the calling fixmat. For fixations outside the image boundaries, NaNs are returned. The function generates a new attribute field, named after each string in features, that contains an np.array listing feature values for every fixation in the fixmat. .. note:: The calling fixmat must have been constructed with a stimuli.Categories object Parameters: features : list of strings Feature names for which feature values are extracted.
[ "Adds", "feature", "values", "of", "feature", "feature", "to", "all", "fixations", "in", "the", "calling", "fixmat", ".", "For", "fixations", "out", "of", "the", "image", "boundaries", "NaNs", "are", "returned", ".", "The", "function", "generates", "a", "new", "attribute", "field", "named", "with", "the", "string", "in", "features", "that", "contains", "an", "np", ".", "array", "listing", "feature", "values", "for", "every", "fixation", "in", "the", "fixmat", ".", "..", "note", "::", "The", "calling", "fixmat", "must", "have", "been", "constructed", "with", "an", "stimuli", ".", "Categories", "object", "Parameters", ":", "features", ":", "string", "list", "of", "feature", "names", "for", "which", "feature", "values", "are", "extracted", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/fixmat.py#L19-L60
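A short usage sketch for `add_feature_values`; `cats` stands for a previously built stimuli.Categories object, and both the path and the feature name 'saliency' are placeholders:

```python
from ocupy import fixmat

# The fixmat must be constructed with a Categories object so that the
# per-image feature maps can be looked up (see the note in the docstring).
fm = fixmat.FixmatFactory('/data/experiment/fixmat.mat', categories=cats)
fm.add_feature_values(['saliency'])
print(fm.saliency[:10])  # NaN for fixations that fell outside the image
```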
nwilming/ocupy
ocupy/fixmat.py
FixMat.make_reg_data
def make_reg_data(self, feature_list=None, all_controls=False): """ Generates two M x N matrices with M feature values at fixations for N features. Controls are a random sample out of all non-fixated regions of an image or fixations of the same subject group on a randomly chosen image. Fixations are pooled over all subjects in the calling fixmat. Parameters : all_controls : bool if True, all non-fixated points on a feature map are takes as control values. If False, controls are fixations from the same subjects but on one other randomly chosen image of the same category feature_list : list of strings contains names of all features that are used to generate the feature value matrix (--> number of dimensions in the model). ...note: this list has to be sorted ! Returns : N x M matrix of N control feature values per feature (M). Rows = Feature number /type Columns = Feature values """ if not 'x' in self.fieldnames(): raise RuntimeError("""make_reg_data expects to find (x,y) locations in self.x and self.y. But self.x does not exist""") on_image = (self.x >= 0) & (self.x <= self.image_size[1]) on_image = on_image & (self.y >= 0) & (self.y <= self.image_size[0]) assert on_image.all(), "All Fixations need to be on the image" assert len(np.unique(self.filenumber) > 1), "Fixmat has to have more than one filenumber" self.x = self.x.astype(int) self.y = self.y.astype(int) if feature_list == None: feature_list = np.sort(self._categories._features) all_act = np.zeros((len(feature_list), 1)) * np.nan all_ctrls = all_act.copy() for (cfm, imgs) in self.by_cat(): # make a list of all filenumbers in this category and then # choose one random filenumber without replacement imfiles = np.array(imgs.images()) # array makes a copy of the list ctrl_imgs = imfiles.copy() np.random.shuffle(ctrl_imgs) while (imfiles == ctrl_imgs).any(): np.random.shuffle(ctrl_imgs) for (imidx, img) in enumerate(imfiles): xact = cfm.x[cfm.filenumber == img] yact = cfm.y[cfm.filenumber == img] if all_controls: # take a sample the same length as the actuals out of every # non-fixated point in the feature map idx = np.ones(self.image_size) idx[cfm.y[cfm.filenumber == img], cfm.x[cfm.filenumber == img]] = 0 yctrl, xctrl = idx.nonzero() idx = np.random.randint(0, len(yctrl), len(xact)) yctrl = yctrl[idx] xctrl = xctrl[idx] del idx else: xctrl = cfm.x[cfm.filenumber == ctrl_imgs[imidx]] yctrl = cfm.y[cfm.filenumber == ctrl_imgs[imidx]] # initialize arrays for this filenumber actuals = np.zeros((1, len(xact))) * np.nan controls = np.zeros((1, len(xctrl))) * np.nan for feature in feature_list: # get the feature map fmap = imgs[img][feature] actuals = np.vstack((actuals, fmap[yact, xact])) controls = np.vstack((controls, fmap[yctrl, xctrl])) all_act = np.hstack((all_act, actuals[1:, :])) all_ctrls = np.hstack((all_ctrls, controls[1:, :])) return (all_act[:, 1:], all_ctrls[:, 1:])
python
def make_reg_data(self, feature_list=None, all_controls=False): """ Generates two M x N matrices with M feature values at fixations for N features. Controls are a random sample out of all non-fixated regions of an image or fixations of the same subject group on a randomly chosen image. Fixations are pooled over all subjects in the calling fixmat. Parameters : all_controls : bool if True, all non-fixated points on a feature map are takes as control values. If False, controls are fixations from the same subjects but on one other randomly chosen image of the same category feature_list : list of strings contains names of all features that are used to generate the feature value matrix (--> number of dimensions in the model). ...note: this list has to be sorted ! Returns : N x M matrix of N control feature values per feature (M). Rows = Feature number /type Columns = Feature values """ if not 'x' in self.fieldnames(): raise RuntimeError("""make_reg_data expects to find (x,y) locations in self.x and self.y. But self.x does not exist""") on_image = (self.x >= 0) & (self.x <= self.image_size[1]) on_image = on_image & (self.y >= 0) & (self.y <= self.image_size[0]) assert on_image.all(), "All Fixations need to be on the image" assert len(np.unique(self.filenumber) > 1), "Fixmat has to have more than one filenumber" self.x = self.x.astype(int) self.y = self.y.astype(int) if feature_list == None: feature_list = np.sort(self._categories._features) all_act = np.zeros((len(feature_list), 1)) * np.nan all_ctrls = all_act.copy() for (cfm, imgs) in self.by_cat(): # make a list of all filenumbers in this category and then # choose one random filenumber without replacement imfiles = np.array(imgs.images()) # array makes a copy of the list ctrl_imgs = imfiles.copy() np.random.shuffle(ctrl_imgs) while (imfiles == ctrl_imgs).any(): np.random.shuffle(ctrl_imgs) for (imidx, img) in enumerate(imfiles): xact = cfm.x[cfm.filenumber == img] yact = cfm.y[cfm.filenumber == img] if all_controls: # take a sample the same length as the actuals out of every # non-fixated point in the feature map idx = np.ones(self.image_size) idx[cfm.y[cfm.filenumber == img], cfm.x[cfm.filenumber == img]] = 0 yctrl, xctrl = idx.nonzero() idx = np.random.randint(0, len(yctrl), len(xact)) yctrl = yctrl[idx] xctrl = xctrl[idx] del idx else: xctrl = cfm.x[cfm.filenumber == ctrl_imgs[imidx]] yctrl = cfm.y[cfm.filenumber == ctrl_imgs[imidx]] # initialize arrays for this filenumber actuals = np.zeros((1, len(xact))) * np.nan controls = np.zeros((1, len(xctrl))) * np.nan for feature in feature_list: # get the feature map fmap = imgs[img][feature] actuals = np.vstack((actuals, fmap[yact, xact])) controls = np.vstack((controls, fmap[yctrl, xctrl])) all_act = np.hstack((all_act, actuals[1:, :])) all_ctrls = np.hstack((all_ctrls, controls[1:, :])) return (all_act[:, 1:], all_ctrls[:, 1:])
[ "def", "make_reg_data", "(", "self", ",", "feature_list", "=", "None", ",", "all_controls", "=", "False", ")", ":", "if", "not", "'x'", "in", "self", ".", "fieldnames", "(", ")", ":", "raise", "RuntimeError", "(", "\"\"\"make_reg_data expects to find\n            (x,y) locations in self.x and self.y. But self.x does not exist\"\"\"", ")", "on_image", "=", "(", "self", ".", "x", ">=", "0", ")", "&", "(", "self", ".", "x", "<=", "self", ".", "image_size", "[", "1", "]", ")", "on_image", "=", "on_image", "&", "(", "self", ".", "y", ">=", "0", ")", "&", "(", "self", ".", "y", "<=", "self", ".", "image_size", "[", "0", "]", ")", "assert", "on_image", ".", "all", "(", ")", ",", "\"All Fixations need to be on the image\"", "assert", "len", "(", "np", ".", "unique", "(", "self", ".", "filenumber", ")", ">", "1", ")", ",", "\"Fixmat has to have more than one filenumber\"", "self", ".", "x", "=", "self", ".", "x", ".", "astype", "(", "int", ")", "self", ".", "y", "=", "self", ".", "y", ".", "astype", "(", "int", ")", "if", "feature_list", "==", "None", ":", "feature_list", "=", "np", ".", "sort", "(", "self", ".", "_categories", ".", "_features", ")", "all_act", "=", "np", ".", "zeros", "(", "(", "len", "(", "feature_list", ")", ",", "1", ")", ")", "*", "np", ".", "nan", "all_ctrls", "=", "all_act", ".", "copy", "(", ")", "for", "(", "cfm", ",", "imgs", ")", "in", "self", ".", "by_cat", "(", ")", ":", "# make a list of all filenumbers in this category and then ", "# choose one random filenumber without replacement", "imfiles", "=", "np", ".", "array", "(", "imgs", ".", "images", "(", ")", ")", "# array makes a copy of the list", "ctrl_imgs", "=", "imfiles", ".", "copy", "(", ")", "np", ".", "random", ".", "shuffle", "(", "ctrl_imgs", ")", "while", "(", "imfiles", "==", "ctrl_imgs", ")", ".", "any", "(", ")", ":", "np", ".", "random", ".", "shuffle", "(", "ctrl_imgs", ")", "for", "(", "imidx", ",", "img", ")", "in", "enumerate", "(", "imfiles", ")", ":", "xact", "=", "cfm", ".", "x", "[", "cfm", ".", "filenumber", "==", "img", "]", "yact", "=", "cfm", ".", "y", "[", "cfm", ".", "filenumber", "==", "img", "]", "if", "all_controls", ":", "# take a sample the same length as the actuals out of every ", "# non-fixated point in the feature map", "idx", "=", "np", ".", "ones", "(", "self", ".", "image_size", ")", "idx", "[", "cfm", ".", "y", "[", "cfm", ".", "filenumber", "==", "img", "]", ",", "cfm", ".", "x", "[", "cfm", ".", "filenumber", "==", "img", "]", "]", "=", "0", "yctrl", ",", "xctrl", "=", "idx", ".", "nonzero", "(", ")", "idx", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "len", "(", "yctrl", ")", ",", "len", "(", "xact", ")", ")", "yctrl", "=", "yctrl", "[", "idx", "]", "xctrl", "=", "xctrl", "[", "idx", "]", "del", "idx", "else", ":", "xctrl", "=", "cfm", ".", "x", "[", "cfm", ".", "filenumber", "==", "ctrl_imgs", "[", "imidx", "]", "]", "yctrl", "=", "cfm", ".", "y", "[", "cfm", ".", "filenumber", "==", "ctrl_imgs", "[", "imidx", "]", "]", "# initialize arrays for this filenumber", "actuals", "=", "np", ".", "zeros", "(", "(", "1", ",", "len", "(", "xact", ")", ")", ")", "*", "np", ".", "nan", "controls", "=", "np", ".", "zeros", "(", "(", "1", ",", "len", "(", "xctrl", ")", ")", ")", "*", "np", ".", "nan", "for", "feature", "in", "feature_list", ":", "# get the feature map", "fmap", "=", "imgs", "[", "img", "]", "[", "feature", "]", "actuals", "=", "np", ".", "vstack", "(", "(", "actuals", ",", "fmap", "[", "yact", ",", "xact", "]", ")", ")", "controls", "=", "np", ".", "vstack", "(", "(", "controls", ",", "fmap", "[", "yctrl", ",", "xctrl", "]", ")", ")", "all_act", "=", "np", ".", "hstack", "(", "(", "all_act", ",", "actuals", "[", "1", ":", ",", ":", "]", ")", ")", "all_ctrls", "=", "np", ".", "hstack", "(", "(", "all_ctrls", ",", "controls", "[", "1", ":", ",", ":", "]", ")", ")", "return", "(", "all_act", "[", ":", ",", "1", ":", "]", ",", "all_ctrls", "[", ":", ",", "1", ":", "]", ")" ]
Generates two N x M matrices with M feature values at fixations for each of N features. Controls are a random sample out of all non-fixated regions of an image or fixations of the same subject group on a randomly chosen image. Fixations are pooled over all subjects in the calling fixmat. Parameters : all_controls : bool if True, all non-fixated points on a feature map are taken as control values. If False, controls are fixations from the same subjects but on another randomly chosen image of the same category feature_list : list of strings contains names of all features that are used to generate the feature value matrix (--> number of dimensions in the model). ...note: this list has to be sorted ! Returns : Two N x M matrices of M feature values per feature (N): one for actual fixations, one for controls. Rows = feature number / type, Columns = feature values
[ "Generates", "two", "M", "x", "N", "matrices", "with", "M", "feature", "values", "at", "fixations", "for", "N", "features", ".", "Controls", "are", "a", "random", "sample", "out", "of", "all", "non", "-", "fixated", "regions", "of", "an", "image", "or", "fixations", "of", "the", "same", "subject", "group", "on", "a", "randomly", "chosen", "image", ".", "Fixations", "are", "pooled", "over", "all", "subjects", "in", "the", "calling", "fixmat", ".", "Parameters", ":", "all_controls", ":", "bool", "if", "True", "all", "non", "-", "fixated", "points", "on", "a", "feature", "map", "are", "takes", "as", "control", "values", ".", "If", "False", "controls", "are", "fixations", "from", "the", "same", "subjects", "but", "on", "one", "other", "randomly", "chosen", "image", "of", "the", "same", "category", "feature_list", ":", "list", "of", "strings", "contains", "names", "of", "all", "features", "that", "are", "used", "to", "generate", "the", "feature", "value", "matrix", "(", "--", ">", "number", "of", "dimensions", "in", "the", "model", ")", ".", "...", "note", ":", "this", "list", "has", "to", "be", "sorted", "!" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/fixmat.py#L62-L138
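One detail to note: `assert len(np.unique(self.filenumber) > 1)` puts the comparison inside `len`, so it only checks that the boolean array is non-empty; `len(np.unique(self.filenumber)) > 1` was presumably intended. A sketch of the intended guard plus a call, where `fm` is an existing fixmat and the feature names are placeholders (the list must be sorted, per the docstring):

```python
import numpy as np

# Intended guard (assumption): the fixmat must span more than one image.
assert len(np.unique(fm.filenumber)) > 1, \
    "Fixmat has to have more than one filenumber"

# Feature values at fixated locations vs. matched control locations.
actuals, controls = fm.make_reg_data(feature_list=['contrast', 'saliency'])
```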
nwilming/ocupy
ocupy/samples2fix.py
get_velocity
def get_velocity(samplemat, Hz, blinks=None): ''' Compute velocity of eye-movements. Samplemat must contain fields 'x' and 'y', specifying the x,y coordinates of gaze location. The function assumes that the values in x,y are sampled continously at a rate specified by 'Hz'. ''' Hz = float(Hz) distance = ((np.diff(samplemat.x) ** 2) + (np.diff(samplemat.y) ** 2)) ** .5 distance = np.hstack(([distance[0]], distance)) if blinks is not None: distance[blinks[1:]] = np.nan win = np.ones((velocity_window_size)) / float(velocity_window_size) velocity = np.convolve(distance, win, mode='same') velocity = velocity / (velocity_window_size / Hz) acceleration = np.diff(velocity) / (1. / Hz) acceleration = abs(np.hstack(([acceleration[0]], acceleration))) return velocity, acceleration
python
def get_velocity(samplemat, Hz, blinks=None): ''' Compute velocity of eye-movements. Samplemat must contain fields 'x' and 'y', specifying the x,y coordinates of gaze location. The function assumes that the values in x,y are sampled continously at a rate specified by 'Hz'. ''' Hz = float(Hz) distance = ((np.diff(samplemat.x) ** 2) + (np.diff(samplemat.y) ** 2)) ** .5 distance = np.hstack(([distance[0]], distance)) if blinks is not None: distance[blinks[1:]] = np.nan win = np.ones((velocity_window_size)) / float(velocity_window_size) velocity = np.convolve(distance, win, mode='same') velocity = velocity / (velocity_window_size / Hz) acceleration = np.diff(velocity) / (1. / Hz) acceleration = abs(np.hstack(([acceleration[0]], acceleration))) return velocity, acceleration
[ "def", "get_velocity", "(", "samplemat", ",", "Hz", ",", "blinks", "=", "None", ")", ":", "Hz", "=", "float", "(", "Hz", ")", "distance", "=", "(", "(", "np", ".", "diff", "(", "samplemat", ".", "x", ")", "**", "2", ")", "+", "(", "np", ".", "diff", "(", "samplemat", ".", "y", ")", "**", "2", ")", ")", "**", ".5", "distance", "=", "np", ".", "hstack", "(", "(", "[", "distance", "[", "0", "]", "]", ",", "distance", ")", ")", "if", "blinks", "is", "not", "None", ":", "distance", "[", "blinks", "[", "1", ":", "]", "]", "=", "np", ".", "nan", "win", "=", "np", ".", "ones", "(", "(", "velocity_window_size", ")", ")", "/", "float", "(", "velocity_window_size", ")", "velocity", "=", "np", ".", "convolve", "(", "distance", ",", "win", ",", "mode", "=", "'same'", ")", "velocity", "=", "velocity", "/", "(", "velocity_window_size", "/", "Hz", ")", "acceleration", "=", "np", ".", "diff", "(", "velocity", ")", "/", "(", "1.", "/", "Hz", ")", "acceleration", "=", "abs", "(", "np", ".", "hstack", "(", "(", "[", "acceleration", "[", "0", "]", "]", ",", "acceleration", ")", ")", ")", "return", "velocity", ",", "acceleration" ]
Compute velocity of eye-movements. Samplemat must contain fields 'x' and 'y', specifying the x,y coordinates of gaze location. The function assumes that the values in x,y are sampled continuously at a rate specified by 'Hz'.
[ "Compute", "velocity", "of", "eye", "-", "movements", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/samples2fix.py#L11-L30
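A minimal call sketch for `get_velocity`; the smoothing window length comes from a module-level `velocity_window_size` defined elsewhere in samples2fix, and the stand-in sample object below is an assumption, not an ocupy API:

```python
import numpy as np
from ocupy import samples2fix

class FakeSamples:
    # Any object exposing x and y arrays (gaze coordinates) works here.
    x = np.cumsum(np.full(100, 0.1))  # steady drift along x
    y = np.zeros(100)

vel, acc = samples2fix.get_velocity(FakeSamples(), Hz=200)
print(vel.shape, acc.shape)  # one velocity/acceleration value per sample
```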
nwilming/ocupy
ocupy/samples2fix.py
saccade_detection
def saccade_detection(samplemat, Hz=200, threshold=30, acc_thresh=2000, min_duration=21, min_movement=.35, ignore_blinks=False): ''' Detect saccades in a stream of gaze location samples. Coordinates in samplemat are assumed to be in degrees. Saccades are detect by a velocity/acceleration threshold approach. A saccade starts when a) the velocity is above threshold, b) the acceleration is above acc_thresh at least once during the interval defined by the velocity threshold, c) the saccade lasts at least min_duration ms and d) the distance between saccade start and enpoint is at least min_movement degrees. ''' if ignore_blinks: velocity, acceleration = get_velocity(samplemat, float(Hz), blinks=samplemat.blinks) else: velocity, acceleration = get_velocity(samplemat, float(Hz)) saccades = (velocity > threshold) #print velocity[samplemat.blinks[1:]] #print saccades[samplemat.blinks[1:]] borders = np.where(np.diff(saccades.astype(int)))[0] + 1 if velocity[1] > threshold: borders = np.hstack(([0], borders)) saccade = 0 * np.ones(samplemat.x.shape) # Only count saccades when acceleration also surpasses threshold for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): if sum(acceleration[start:end] > acc_thresh) >= 1: saccade[start:end] = 1 borders = np.where(np.diff(saccade.astype(int)))[0] + 1 if saccade[0] == 0: borders = np.hstack(([0], borders)) for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): if (1000*(end - start) / float(Hz)) < (min_duration): saccade[start:end] = 1 # Delete saccade between fixations that are too close together. dists_ok = False while not dists_ok: dists_ok = True num_merges = 0 for i, (lfixstart, lfixend, start, end, nfixstart, nfixend) in enumerate(zip( borders[0::2], borders[1::2], borders[1::2], borders[2::2], borders[2::2], borders[3::2])): lastx = samplemat.x[lfixstart:lfixend].mean() lasty = samplemat.y[lfixstart:lfixend].mean() nextx = samplemat.x[nfixstart:nfixend].mean() nexty = samplemat.y[nfixstart:nfixend].mean() if (1000*(lfixend - lfixstart) / float(Hz)) < (min_duration): saccade[lfixstart:lfixend] = 1 continue distance = ((nextx - lastx) ** 2 + (nexty - lasty) ** 2) ** .5 if distance < min_movement: num_merges += 1 dists_ok = False saccade[start:end] = 0 borders = np.where(np.diff(saccade.astype(int)))[0] + 1 if saccade[0] == 0: borders = np.hstack(([0], borders)) return saccade.astype(bool)
python
def saccade_detection(samplemat, Hz=200, threshold=30, acc_thresh=2000, min_duration=21, min_movement=.35, ignore_blinks=False): ''' Detect saccades in a stream of gaze location samples. Coordinates in samplemat are assumed to be in degrees. Saccades are detect by a velocity/acceleration threshold approach. A saccade starts when a) the velocity is above threshold, b) the acceleration is above acc_thresh at least once during the interval defined by the velocity threshold, c) the saccade lasts at least min_duration ms and d) the distance between saccade start and enpoint is at least min_movement degrees. ''' if ignore_blinks: velocity, acceleration = get_velocity(samplemat, float(Hz), blinks=samplemat.blinks) else: velocity, acceleration = get_velocity(samplemat, float(Hz)) saccades = (velocity > threshold) #print velocity[samplemat.blinks[1:]] #print saccades[samplemat.blinks[1:]] borders = np.where(np.diff(saccades.astype(int)))[0] + 1 if velocity[1] > threshold: borders = np.hstack(([0], borders)) saccade = 0 * np.ones(samplemat.x.shape) # Only count saccades when acceleration also surpasses threshold for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): if sum(acceleration[start:end] > acc_thresh) >= 1: saccade[start:end] = 1 borders = np.where(np.diff(saccade.astype(int)))[0] + 1 if saccade[0] == 0: borders = np.hstack(([0], borders)) for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): if (1000*(end - start) / float(Hz)) < (min_duration): saccade[start:end] = 1 # Delete saccade between fixations that are too close together. dists_ok = False while not dists_ok: dists_ok = True num_merges = 0 for i, (lfixstart, lfixend, start, end, nfixstart, nfixend) in enumerate(zip( borders[0::2], borders[1::2], borders[1::2], borders[2::2], borders[2::2], borders[3::2])): lastx = samplemat.x[lfixstart:lfixend].mean() lasty = samplemat.y[lfixstart:lfixend].mean() nextx = samplemat.x[nfixstart:nfixend].mean() nexty = samplemat.y[nfixstart:nfixend].mean() if (1000*(lfixend - lfixstart) / float(Hz)) < (min_duration): saccade[lfixstart:lfixend] = 1 continue distance = ((nextx - lastx) ** 2 + (nexty - lasty) ** 2) ** .5 if distance < min_movement: num_merges += 1 dists_ok = False saccade[start:end] = 0 borders = np.where(np.diff(saccade.astype(int)))[0] + 1 if saccade[0] == 0: borders = np.hstack(([0], borders)) return saccade.astype(bool)
[ "def", "saccade_detection", "(", "samplemat", ",", "Hz", "=", "200", ",", "threshold", "=", "30", ",", "acc_thresh", "=", "2000", ",", "min_duration", "=", "21", ",", "min_movement", "=", ".35", ",", "ignore_blinks", "=", "False", ")", ":", "if", "ignore_blinks", ":", "velocity", ",", "acceleration", "=", "get_velocity", "(", "samplemat", ",", "float", "(", "Hz", ")", ",", "blinks", "=", "samplemat", ".", "blinks", ")", "else", ":", "velocity", ",", "acceleration", "=", "get_velocity", "(", "samplemat", ",", "float", "(", "Hz", ")", ")", "saccades", "=", "(", "velocity", ">", "threshold", ")", "#print velocity[samplemat.blinks[1:]]", "#print saccades[samplemat.blinks[1:]]", "borders", "=", "np", ".", "where", "(", "np", ".", "diff", "(", "saccades", ".", "astype", "(", "int", ")", ")", ")", "[", "0", "]", "+", "1", "if", "velocity", "[", "1", "]", ">", "threshold", ":", "borders", "=", "np", ".", "hstack", "(", "(", "[", "0", "]", ",", "borders", ")", ")", "saccade", "=", "0", "*", "np", ".", "ones", "(", "samplemat", ".", "x", ".", "shape", ")", "# Only count saccades when acceleration also surpasses threshold", "for", "i", ",", "(", "start", ",", "end", ")", "in", "enumerate", "(", "zip", "(", "borders", "[", "0", ":", ":", "2", "]", ",", "borders", "[", "1", ":", ":", "2", "]", ")", ")", ":", "if", "sum", "(", "acceleration", "[", "start", ":", "end", "]", ">", "acc_thresh", ")", ">=", "1", ":", "saccade", "[", "start", ":", "end", "]", "=", "1", "borders", "=", "np", ".", "where", "(", "np", ".", "diff", "(", "saccade", ".", "astype", "(", "int", ")", ")", ")", "[", "0", "]", "+", "1", "if", "saccade", "[", "0", "]", "==", "0", ":", "borders", "=", "np", ".", "hstack", "(", "(", "[", "0", "]", ",", "borders", ")", ")", "for", "i", ",", "(", "start", ",", "end", ")", "in", "enumerate", "(", "zip", "(", "borders", "[", "0", ":", ":", "2", "]", ",", "borders", "[", "1", ":", ":", "2", "]", ")", ")", ":", "if", "(", "1000", "*", "(", "end", "-", "start", ")", "/", "float", "(", "Hz", ")", ")", "<", "(", "min_duration", ")", ":", "saccade", "[", "start", ":", "end", "]", "=", "1", "# Delete saccade between fixations that are too close together.", "dists_ok", "=", "False", "while", "not", "dists_ok", ":", "dists_ok", "=", "True", "num_merges", "=", "0", "for", "i", ",", "(", "lfixstart", ",", "lfixend", ",", "start", ",", "end", ",", "nfixstart", ",", "nfixend", ")", "in", "enumerate", "(", "zip", "(", "borders", "[", "0", ":", ":", "2", "]", ",", "borders", "[", "1", ":", ":", "2", "]", ",", "borders", "[", "1", ":", ":", "2", "]", ",", "borders", "[", "2", ":", ":", "2", "]", ",", "borders", "[", "2", ":", ":", "2", "]", ",", "borders", "[", "3", ":", ":", "2", "]", ")", ")", ":", "lastx", "=", "samplemat", ".", "x", "[", "lfixstart", ":", "lfixend", "]", ".", "mean", "(", ")", "lasty", "=", "samplemat", ".", "y", "[", "lfixstart", ":", "lfixend", "]", ".", "mean", "(", ")", "nextx", "=", "samplemat", ".", "x", "[", "nfixstart", ":", "nfixend", "]", ".", "mean", "(", ")", "nexty", "=", "samplemat", ".", "y", "[", "nfixstart", ":", "nfixend", "]", ".", "mean", "(", ")", "if", "(", "1000", "*", "(", "lfixend", "-", "lfixstart", ")", "/", "float", "(", "Hz", ")", ")", "<", "(", "min_duration", ")", ":", "saccade", "[", "lfixstart", ":", "lfixend", "]", "=", "1", "continue", "distance", "=", "(", "(", "nextx", "-", "lastx", ")", "**", "2", "+", "(", "nexty", "-", "lasty", ")", "**", "2", ")", "**", ".5", "if", "distance", "<", "min_movement", ":", "num_merges", "+=", "1", "dists_ok", "=", "False", "saccade", "[", "start", ":", "end", "]", "=", "0", "borders", "=", "np", ".", "where", "(", "np", ".", "diff", "(", "saccade", ".", "astype", "(", "int", ")", ")", ")", "[", "0", "]", "+", "1", "if", "saccade", "[", "0", "]", "==", "0", ":", "borders", "=", "np", ".", "hstack", "(", "(", "[", "0", "]", ",", "borders", ")", ")", "return", "saccade", ".", "astype", "(", "bool", ")" ]
Detect saccades in a stream of gaze location samples. Coordinates in samplemat are assumed to be in degrees. Saccades are detected by a velocity/acceleration threshold approach. A saccade starts when a) the velocity is above threshold, b) the acceleration is above acc_thresh at least once during the interval defined by the velocity threshold, c) the saccade lasts at least min_duration ms and d) the distance between saccade start and endpoint is at least min_movement degrees.
[ "Detect", "saccades", "in", "a", "stream", "of", "gaze", "location", "samples", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/samples2fix.py#L33-L98
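A hedged sketch of `saccade_detection` on synthetic samples (a fixation, a fast 10-degree jump, another fixation); all thresholds are left at their defaults and the stand-in class is an assumption, not an ocupy API:

```python
import numpy as np
from ocupy import samples2fix

class FakeSamples:
    # 100 samples fixating at x=0, a 5-sample jump to x=10, 100 more samples.
    x = np.concatenate([np.zeros(100), np.linspace(0, 10, 5),
                        np.full(100, 10.0)])
    y = np.zeros(205)

sacc = samples2fix.saccade_detection(FakeSamples(), Hz=200)
print(sacc.dtype, int(sacc.sum()))  # boolean mask, True during the saccade
```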
nwilming/ocupy
ocupy/samples2fix.py
fixation_detection
def fixation_detection(samplemat, saccades, Hz=200, samples2fix=None, respect_trial_borders=False, sample_times=None): ''' Detect Fixation from saccades. Fixations are defined as intervals between saccades. This function also calcuates start and end times (in ms) for each fixation. Input: samplemat: datamat Contains the recorded samples and associated metadata. saccades: ndarray Logical vector that is True for samples that belong to a saccade. Hz: Float Number of samples per second. samples2fix: Dict There is usually metadata associated with the samples (e.g. the trial number). This dictionary can be used to specify how the metadata should be collapsed for one fixation. It contains field names from samplemat as keys and functions as values that return one value when they are called with all samples for one fixation. In addition the function can raise an 'InvalidFixation' exception to signal that the fixation should be discarded. ''' if samples2fix is None: samples2fix = {} fixations = ~saccades acc = AccumulatorFactory() if not respect_trial_borders: borders = np.where(np.diff(fixations.astype(int)))[0] + 1 else: borders = np.where( ~(np.diff(fixations.astype(int)) == 0) | ~(np.diff(samplemat.trial.astype(int)) == 0))[0] + 1 fixations = 0 * saccades.copy() if not saccades[0]: borders = np.hstack(([0], borders)) #lasts,laste = borders[0], borders[1] for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): current = {} for k in samplemat.fieldnames(): if k in list(samples2fix.keys()): current[k] = samples2fix[k](samplemat, k, start, end) else: current[k] = np.mean(samplemat.field(k)[start:end]) current['start_sample'] = start current['end_sample'] = end fixations[start:end] = 1 # Calculate start and end time in ms if sample_times is None: current['start'] = 1000 * start / Hz current['end'] = 1000 * end / Hz else: current['start'] = sample_times[start] current['end'] = sample_times[end] #lasts, laste = start,end acc.update(current) return acc.get_dm(params=samplemat.parameters()), fixations.astype(bool)
python
def fixation_detection(samplemat, saccades, Hz=200, samples2fix=None, respect_trial_borders=False, sample_times=None): ''' Detect Fixation from saccades. Fixations are defined as intervals between saccades. This function also calcuates start and end times (in ms) for each fixation. Input: samplemat: datamat Contains the recorded samples and associated metadata. saccades: ndarray Logical vector that is True for samples that belong to a saccade. Hz: Float Number of samples per second. samples2fix: Dict There is usually metadata associated with the samples (e.g. the trial number). This dictionary can be used to specify how the metadata should be collapsed for one fixation. It contains field names from samplemat as keys and functions as values that return one value when they are called with all samples for one fixation. In addition the function can raise an 'InvalidFixation' exception to signal that the fixation should be discarded. ''' if samples2fix is None: samples2fix = {} fixations = ~saccades acc = AccumulatorFactory() if not respect_trial_borders: borders = np.where(np.diff(fixations.astype(int)))[0] + 1 else: borders = np.where( ~(np.diff(fixations.astype(int)) == 0) | ~(np.diff(samplemat.trial.astype(int)) == 0))[0] + 1 fixations = 0 * saccades.copy() if not saccades[0]: borders = np.hstack(([0], borders)) #lasts,laste = borders[0], borders[1] for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): current = {} for k in samplemat.fieldnames(): if k in list(samples2fix.keys()): current[k] = samples2fix[k](samplemat, k, start, end) else: current[k] = np.mean(samplemat.field(k)[start:end]) current['start_sample'] = start current['end_sample'] = end fixations[start:end] = 1 # Calculate start and end time in ms if sample_times is None: current['start'] = 1000 * start / Hz current['end'] = 1000 * end / Hz else: current['start'] = sample_times[start] current['end'] = sample_times[end] #lasts, laste = start,end acc.update(current) return acc.get_dm(params=samplemat.parameters()), fixations.astype(bool)
[ "def", "fixation_detection", "(", "samplemat", ",", "saccades", ",", "Hz", "=", "200", ",", "samples2fix", "=", "None", ",", "respect_trial_borders", "=", "False", ",", "sample_times", "=", "None", ")", ":", "if", "samples2fix", "is", "None", ":", "samples2fix", "=", "{", "}", "fixations", "=", "~", "saccades", "acc", "=", "AccumulatorFactory", "(", ")", "if", "not", "respect_trial_borders", ":", "borders", "=", "np", ".", "where", "(", "np", ".", "diff", "(", "fixations", ".", "astype", "(", "int", ")", ")", ")", "[", "0", "]", "+", "1", "else", ":", "borders", "=", "np", ".", "where", "(", "~", "(", "np", ".", "diff", "(", "fixations", ".", "astype", "(", "int", ")", ")", "==", "0", ")", "|", "~", "(", "np", ".", "diff", "(", "samplemat", ".", "trial", ".", "astype", "(", "int", ")", ")", "==", "0", ")", ")", "[", "0", "]", "+", "1", "fixations", "=", "0", "*", "saccades", ".", "copy", "(", ")", "if", "not", "saccades", "[", "0", "]", ":", "borders", "=", "np", ".", "hstack", "(", "(", "[", "0", "]", ",", "borders", ")", ")", "#lasts,laste = borders[0], borders[1]", "for", "i", ",", "(", "start", ",", "end", ")", "in", "enumerate", "(", "zip", "(", "borders", "[", "0", ":", ":", "2", "]", ",", "borders", "[", "1", ":", ":", "2", "]", ")", ")", ":", "current", "=", "{", "}", "for", "k", "in", "samplemat", ".", "fieldnames", "(", ")", ":", "if", "k", "in", "list", "(", "samples2fix", ".", "keys", "(", ")", ")", ":", "current", "[", "k", "]", "=", "samples2fix", "[", "k", "]", "(", "samplemat", ",", "k", ",", "start", ",", "end", ")", "else", ":", "current", "[", "k", "]", "=", "np", ".", "mean", "(", "samplemat", ".", "field", "(", "k", ")", "[", "start", ":", "end", "]", ")", "current", "[", "'start_sample'", "]", "=", "start", "current", "[", "'end_sample'", "]", "=", "end", "fixations", "[", "start", ":", "end", "]", "=", "1", "# Calculate start and end time in ms", "if", "sample_times", "is", "None", ":", "current", "[", "'start'", "]", "=", "1000", "*", "start", "/", "Hz", "current", "[", "'end'", "]", "=", "1000", "*", "end", "/", "Hz", "else", ":", "current", "[", "'start'", "]", "=", "sample_times", "[", "start", "]", "current", "[", "'end'", "]", "=", "sample_times", "[", "end", "]", "#lasts, laste = start,end", "acc", ".", "update", "(", "current", ")", "return", "acc", ".", "get_dm", "(", "params", "=", "samplemat", ".", "parameters", "(", ")", ")", ",", "fixations", ".", "astype", "(", "bool", ")" ]
Detect fixations from saccades. Fixations are defined as intervals between saccades. This function also calculates start and end times (in ms) for each fixation. Input: samplemat: datamat Contains the recorded samples and associated metadata. saccades: ndarray Logical vector that is True for samples that belong to a saccade. Hz: Float Number of samples per second. samples2fix: Dict There is usually metadata associated with the samples (e.g. the trial number). This dictionary can be used to specify how the metadata should be collapsed for one fixation. It contains field names from samplemat as keys and functions as values that return one value when they are called with all samples for one fixation. In addition the function can raise an 'InvalidFixation' exception to signal that the fixation should be discarded.
[ "Detect", "Fixation", "from", "saccades", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/samples2fix.py#L101-L161
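A sketch of the fixation step; `samplemat` must be a real datamat here (the function calls `fieldnames()`, `field()` and `parameters()` on it), so this is illustrative rather than fully self-contained. The `samples2fix` mapping demonstrates the collapse hook described in the docstring:

```python
# Collapse 'trial' by taking the value at the fixation's first sample
# instead of averaging (the default). The hook signature, per the code,
# is (samplemat, fieldname, start, end) -> value.
collapse = {'trial': lambda mat, key, start, end: mat.field(key)[start]}

fixations, fixmask = samples2fix.fixation_detection(
    samplemat, sacc, Hz=200, samples2fix=collapse)
print(fixations.start[:5], fixations.end[:5])  # fixation onsets/offsets in ms
```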
kurtmckee/listparser
listparser/__init__.py
parse
def parse(parse_obj, agent=None, etag=None, modified=None, inject=False): """Parse a subscription list and return a dict containing the results. :param parse_obj: A file-like object or a string containing a URL, an absolute or relative filename, or an XML document. :type parse_obj: str or file :param agent: User-Agent header to be sent when requesting a URL :type agent: str :param etag: The ETag header to be sent when requesting a URL. :type etag: str :param modified: The Last-Modified header to be sent when requesting a URL. :type modified: str or datetime.datetime :returns: All of the parsed information, webserver HTTP response headers, and any exception encountered. :rtype: dict :py:func:`~listparser.parse` is the only public function exposed by listparser. If *parse_obj* is a URL, the *agent* will identify the software making the request, *etag* will identify the last HTTP ETag header returned by the webserver, and *modified* will identify the last HTTP Last-Modified header returned by the webserver. *agent* and *etag* must be strings, while *modified* can be either a string or a Python *datetime.datetime* object. If *agent* is not provided, the :py:data:`~listparser.USER_AGENT` global variable will be used by default. """ guarantees = common.SuperDict({ 'bozo': 0, 'feeds': [], 'lists': [], 'opportunities': [], 'meta': common.SuperDict(), 'version': '', }) fileobj, info = _mkfile(parse_obj, (agent or USER_AGENT), etag, modified) guarantees.update(info) if not fileobj: return guarantees handler = Handler() handler.harvest.update(guarantees) parser = xml.sax.make_parser() parser.setFeature(xml.sax.handler.feature_namespaces, True) parser.setContentHandler(handler) parser.setErrorHandler(handler) if inject: fileobj = Injector(fileobj) try: parser.parse(fileobj) except (SAXParseException, MalformedByteSequenceException): # noqa: E501 # pragma: no cover # Jython propagates exceptions past the ErrorHandler. err = sys.exc_info()[1] handler.harvest.bozo = 1 handler.harvest.bozo_exception = err finally: fileobj.close() # Test if a DOCTYPE injection is needed if hasattr(handler.harvest, 'bozo_exception'): if 'entity' in handler.harvest.bozo_exception.__str__(): if not inject: return parse(parse_obj, agent, etag, modified, True) # Make it clear that the XML file is broken # (if no other exception has been assigned) if inject and not handler.harvest.bozo: handler.harvest.bozo = 1 handler.harvest.bozo_exception = ListError('undefined entity found') return handler.harvest
python
def parse(parse_obj, agent=None, etag=None, modified=None, inject=False): """Parse a subscription list and return a dict containing the results. :param parse_obj: A file-like object or a string containing a URL, an absolute or relative filename, or an XML document. :type parse_obj: str or file :param agent: User-Agent header to be sent when requesting a URL :type agent: str :param etag: The ETag header to be sent when requesting a URL. :type etag: str :param modified: The Last-Modified header to be sent when requesting a URL. :type modified: str or datetime.datetime :returns: All of the parsed information, webserver HTTP response headers, and any exception encountered. :rtype: dict :py:func:`~listparser.parse` is the only public function exposed by listparser. If *parse_obj* is a URL, the *agent* will identify the software making the request, *etag* will identify the last HTTP ETag header returned by the webserver, and *modified* will identify the last HTTP Last-Modified header returned by the webserver. *agent* and *etag* must be strings, while *modified* can be either a string or a Python *datetime.datetime* object. If *agent* is not provided, the :py:data:`~listparser.USER_AGENT` global variable will be used by default. """ guarantees = common.SuperDict({ 'bozo': 0, 'feeds': [], 'lists': [], 'opportunities': [], 'meta': common.SuperDict(), 'version': '', }) fileobj, info = _mkfile(parse_obj, (agent or USER_AGENT), etag, modified) guarantees.update(info) if not fileobj: return guarantees handler = Handler() handler.harvest.update(guarantees) parser = xml.sax.make_parser() parser.setFeature(xml.sax.handler.feature_namespaces, True) parser.setContentHandler(handler) parser.setErrorHandler(handler) if inject: fileobj = Injector(fileobj) try: parser.parse(fileobj) except (SAXParseException, MalformedByteSequenceException): # noqa: E501 # pragma: no cover # Jython propagates exceptions past the ErrorHandler. err = sys.exc_info()[1] handler.harvest.bozo = 1 handler.harvest.bozo_exception = err finally: fileobj.close() # Test if a DOCTYPE injection is needed if hasattr(handler.harvest, 'bozo_exception'): if 'entity' in handler.harvest.bozo_exception.__str__(): if not inject: return parse(parse_obj, agent, etag, modified, True) # Make it clear that the XML file is broken # (if no other exception has been assigned) if inject and not handler.harvest.bozo: handler.harvest.bozo = 1 handler.harvest.bozo_exception = ListError('undefined entity found') return handler.harvest
[ "def", "parse", "(", "parse_obj", ",", "agent", "=", "None", ",", "etag", "=", "None", ",", "modified", "=", "None", ",", "inject", "=", "False", ")", ":", "guarantees", "=", "common", ".", "SuperDict", "(", "{", "'bozo'", ":", "0", ",", "'feeds'", ":", "[", "]", ",", "'lists'", ":", "[", "]", ",", "'opportunities'", ":", "[", "]", ",", "'meta'", ":", "common", ".", "SuperDict", "(", ")", ",", "'version'", ":", "''", ",", "}", ")", "fileobj", ",", "info", "=", "_mkfile", "(", "parse_obj", ",", "(", "agent", "or", "USER_AGENT", ")", ",", "etag", ",", "modified", ")", "guarantees", ".", "update", "(", "info", ")", "if", "not", "fileobj", ":", "return", "guarantees", "handler", "=", "Handler", "(", ")", "handler", ".", "harvest", ".", "update", "(", "guarantees", ")", "parser", "=", "xml", ".", "sax", ".", "make_parser", "(", ")", "parser", ".", "setFeature", "(", "xml", ".", "sax", ".", "handler", ".", "feature_namespaces", ",", "True", ")", "parser", ".", "setContentHandler", "(", "handler", ")", "parser", ".", "setErrorHandler", "(", "handler", ")", "if", "inject", ":", "fileobj", "=", "Injector", "(", "fileobj", ")", "try", ":", "parser", ".", "parse", "(", "fileobj", ")", "except", "(", "SAXParseException", ",", "MalformedByteSequenceException", ")", ":", "# noqa: E501 # pragma: no cover", "# Jython propagates exceptions past the ErrorHandler.", "err", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "handler", ".", "harvest", ".", "bozo", "=", "1", "handler", ".", "harvest", ".", "bozo_exception", "=", "err", "finally", ":", "fileobj", ".", "close", "(", ")", "# Test if a DOCTYPE injection is needed", "if", "hasattr", "(", "handler", ".", "harvest", ",", "'bozo_exception'", ")", ":", "if", "'entity'", "in", "handler", ".", "harvest", ".", "bozo_exception", ".", "__str__", "(", ")", ":", "if", "not", "inject", ":", "return", "parse", "(", "parse_obj", ",", "agent", ",", "etag", ",", "modified", ",", "True", ")", "# Make it clear that the XML file is broken", "# (if no other exception has been assigned)", "if", "inject", "and", "not", "handler", ".", "harvest", ".", "bozo", ":", "handler", ".", "harvest", ".", "bozo", "=", "1", "handler", ".", "harvest", ".", "bozo_exception", "=", "ListError", "(", "'undefined entity found'", ")", "return", "handler", ".", "harvest" ]
Parse a subscription list and return a dict containing the results. :param parse_obj: A file-like object or a string containing a URL, an absolute or relative filename, or an XML document. :type parse_obj: str or file :param agent: User-Agent header to be sent when requesting a URL :type agent: str :param etag: The ETag header to be sent when requesting a URL. :type etag: str :param modified: The Last-Modified header to be sent when requesting a URL. :type modified: str or datetime.datetime :returns: All of the parsed information, webserver HTTP response headers, and any exception encountered. :rtype: dict :py:func:`~listparser.parse` is the only public function exposed by listparser. If *parse_obj* is a URL, the *agent* will identify the software making the request, *etag* will identify the last HTTP ETag header returned by the webserver, and *modified* will identify the last HTTP Last-Modified header returned by the webserver. *agent* and *etag* must be strings, while *modified* can be either a string or a Python *datetime.datetime* object. If *agent* is not provided, the :py:data:`~listparser.USER_AGENT` global variable will be used by default.
[ "Parse", "a", "subscription", "list", "and", "return", "a", "dict", "containing", "the", "results", "." ]
train
https://github.com/kurtmckee/listparser/blob/f9bc310a0ce567cd0611fea68be99974021f53c7/listparser/__init__.py#L71-L144
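A typical call to `parse`; the URL is a placeholder:

```python
import listparser

result = listparser.parse('https://example.com/subscriptions.opml')
if result.bozo:
    print('problem while parsing:', result.bozo_exception)
for feed in result.feeds:
    print(feed.url)  # feed entries also carry title/tag metadata
```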
nwilming/ocupy
ocupy/stimuli.py
FixmatStimuliFactory
def FixmatStimuliFactory(fm, loader): """ Constructs an categories object for all image / category combinations in the fixmat. Parameters: fm: FixMat Used for extracting valid category/image combination. loader: loader Loader that accesses the stimuli for this fixmat Returns: Categories object """ # Find all feature names features = [] if loader.ftrpath: assert os.access(loader.ftrpath, os.R_OK) features = os.listdir(os.path.join(loader.ftrpath, str(fm.category[0]))) # Find all images in all categories img_per_cat = {} for cat in np.unique(fm.category): if not loader.test_for_category(cat): raise ValueError('Category %s is specified in fixmat but '%( str(cat) + 'can not be located by loader')) img_per_cat[cat] = [] for img in np.unique(fm[(fm.category == cat)].filenumber): if not loader.test_for_image(cat, img): raise ValueError('Image %s in category %s is '%(str(cat), str(img)) + 'specified in fixmat but can be located by loader') img_per_cat[cat].append(img) if loader.ftrpath: for feature in features: if not loader.test_for_feature(cat, img, feature): raise RuntimeError( 'Feature %s for image %s' %(str(feature),str(img)) + ' in category %s ' %str(cat) + 'can not be located by loader') return Categories(loader, img_per_cat = img_per_cat, features = features, fixations = fm)
python
def FixmatStimuliFactory(fm, loader): """ Constructs an categories object for all image / category combinations in the fixmat. Parameters: fm: FixMat Used for extracting valid category/image combination. loader: loader Loader that accesses the stimuli for this fixmat Returns: Categories object """ # Find all feature names features = [] if loader.ftrpath: assert os.access(loader.ftrpath, os.R_OK) features = os.listdir(os.path.join(loader.ftrpath, str(fm.category[0]))) # Find all images in all categories img_per_cat = {} for cat in np.unique(fm.category): if not loader.test_for_category(cat): raise ValueError('Category %s is specified in fixmat but '%( str(cat) + 'can not be located by loader')) img_per_cat[cat] = [] for img in np.unique(fm[(fm.category == cat)].filenumber): if not loader.test_for_image(cat, img): raise ValueError('Image %s in category %s is '%(str(cat), str(img)) + 'specified in fixmat but can be located by loader') img_per_cat[cat].append(img) if loader.ftrpath: for feature in features: if not loader.test_for_feature(cat, img, feature): raise RuntimeError( 'Feature %s for image %s' %(str(feature),str(img)) + ' in category %s ' %str(cat) + 'can not be located by loader') return Categories(loader, img_per_cat = img_per_cat, features = features, fixations = fm)
[ "def", "FixmatStimuliFactory", "(", "fm", ",", "loader", ")", ":", "# Find all feature names", "features", "=", "[", "]", "if", "loader", ".", "ftrpath", ":", "assert", "os", ".", "access", "(", "loader", ".", "ftrpath", ",", "os", ".", "R_OK", ")", "features", "=", "os", ".", "listdir", "(", "os", ".", "path", ".", "join", "(", "loader", ".", "ftrpath", ",", "str", "(", "fm", ".", "category", "[", "0", "]", ")", ")", ")", "# Find all images in all categories ", "img_per_cat", "=", "{", "}", "for", "cat", "in", "np", ".", "unique", "(", "fm", ".", "category", ")", ":", "if", "not", "loader", ".", "test_for_category", "(", "cat", ")", ":", "raise", "ValueError", "(", "'Category %s is specified in fixmat but '", "%", "(", "str", "(", "cat", ")", "+", "'can not be located by loader'", ")", ")", "img_per_cat", "[", "cat", "]", "=", "[", "]", "for", "img", "in", "np", ".", "unique", "(", "fm", "[", "(", "fm", ".", "category", "==", "cat", ")", "]", ".", "filenumber", ")", ":", "if", "not", "loader", ".", "test_for_image", "(", "cat", ",", "img", ")", ":", "raise", "ValueError", "(", "'Image %s in category %s is '", "%", "(", "str", "(", "cat", ")", ",", "str", "(", "img", ")", ")", "+", "'specified in fixmat but can be located by loader'", ")", "img_per_cat", "[", "cat", "]", ".", "append", "(", "img", ")", "if", "loader", ".", "ftrpath", ":", "for", "feature", "in", "features", ":", "if", "not", "loader", ".", "test_for_feature", "(", "cat", ",", "img", ",", "feature", ")", ":", "raise", "RuntimeError", "(", "'Feature %s for image %s'", "%", "(", "str", "(", "feature", ")", ",", "str", "(", "img", ")", ")", "+", "' in category %s '", "%", "str", "(", "cat", ")", "+", "'can not be located by loader'", ")", "return", "Categories", "(", "loader", ",", "img_per_cat", "=", "img_per_cat", ",", "features", "=", "features", ",", "fixations", "=", "fm", ")" ]
Constructs a categories object for all image / category combinations in the fixmat. Parameters: fm: FixMat Used for extracting valid category/image combinations. loader: loader Loader that accesses the stimuli for this fixmat Returns: Categories object
[ "Constructs", "an", "categories", "object", "for", "all", "image", "/", "category", "combinations", "in", "the", "fixmat", ".", "Parameters", ":", "fm", ":", "FixMat", "Used", "for", "extracting", "valid", "category", "/", "image", "combination", ".", "loader", ":", "loader", "Loader", "that", "accesses", "the", "stimuli", "for", "this", "fixmat", "Returns", ":", "Categories", "object" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/stimuli.py#L177-L217
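The grouping step at the heart of this factory is plain numpy: one list of unique filenumbers per unique category. A minimal standalone sketch of that step (the arrays are made-up stand-ins for fm.category and fm.filenumber, not the ocupy API):
import numpy as np

# Hypothetical stand-ins for the fixmat fields fm.category and fm.filenumber.
category = np.array([1, 1, 2, 2, 2, 7])
filenumber = np.array([10, 11, 10, 10, 12, 3])

img_per_cat = {}
for cat in np.unique(category):
    # unique filenumbers of all fixations falling into this category
    img_per_cat[int(cat)] = np.unique(filenumber[category == cat]).tolist()

print(img_per_cat)  # {1: [10, 11], 2: [10, 12], 7: [3]}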
nwilming/ocupy
ocupy/stimuli.py
DirectoryStimuliFactory
def DirectoryStimuliFactory(loader): """ Takes an input path to the images folder of an experiment and automatically generates the category - filenumber list needed to construct an appropriate _categories object. Parameters: loader : Loader object which contains impath : string path to the input (i.e. image) files of the experiment. All subfolders in that path will be treated as categories. If no subfolders are present, category 1 will be assigned and all files in the folder are considered input images. Images have to end in '.png'. ftrpath : string path to the feature folder. It is expected that the folder structure corresponds to the structure in impath, i.e. ftrpath/category/featurefolder/featuremap.mat Furthermore, features are assumed to be the same for all categories. """ impath = loader.impath ftrpath = loader.ftrpath # checks whether user has reading permission for the path assert os.access(impath, os.R_OK) assert os.access(ftrpath, os.R_OK) # EXTRACTING IMAGE NAMES img_per_cat = {} # extract only directories in the given folder subfolders = [name for name in os.listdir(impath) if os.path.isdir( os.path.join(impath, name))] # if there are no subfolders, walk through files. Take 1 as key for the # categories object if not subfolders: [_, _, files] = next(os.walk(os.path.join(impath))) # this only takes entries that end with '.png' entries = {1: [int(cur_file[cur_file.find('_')+1:-4]) for cur_file in files if cur_file.endswith('.png')]} img_per_cat.update(entries) subfolders = [''] # if there are subfolders, walk through them else: for directory in subfolders: [_, _, files] = next(os.walk(os.path.join(impath, directory))) # this only takes entries that end with '.png'. Strips ending and # considers everything after the first '_' as the imagenumber imagenumbers = [int(cur_file[cur_file.find('_')+1:-4]) for cur_file in files if (cur_file.endswith('.png') & (len(cur_file) > 4))] entries = {int(directory): imagenumbers} img_per_cat.update(entries) del directory del imagenumbers # in case subfolders do not exist, '' is appended here. _, features, files = next(os.walk(os.path.join(ftrpath, subfolders[0]))) return Categories(loader, img_per_cat = img_per_cat, features = features)
python
def DirectoryStimuliFactory(loader): """ Takes an input path to the images folder of an experiment and automatically generates the category - filenumber list needed to construct an appropriate _categories object. Parameters: loader : Loader object which contains impath : string path to the input (i.e. image) files of the experiment. All subfolders in that path will be treated as categories. If no subfolders are present, category 1 will be assigned and all files in the folder are considered input images. Images have to end in '.png'. ftrpath : string path to the feature folder. It is expected that the folder structure corresponds to the structure in impath, i.e. ftrpath/category/featurefolder/featuremap.mat Furthermore, features are assumed to be the same for all categories. """ impath = loader.impath ftrpath = loader.ftrpath # checks whether user has reading permission for the path assert os.access(impath, os.R_OK) assert os.access(ftrpath, os.R_OK) # EXTRACTING IMAGE NAMES img_per_cat = {} # extract only directories in the given folder subfolders = [name for name in os.listdir(impath) if os.path.isdir( os.path.join(impath, name))] # if there are no subfolders, walk through files. Take 1 as key for the # categories object if not subfolders: [_, _, files] = next(os.walk(os.path.join(impath))) # this only takes entries that end with '.png' entries = {1: [int(cur_file[cur_file.find('_')+1:-4]) for cur_file in files if cur_file.endswith('.png')]} img_per_cat.update(entries) subfolders = [''] # if there are subfolders, walk through them else: for directory in subfolders: [_, _, files] = next(os.walk(os.path.join(impath, directory))) # this only takes entries that end with '.png'. Strips ending and # considers everything after the first '_' as the imagenumber imagenumbers = [int(cur_file[cur_file.find('_')+1:-4]) for cur_file in files if (cur_file.endswith('.png') & (len(cur_file) > 4))] entries = {int(directory): imagenumbers} img_per_cat.update(entries) del directory del imagenumbers # in case subfolders do not exist, '' is appended here. _, features, files = next(os.walk(os.path.join(ftrpath, subfolders[0]))) return Categories(loader, img_per_cat = img_per_cat, features = features)
[ "def", "DirectoryStimuliFactory", "(", "loader", ")", ":", "impath", "=", "loader", ".", "impath", "ftrpath", "=", "loader", ".", "ftrpath", "# checks whether user has reading permission for the path", "assert", "os", ".", "access", "(", "impath", ",", "os", ".", "R_OK", ")", "assert", "os", ".", "access", "(", "ftrpath", ",", "os", ".", "R_OK", ")", "# EXTRACTING IMAGE NAMES", "img_per_cat", "=", "{", "}", "# extract only directories in the given folder", "subfolders", "=", "[", "name", "for", "name", "in", "os", ".", "listdir", "(", "impath", ")", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "impath", ",", "name", ")", ")", "]", "# if there are no subfolders, walk through files. Take 1 as key for the ", "# categories object", "if", "not", "subfolders", ":", "[", "_", ",", "_", ",", "files", "]", "=", "next", "(", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "impath", ")", ")", ")", "# this only takes entries that end with '.png'", "entries", "=", "{", "1", ":", "[", "int", "(", "cur_file", "[", "cur_file", ".", "find", "(", "'_'", ")", "+", "1", ":", "-", "4", "]", ")", "for", "cur_file", "in", "files", "if", "cur_file", ".", "endswith", "(", "'.png'", ")", "]", "}", "img_per_cat", ".", "update", "(", "entries", ")", "subfolders", "=", "[", "''", "]", "# if there are subfolders, walk through them", "else", ":", "for", "directory", "in", "subfolders", ":", "[", "_", ",", "_", ",", "files", "]", "=", "next", "(", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "impath", ",", "directory", ")", ")", ")", "# this only takes entries that end with '.png'. Strips ending and", "# considers everything after the first '_' as the imagenumber", "imagenumbers", "=", "[", "int", "(", "cur_file", "[", "cur_file", ".", "find", "(", "'_'", ")", "+", "1", ":", "-", "4", "]", ")", "for", "cur_file", "in", "files", "if", "(", "cur_file", ".", "endswith", "(", "'.png'", ")", "&", "(", "len", "(", "cur_file", ")", ">", "4", ")", ")", "]", "entries", "=", "{", "int", "(", "directory", ")", ":", "imagenumbers", "}", "img_per_cat", ".", "update", "(", "entries", ")", "del", "directory", "del", "imagenumbers", "# in case subfolders do not exist, '' is appended here.", "_", ",", "features", ",", "files", "=", "next", "(", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "ftrpath", ",", "subfolders", "[", "0", "]", ")", ")", ")", "return", "Categories", "(", "loader", ",", "img_per_cat", "=", "img_per_cat", ",", "features", "=", "features", ")" ]
Takes an input path to the images folder of an experiment and automatically generates the category - filenumber list needed to construct an appropriate _categories object. Parameters: loader : Loader object which contains impath : string path to the input (i.e. image) files of the experiment. All subfolders in that path will be treated as categories. If no subfolders are present, category 1 will be assigned and all files in the folder are considered input images. Images have to end in '.png'. ftrpath : string path to the feature folder. It is expected that the folder structure corresponds to the structure in impath, i.e. ftrpath/category/featurefolder/featuremap.mat Furthermore, features are assumed to be the same for all categories.
[ "Takes", "an", "input", "path", "to", "the", "images", "folder", "of", "an", "experiment", "and", "generates", "automatically", "the", "category", "-", "filenumber", "list", "needed", "to", "construct", "an", "appropriate", "_categories", "object", ".", "Parameters", ":", "loader", ":", "Loader", "object", "which", "contains", "impath", ":", "string", "path", "to", "the", "input", "i", ".", "e", ".", "image", "-", "files", "of", "the", "experiment", ".", "All", "subfolders", "in", "that", "path", "will", "be", "treated", "as", "categories", ".", "If", "no", "subfolders", "are", "present", "category", "1", "will", "be", "assigned", "and", "all", "files", "in", "the", "folder", "are", "considered", "input", "images", ".", "Images", "have", "to", "end", "in", ".", "png", ".", "ftrpath", ":", "string", "path", "to", "the", "feature", "folder", ".", "It", "is", "expected", "that", "the", "folder", "structure", "corresponds", "to", "the", "structure", "in", "impath", "i", ".", "e", ".", "ftrpath", "/", "category", "/", "featurefolder", "/", "featuremap", ".", "mat", "Furthermore", "features", "are", "assumed", "to", "be", "the", "same", "for", "all", "categories", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/stimuli.py#L219-L278
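The filename parsing convention used above, taking everything between the first underscore and the '.png' suffix as the image number, can be tried in isolation. A small sketch with hypothetical file names:
# Hypothetical directory listing; only '_<number>.png' files are kept.
files = ['cat2_001.png', 'cat2_014.png', 'notes.txt', 'cat2_7.png']

imagenumbers = [int(f[f.find('_') + 1:-4])
                for f in files
                if f.endswith('.png') and len(f) > 4]
print(imagenumbers)  # [1, 14, 7]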
nwilming/ocupy
ocupy/stimuli.py
Categories.fixations
def fixations(self): ''' Filter the fixmat such that it only contains fixations on images in categories that are also in the categories object''' if not self._fixations: raise RuntimeError('This Images object does not have' +' an associated fixmat') if len(list(self._categories.keys())) == 0: return None else: idx = np.zeros(self._fixations.x.shape, dtype='bool') for (cat, _) in list(self._categories.items()): idx = idx | ((self._fixations.category == cat)) return self._fixations[idx]
python
def fixations(self): ''' Filter the fixmat such that it only contains fixations on images in categories that are also in the categories object''' if not self._fixations: raise RuntimeError('This Images object does not have' +' an associated fixmat') if len(list(self._categories.keys())) == 0: return None else: idx = np.zeros(self._fixations.x.shape, dtype='bool') for (cat, _) in list(self._categories.items()): idx = idx | ((self._fixations.category == cat)) return self._fixations[idx]
[ "def", "fixations", "(", "self", ")", ":", "if", "not", "self", ".", "_fixations", ":", "raise", "RuntimeError", "(", "'This Images object does not have'", "+", "' an associated fixmat'", ")", "if", "len", "(", "list", "(", "self", ".", "_categories", ".", "keys", "(", ")", ")", ")", "==", "0", ":", "return", "None", "else", ":", "idx", "=", "np", ".", "zeros", "(", "self", ".", "_fixations", ".", "x", ".", "shape", ",", "dtype", "=", "'bool'", ")", "for", "(", "cat", ",", "_", ")", "in", "list", "(", "self", ".", "_categories", ".", "items", "(", ")", ")", ":", "idx", "=", "idx", "|", "(", "(", "self", ".", "_fixations", ".", "category", "==", "cat", ")", ")", "return", "self", ".", "_fixations", "[", "idx", "]" ]
Filter the fixmat such that it only contains fixations on images in categories that are also in the categories object
[ "Filter", "the", "fixmat", "such", "that", "it", "only", "contains", "fixations", "on", "images", "in", "categories", "that", "are", "also", "in", "the", "categories", "object" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/stimuli.py#L49-L61
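The accumulation pattern above, starting from an all-False mask and OR-ing in one equality test per category, is worth seeing on its own. A standalone sketch with made-up arrays:
import numpy as np

fix_category = np.array([1, 2, 3, 2, 1, 9])   # stand-in for self._fixations.category
wanted_categories = [1, 2]                    # stand-in for the categories object's keys

idx = np.zeros(fix_category.shape, dtype='bool')
for cat in wanted_categories:
    idx = idx | (fix_category == cat)

print(fix_category[idx])  # [1 2 2 1]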
nwilming/ocupy
ocupy/stimuli.py
Image.data
def data(self, value): """ Saves a new image to disk """ self.loader.save_image(self.category, self.image, value)
python
def data(self, value): """ Saves a new image to disk """ self.loader.save_image(self.category, self.image, value)
[ "def", "data", "(", "self", ",", "value", ")", ":", "self", ".", "loader", ".", "save_image", "(", "self", ".", "category", ",", "self", ".", "image", ",", "value", ")" ]
Saves a new image to disk
[ "Saves", "a", "new", "image", "to", "disk" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/stimuli.py#L140-L144
nwilming/ocupy
ocupy/stimuli.py
Image.fixations
def fixations(self): """ Returns all fixations that are on this image. A precondition for this to work is that a fixmat is associated with this Image object. """ if not self._fixations: raise RuntimeError('This Images object does not have' +' an associated fixmat') return self._fixations[(self._fixations.category == self.category) & (self._fixations.filenumber == self.image)]
python
def fixations(self): """ Returns all fixations that are on this image. A precondition for this to work is that a fixmat is associated with this Image object. """ if not self._fixations: raise RuntimeError('This Images object does not have' +' an associated fixmat') return self._fixations[(self._fixations.category == self.category) & (self._fixations.filenumber == self.image)]
[ "def", "fixations", "(", "self", ")", ":", "if", "not", "self", ".", "_fixations", ":", "raise", "RuntimeError", "(", "'This Images object does not have'", "+", "' an associated fixmat'", ")", "return", "self", ".", "_fixations", "[", "(", "self", ".", "_fixations", ".", "category", "==", "self", ".", "category", ")", "&", "(", "self", ".", "_fixations", ".", "filenumber", "==", "self", ".", "image", ")", "]" ]
Returns all fixations that are on this image. A precondition for this to work is that a fixmat is associated with this Image object.
[ "Returns", "all", "fixations", "that", "are", "on", "this", "image", ".", "A", "precondition", "for", "this", "to", "work", "is", "that", "a", "fixmat", "is", "associated", "with", "this", "Image", "object", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/stimuli.py#L164-L174
nwilming/ocupy
ocupy/xvalidation.py
SimpleXValidation.generate
def generate(self): """ Generator for creating the cross-validation slices. Returns A tuple that contains two fixmats (training and test) and two Categories objects (training and test). """ for _ in range(0, self.num_slices): #1. separate fixmat into test and training fixmat subjects = np.unique(self.fm.SUBJECTINDEX) test_subs = randsample(subjects, self.subject_hold_out*len(subjects)) train_subs = [x for x in subjects if x not in test_subs] test_fm = self.fm[ismember(self.fm.SUBJECTINDEX, test_subs)] train_fm = self.fm[ismember(self.fm.SUBJECTINDEX, train_subs)] #2. distribute images test_imgs = {} train_imgs = {} id_test = (test_fm.x <1) & False id_train = (train_fm.x <1) & False for cat in self.categories: imgs = cat.images() test_imgs.update({cat.category:randsample(imgs, self.image_hold_out*len(imgs)).tolist()}) train_imgs.update({cat.category:[x for x in imgs if not ismember(x, test_imgs[cat.category])]}) id_test = id_test | ((ismember(test_fm.filenumber, test_imgs[cat.category])) & (test_fm.category == cat.category)) id_train = id_train | ((ismember(train_fm.filenumber, train_imgs[cat.category])) & (train_fm.category == cat.category)) #3. Create categories objects and yield result test_stimuli = Categories(self.categories.loader, test_imgs, features=self.categories._features, fixations=test_fm) train_stimuli = Categories(self.categories.loader, train_imgs, features=self.categories._features, fixations=train_fm) yield (train_fm[id_train], train_stimuli, test_fm[id_test], test_stimuli)
python
def generate(self): """ Generator for creating the cross-validation slices. Returns A tuple that contains two fixmats (training and test) and two Categories objects (training and test). """ for _ in range(0, self.num_slices): #1. separate fixmat into test and training fixmat subjects = np.unique(self.fm.SUBJECTINDEX) test_subs = randsample(subjects, self.subject_hold_out*len(subjects)) train_subs = [x for x in subjects if x not in test_subs] test_fm = self.fm[ismember(self.fm.SUBJECTINDEX, test_subs)] train_fm = self.fm[ismember(self.fm.SUBJECTINDEX, train_subs)] #2. distribute images test_imgs = {} train_imgs = {} id_test = (test_fm.x <1) & False id_train = (train_fm.x <1) & False for cat in self.categories: imgs = cat.images() test_imgs.update({cat.category:randsample(imgs, self.image_hold_out*len(imgs)).tolist()}) train_imgs.update({cat.category:[x for x in imgs if not ismember(x, test_imgs[cat.category])]}) id_test = id_test | ((ismember(test_fm.filenumber, test_imgs[cat.category])) & (test_fm.category == cat.category)) id_train = id_train | ((ismember(train_fm.filenumber, train_imgs[cat.category])) & (train_fm.category == cat.category)) #3. Create categories objects and yield result test_stimuli = Categories(self.categories.loader, test_imgs, features=self.categories._features, fixations=test_fm) train_stimuli = Categories(self.categories.loader, train_imgs, features=self.categories._features, fixations=train_fm) yield (train_fm[id_train], train_stimuli, test_fm[id_test], test_stimuli)
[ "def", "generate", "(", "self", ")", ":", "for", "_", "in", "range", "(", "0", ",", "self", ".", "num_slices", ")", ":", "#1. separate fixmat into test and training fixmat", "subjects", "=", "np", ".", "unique", "(", "self", ".", "fm", ".", "SUBJECTINDEX", ")", "test_subs", "=", "randsample", "(", "subjects", ",", "self", ".", "subject_hold_out", "*", "len", "(", "subjects", ")", ")", "train_subs", "=", "[", "x", "for", "x", "in", "subjects", "if", "x", "not", "in", "test_subs", "]", "test_fm", "=", "self", ".", "fm", "[", "ismember", "(", "self", ".", "fm", ".", "SUBJECTINDEX", ",", "test_subs", ")", "]", "train_fm", "=", "self", ".", "fm", "[", "ismember", "(", "self", ".", "fm", ".", "SUBJECTINDEX", ",", "train_subs", ")", "]", "#2. distribute images ", "test_imgs", "=", "{", "}", "train_imgs", "=", "{", "}", "id_test", "=", "(", "test_fm", ".", "x", "<", "1", ")", "&", "False", "id_train", "=", "(", "train_fm", ".", "x", "<", "1", ")", "&", "False", "for", "cat", "in", "self", ".", "categories", ":", "imgs", "=", "cat", ".", "images", "(", ")", "test_imgs", ".", "update", "(", "{", "cat", ".", "category", ":", "randsample", "(", "imgs", ",", "self", ".", "image_hold_out", "*", "len", "(", "imgs", ")", ")", ".", "tolist", "(", ")", "}", ")", "train_imgs", ".", "update", "(", "{", "cat", ".", "category", ":", "[", "x", "for", "x", "in", "imgs", "if", "not", "ismember", "(", "x", ",", "test_imgs", "[", "cat", ".", "category", "]", ")", "]", "}", ")", "id_test", "=", "id_test", "|", "(", "(", "ismember", "(", "test_fm", ".", "filenumber", ",", "test_imgs", "[", "cat", ".", "category", "]", ")", ")", "&", "(", "test_fm", ".", "category", "==", "cat", ".", "category", ")", ")", "id_train", "=", "id_train", "|", "(", "(", "ismember", "(", "train_fm", ".", "filenumber", ",", "train_imgs", "[", "cat", ".", "category", "]", ")", ")", "&", "(", "train_fm", ".", "category", "==", "cat", ".", "category", ")", ")", "#3. Create categories objects and yield result", "test_stimuli", "=", "Categories", "(", "self", ".", "categories", ".", "loader", ",", "test_imgs", ",", "features", "=", "self", ".", "categories", ".", "_features", ",", "fixations", "=", "test_fm", ")", "train_stimuli", "=", "Categories", "(", "self", ".", "categories", ".", "loader", ",", "train_imgs", ",", "features", "=", "self", ".", "categories", ".", "_features", ",", "fixations", "=", "train_fm", ")", "yield", "(", "train_fm", "[", "id_train", "]", ",", "train_stimuli", ",", "test_fm", "[", "id_test", "]", ",", "test_stimuli", ")" ]
Generator for creating the cross-validation slices. Returns A tuple that contains two fixmats (training and test) and two Categories objects (training and test).
[ "Generator", "for", "creating", "the", "cross", "-", "validation", "slices", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/xvalidation.py#L42-L88
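The subject split performed once per slice reduces to shuffle-and-slice. A standalone sketch; ocupy's randsample is assumed to behave like the shuffle-then-slice shown here:
import numpy as np

subjects = np.arange(1, 9)   # hypothetical subject indices 1..8
subject_hold_out = 0.25      # fraction of subjects reserved for testing

# assumed randsample behaviour: shuffle a copy, then take the first slice
shuffled = subjects.copy()
np.random.shuffle(shuffled)
test_subs = shuffled[:int(subject_hold_out * len(subjects))]
train_subs = [int(s) for s in subjects if s not in test_subs]

print(sorted(test_subs.tolist()), train_subs)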
nwilming/ocupy
ocupy/saccade_geometry.py
prepare_data
def prepare_data(fm, max_back, dur_cap=700): ''' Computes angle and length differences up to given order and deletes suspiciously long fixations. Input fm: Fixmat Fixmat for which to compute angle and length differences max_back: Int Computes delta angle and amplitude up to order max_back. dur_cap: Int Longest allowed fixation duration Output fm: Fixmat Filtered fixmat that aligns to the other outputs. durations: ndarray Duration for each fixation in fm forward_angle: Angle between previous and next saccade. ''' durations = np.roll(fm.end - fm.start, 1).astype(float) angles, lengths, ads, lds = anglendiff(fm, roll=max_back, return_abs=True) # durations and ads are aligned in a way that an entry in ads # encodes the angle of the saccade away from a fixation in # durations forward_angle = abs(reshift(ads[0])).astype(float) ads = [abs(reshift(a)) for a in ads] # Now filter out weird fixation durations id_in = durations > dur_cap durations[id_in] = np.nan forward_angle[id_in] = np.nan return fm, durations, forward_angle, ads, lds
python
def prepare_data(fm, max_back, dur_cap=700): ''' Computes angle and length differences up to given order and deletes suspiciously long fixations. Input fm: Fixmat Fixmat for which to compute angle and length differences max_back: Int Computes delta angle and amplitude up to order max_back. dur_cap: Int Longest allowed fixation duration Output fm: Fixmat Filtered fixmat that aligns to the other outputs. durations: ndarray Duration for each fixation in fm forward_angle: Angle between previous and next saccade. ''' durations = np.roll(fm.end - fm.start, 1).astype(float) angles, lengths, ads, lds = anglendiff(fm, roll=max_back, return_abs=True) # durations and ads are aligned in a way that an entry in ads # encodes the angle of the saccade away from a fixation in # durations forward_angle = abs(reshift(ads[0])).astype(float) ads = [abs(reshift(a)) for a in ads] # Now filter out weird fixation durations id_in = durations > dur_cap durations[id_in] = np.nan forward_angle[id_in] = np.nan return fm, durations, forward_angle, ads, lds
[ "def", "prepare_data", "(", "fm", ",", "max_back", ",", "dur_cap", "=", "700", ")", ":", "durations", "=", "np", ".", "roll", "(", "fm", ".", "end", "-", "fm", ".", "start", ",", "1", ")", ".", "astype", "(", "float", ")", "angles", ",", "lengths", ",", "ads", ",", "lds", "=", "anglendiff", "(", "fm", ",", "roll", "=", "max_back", ",", "return_abs", "=", "True", ")", "# durations and ads are aligned in a way that an entry in ads", "# encodes the angle of the saccade away from a fixation in", "# durations", "forward_angle", "=", "abs", "(", "reshift", "(", "ads", "[", "0", "]", ")", ")", ".", "astype", "(", "float", ")", "ads", "=", "[", "abs", "(", "reshift", "(", "a", ")", ")", "for", "a", "in", "ads", "]", "# Now filter out weird fixation durations", "id_in", "=", "durations", ">", "dur_cap", "durations", "[", "id_in", "]", "=", "np", ".", "nan", "forward_angle", "[", "id_in", "]", "=", "np", ".", "nan", "return", "fm", ",", "durations", ",", "forward_angle", ",", "ads", ",", "lds" ]
Computes angle and length differences up to given order and deletes suspiciously long fixations. Input fm: Fixmat Fixmat for which to compute angle and length differences max_back: Int Computes delta angle and amplitude up to order max_back. dur_cap: Int Longest allowed fixation duration Output fm: Fixmat Filtered fixmat that aligns to the other outputs. durations: ndarray Duration for each fixation in fm forward_angle: Angle between previous and next saccade.
[ "Computes", "angle", "and", "length", "differences", "up", "to", "given", "order", "and", "deletes", "suspiciously", "long", "fixations", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/saccade_geometry.py#L15-L48
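The duration handling is the subtle part: durations are rolled by one so each entry aligns with the saccade leaving that fixation, and over-long values are replaced by NaN rather than dropped, keeping the arrays aligned. A standalone sketch with made-up timestamps:
import numpy as np

start = np.array([0, 250, 520, 900], dtype=float)
end = np.array([200, 480, 880, 1950], dtype=float)
dur_cap = 700

# align duration i with the saccade away from fixation i, then NaN the outliers
durations = np.roll(end - start, 1)
durations[durations > dur_cap] = np.nan
print(durations)  # [ nan 200. 230. 360.]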
nwilming/ocupy
ocupy/saccade_geometry.py
saccadic_momentum_effect
def saccadic_momentum_effect(durations, forward_angle, summary_stat=nanmean): """ Computes the mean fixation duration at forward angles. """ durations_per_da = np.nan * np.ones((len(e_angle) - 1,)) for i, (bo, b1) in enumerate(zip(e_angle[:-1], e_angle[1:])): idx = ( bo <= forward_angle) & ( forward_angle < b1) & ( ~np.isnan(durations)) durations_per_da[i] = summary_stat(durations[idx]) return durations_per_da
python
def saccadic_momentum_effect(durations, forward_angle, summary_stat=nanmean): """ Computes the mean fixation duration at forward angles. """ durations_per_da = np.nan * np.ones((len(e_angle) - 1,)) for i, (bo, b1) in enumerate(zip(e_angle[:-1], e_angle[1:])): idx = ( bo <= forward_angle) & ( forward_angle < b1) & ( ~np.isnan(durations)) durations_per_da[i] = summary_stat(durations[idx]) return durations_per_da
[ "def", "saccadic_momentum_effect", "(", "durations", ",", "forward_angle", ",", "summary_stat", "=", "nanmean", ")", ":", "durations_per_da", "=", "np", ".", "nan", "*", "np", ".", "ones", "(", "(", "len", "(", "e_angle", ")", "-", "1", ",", ")", ")", "for", "i", ",", "(", "bo", ",", "b1", ")", "in", "enumerate", "(", "zip", "(", "e_angle", "[", ":", "-", "1", "]", ",", "e_angle", "[", "1", ":", "]", ")", ")", ":", "idx", "=", "(", "bo", "<=", "forward_angle", ")", "&", "(", "forward_angle", "<", "b1", ")", "&", "(", "~", "np", ".", "isnan", "(", "durations", ")", ")", "durations_per_da", "[", "i", "]", "=", "summary_stat", "(", "durations", "[", "idx", "]", ")", "return", "durations_per_da" ]
Computes the mean fixation duration at forward angles.
[ "Computes", "the", "mean", "fixation", "duration", "at", "forward", "angles", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/saccade_geometry.py#L51-L63
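Binning against a global edge vector e_angle is the whole computation. A standalone sketch with a made-up coarse grid (the module-level e_angle is assumed to be a finer edge vector):
import numpy as np

e_angle = np.array([0.0, 45.0, 90.0, 135.0, 180.0])          # hypothetical bin edges
forward_angle = np.array([10.0, 50.0, 50.0, 170.0, 100.0])
durations = np.array([200.0, 300.0, 320.0, 180.0, np.nan])

means = np.nan * np.ones(len(e_angle) - 1)
for i, (lo, hi) in enumerate(zip(e_angle[:-1], e_angle[1:])):
    idx = (lo <= forward_angle) & (forward_angle < hi) & ~np.isnan(durations)
    if idx.any():   # guard empty bins instead of letting mean() warn
        means[i] = durations[idx].mean()
print(means)  # [200. 310.  nan 180.]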
nwilming/ocupy
ocupy/saccade_geometry.py
ior_effect
def ior_effect(durations, angle_diffs, length_diffs, summary_stat=np.mean, parallel=True, min_samples=20): """ Computes a measure of fixation durations at delta angle and delta length combinations. """ raster = np.empty((len(e_dist) - 1, len(e_angle) - 1), dtype=object) for a, (a_low, a_upp) in enumerate(zip(e_angle[:-1], e_angle[1:])): for d, (d_low, d_upp) in enumerate(zip(e_dist[:-1], e_dist[1:])): idx = ((d_low <= length_diffs) & (length_diffs < d_upp) & (a_low <= angle_diffs) & (angle_diffs < a_upp)) if sum(idx) < min_samples: raster[d, a] = np.array([np.nan]) else: raster[d, a] = durations[idx] if parallel: p = pool.Pool(3) result = p.map(summary_stat, list(raster.flatten())) p.terminate() else: result = list(map(summary_stat, list(raster.flatten()))) for idx, value in enumerate(result): i, j = np.unravel_index(idx, raster.shape) raster[i, j] = value return raster
python
def ior_effect(durations, angle_diffs, length_diffs, summary_stat=np.mean, parallel=True, min_samples=20): """ Computes a measure of fixation durations at delta angle and delta length combinations. """ raster = np.empty((len(e_dist) - 1, len(e_angle) - 1), dtype=object) for a, (a_low, a_upp) in enumerate(zip(e_angle[:-1], e_angle[1:])): for d, (d_low, d_upp) in enumerate(zip(e_dist[:-1], e_dist[1:])): idx = ((d_low <= length_diffs) & (length_diffs < d_upp) & (a_low <= angle_diffs) & (angle_diffs < a_upp)) if sum(idx) < min_samples: raster[d, a] = np.array([np.nan]) else: raster[d, a] = durations[idx] if parallel: p = pool.Pool(3) result = p.map(summary_stat, list(raster.flatten())) p.terminate() else: result = list(map(summary_stat, list(raster.flatten()))) for idx, value in enumerate(result): i, j = np.unravel_index(idx, raster.shape) raster[i, j] = value return raster
[ "def", "ior_effect", "(", "durations", ",", "angle_diffs", ",", "length_diffs", ",", "summary_stat", "=", "np", ".", "mean", ",", "parallel", "=", "True", ",", "min_samples", "=", "20", ")", ":", "raster", "=", "np", ".", "empty", "(", "(", "len", "(", "e_dist", ")", "-", "1", ",", "len", "(", "e_angle", ")", "-", "1", ")", ",", "dtype", "=", "object", ")", "for", "a", ",", "(", "a_low", ",", "a_upp", ")", "in", "enumerate", "(", "zip", "(", "e_angle", "[", ":", "-", "1", "]", ",", "e_angle", "[", "1", ":", "]", ")", ")", ":", "for", "d", ",", "(", "d_low", ",", "d_upp", ")", "in", "enumerate", "(", "zip", "(", "e_dist", "[", ":", "-", "1", "]", ",", "e_dist", "[", "1", ":", "]", ")", ")", ":", "idx", "=", "(", "(", "d_low", "<=", "length_diffs", ")", "&", "(", "length_diffs", "<", "d_upp", ")", "&", "(", "a_low", "<=", "angle_diffs", ")", "&", "(", "angle_diffs", "<", "a_upp", ")", ")", "if", "sum", "(", "idx", ")", "<", "min_samples", ":", "raster", "[", "d", ",", "a", "]", "=", "np", ".", "array", "(", "[", "np", ".", "nan", "]", ")", "else", ":", "raster", "[", "d", ",", "a", "]", "=", "durations", "[", "idx", "]", "if", "parallel", ":", "p", "=", "pool", ".", "Pool", "(", "3", ")", "result", "=", "p", ".", "map", "(", "summary_stat", ",", "list", "(", "raster", ".", "flatten", "(", ")", ")", ")", "p", ".", "terminate", "(", ")", "else", ":", "result", "=", "list", "(", "map", "(", "summary_stat", ",", "list", "(", "raster", ".", "flatten", "(", ")", ")", ")", ")", "for", "idx", ",", "value", "in", "enumerate", "(", "result", ")", ":", "i", ",", "j", "=", "np", ".", "unravel_index", "(", "idx", ",", "raster", ".", "shape", ")", "raster", "[", "i", ",", "j", "]", "=", "value", "return", "raster" ]
Computes a measure of fixation durations at delta angle and delta length combinations.
[ "Computes", "a", "measure", "of", "fixation", "durations", "at", "delta", "angle", "and", "delta", "length", "combinations", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/saccade_geometry.py#L66-L90
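After the cells are filled, the function summarises each one with a flatten, map, unravel_index round trip (optionally across a multiprocessing pool). The serial version of that pattern, as a standalone sketch:
import numpy as np

# A 2x2 object raster of per-cell samples; under-populated cells hold [nan].
raster = np.empty((2, 2), dtype=object)
raster[0, 0] = np.array([1.0, 3.0])
raster[0, 1] = np.array([np.nan])
raster[1, 0] = np.array([10.0])
raster[1, 1] = np.array([2.0, 4.0, 6.0])

result = list(map(np.mean, raster.flatten()))
for idx, value in enumerate(result):
    i, j = np.unravel_index(idx, raster.shape)   # flat index back to (row, col)
    raster[i, j] = value
print(raster)  # cell means: [[2.0 nan] [10.0 4.0]]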
nwilming/ocupy
ocupy/saccade_geometry.py
predict_fixation_duration
def predict_fixation_duration( durations, angles, length_diffs, dataset=None, params=None): """ Fits a non-linear piecewise regression to fixation durations for a fixmat. Returns corrected fixation durations. """ if dataset is None: dataset = np.ones(durations.shape) corrected_durations = np.nan * np.ones(durations.shape) for i, ds in enumerate(np.unique(dataset)): e = lambda v, x, y, z: (leastsq_dual_model(x, z, *v) - y) v0 = [120, 220.0, -.1, 0.5, .1, .1] id_ds = dataset == ds idnan = ( ~np.isnan(angles)) & ( ~np.isnan(durations)) & ( ~np.isnan(length_diffs)) v, s = leastsq( e, v0, args=( angles[ idnan & id_ds], durations[ idnan & id_ds], length_diffs[ idnan & id_ds]), maxfev=10000) corrected_durations[id_ds] = (durations[id_ds] - (leastsq_dual_model(angles[id_ds], length_diffs[id_ds], *v))) if params is not None: params['v' + str(i)] = v params['s' + str(i)] = s return corrected_durations
python
def predict_fixation_duration( durations, angles, length_diffs, dataset=None, params=None): """ Fits a non-linear piecewise regression to fixation durations for a fixmat. Returns corrected fixation durations. """ if dataset is None: dataset = np.ones(durations.shape) corrected_durations = np.nan * np.ones(durations.shape) for i, ds in enumerate(np.unique(dataset)): e = lambda v, x, y, z: (leastsq_dual_model(x, z, *v) - y) v0 = [120, 220.0, -.1, 0.5, .1, .1] id_ds = dataset == ds idnan = ( ~np.isnan(angles)) & ( ~np.isnan(durations)) & ( ~np.isnan(length_diffs)) v, s = leastsq( e, v0, args=( angles[ idnan & id_ds], durations[ idnan & id_ds], length_diffs[ idnan & id_ds]), maxfev=10000) corrected_durations[id_ds] = (durations[id_ds] - (leastsq_dual_model(angles[id_ds], length_diffs[id_ds], *v))) if params is not None: params['v' + str(i)] = v params['s' + str(i)] = s return corrected_durations
[ "def", "predict_fixation_duration", "(", "durations", ",", "angles", ",", "length_diffs", ",", "dataset", "=", "None", ",", "params", "=", "None", ")", ":", "if", "dataset", "is", "None", ":", "dataset", "=", "np", ".", "ones", "(", "durations", ".", "shape", ")", "corrected_durations", "=", "np", ".", "nan", "*", "np", ".", "ones", "(", "durations", ".", "shape", ")", "for", "i", ",", "ds", "in", "enumerate", "(", "np", ".", "unique", "(", "dataset", ")", ")", ":", "e", "=", "lambda", "v", ",", "x", ",", "y", ",", "z", ":", "(", "leastsq_dual_model", "(", "x", ",", "z", ",", "*", "v", ")", "-", "y", ")", "v0", "=", "[", "120", ",", "220.0", ",", "-", ".1", ",", "0.5", ",", ".1", ",", ".1", "]", "id_ds", "=", "dataset", "==", "ds", "idnan", "=", "(", "~", "np", ".", "isnan", "(", "angles", ")", ")", "&", "(", "~", "np", ".", "isnan", "(", "durations", ")", ")", "&", "(", "~", "np", ".", "isnan", "(", "length_diffs", ")", ")", "v", ",", "s", "=", "leastsq", "(", "e", ",", "v0", ",", "args", "=", "(", "angles", "[", "idnan", "&", "id_ds", "]", ",", "durations", "[", "idnan", "&", "id_ds", "]", ",", "length_diffs", "[", "idnan", "&", "id_ds", "]", ")", ",", "maxfev", "=", "10000", ")", "corrected_durations", "[", "id_ds", "]", "=", "(", "durations", "[", "id_ds", "]", "-", "(", "leastsq_dual_model", "(", "angles", "[", "id_ds", "]", ",", "length_diffs", "[", "id_ds", "]", ",", "*", "v", ")", ")", ")", "if", "params", "is", "not", "None", ":", "params", "[", "'v'", "+", "str", "(", "i", ")", "]", "=", "v", "params", "[", "'s'", "+", "str", "(", "i", ")", "]", "=", "s", "return", "corrected_durations" ]
Fits a non-linear piecewise regression to fixation durations for a fixmat. Returns corrected fixation durations.
[ "Fits", "a", "non", "-", "linear", "piecewise", "regression", "to", "fixtaion", "durations", "for", "a", "fixmat", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/saccade_geometry.py#L93-L122
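The fitting machinery is scipy's leastsq over a residual closure; since leastsq_dual_model is not shown here, the sketch below substitutes a deliberately simple linear stand-in so the pattern is runnable:
import numpy as np
from scipy.optimize import leastsq

model = lambda x, a, b: a * x + b                 # stand-in for leastsq_dual_model
residuals = lambda v, x, y: model(x, *v) - y      # plays the role of e above

x = np.array([0.0, 1.0, 2.0, 3.0, np.nan])
y = np.array([1.1, 2.9, 5.2, 7.1, 4.0])

idnan = ~np.isnan(x) & ~np.isnan(y)               # fit on NaN-free samples only
v, ier = leastsq(residuals, [1.0, 0.0], args=(x[idnan], y[idnan]))
print(v)  # approximately [2.03, 1.03]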
nwilming/ocupy
ocupy/saccade_geometry.py
subject_predictions
def subject_predictions(fm, field='SUBJECTINDEX', method=predict_fixation_duration, data=None): ''' Calculates the saccadic momentum effect for individual subjects. Removes any effect of amplitude differences. The parameters are fitted on unbinned data. The effects are computed on binned data. See e_dist and e_angle for the binning parameters. ''' if data is None: fma, dura, faa, adsa, ldsa = prepare_data(fm, dur_cap=700, max_back=5) adsa = adsa[0] ldsa = ldsa[0] else: fma, dura, faa, adsa, ldsa = data fma = fma.copy() # [ones(fm.x.shape)] sub_effects = [] sub_predictions = [] parameters = [] for i, fmsub in enumerate(np.unique(fma.field(field))): id = fma.field(field) == fmsub #_, dur, fa, ads, lds = prepare_data(fmsub, dur_cap = 700, max_back=5) dur, fa, ads, lds = dura[id], faa[id], adsa[id], ldsa[id] params = {} _ = method(dur, fa, lds, params=params) ps = params['v0'] ld_corrected = leastsq_only_dist(lds, ps[4], ps[5]) prediction = leastsq_only_angle(fa, ps[0], ps[1], ps[2], ps[3]) sub_predictions += [saccadic_momentum_effect(prediction, fa)] sub_effects += [saccadic_momentum_effect(dur - ld_corrected, fa)] parameters += [ps] return np.array(sub_effects), np.array(sub_predictions), parameters
python
def subject_predictions(fm, field='SUBJECTINDEX', method=predict_fixation_duration, data=None): ''' Calculates the saccadic momentum effect for individual subjects. Removes any effect of amplitude differences. The parameters are fitted on unbinned data. The effects are computed on binned data. See e_dist and e_angle for the binning parameters. ''' if data is None: fma, dura, faa, adsa, ldsa = prepare_data(fm, dur_cap=700, max_back=5) adsa = adsa[0] ldsa = ldsa[0] else: fma, dura, faa, adsa, ldsa = data fma = fma.copy() # [ones(fm.x.shape)] sub_effects = [] sub_predictions = [] parameters = [] for i, fmsub in enumerate(np.unique(fma.field(field))): id = fma.field(field) == fmsub #_, dur, fa, ads, lds = prepare_data(fmsub, dur_cap = 700, max_back=5) dur, fa, ads, lds = dura[id], faa[id], adsa[id], ldsa[id] params = {} _ = method(dur, fa, lds, params=params) ps = params['v0'] ld_corrected = leastsq_only_dist(lds, ps[4], ps[5]) prediction = leastsq_only_angle(fa, ps[0], ps[1], ps[2], ps[3]) sub_predictions += [saccadic_momentum_effect(prediction, fa)] sub_effects += [saccadic_momentum_effect(dur - ld_corrected, fa)] parameters += [ps] return np.array(sub_effects), np.array(sub_predictions), parameters
[ "def", "subject_predictions", "(", "fm", ",", "field", "=", "'SUBJECTINDEX'", ",", "method", "=", "predict_fixation_duration", ",", "data", "=", "None", ")", ":", "if", "data", "is", "None", ":", "fma", ",", "dura", ",", "faa", ",", "adsa", ",", "ldsa", "=", "prepare_data", "(", "fm", ",", "dur_cap", "=", "700", ",", "max_back", "=", "5", ")", "adsa", "=", "adsa", "[", "0", "]", "ldsa", "=", "ldsa", "[", "0", "]", "else", ":", "fma", ",", "dura", ",", "faa", ",", "adsa", ",", "ldsa", "=", "data", "fma", "=", "fma", ".", "copy", "(", ")", "# [ones(fm.x.shape)]", "sub_effects", "=", "[", "]", "sub_predictions", "=", "[", "]", "parameters", "=", "[", "]", "for", "i", ",", "fmsub", "in", "enumerate", "(", "np", ".", "unique", "(", "fma", ".", "field", "(", "field", ")", ")", ")", ":", "id", "=", "fma", ".", "field", "(", "field", ")", "==", "fmsub", "#_, dur, fa, ads, lds = prepare_data(fmsub, dur_cap = 700, max_back=5)", "dur", ",", "fa", ",", "ads", ",", "lds", "=", "dura", "[", "id", "]", ",", "faa", "[", "id", "]", ",", "adsa", "[", "id", "]", ",", "ldsa", "[", "id", "]", "params", "=", "{", "}", "_", "=", "method", "(", "dur", ",", "fa", ",", "lds", ",", "params", "=", "params", ")", "ps", "=", "params", "[", "'v0'", "]", "ld_corrected", "=", "leastsq_only_dist", "(", "lds", ",", "ps", "[", "4", "]", ",", "ps", "[", "5", "]", ")", "prediction", "=", "leastsq_only_angle", "(", "fa", ",", "ps", "[", "0", "]", ",", "ps", "[", "1", "]", ",", "ps", "[", "2", "]", ",", "ps", "[", "3", "]", ")", "sub_predictions", "+=", "[", "saccadic_momentum_effect", "(", "prediction", ",", "fa", ")", "]", "sub_effects", "+=", "[", "saccadic_momentum_effect", "(", "dur", "-", "ld_corrected", ",", "fa", ")", "]", "parameters", "+=", "[", "ps", "]", "return", "np", ".", "array", "(", "sub_effects", ")", ",", "np", ".", "array", "(", "sub_predictions", ")", ",", "parameters" ]
Calculates the saccadic momentum effect for individual subjects. Removes any effect of amplitude differences. The parameters are fitted on unbinned data. The effects are computed on binned data. See e_dist and e_angle for the binning parameters.
[ "Calculates", "the", "saccadic", "momentum", "effect", "for", "individual", "subjects", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/saccade_geometry.py#L125-L158
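The outer loop is a generic group-by over one fixmat field: take the unique values, mask the matching rows, run the analysis per group. Stripped of the fitting, that skeleton looks like this (made-up arrays):
import numpy as np

subjectindex = np.array([1, 1, 2, 2, 2])        # stand-in for fma.field(field)
durations = np.array([200.0, 240.0, 180.0, 220.0, 260.0])

per_subject = {}
for sub in np.unique(subjectindex):
    mask = subjectindex == sub                  # rows belonging to this subject
    per_subject[int(sub)] = float(durations[mask].mean())
print(per_subject)  # {1: 220.0, 2: 220.0}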
nwilming/ocupy
ocupy/bounds.py
intersubject_scores
def intersubject_scores(fm, category, predicting_filenumbers, predicting_subjects, predicted_filenumbers, predicted_subjects, controls = True, scale_factor = 1): """ Calculates how well the fixations from a set of subjects on a set of images can be predicted with the fixations from another set of subjects on another set of images. The prediction is carried out by computing a fixation density map from fixations of predicting_subjects subjects on predicting_images images. Prediction accuracy is assessed by measures.prediction_scores. Parameters fm : fixmat instance category : int Category from which the fixations are taken. predicting_filenumbers : list List of filenumbers used for prediction, i.e. images from which fixations for the prediction are taken. predicting_subjects : list List of subjects whose fixations on images in predicting_filenumbers are used for the prediction. predicted_filenumbers : list List of images from which the to be predicted fixations are taken. predicted_subjects : list List of subjects used for evaluation, i.e. subjects whose fixations on images in predicted_filenumbers are taken for evaluation. controls : bool, optional If True (default), the fixations of the predicted subjects on other images of the same category serve as controls. If False, no explicit controls are passed to measures.prediction_scores. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns auc : area under the roc curve for sets of actuals and controls true_pos_rate : ndarray Rate of true positives for every given threshold value. All values appearing in actuals are taken as thresholds. Uses lower sum interpolation. false_pos_rate : ndarray See true_pos_rate but for false positives. """ predicting_fm = fm[ (ismember(fm.SUBJECTINDEX, predicting_subjects)) & (ismember(fm.filenumber, predicting_filenumbers)) & (fm.category == category)] predicted_fm = fm[ (ismember(fm.SUBJECTINDEX,predicted_subjects)) & (ismember(fm.filenumber,predicted_filenumbers))& (fm.category == category)] try: predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor) except RuntimeError: predicting_fdm = None if controls == True: fm_controls = fm[ (ismember(fm.SUBJECTINDEX, predicted_subjects)) & ((ismember(fm.filenumber, predicted_filenumbers)) != True) & (fm.category == category)] return measures.prediction_scores(predicting_fdm, predicted_fm, controls = (fm_controls.y, fm_controls.x)) return measures.prediction_scores(predicting_fdm, predicted_fm, controls = None)
python
def intersubject_scores(fm, category, predicting_filenumbers, predicting_subjects, predicted_filenumbers, predicted_subjects, controls = True, scale_factor = 1): """ Calculates how well the fixations from a set of subjects on a set of images can be predicted with the fixations from another set of subjects on another set of images. The prediction is carried out by computing a fixation density map from fixations of predicting_subjects subjects on predicting_images images. Prediction accuracy is assessed by measures.prediction_scores. Parameters fm : fixmat instance category : int Category from which the fixations are taken. predicting_filenumbers : list List of filenumbers used for prediction, i.e. images from which fixations for the prediction are taken. predicting_subjects : list List of subjects whose fixations on images in predicting_filenumbers are used for the prediction. predicted_filenumbers : list List of images from which the to be predicted fixations are taken. predicted_subjects : list List of subjects used for evaluation, i.e. subjects whose fixations on images in predicted_filenumbers are taken for evaluation. controls : bool, optional If True (default), the fixations of the predicted subjects on other images of the same category serve as controls. If False, no explicit controls are passed to measures.prediction_scores. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns auc : area under the roc curve for sets of actuals and controls true_pos_rate : ndarray Rate of true positives for every given threshold value. All values appearing in actuals are taken as thresholds. Uses lower sum interpolation. false_pos_rate : ndarray See true_pos_rate but for false positives. """ predicting_fm = fm[ (ismember(fm.SUBJECTINDEX, predicting_subjects)) & (ismember(fm.filenumber, predicting_filenumbers)) & (fm.category == category)] predicted_fm = fm[ (ismember(fm.SUBJECTINDEX,predicted_subjects)) & (ismember(fm.filenumber,predicted_filenumbers))& (fm.category == category)] try: predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor) except RuntimeError: predicting_fdm = None if controls == True: fm_controls = fm[ (ismember(fm.SUBJECTINDEX, predicted_subjects)) & ((ismember(fm.filenumber, predicted_filenumbers)) != True) & (fm.category == category)] return measures.prediction_scores(predicting_fdm, predicted_fm, controls = (fm_controls.y, fm_controls.x)) return measures.prediction_scores(predicting_fdm, predicted_fm, controls = None)
[ "def", "intersubject_scores", "(", "fm", ",", "category", ",", "predicting_filenumbers", ",", "predicting_subjects", ",", "predicted_filenumbers", ",", "predicted_subjects", ",", "controls", "=", "True", ",", "scale_factor", "=", "1", ")", ":", "predicting_fm", "=", "fm", "[", "(", "ismember", "(", "fm", ".", "SUBJECTINDEX", ",", "predicting_subjects", ")", ")", "&", "(", "ismember", "(", "fm", ".", "filenumber", ",", "predicting_filenumbers", ")", ")", "&", "(", "fm", ".", "category", "==", "category", ")", "]", "predicted_fm", "=", "fm", "[", "(", "ismember", "(", "fm", ".", "SUBJECTINDEX", ",", "predicted_subjects", ")", ")", "&", "(", "ismember", "(", "fm", ".", "filenumber", ",", "predicted_filenumbers", ")", ")", "&", "(", "fm", ".", "category", "==", "category", ")", "]", "try", ":", "predicting_fdm", "=", "compute_fdm", "(", "predicting_fm", ",", "scale_factor", "=", "scale_factor", ")", "except", "RuntimeError", ":", "predicting_fdm", "=", "None", "if", "controls", "==", "True", ":", "fm_controls", "=", "fm", "[", "(", "ismember", "(", "fm", ".", "SUBJECTINDEX", ",", "predicted_subjects", ")", ")", "&", "(", "(", "ismember", "(", "fm", ".", "filenumber", ",", "predicted_filenumbers", ")", ")", "!=", "True", ")", "&", "(", "fm", ".", "category", "==", "category", ")", "]", "return", "measures", ".", "prediction_scores", "(", "predicting_fdm", ",", "predicted_fm", ",", "controls", "=", "(", "fm_controls", ".", "y", ",", "fm_controls", ".", "x", ")", ")", "return", "measures", ".", "prediction_scores", "(", "predicting_fdm", ",", "predicted_fm", ",", "controls", "=", "None", ")" ]
Calculates how well the fixations from a set of subjects on a set of images can be predicted with the fixations from another set of subjects on another set of images. The prediction is carried out by computing a fixation density map from fixations of predicting_subjects subjects on predicting_images images. Prediction accuracy is assessed by measures.prediction_scores. Parameters fm : fixmat instance category : int Category from which the fixations are taken. predicting_filenumbers : list List of filenumbers used for prediction, i.e. images from which fixations for the prediction are taken. predicting_subjects : list List of subjects whose fixations on images in predicting_filenumbers are used for the prediction. predicted_filenumbers : list List of images from which the to be predicted fixations are taken. predicted_subjects : list List of subjects used for evaluation, i.e. subjects whose fixations on images in predicted_filenumbers are taken for evaluation. controls : bool, optional If True (default), the fixations of the predicted subjects on other images of the same category serve as controls. If False, no explicit controls are passed to measures.prediction_scores. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns auc : area under the roc curve for sets of actuals and controls true_pos_rate : ndarray Rate of true positives for every given threshold value. All values appearing in actuals are taken as thresholds. Uses lower sum interpolation. false_pos_rate : ndarray See true_pos_rate but for false positives.
[ "Calculates", "how", "well", "the", "fixations", "from", "a", "set", "of", "subjects", "on", "a", "set", "of", "images", "can", "be", "predicted", "with", "the", "fixations", "from", "another", "set", "of", "subjects", "on", "another", "set", "of", "images", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/bounds.py#L12-L76
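All of the row selection above rests on ismember, which is assumed to behave like numpy's isin: a boolean mask marking rows whose value occurs in a given set, combinable with further conditions via &. A standalone sketch:
import numpy as np

subjectindex = np.array([1, 2, 3, 4, 2])
category = np.array([7, 7, 8, 7, 7])
predicting_subjects = [2, 4]

# rows whose subject is in the predicting set AND whose category matches
mask = np.isin(subjectindex, predicting_subjects) & (category == 7)
print(mask)  # [False  True False  True  True]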
nwilming/ocupy
ocupy/bounds.py
intersubject_scores_random_subjects
def intersubject_scores_random_subjects(fm, category, filenumber, n_train, n_predict, controls=True, scale_factor = 1): """ Calculates how well the fixations of n random subjects on one image can be predicted with the fixations of m other random subjects. Notes Function that uses intersubject_scores for computing the scores. Parameters fm : fixmat instance category : int Category from which the fixations are taken. filenumber : int Image from which fixations are taken. n_train : int The number of subjects which are used for prediction. n_predict : int The number of subjects to predict. controls : bool, optional If True (default), control fixations are drawn from the predicted subjects on other images of the same category (see intersubject_scores). If False, no explicit controls are used. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns tuple : prediction scores """ subjects = np.unique(fm.SUBJECTINDEX) if len(subjects) < n_train + n_predict: raise ValueError("""Not enough subjects in fixmat""") # draw a random sample of subjects for testing and evaluation, according # to the specified set sizes (n_train, n_predict) np.random.shuffle(subjects) predicted_subjects = subjects[0 : n_predict] predicting_subjects = subjects[n_predict : n_predict + n_train] assert len(predicting_subjects) == n_train assert len(predicted_subjects) == n_predict assert all(x not in predicting_subjects for x in predicted_subjects) return intersubject_scores(fm, category, [filenumber], predicting_subjects, [filenumber], predicted_subjects, controls, scale_factor)
python
def intersubject_scores_random_subjects(fm, category, filenumber, n_train, n_predict, controls=True, scale_factor = 1): """ Calculates how well the fixations of n random subjects on one image can be predicted with the fixations of m other random subjects. Notes Function that uses intersubject_scores for computing the scores. Parameters fm : fixmat instance category : int Category from which the fixations are taken. filenumber : int Image from which fixations are taken. n_train : int The number of subjects which are used for prediction. n_predict : int The number of subjects to predict. controls : bool, optional If True (default), control fixations are drawn from the predicted subjects on other images of the same category (see intersubject_scores). If False, no explicit controls are used. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns tuple : prediction scores """ subjects = np.unique(fm.SUBJECTINDEX) if len(subjects) < n_train + n_predict: raise ValueError("""Not enough subjects in fixmat""") # draw a random sample of subjects for testing and evaluation, according # to the specified set sizes (n_train, n_predict) np.random.shuffle(subjects) predicted_subjects = subjects[0 : n_predict] predicting_subjects = subjects[n_predict : n_predict + n_train] assert len(predicting_subjects) == n_train assert len(predicted_subjects) == n_predict assert all(x not in predicting_subjects for x in predicted_subjects) return intersubject_scores(fm, category, [filenumber], predicting_subjects, [filenumber], predicted_subjects, controls, scale_factor)
[ "def", "intersubject_scores_random_subjects", "(", "fm", ",", "category", ",", "filenumber", ",", "n_train", ",", "n_predict", ",", "controls", "=", "True", ",", "scale_factor", "=", "1", ")", ":", "subjects", "=", "np", ".", "unique", "(", "fm", ".", "SUBJECTINDEX", ")", "if", "len", "(", "subjects", ")", "<", "n_train", "+", "n_predict", ":", "raise", "ValueError", "(", "\"\"\"Not enough subjects in fixmat\"\"\"", ")", "# draw a random sample of subjects for testing and evaluation, according", "# to the specified set sizes (n_train, n_predict)", "np", ".", "random", ".", "shuffle", "(", "subjects", ")", "predicted_subjects", "=", "subjects", "[", "0", ":", "n_predict", "]", "predicting_subjects", "=", "subjects", "[", "n_predict", ":", "n_predict", "+", "n_train", "]", "assert", "len", "(", "predicting_subjects", ")", "==", "n_train", "assert", "len", "(", "predicted_subjects", ")", "==", "n_predict", "assert", "[", "x", "not", "in", "predicting_subjects", "for", "x", "in", "predicted_subjects", "]", "return", "intersubject_scores", "(", "fm", ",", "category", ",", "[", "filenumber", "]", ",", "predicting_subjects", ",", "[", "filenumber", "]", ",", "predicted_subjects", ",", "controls", ",", "scale_factor", ")" ]
Calculates how well the fixations of n random subjects on one image can be predicted with the fixations of m other random subjects. Notes Function that uses intersubject_scores for computing the scores. Parameters fm : fixmat instance category : int Category from which the fixations are taken. filenumber : int Image from which fixations are taken. n_train : int The number of subjects which are used for prediction. n_predict : int The number of subjects to predict. controls : bool, optional If True (default), control fixations are drawn from the predicted subjects on other images of the same category (see intersubject_scores). If False, no explicit controls are used. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns tuple : prediction scores
[ "Calculates", "how", "well", "the", "fixations", "of", "n", "random", "subjects", "on", "one", "image", "can", "be", "predicted", "with", "the", "fixations", "of", "m", "other", "random", "subjects", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/bounds.py#L78-L121
nwilming/ocupy
ocupy/bounds.py
upper_bound
def upper_bound(fm, nr_subs = None, scale_factor = 1): """ Compute the inter-subject consistency upper bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and the corresponding value is an array with scores for each subject. """ nr_subs_total = len(np.unique(fm.SUBJECTINDEX)) if not nr_subs: nr_subs = nr_subs_total - 1 assert (nr_subs < nr_subs_total) # initialize output structure; every measure gets one dict with # category numbers as keys and numpy-arrays as values intersub_scores = [] for measure in range(len(measures.scores)): res_dict = {} result_vectors = [np.empty(nr_subs_total) + np.nan for _ in np.unique(fm.category)] res_dict.update(list(zip(np.unique(fm.category), result_vectors))) intersub_scores.append(res_dict) #compute inter-subject scores for every stimulus, with leave-one-out #over subjects for fm_cat in fm.by_field('category'): cat = fm_cat.category[0] for (sub_counter, sub) in enumerate(np.unique(fm_cat.SUBJECTINDEX)): image_scores = [] for fm_single in fm_cat.by_field('filenumber'): predicting_subs = (np.setdiff1d(np.unique( fm_single.SUBJECTINDEX),[sub])) np.random.shuffle(predicting_subs) predicting_subs = predicting_subs[0:nr_subs] predicting_fm = fm_single[ (ismember(fm_single.SUBJECTINDEX, predicting_subs))] predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub] try: predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor) except RuntimeError: predicting_fdm = None image_scores.append(measures.prediction_scores( predicting_fdm, predicted_fm)) for (measure, score) in enumerate(nanmean(image_scores, 0)): intersub_scores[measure][cat][sub_counter] = score return intersub_scores
python
def upper_bound(fm, nr_subs = None, scale_factor = 1): """ Compute the inter-subject consistency upper bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and the corresponding value is an array with scores for each subject. """ nr_subs_total = len(np.unique(fm.SUBJECTINDEX)) if not nr_subs: nr_subs = nr_subs_total - 1 assert (nr_subs < nr_subs_total) # initialize output structure; every measure gets one dict with # category numbers as keys and numpy-arrays as values intersub_scores = [] for measure in range(len(measures.scores)): res_dict = {} result_vectors = [np.empty(nr_subs_total) + np.nan for _ in np.unique(fm.category)] res_dict.update(list(zip(np.unique(fm.category), result_vectors))) intersub_scores.append(res_dict) #compute inter-subject scores for every stimulus, with leave-one-out #over subjects for fm_cat in fm.by_field('category'): cat = fm_cat.category[0] for (sub_counter, sub) in enumerate(np.unique(fm_cat.SUBJECTINDEX)): image_scores = [] for fm_single in fm_cat.by_field('filenumber'): predicting_subs = (np.setdiff1d(np.unique( fm_single.SUBJECTINDEX),[sub])) np.random.shuffle(predicting_subs) predicting_subs = predicting_subs[0:nr_subs] predicting_fm = fm_single[ (ismember(fm_single.SUBJECTINDEX, predicting_subs))] predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub] try: predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor) except RuntimeError: predicting_fdm = None image_scores.append(measures.prediction_scores( predicting_fdm, predicted_fm)) for (measure, score) in enumerate(nanmean(image_scores, 0)): intersub_scores[measure][cat][sub_counter] = score return intersub_scores
[ "def", "upper_bound", "(", "fm", ",", "nr_subs", "=", "None", ",", "scale_factor", "=", "1", ")", ":", "nr_subs_total", "=", "len", "(", "np", ".", "unique", "(", "fm", ".", "SUBJECTINDEX", ")", ")", "if", "not", "nr_subs", ":", "nr_subs", "=", "nr_subs_total", "-", "1", "assert", "(", "nr_subs", "<", "nr_subs_total", ")", "# initialize output structure; every measure gets one dict with", "# category numbers as keys and numpy-arrays as values", "intersub_scores", "=", "[", "]", "for", "measure", "in", "range", "(", "len", "(", "measures", ".", "scores", ")", ")", ":", "res_dict", "=", "{", "}", "result_vectors", "=", "[", "np", ".", "empty", "(", "nr_subs_total", ")", "+", "np", ".", "nan", "for", "_", "in", "np", ".", "unique", "(", "fm", ".", "category", ")", "]", "res_dict", ".", "update", "(", "list", "(", "zip", "(", "np", ".", "unique", "(", "fm", ".", "category", ")", ",", "result_vectors", ")", ")", ")", "intersub_scores", ".", "append", "(", "res_dict", ")", "#compute inter-subject scores for every stimulus, with leave-one-out", "#over subjects", "for", "fm_cat", "in", "fm", ".", "by_field", "(", "'category'", ")", ":", "cat", "=", "fm_cat", ".", "category", "[", "0", "]", "for", "(", "sub_counter", ",", "sub", ")", "in", "enumerate", "(", "np", ".", "unique", "(", "fm_cat", ".", "SUBJECTINDEX", ")", ")", ":", "image_scores", "=", "[", "]", "for", "fm_single", "in", "fm_cat", ".", "by_field", "(", "'filenumber'", ")", ":", "predicting_subs", "=", "(", "np", ".", "setdiff1d", "(", "np", ".", "unique", "(", "fm_single", ".", "SUBJECTINDEX", ")", ",", "[", "sub", "]", ")", ")", "np", ".", "random", ".", "shuffle", "(", "predicting_subs", ")", "predicting_subs", "=", "predicting_subs", "[", "0", ":", "nr_subs", "]", "predicting_fm", "=", "fm_single", "[", "(", "ismember", "(", "fm_single", ".", "SUBJECTINDEX", ",", "predicting_subs", ")", ")", "]", "predicted_fm", "=", "fm_single", "[", "fm_single", ".", "SUBJECTINDEX", "==", "sub", "]", "try", ":", "predicting_fdm", "=", "compute_fdm", "(", "predicting_fm", ",", "scale_factor", "=", "scale_factor", ")", "except", "RuntimeError", ":", "predicting_fdm", "=", "None", "image_scores", ".", "append", "(", "measures", ".", "prediction_scores", "(", "predicting_fdm", ",", "predicted_fm", ")", ")", "for", "(", "measure", ",", "score", ")", "in", "enumerate", "(", "nanmean", "(", "image_scores", ",", "0", ")", ")", ":", "intersub_scores", "[", "measure", "]", "[", "cat", "]", "[", "sub_counter", "]", "=", "score", "return", "intersub_scores" ]
compute the inter-subject consistency upper bound for a fixmat.

Input:
    fm : a fixmat instance
    nr_subs : the number of subjects used for the prediction.
        Defaults to the total number of subjects in the fixmat minus 1
    scale_factor : the scale factor of the FDMs. Default is 1.
Returns:
    A list of scores; the list contains one dictionary for each
    measure. Each dictionary contains one key for each category and
    the corresponding value is an array with scores for each subject.
[ "compute", "the", "inter", "-", "subject", "consistency", "upper", "bound", "for", "a", "fixmat", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/bounds.py#L123-L173
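A usage sketch for upper_bound, assuming a fixmat fm loaded elsewhere with ocupy's fixmat tooling; which measure sits at index 0 depends on the ordering of measures.scores, so that index is an assumption here.

    import numpy as np
    from ocupy.bounds import upper_bound

    scores = upper_bound(fm, nr_subs=5, scale_factor=0.5)  # fm: fixmat instance, assumed loaded elsewhere
    for cat, per_subject in scores[0].items():  # scores[0]: whichever measure comes first in measures.scores
        print(cat, np.nanmean(per_subject))     # nanmean skips subjects without a score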
nwilming/ocupy
ocupy/bounds.py
lower_bound
def lower_bound(fm, nr_subs = None, nr_imgs = None, scale_factor = 1): """ Compute the spatial bias lower bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 nr_imgs : the number of images used for prediction. If given, the same number will be used for every category. If not given, leave-one-out will be used in all categories. scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of spatial bias scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and corresponding values is an array with scores for each subject. """ nr_subs_total = len(np.unique(fm.SUBJECTINDEX)) if nr_subs is None: nr_subs = nr_subs_total - 1 assert (nr_subs < nr_subs_total) # initialize output structure; every measure gets one dict with # category numbers as keys and numpy-arrays as values sb_scores = [] for measure in range(len(measures.scores)): res_dict = {} result_vectors = [np.empty(nr_subs_total) + np.nan for _ in np.unique(fm.category)] res_dict.update(list(zip(np.unique(fm.category),result_vectors))) sb_scores.append(res_dict) # compute mean spatial bias predictive power for all subjects in all # categories for fm_cat in fm.by_field('category'): cat = fm_cat.category[0] nr_imgs_cat = len(np.unique(fm_cat.filenumber)) if not nr_imgs: nr_imgs_current = nr_imgs_cat - 1 else: nr_imgs_current = nr_imgs assert(nr_imgs_current < nr_imgs_cat) for (sub_counter, sub) in enumerate(np.unique(fm.SUBJECTINDEX)): image_scores = [] for fm_single in fm_cat.by_field('filenumber'): # Iterating by field filenumber makes filenumbers # in fm_single unique: Just take the first one to get the # filenumber for this fixmat fn = fm_single.filenumber[0] predicting_subs = (np.setdiff1d(np.unique( fm_cat.SUBJECTINDEX), [sub])) np.random.shuffle(predicting_subs) predicting_subs = predicting_subs[0:nr_subs] predicting_fns = (np.setdiff1d(np.unique( fm_cat.filenumber), [fn])) np.random.shuffle(predicting_fns) predicting_fns = predicting_fns[0:nr_imgs_current] predicting_fm = fm_cat[ (ismember(fm_cat.SUBJECTINDEX, predicting_subs)) & (ismember(fm_cat.filenumber, predicting_fns))] predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub] try: predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor) except RuntimeError: predicting_fdm = None image_scores.append(measures.prediction_scores(predicting_fdm, predicted_fm)) for (measure, score) in enumerate(nanmean(image_scores, 0)): sb_scores[measure][cat][sub_counter] = score return sb_scores
python
def lower_bound(fm, nr_subs = None, nr_imgs = None, scale_factor = 1): """ Compute the spatial bias lower bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 nr_imgs : the number of images used for prediction. If given, the same number will be used for every category. If not given, leave-one-out will be used in all categories. scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of spatial bias scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and corresponding values is an array with scores for each subject. """ nr_subs_total = len(np.unique(fm.SUBJECTINDEX)) if nr_subs is None: nr_subs = nr_subs_total - 1 assert (nr_subs < nr_subs_total) # initialize output structure; every measure gets one dict with # category numbers as keys and numpy-arrays as values sb_scores = [] for measure in range(len(measures.scores)): res_dict = {} result_vectors = [np.empty(nr_subs_total) + np.nan for _ in np.unique(fm.category)] res_dict.update(list(zip(np.unique(fm.category),result_vectors))) sb_scores.append(res_dict) # compute mean spatial bias predictive power for all subjects in all # categories for fm_cat in fm.by_field('category'): cat = fm_cat.category[0] nr_imgs_cat = len(np.unique(fm_cat.filenumber)) if not nr_imgs: nr_imgs_current = nr_imgs_cat - 1 else: nr_imgs_current = nr_imgs assert(nr_imgs_current < nr_imgs_cat) for (sub_counter, sub) in enumerate(np.unique(fm.SUBJECTINDEX)): image_scores = [] for fm_single in fm_cat.by_field('filenumber'): # Iterating by field filenumber makes filenumbers # in fm_single unique: Just take the first one to get the # filenumber for this fixmat fn = fm_single.filenumber[0] predicting_subs = (np.setdiff1d(np.unique( fm_cat.SUBJECTINDEX), [sub])) np.random.shuffle(predicting_subs) predicting_subs = predicting_subs[0:nr_subs] predicting_fns = (np.setdiff1d(np.unique( fm_cat.filenumber), [fn])) np.random.shuffle(predicting_fns) predicting_fns = predicting_fns[0:nr_imgs_current] predicting_fm = fm_cat[ (ismember(fm_cat.SUBJECTINDEX, predicting_subs)) & (ismember(fm_cat.filenumber, predicting_fns))] predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub] try: predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor) except RuntimeError: predicting_fdm = None image_scores.append(measures.prediction_scores(predicting_fdm, predicted_fm)) for (measure, score) in enumerate(nanmean(image_scores, 0)): sb_scores[measure][cat][sub_counter] = score return sb_scores
[ "def", "lower_bound", "(", "fm", ",", "nr_subs", "=", "None", ",", "nr_imgs", "=", "None", ",", "scale_factor", "=", "1", ")", ":", "nr_subs_total", "=", "len", "(", "np", ".", "unique", "(", "fm", ".", "SUBJECTINDEX", ")", ")", "if", "nr_subs", "is", "None", ":", "nr_subs", "=", "nr_subs_total", "-", "1", "assert", "(", "nr_subs", "<", "nr_subs_total", ")", "# initialize output structure; every measure gets one dict with", "# category numbers as keys and numpy-arrays as values", "sb_scores", "=", "[", "]", "for", "measure", "in", "range", "(", "len", "(", "measures", ".", "scores", ")", ")", ":", "res_dict", "=", "{", "}", "result_vectors", "=", "[", "np", ".", "empty", "(", "nr_subs_total", ")", "+", "np", ".", "nan", "for", "_", "in", "np", ".", "unique", "(", "fm", ".", "category", ")", "]", "res_dict", ".", "update", "(", "list", "(", "zip", "(", "np", ".", "unique", "(", "fm", ".", "category", ")", ",", "result_vectors", ")", ")", ")", "sb_scores", ".", "append", "(", "res_dict", ")", "# compute mean spatial bias predictive power for all subjects in all", "# categories", "for", "fm_cat", "in", "fm", ".", "by_field", "(", "'category'", ")", ":", "cat", "=", "fm_cat", ".", "category", "[", "0", "]", "nr_imgs_cat", "=", "len", "(", "np", ".", "unique", "(", "fm_cat", ".", "filenumber", ")", ")", "if", "not", "nr_imgs", ":", "nr_imgs_current", "=", "nr_imgs_cat", "-", "1", "else", ":", "nr_imgs_current", "=", "nr_imgs", "assert", "(", "nr_imgs_current", "<", "nr_imgs_cat", ")", "for", "(", "sub_counter", ",", "sub", ")", "in", "enumerate", "(", "np", ".", "unique", "(", "fm", ".", "SUBJECTINDEX", ")", ")", ":", "image_scores", "=", "[", "]", "for", "fm_single", "in", "fm_cat", ".", "by_field", "(", "'filenumber'", ")", ":", "# Iterating by field filenumber makes filenumbers", "# in fm_single unique: Just take the first one to get the", "# filenumber for this fixmat", "fn", "=", "fm_single", ".", "filenumber", "[", "0", "]", "predicting_subs", "=", "(", "np", ".", "setdiff1d", "(", "np", ".", "unique", "(", "fm_cat", ".", "SUBJECTINDEX", ")", ",", "[", "sub", "]", ")", ")", "np", ".", "random", ".", "shuffle", "(", "predicting_subs", ")", "predicting_subs", "=", "predicting_subs", "[", "0", ":", "nr_subs", "]", "predicting_fns", "=", "(", "np", ".", "setdiff1d", "(", "np", ".", "unique", "(", "fm_cat", ".", "filenumber", ")", ",", "[", "fn", "]", ")", ")", "np", ".", "random", ".", "shuffle", "(", "predicting_fns", ")", "predicting_fns", "=", "predicting_fns", "[", "0", ":", "nr_imgs_current", "]", "predicting_fm", "=", "fm_cat", "[", "(", "ismember", "(", "fm_cat", ".", "SUBJECTINDEX", ",", "predicting_subs", ")", ")", "&", "(", "ismember", "(", "fm_cat", ".", "filenumber", ",", "predicting_fns", ")", ")", "]", "predicted_fm", "=", "fm_single", "[", "fm_single", ".", "SUBJECTINDEX", "==", "sub", "]", "try", ":", "predicting_fdm", "=", "compute_fdm", "(", "predicting_fm", ",", "scale_factor", "=", "scale_factor", ")", "except", "RuntimeError", ":", "predicting_fdm", "=", "None", "image_scores", ".", "append", "(", "measures", ".", "prediction_scores", "(", "predicting_fdm", ",", "predicted_fm", ")", ")", "for", "(", "measure", ",", "score", ")", "in", "enumerate", "(", "nanmean", "(", "image_scores", ",", "0", ")", ")", ":", "sb_scores", "[", "measure", "]", "[", "cat", "]", "[", "sub_counter", "]", "=", "score", "return", "sb_scores" ]
Compute the spatial bias lower bound for a fixmat.

Input:
    fm : a fixmat instance
    nr_subs : the number of subjects used for the prediction.
        Defaults to the total number of subjects in the fixmat minus 1
    nr_imgs : the number of images used for prediction. If given,
        the same number will be used for every category. If not given,
        leave-one-out will be used in all categories.
    scale_factor : the scale factor of the FDMs. Default is 1.
Returns:
    A list of spatial bias scores; the list contains one dictionary
    for each measure. Each dictionary contains one key for each
    category and the corresponding value is an array with scores for
    each subject.
[ "Compute", "the", "spatial", "bias", "lower", "bound", "for", "a", "fixmat", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/bounds.py#L175-L243
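A companion sketch for lower_bound under the same assumptions (fm loaded elsewhere): comparing both bounds brackets the range in which a model's prediction score should fall.

    import numpy as np
    from ocupy.bounds import lower_bound, upper_bound

    lb = lower_bound(fm, nr_subs=5, nr_imgs=10)   # spatial bias floor
    ub = upper_bound(fm, nr_subs=5)               # inter-subject consistency ceiling
    # For measure m and category cat, a model should ideally score between
    # np.nanmean(lb[m][cat]) and np.nanmean(ub[m][cat]).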
nwilming/ocupy
ocupy/parallel.py
ind2sub
def ind2sub(ind, dimensions):
    """
    Calculates subscripts for indices into regularly
    spaced matrices.
    """
    # check that the index is within range
    if ind >= np.prod(dimensions):
        raise RuntimeError("ind2sub: index exceeds array size")
    cum_dims = list(dimensions)
    cum_dims.reverse()
    m = 1
    mult = []
    for d in cum_dims:
        m = m*d
        mult.append(m)
    mult.pop()
    mult.reverse()
    mult.append(1)
    indices = []
    for d in mult:
        # integer division: ind/d returned floats under Python 3
        indices.append((ind//d)+1)
        ind = ind - (ind//d)*d
    return indices
python
def ind2sub(ind, dimensions):
    """
    Calculates subscripts for indices into regularly
    spaced matrices.
    """
    # check that the index is within range
    if ind >= np.prod(dimensions):
        raise RuntimeError("ind2sub: index exceeds array size")
    cum_dims = list(dimensions)
    cum_dims.reverse()
    m = 1
    mult = []
    for d in cum_dims:
        m = m*d
        mult.append(m)
    mult.pop()
    mult.reverse()
    mult.append(1)
    indices = []
    for d in mult:
        # integer division: ind/d returned floats under Python 3
        indices.append((ind//d)+1)
        ind = ind - (ind//d)*d
    return indices
[ "def", "ind2sub", "(", "ind", ",", "dimensions", ")", ":", "# check that the index is within range", "if", "ind", ">=", "np", ".", "prod", "(", "dimensions", ")", ":", "raise", "RuntimeError", "(", "\"ind2sub: index exceeds array size\"", ")", "cum_dims", "=", "list", "(", "dimensions", ")", "cum_dims", ".", "reverse", "(", ")", "m", "=", "1", "mult", "=", "[", "]", "for", "d", "in", "cum_dims", ":", "m", "=", "m", "*", "d", "mult", ".", "append", "(", "m", ")", "mult", ".", "pop", "(", ")", "mult", ".", "reverse", "(", ")", "mult", ".", "append", "(", "1", ")", "indices", "=", "[", "]", "for", "d", "in", "mult", ":", "indices", ".", "append", "(", "(", "ind", "/", "d", ")", "+", "1", ")", "ind", "=", "ind", "-", "(", "ind", "/", "d", ")", "*", "d", "return", "indices" ]
Calculates subscripts for indices into regularly spaced matrices.
[ "Calculates", "subscripts", "for", "indices", "into", "regularly", "spaced", "matrixes", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L268-L289
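A worked example of the convention ind2sub uses: a 0-based linear index goes in, 1-based row-major subscripts come out.

    from ocupy.parallel import ind2sub

    print(ind2sub(4, [3, 4]))    # -> [2, 1]: row 2, column 1 of a 3x4 array
    print(ind2sub(11, [3, 4]))   # -> [3, 4]: the last element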
nwilming/ocupy
ocupy/parallel.py
sub2ind
def sub2ind(indices, dimensions):
    """
    An exemplary sub2ind implementation to create randomization
    scripts.

    This function calculates indices from subscripts into regularly spaced
    matrices.
    """
    # check that none of the indices exceeds the size of the array
    if any([i > j for i, j in zip(indices, dimensions)]):
        raise RuntimeError("sub2ind: an index exceeds its dimension's size")
    dims = list(dimensions)
    dims.append(1)
    dims.remove(dims[0])
    dims.reverse()
    ind = list(indices)
    ind.reverse()
    idx = 0
    mult = 1
    for (cnt, dim) in zip(ind, dims):
        mult = dim*mult
        idx = idx + (cnt-1)*mult
    return idx
python
def sub2ind(indices, dimensions):
    """
    An exemplary sub2ind implementation to create randomization
    scripts.

    This function calculates indices from subscripts into regularly spaced
    matrices.
    """
    # check that none of the indices exceeds the size of the array
    if any([i > j for i, j in zip(indices, dimensions)]):
        raise RuntimeError("sub2ind: an index exceeds its dimension's size")
    dims = list(dimensions)
    dims.append(1)
    dims.remove(dims[0])
    dims.reverse()
    ind = list(indices)
    ind.reverse()
    idx = 0
    mult = 1
    for (cnt, dim) in zip(ind, dims):
        mult = dim*mult
        idx = idx + (cnt-1)*mult
    return idx
[ "def", "sub2ind", "(", "indices", ",", "dimensions", ")", ":", "# check that none of the indices exceeds the size of the array", "if", "any", "(", "[", "i", ">", "j", "for", "i", ",", "j", "in", "zip", "(", "indices", ",", "dimensions", ")", "]", ")", ":", "raise", "RuntimeError", "(", "\"sub2ind:an index exceeds its dimension's size\"", ")", "dims", "=", "list", "(", "dimensions", ")", "dims", ".", "append", "(", "1", ")", "dims", ".", "remove", "(", "dims", "[", "0", "]", ")", "dims", ".", "reverse", "(", ")", "ind", "=", "list", "(", "indices", ")", "ind", ".", "reverse", "(", ")", "idx", "=", "0", "mult", "=", "1", "for", "(", "cnt", ",", "dim", ")", "in", "zip", "(", "ind", ",", "dims", ")", ":", "mult", "=", "dim", "*", "mult", "idx", "=", "idx", "+", "(", "cnt", "-", "1", ")", "*", "mult", "return", "idx" ]
An exemplary sub2ind implementation to create randomization
scripts.

This function calculates indices from subscripts into regularly spaced
matrices.
[ "An", "exemplary", "sub2ind", "implementation", "to", "create", "randomization", "scripts", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L292-L314
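The two helpers are inverses of each other: sub2ind maps 1-based row-major subscripts to a 0-based linear index, so the round trip through ind2sub is consistent.

    from ocupy.parallel import ind2sub, sub2ind

    idx = sub2ind([2, 1], [3, 4])  # 1-based subscripts in, 0-based linear index out
    print(idx)                     # -> 4
    print(ind2sub(idx, [3, 4]))    # -> [2, 1]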
nwilming/ocupy
ocupy/parallel.py
RestoreTaskStoreFactory
def RestoreTaskStoreFactory(store_class, chunk_size, restore_file, save_file): """ Restores a task store from file. """ intm_results = np.load(restore_file) intm = intm_results[intm_results.files[0]] idx = np.isnan(intm).flatten().nonzero()[0] partitions = math.ceil(len(idx) / float(chunk_size)) task_store = store_class(partitions, idx.tolist(), save_file) task_store.num_tasks = len(idx) # Also set up matrices for saving results for f in intm_results.files: task_store.__dict__[f] = intm_results[f] return task_store
python
def RestoreTaskStoreFactory(store_class, chunk_size, restore_file, save_file): """ Restores a task store from file. """ intm_results = np.load(restore_file) intm = intm_results[intm_results.files[0]] idx = np.isnan(intm).flatten().nonzero()[0] partitions = math.ceil(len(idx) / float(chunk_size)) task_store = store_class(partitions, idx.tolist(), save_file) task_store.num_tasks = len(idx) # Also set up matrices for saving results for f in intm_results.files: task_store.__dict__[f] = intm_results[f] return task_store
[ "def", "RestoreTaskStoreFactory", "(", "store_class", ",", "chunk_size", ",", "restore_file", ",", "save_file", ")", ":", "intm_results", "=", "np", ".", "load", "(", "restore_file", ")", "intm", "=", "intm_results", "[", "intm_results", ".", "files", "[", "0", "]", "]", "idx", "=", "np", ".", "isnan", "(", "intm", ")", ".", "flatten", "(", ")", ".", "nonzero", "(", ")", "[", "0", "]", "partitions", "=", "math", ".", "ceil", "(", "len", "(", "idx", ")", "/", "float", "(", "chunk_size", ")", ")", "task_store", "=", "store_class", "(", "partitions", ",", "idx", ".", "tolist", "(", ")", ",", "save_file", ")", "task_store", ".", "num_tasks", "=", "len", "(", "idx", ")", "# Also set up matrices for saving results", "for", "f", "in", "intm_results", ".", "files", ":", "task_store", ".", "__dict__", "[", "f", "]", "=", "intm_results", "[", "f", "]", "return", "task_store" ]
Restores a task store from file.
[ "Restores", "a", "task", "store", "from", "file", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L316-L329
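A restart sketch: the factory expects an .npz file whose first array holds NaN for every task that is still unfinished. The file names and the MyTaskStore class are hypothetical; the constructor signature (partitions, indices, save_file) is the one the factory itself calls.

    import numpy as np
    from ocupy.parallel import RestoreTaskStoreFactory

    store = RestoreTaskStoreFactory(MyTaskStore, chunk_size=100,
                                    restore_file='intermediate.npz',   # hypothetical file
                                    save_file='final.npz')             # hypothetical file
    print(store.num_tasks)  # number of NaN (unfinished) entries left to compute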
nwilming/ocupy
ocupy/parallel.py
TaskManager.xmlrpc_reschedule
def xmlrpc_reschedule(self): """ Reschedule all running tasks. """ if not len(self.scheduled_tasks) == 0: self.reschedule = list(self.scheduled_tasks.items()) self.scheduled_tasks = {} return True
python
def xmlrpc_reschedule(self): """ Reschedule all running tasks. """ if not len(self.scheduled_tasks) == 0: self.reschedule = list(self.scheduled_tasks.items()) self.scheduled_tasks = {} return True
[ "def", "xmlrpc_reschedule", "(", "self", ")", ":", "if", "not", "len", "(", "self", ".", "scheduled_tasks", ")", "==", "0", ":", "self", ".", "reschedule", "=", "list", "(", "self", ".", "scheduled_tasks", ".", "items", "(", ")", ")", "self", ".", "scheduled_tasks", "=", "{", "}", "return", "True" ]
Reschedule all running tasks.
[ "Reschedule", "all", "running", "tasks", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L61-L68
nwilming/ocupy
ocupy/parallel.py
TaskManager.xmlrpc_get_task
def xmlrpc_get_task(self): """ Return a new task description: ID and necessary parameters, all are given in a dictionary """ try: if len(self.reschedule) == 0: (task_id, cur_task) = next(self.task_iterator) else: (task_id, cur_task) = self.reschedule.pop() self.scheduled_tasks.update({task_id: cur_task}) return (task_id, cur_task.to_dict()) except StopIteration: print('StopIteration: No more tasks') return False except Exception as err: print('Some other error') print(err) return False
python
def xmlrpc_get_task(self): """ Return a new task description: ID and necessary parameters, all are given in a dictionary """ try: if len(self.reschedule) == 0: (task_id, cur_task) = next(self.task_iterator) else: (task_id, cur_task) = self.reschedule.pop() self.scheduled_tasks.update({task_id: cur_task}) return (task_id, cur_task.to_dict()) except StopIteration: print('StopIteration: No more tasks') return False except Exception as err: print('Some other error') print(err) return False
[ "def", "xmlrpc_get_task", "(", "self", ")", ":", "try", ":", "if", "len", "(", "self", ".", "reschedule", ")", "==", "0", ":", "(", "task_id", ",", "cur_task", ")", "=", "next", "(", "self", ".", "task_iterator", ")", "else", ":", "(", "task_id", ",", "cur_task", ")", "=", "self", ".", "reschedule", ".", "pop", "(", ")", "self", ".", "scheduled_tasks", ".", "update", "(", "{", "task_id", ":", "cur_task", "}", ")", "return", "(", "task_id", ",", "cur_task", ".", "to_dict", "(", ")", ")", "except", "StopIteration", ":", "print", "(", "'StopIteration: No more tasks'", ")", "return", "False", "except", "Exception", "as", "err", ":", "print", "(", "'Some other error'", ")", "print", "(", "err", ")", "return", "False" ]
Return a new task description: ID and necessary parameters, all are given in a dictionary
[ "Return", "a", "new", "task", "description", ":", "ID", "and", "necessary", "parameters", "all", "are", "given", "in", "a", "dictionary" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L70-L88
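A client-side sketch: the xmlrpc_ prefix suggests the TaskManager is served through Twisted's XML-RPC resource, which strips that prefix from remote method names; the server address is an assumption.

    import xmlrpc.client

    server = xmlrpc.client.ServerProxy('http://localhost:8000')  # assumed address
    task = server.get_task()   # (task_id, task_dict), or False when no tasks remain
    if task:
        task_id, task_dict = task
        # ...compute the chunk locally, then report back via task_done...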
nwilming/ocupy
ocupy/parallel.py
TaskManager.xmlrpc_task_done
def xmlrpc_task_done(self, result): """ Take the results of a computation and put it into the results list. """ (task_id, task_results) = result del self.scheduled_tasks[task_id] self.task_store.update_results(task_id, task_results) self.results += 1 return True
python
def xmlrpc_task_done(self, result): """ Take the results of a computation and put it into the results list. """ (task_id, task_results) = result del self.scheduled_tasks[task_id] self.task_store.update_results(task_id, task_results) self.results += 1 return True
[ "def", "xmlrpc_task_done", "(", "self", ",", "result", ")", ":", "(", "task_id", ",", "task_results", ")", "=", "result", "del", "self", ".", "scheduled_tasks", "[", "task_id", "]", "self", ".", "task_store", ".", "update_results", "(", "task_id", ",", "task_results", ")", "self", ".", "results", "+=", "1", "return", "True" ]
Take the results of a computation and put it into the results list.
[ "Take", "the", "results", "of", "a", "computation", "and", "put", "it", "into", "the", "results", "list", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L90-L98
nwilming/ocupy
ocupy/parallel.py
TaskManager.xmlrpc_status
def xmlrpc_status(self):
    """
    Return a status message
    """
    return ("""
        %i Jobs are still waiting for execution
        %i Jobs are being processed
        %i Jobs are done
        """ %(self.task_store.partitions - self.results -
                len(self.scheduled_tasks),
            len(self.scheduled_tasks),
            self.results))
python
def xmlrpc_status(self):
    """
    Return a status message
    """
    return ("""
        %i Jobs are still waiting for execution
        %i Jobs are being processed
        %i Jobs are done
        """ %(self.task_store.partitions - self.results -
                len(self.scheduled_tasks),
            len(self.scheduled_tasks),
            self.results))
[ "def", "xmlrpc_status", "(", "self", ")", ":", "return", "(", "\"\"\"\n %i Jobs are still wating for execution\n %i Jobs are being processed\n %i Jobs are done\n \"\"\"", "%", "(", "self", ".", "task_store", ".", "partitions", "-", "self", ".", "results", "-", "len", "(", "self", ".", "scheduled_tasks", ")", ",", "len", "(", "self", ".", "scheduled_tasks", ")", ",", "self", ".", "results", ")", ")" ]
Return a status message
[ "Return", "a", "status", "message" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L100-L112
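A monitoring sketch over the same assumed XML-RPC setup, also exercising reschedule and save2file.

    import xmlrpc.client

    server = xmlrpc.client.ServerProxy('http://localhost:8000')  # assumed address
    print(server.status())                 # waiting / in progress / done counts
    server.reschedule()                    # re-queue everything currently checked out
    server.save2file('manager_state.pkl')  # 1 on success, -1 on a pickling error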
nwilming/ocupy
ocupy/parallel.py
TaskManager.xmlrpc_save2file
def xmlrpc_save2file(self, filename): """ Save results and own state into file. """ savefile = open(filename,'wb') try: pickle.dump({'scheduled':self.scheduled_tasks, 'reschedule':self.reschedule},savefile) except pickle.PicklingError: return -1 savefile.close() return 1
python
def xmlrpc_save2file(self, filename): """ Save results and own state into file. """ savefile = open(filename,'wb') try: pickle.dump({'scheduled':self.scheduled_tasks, 'reschedule':self.reschedule},savefile) except pickle.PicklingError: return -1 savefile.close() return 1
[ "def", "xmlrpc_save2file", "(", "self", ",", "filename", ")", ":", "savefile", "=", "open", "(", "filename", ",", "'wb'", ")", "try", ":", "pickle", ".", "dump", "(", "{", "'scheduled'", ":", "self", ".", "scheduled_tasks", ",", "'reschedule'", ":", "self", ".", "reschedule", "}", ",", "savefile", ")", "except", "pickle", ".", "PicklingError", ":", "return", "-", "1", "savefile", ".", "close", "(", ")", "return", "1" ]
Save results and own state into file.
[ "Save", "results", "and", "own", "state", "into", "file", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L114-L125
nwilming/ocupy
ocupy/parallel.py
Worker.run
def run(self): """This function needs to be called to start the computation.""" (task_id, tasks) = self.server.get_task() self.task_store.from_dict(tasks) for (index, task) in self.task_store: result = self.compute(index, task) self.results.append(result) self.server.task_done((task_id, self.results))
python
def run(self): """This function needs to be called to start the computation.""" (task_id, tasks) = self.server.get_task() self.task_store.from_dict(tasks) for (index, task) in self.task_store: result = self.compute(index, task) self.results.append(result) self.server.task_done((task_id, self.results))
[ "def", "run", "(", "self", ")", ":", "(", "task_id", ",", "tasks", ")", "=", "self", ".", "server", ".", "get_task", "(", ")", "self", ".", "task_store", ".", "from_dict", "(", "tasks", ")", "for", "(", "index", ",", "task", ")", "in", "self", ".", "task_store", ":", "result", "=", "self", ".", "compute", "(", "index", ",", "task", ")", "self", ".", "results", ".", "append", "(", "result", ")", "self", ".", "server", ".", "task_done", "(", "(", "task_id", ",", "self", ".", "results", ")", ")" ]
This function needs to be called to start the computation.
[ "This", "function", "needs", "to", "be", "called", "to", "start", "the", "computation", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L158-L165
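A sketch of a concrete worker: run() fetches a chunk, calls compute() per task and reports the collected results back, so a subclass only has to supply compute(). The Worker constructor, which must set self.server, self.task_store and self.results, is not shown here and is assumed.

    from ocupy.parallel import Worker

    class SquareWorker(Worker):
        def compute(self, index, task):
            # toy computation: square whatever the task payload is
            return (index, task ** 2)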
nwilming/ocupy
ocupy/parallel.py
TaskStore.from_dict
def from_dict(self, description): """Configures the task store to be the task_store described in description""" assert(self.ident == description['ident']) self.partitions = description['partitions'] self.indices = description['indices']
python
def from_dict(self, description): """Configures the task store to be the task_store described in description""" assert(self.ident == description['ident']) self.partitions = description['partitions'] self.indices = description['indices']
[ "def", "from_dict", "(", "self", ",", "description", ")", ":", "assert", "(", "self", ".", "ident", "==", "description", "[", "'ident'", "]", ")", "self", ".", "partitions", "=", "description", "[", "'partitions'", "]", "self", ".", "indices", "=", "description", "[", "'indices'", "]" ]
Configures the task store to be the task_store described in description
[ "Configures", "the", "task", "store", "to", "be", "the", "task_store", "described", "in", "description" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L216-L221
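A round-trip sketch: the dict consumed here is produced by a to_dict counterpart (referenced by TaskManager.xmlrpc_get_task but not shown), so the exact keys are inferred from from_dict itself; store is assumed to be a constructed TaskStore.

    description = {'ident': store.ident,  # from_dict asserts the idents match
                   'partitions': 4,
                   'indices': [0, 1, 2, 3]}
    store.from_dict(description)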
nwilming/ocupy
ocupy/parallel.py
TaskStore.partition
def partition(self):
    """Partitions all tasks into groups of tasks. A group is
    represented by a task_store object that indexes a sub-
    set of tasks."""
    step = int(math.ceil(self.num_tasks / float(self.partitions)))
    if self.indices is None:
        slice_ind = list(range(0, self.num_tasks, step))
        for start in slice_ind:
            yield self.__class__(self.partitions, list(range(start, start + step)))
    else:
        slice_ind = list(range(0, len(self.indices), step))
        for start in slice_ind:
            if start + step <= len(self.indices):
                yield self.__class__(self.partitions,
                        self.indices[start: start + step])
            else:
                yield self.__class__(self.partitions,
                        self.indices[start:])
python
def partition(self):
    """Partitions all tasks into groups of tasks. A group is
    represented by a task_store object that indexes a sub-
    set of tasks."""
    step = int(math.ceil(self.num_tasks / float(self.partitions)))
    if self.indices is None:
        slice_ind = list(range(0, self.num_tasks, step))
        for start in slice_ind:
            yield self.__class__(self.partitions, list(range(start, start + step)))
    else:
        slice_ind = list(range(0, len(self.indices), step))
        for start in slice_ind:
            if start + step <= len(self.indices):
                yield self.__class__(self.partitions,
                        self.indices[start: start + step])
            else:
                yield self.__class__(self.partitions,
                        self.indices[start:])
[ "def", "partition", "(", "self", ")", ":", "step", "=", "int", "(", "math", ".", "ceil", "(", "self", ".", "num_tasks", "/", "float", "(", "self", ".", "partitions", ")", ")", ")", "if", "self", ".", "indices", "==", "None", ":", "slice_ind", "=", "list", "(", "range", "(", "0", ",", "self", ".", "num_tasks", ",", "step", ")", ")", "for", "start", "in", "slice_ind", ":", "yield", "self", ".", "__class__", "(", "self", ".", "partitions", ",", "list", "(", "range", "(", "start", ",", "start", "+", "step", ")", ")", ")", "else", ":", "slice_ind", "=", "list", "(", "range", "(", "0", ",", "len", "(", "self", ".", "indices", ")", ",", "step", ")", ")", "for", "start", "in", "slice_ind", ":", "if", "start", "+", "step", "<=", "len", "(", "self", ".", "indices", ")", ":", "yield", "self", ".", "__class__", "(", "self", ".", "partitions", ",", "self", ".", "indices", "[", "start", ":", "start", "+", "step", "]", ")", "else", ":", "yield", "self", ".", "__class__", "(", "self", ".", "partitions", ",", "self", ".", "indices", "[", "start", ":", "]", ")" ]
Partitions all tasks into groups of tasks. A group is represented by a task_store object that indexes a sub- set of tasks.
[ "Partitions", "all", "tasks", "into", "groups", "of", "tasks", ".", "A", "group", "is", "represented", "by", "a", "task_store", "object", "that", "indexes", "a", "sub", "-", "set", "of", "tasks", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L228-L245
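A chunking sketch, assuming a TaskStore subclass constructed with (partitions, indices) as partition() itself does, and num_tasks set to the number of indices.

    # With 10 explicit indices and partitions = 3, step = ceil(10/3) = 4,
    # so partition() yields index chunks of size 4, 4 and 2:
    for chunk in store.partition():
        print(chunk.indices)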
nwilming/ocupy
ocupy/spline_base.py
fit3d
def fit3d(samples, e_x, e_y, e_z, remove_zeros = False, **kw): """Fits a 3D distribution with splines. Input: samples: Array Array of samples from a probability distribution e_x: Array Edges that define the events in the probability distribution along the x direction. For example, e_x[0] < samples[0] <= e_x[1] picks out all samples that are associated with the first event. e_y: Array See e_x, but for the y direction. remove_zeros: Bool If True, events that are not observed will not be part of the fitting process. If False, those events will be modelled as finfo('float').eps **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Tuple of arrays Sequence of knots that were used for the spline basis (x,y) """ height, width, depth = len(e_y)-1, len(e_x)-1, len(e_z)-1 (p_est, _) = np.histogramdd(samples, (e_x, e_y, e_z)) p_est = p_est/sum(p_est.flat) p_est = p_est.flatten() if remove_zeros: non_zero = ~(p_est == 0) else: non_zero = (p_est >= 0) basis = spline_base3d(width,height, depth, **kw) model = linear_model.BayesianRidge() model.fit(basis[:, non_zero].T, p_est[:,np.newaxis][non_zero,:]) return (model.predict(basis.T).reshape((width, height, depth)), p_est.reshape((width, height, depth)))
python
def fit3d(samples, e_x, e_y, e_z, remove_zeros = False, **kw): """Fits a 3D distribution with splines. Input: samples: Array Array of samples from a probability distribution e_x: Array Edges that define the events in the probability distribution along the x direction. For example, e_x[0] < samples[0] <= e_x[1] picks out all samples that are associated with the first event. e_y: Array See e_x, but for the y direction. remove_zeros: Bool If True, events that are not observed will not be part of the fitting process. If False, those events will be modelled as finfo('float').eps **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Tuple of arrays Sequence of knots that were used for the spline basis (x,y) """ height, width, depth = len(e_y)-1, len(e_x)-1, len(e_z)-1 (p_est, _) = np.histogramdd(samples, (e_x, e_y, e_z)) p_est = p_est/sum(p_est.flat) p_est = p_est.flatten() if remove_zeros: non_zero = ~(p_est == 0) else: non_zero = (p_est >= 0) basis = spline_base3d(width,height, depth, **kw) model = linear_model.BayesianRidge() model.fit(basis[:, non_zero].T, p_est[:,np.newaxis][non_zero,:]) return (model.predict(basis.T).reshape((width, height, depth)), p_est.reshape((width, height, depth)))
[ "def", "fit3d", "(", "samples", ",", "e_x", ",", "e_y", ",", "e_z", ",", "remove_zeros", "=", "False", ",", "*", "*", "kw", ")", ":", "height", ",", "width", ",", "depth", "=", "len", "(", "e_y", ")", "-", "1", ",", "len", "(", "e_x", ")", "-", "1", ",", "len", "(", "e_z", ")", "-", "1", "(", "p_est", ",", "_", ")", "=", "np", ".", "histogramdd", "(", "samples", ",", "(", "e_x", ",", "e_y", ",", "e_z", ")", ")", "p_est", "=", "p_est", "/", "sum", "(", "p_est", ".", "flat", ")", "p_est", "=", "p_est", ".", "flatten", "(", ")", "if", "remove_zeros", ":", "non_zero", "=", "~", "(", "p_est", "==", "0", ")", "else", ":", "non_zero", "=", "(", "p_est", ">=", "0", ")", "basis", "=", "spline_base3d", "(", "width", ",", "height", ",", "depth", ",", "*", "*", "kw", ")", "model", "=", "linear_model", ".", "BayesianRidge", "(", ")", "model", ".", "fit", "(", "basis", "[", ":", ",", "non_zero", "]", ".", "T", ",", "p_est", "[", ":", ",", "np", ".", "newaxis", "]", "[", "non_zero", ",", ":", "]", ")", "return", "(", "model", ".", "predict", "(", "basis", ".", "T", ")", ".", "reshape", "(", "(", "width", ",", "height", ",", "depth", ")", ")", ",", "p_est", ".", "reshape", "(", "(", "width", ",", "height", ",", "depth", ")", ")", ")" ]
Fits a 3D distribution with splines.

Input:
    samples: Array
        Array of samples from a probability distribution
    e_x: Array
        Edges that define the events in the probability
        distribution along the x direction. For example,
        e_x[0] < samples[0] <= e_x[1] picks out all
        samples that are associated with the first event.
    e_y: Array
        See e_x, but for the y direction.
    e_z: Array
        See e_x, but for the z direction.
    remove_zeros: Bool
        If True, events that are not observed will not
        be part of the fitting process. If False, those
        events will be modelled as finfo('float').eps
    **kw: Arguments that are passed on to spline_base3d.

Returns:
    distribution: Array
        An array that gives an estimate of probability for
        events defined by e.
    knots: Tuple of arrays
        Sequence of knots that were used for the spline basis (x,y)
[ "Fits", "a", "3D", "distribution", "with", "splines", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L9-L48
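A runnable sketch for fit3d on synthetic data; the knot counts are forwarded to spline_base3d and must stay below the number of bins per axis.

    import numpy as np
    from ocupy.spline_base import fit3d

    samples = np.random.randn(5000, 3)     # Nx3 draws from a 3D normal
    edges = np.linspace(-4, 4, 9)          # 8 bins per axis
    fitted, p_est = fit3d(samples, edges, edges, edges,
                          nr_knots_x=4, nr_knots_y=4, nr_knots_z=4)
    print(fitted.shape)                    # (8, 8, 8), matching the raw histogram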
nwilming/ocupy
ocupy/spline_base.py
fit2d
def fit2d(samples,e_x, e_y, remove_zeros = False, p_est = None, **kw):
    """Fits a 2D distribution with splines.

    Input:
        samples: Matrix or list of arrays
            If matrix, it must be of size Nx2, where N is the number
            of observations. If list, it must contain two arrays of length
            N.
        e_x: Array
            Edges that define the events in the probability
            distribution along the x direction. For example,
            e_x[0] < samples[0] <= e_x[1] picks out all
            samples that are associated with the first event.
        e_y: Array
            See e_x, but for the y direction.
        remove_zeros: Bool
            If True, events that are not observed will not
            be part of the fitting process. If False, those
            events will be modelled as finfo('float').eps
        p_est: Matrix, optional
            Precomputed histogram with y in dim 0 and x in dim 1
            (e.g. as returned by np.histogram2d). If given, samples,
            e_x and e_y are ignored.
        **kw: Arguments that are passed on to spline_base2d.

    Returns:
        distribution: Array
            An array that gives an estimate of probability for
            events defined by e.
        knots: Tuple of arrays
            Sequence of knots that were used for the spline basis (x,y)
    """
    if p_est is None:
        height = len(e_y)-1
        width = len(e_x)-1
        (p_est, _) = np.histogramdd(samples, (e_x, e_y))
    else:
        p_est = p_est.T
        width, height = p_est.shape
    # p_est now contains x in dim 0 and y in dim 1
    shape = p_est.shape
    p_est = (p_est/sum(p_est.flat)).reshape(shape)
    mx = p_est.sum(1)
    my = p_est.sum(0)
    # Transpose so that y is in dim 0, then flatten to match the basis layout
    p_est = p_est.T.flatten()
    basis, knots = spline_base2d(width, height, marginal_x = mx, marginal_y = my, **kw)
    model = linear_model.BayesianRidge()
    if remove_zeros:
        non_zero = ~(p_est == 0)
        model.fit(basis[:, non_zero].T, p_est[non_zero])
    else:
        non_zero = (p_est > 0)
        p_est[~non_zero] = np.finfo(float).eps
        model.fit(basis.T, p_est)
    return (model.predict(basis.T).reshape((height, width)),
            p_est.reshape((height, width)),
            knots)
python
def fit2d(samples,e_x, e_y, remove_zeros = False, p_est = None, **kw):
    """Fits a 2D distribution with splines.

    Input:
        samples: Matrix or list of arrays
            If matrix, it must be of size Nx2, where N is the number
            of observations. If list, it must contain two arrays of length
            N.
        e_x: Array
            Edges that define the events in the probability
            distribution along the x direction. For example,
            e_x[0] < samples[0] <= e_x[1] picks out all
            samples that are associated with the first event.
        e_y: Array
            See e_x, but for the y direction.
        remove_zeros: Bool
            If True, events that are not observed will not
            be part of the fitting process. If False, those
            events will be modelled as finfo('float').eps
        p_est: Matrix, optional
            Precomputed histogram with y in dim 0 and x in dim 1
            (e.g. as returned by np.histogram2d). If given, samples,
            e_x and e_y are ignored.
        **kw: Arguments that are passed on to spline_base2d.

    Returns:
        distribution: Array
            An array that gives an estimate of probability for
            events defined by e.
        knots: Tuple of arrays
            Sequence of knots that were used for the spline basis (x,y)
    """
    if p_est is None:
        height = len(e_y)-1
        width = len(e_x)-1
        (p_est, _) = np.histogramdd(samples, (e_x, e_y))
    else:
        p_est = p_est.T
        width, height = p_est.shape
    # p_est now contains x in dim 0 and y in dim 1
    shape = p_est.shape
    p_est = (p_est/sum(p_est.flat)).reshape(shape)
    mx = p_est.sum(1)
    my = p_est.sum(0)
    # Transpose so that y is in dim 0, then flatten to match the basis layout
    p_est = p_est.T.flatten()
    basis, knots = spline_base2d(width, height, marginal_x = mx, marginal_y = my, **kw)
    model = linear_model.BayesianRidge()
    if remove_zeros:
        non_zero = ~(p_est == 0)
        model.fit(basis[:, non_zero].T, p_est[non_zero])
    else:
        non_zero = (p_est > 0)
        p_est[~non_zero] = np.finfo(float).eps
        model.fit(basis.T, p_est)
    return (model.predict(basis.T).reshape((height, width)),
            p_est.reshape((height, width)),
            knots)
[ "def", "fit2d", "(", "samples", ",", "e_x", ",", "e_y", ",", "remove_zeros", "=", "False", ",", "p_est", "=", "None", ",", "*", "*", "kw", ")", ":", "if", "p_est", "is", "None", ":", "height", "=", "len", "(", "e_y", ")", "-", "1", "width", "=", "len", "(", "e_x", ")", "-", "1", "(", "p_est", ",", "_", ")", "=", "np", ".", "histogramdd", "(", "samples", ",", "(", "e_x", ",", "e_y", ")", ")", "else", ":", "p_est", "=", "p_est", ".", "T", "width", ",", "height", "=", "p_est", ".", "shape", "# p_est contains x in dim 1 and y in dim 0", "shape", "=", "p_est", ".", "shape", "p_est", "=", "(", "p_est", "/", "sum", "(", "p_est", ".", "flat", ")", ")", ".", "reshape", "(", "shape", ")", "mx", "=", "p_est", ".", "sum", "(", "1", ")", "my", "=", "p_est", ".", "sum", "(", "0", ")", "# Transpose hist to have x in dim 0", "p_est", "=", "p_est", ".", "T", ".", "flatten", "(", ")", "basis", ",", "knots", "=", "spline_base2d", "(", "width", ",", "height", ",", "marginal_x", "=", "mx", ",", "marginal_y", "=", "my", ",", "*", "*", "kw", ")", "model", "=", "linear_model", ".", "BayesianRidge", "(", ")", "if", "remove_zeros", ":", "non_zero", "=", "~", "(", "p_est", "==", "0", ")", "model", ".", "fit", "(", "basis", "[", ":", ",", "non_zero", "]", ".", "T", ",", "p_est", "[", "non_zero", "]", ")", "else", ":", "non_zero", "=", "(", "p_est", ">=", "0", ")", "p_est", "[", "~", "non_zero", ",", ":", "]", "=", "np", ".", "finfo", "(", "float", ")", ".", "eps", "model", ".", "fit", "(", "basis", ".", "T", ",", "p_est", ")", "return", "(", "model", ".", "predict", "(", "basis", ".", "T", ")", ".", "reshape", "(", "(", "height", ",", "width", ")", ")", ",", "p_est", ".", "reshape", "(", "(", "height", ",", "width", ")", ")", ",", "knots", ")" ]
Fits a 2D distribution with splines.

Input:
    samples: Matrix or list of arrays
        If matrix, it must be of size Nx2, where N is the number
        of observations. If list, it must contain two arrays of length
        N.
    e_x: Array
        Edges that define the events in the probability
        distribution along the x direction. For example,
        e_x[0] < samples[0] <= e_x[1] picks out all
        samples that are associated with the first event.
    e_y: Array
        See e_x, but for the y direction.
    remove_zeros: Bool
        If True, events that are not observed will not
        be part of the fitting process. If False, those
        events will be modelled as finfo('float').eps
    p_est: Matrix, optional
        Precomputed histogram with y in dim 0 and x in dim 1
        (e.g. as returned by np.histogram2d). If given, samples,
        e_x and e_y are ignored.
    **kw: Arguments that are passed on to spline_base2d.

Returns:
    distribution: Array
        An array that gives an estimate of probability for
        events defined by e.
    knots: Tuple of arrays
        Sequence of knots that were used for the spline basis (x,y)
[ "Fits", "a", "2D", "distribution", "with", "splines", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L51-L103
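A runnable sketch for fit2d; note that the returned maps put y (height) in dim 0.

    import numpy as np
    from ocupy.spline_base import fit2d

    samples = np.random.multivariate_normal([0, 0], np.eye(2), 5000)  # Nx2
    e_x = np.linspace(-4, 4, 33)           # 32 bins along x
    e_y = np.linspace(-4, 4, 25)           # 24 bins along y
    fdm, p_est, knots = fit2d(samples, e_x, e_y, nr_knots_x=8, nr_knots_y=8)
    print(fdm.shape)                       # (24, 32)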
nwilming/ocupy
ocupy/spline_base.py
fit1d
def fit1d(samples, e, remove_zeros = False, **kw): """Fits a 1D distribution with splines. Input: samples: Array Array of samples from a probability distribution e: Array Edges that define the events in the probability distribution. For example, e[0] < x <= e[1] is the range of values that are associated with the first event. **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Array Sequence of knots that were used for the spline basis """ samples = samples[~np.isnan(samples)] length = len(e)-1 hist,_ = np.histogramdd(samples, (e,)) hist = hist/sum(hist) basis, knots = spline_base1d(length, marginal = hist, **kw) non_zero = hist>0 model = linear_model.BayesianRidge() if remove_zeros: model.fit(basis[non_zero, :], hist[:,np.newaxis][non_zero,:]) else: hist[~non_zero] = np.finfo(float).eps model.fit(basis, hist[:,np.newaxis]) return model.predict(basis), hist, knots
python
def fit1d(samples, e, remove_zeros = False, **kw): """Fits a 1D distribution with splines. Input: samples: Array Array of samples from a probability distribution e: Array Edges that define the events in the probability distribution. For example, e[0] < x <= e[1] is the range of values that are associated with the first event. **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Array Sequence of knots that were used for the spline basis """ samples = samples[~np.isnan(samples)] length = len(e)-1 hist,_ = np.histogramdd(samples, (e,)) hist = hist/sum(hist) basis, knots = spline_base1d(length, marginal = hist, **kw) non_zero = hist>0 model = linear_model.BayesianRidge() if remove_zeros: model.fit(basis[non_zero, :], hist[:,np.newaxis][non_zero,:]) else: hist[~non_zero] = np.finfo(float).eps model.fit(basis, hist[:,np.newaxis]) return model.predict(basis), hist, knots
[ "def", "fit1d", "(", "samples", ",", "e", ",", "remove_zeros", "=", "False", ",", "*", "*", "kw", ")", ":", "samples", "=", "samples", "[", "~", "np", ".", "isnan", "(", "samples", ")", "]", "length", "=", "len", "(", "e", ")", "-", "1", "hist", ",", "_", "=", "np", ".", "histogramdd", "(", "samples", ",", "(", "e", ",", ")", ")", "hist", "=", "hist", "/", "sum", "(", "hist", ")", "basis", ",", "knots", "=", "spline_base1d", "(", "length", ",", "marginal", "=", "hist", ",", "*", "*", "kw", ")", "non_zero", "=", "hist", ">", "0", "model", "=", "linear_model", ".", "BayesianRidge", "(", ")", "if", "remove_zeros", ":", "model", ".", "fit", "(", "basis", "[", "non_zero", ",", ":", "]", ",", "hist", "[", ":", ",", "np", ".", "newaxis", "]", "[", "non_zero", ",", ":", "]", ")", "else", ":", "hist", "[", "~", "non_zero", "]", "=", "np", ".", "finfo", "(", "float", ")", ".", "eps", "model", ".", "fit", "(", "basis", ",", "hist", "[", ":", ",", "np", ".", "newaxis", "]", ")", "return", "model", ".", "predict", "(", "basis", ")", ",", "hist", ",", "knots" ]
Fits a 1D distribution with splines.

Input:
    samples: Array
        Array of samples from a probability distribution
    e: Array
        Edges that define the events in the probability
        distribution. For example, e[0] < x <= e[1] is the range
        of values that are associated with the first event.
    **kw: Arguments that are passed on to spline_base1d.

Returns:
    distribution: Array
        An array that gives an estimate of probability for
        events defined by e.
    knots: Array
        Sequence of knots that were used for the spline basis
[ "Fits", "a", "1D", "distribution", "with", "splines", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L105-L137
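A runnable sketch for fit1d; since it smooths a normalized histogram, the fitted values sum to roughly one.

    import numpy as np
    from ocupy.spline_base import fit1d

    samples = np.random.randn(10000)
    edges = np.linspace(-4, 4, 41)         # 40 events
    density, hist, knots = fit1d(samples, edges, nr_knots=10)
    print(density.sum())                   # approximately 1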
nwilming/ocupy
ocupy/spline_base.py
knots_from_marginal
def knots_from_marginal(marginal, nr_knots, spline_order):
    """
    Determines knot placement based on a marginal distribution.

    It places knots such that each knot covers the same amount
    of probability mass. Two of the knots are reserved for the
    borders which are treated separately. For example, a uniform
    distribution with 5 knots will cause the knots to be equally
    spaced with 25% of the probability mass between each two
    knots.

    Input:
        marginal: Array
            Estimate of the marginal distribution used to estimate
            knot placement.
        nr_knots: int
            Number of knots to be placed.
        spline_order: int
            Order of the splines

    Returns:
        knots: Array
            Sequence of knot positions
    """
    cumsum = np.cumsum(marginal)
    cumsum = cumsum/cumsum.max()
    borders = np.linspace(0,1,nr_knots)
    knot_placement = [0] + np.unique([np.where(cumsum>=b)[0][0] for b in borders[1:-1]]).tolist() +[len(marginal)-1]
    knots = augknt(knot_placement, spline_order)
    return knots
python
def knots_from_marginal(marginal, nr_knots, spline_order):
    """
    Determines knot placement based on a marginal distribution.

    It places knots such that each knot covers the same amount
    of probability mass. Two of the knots are reserved for the
    borders which are treated separately. For example, a uniform
    distribution with 5 knots will cause the knots to be equally
    spaced with 25% of the probability mass between each two
    knots.

    Input:
        marginal: Array
            Estimate of the marginal distribution used to estimate
            knot placement.
        nr_knots: int
            Number of knots to be placed.
        spline_order: int
            Order of the splines

    Returns:
        knots: Array
            Sequence of knot positions
    """
    cumsum = np.cumsum(marginal)
    cumsum = cumsum/cumsum.max()
    borders = np.linspace(0,1,nr_knots)
    knot_placement = [0] + np.unique([np.where(cumsum>=b)[0][0] for b in borders[1:-1]]).tolist() +[len(marginal)-1]
    knots = augknt(knot_placement, spline_order)
    return knots
[ "def", "knots_from_marginal", "(", "marginal", ",", "nr_knots", ",", "spline_order", ")", ":", "cumsum", "=", "np", ".", "cumsum", "(", "marginal", ")", "cumsum", "=", "cumsum", "/", "cumsum", ".", "max", "(", ")", "borders", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "nr_knots", ")", "knot_placement", "=", "[", "0", "]", "+", "np", ".", "unique", "(", "[", "np", ".", "where", "(", "cumsum", ">=", "b", ")", "[", "0", "]", "[", "0", "]", "for", "b", "in", "borders", "[", "1", ":", "-", "1", "]", "]", ")", ".", "tolist", "(", ")", "+", "[", "len", "(", "marginal", ")", "-", "1", "]", "knots", "=", "augknt", "(", "knot_placement", ",", "spline_order", ")", "return", "knots" ]
Determines knot placement based on a marginal distribution.

It places knots such that each knot covers the same amount
of probability mass. Two of the knots are reserved for the
borders which are treated separately. For example, a uniform
distribution with 5 knots will cause the knots to be equally
spaced with 25% of the probability mass between each two
knots.

Input:
    marginal: Array
        Estimate of the marginal distribution used to estimate
        knot placement.
    nr_knots: int
        Number of knots to be placed.
    spline_order: int
        Order of the splines

Returns:
    knots: Array
        Sequence of knot positions
[ "Determines", "knot", "placement", "based", "on", "a", "marginal", "distribution", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L139-L168
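A sketch showing the equal-mass placement: with a decaying marginal, the interior knots crowd toward the low indices where the probability mass sits (augknt is the knot-augmentation helper defined elsewhere in this module).

    import numpy as np
    from ocupy.spline_base import knots_from_marginal

    marginal = np.exp(-np.linspace(0, 5, 100))   # mass concentrated near index 0
    knots = knots_from_marginal(marginal, nr_knots=5, spline_order=3)
    print(knots)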
nwilming/ocupy
ocupy/spline_base.py
spline_base1d
def spline_base1d(length, nr_knots = 20, spline_order = 5, marginal = None): """Computes a 1D spline basis Input: length: int length of each basis nr_knots: int Number of knots, i.e. number of basis functions. spline_order: int Order of the splines. marginal: array, optional Estimate of the marginal distribution of the input to be fitted. If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. """ if marginal is None: knots = augknt(np.linspace(0,length+1, nr_knots), spline_order) else: knots = knots_from_marginal(marginal, nr_knots, spline_order) x_eval = np.arange(1,length+1).astype(float) Bsplines = spcol(x_eval,knots,spline_order) return Bsplines, knots
python
def spline_base1d(length, nr_knots = 20, spline_order = 5, marginal = None): """Computes a 1D spline basis Input: length: int length of each basis nr_knots: int Number of knots, i.e. number of basis functions. spline_order: int Order of the splines. marginal: array, optional Estimate of the marginal distribution of the input to be fitted. If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. """ if marginal is None: knots = augknt(np.linspace(0,length+1, nr_knots), spline_order) else: knots = knots_from_marginal(marginal, nr_knots, spline_order) x_eval = np.arange(1,length+1).astype(float) Bsplines = spcol(x_eval,knots,spline_order) return Bsplines, knots
[ "def", "spline_base1d", "(", "length", ",", "nr_knots", "=", "20", ",", "spline_order", "=", "5", ",", "marginal", "=", "None", ")", ":", "if", "marginal", "is", "None", ":", "knots", "=", "augknt", "(", "np", ".", "linspace", "(", "0", ",", "length", "+", "1", ",", "nr_knots", ")", ",", "spline_order", ")", "else", ":", "knots", "=", "knots_from_marginal", "(", "marginal", ",", "nr_knots", ",", "spline_order", ")", "x_eval", "=", "np", ".", "arange", "(", "1", ",", "length", "+", "1", ")", ".", "astype", "(", "float", ")", "Bsplines", "=", "spcol", "(", "x_eval", ",", "knots", ",", "spline_order", ")", "return", "Bsplines", ",", "knots" ]
Computes a 1D spline basis Input: length: int length of each basis nr_knots: int Number of knots, i.e. number of basis functions. spline_order: int Order of the splines. marginal: array, optional Estimate of the marginal distribution of the input to be fitted. If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced.
[ "Computes", "a", "1D", "spline", "basis", "Input", ":", "length", ":", "int", "length", "of", "each", "basis", "nr_knots", ":", "int", "Number", "of", "knots", "i", ".", "e", ".", "number", "of", "basis", "functions", ".", "spline_order", ":", "int", "Order", "of", "the", "splines", ".", "marginal", ":", "array", "optional", "Estimate", "of", "the", "marginal", "distribution", "of", "the", "input", "to", "be", "fitted", ".", "If", "given", "it", "is", "used", "to", "determine", "the", "positioning", "of", "knots", "each", "knot", "will", "cover", "the", "same", "amount", "of", "probability", "mass", ".", "If", "not", "given", "knots", "are", "equally", "spaced", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L171-L194
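A shape sketch for spline_base1d: one row per evaluation point, one column per basis function (given spcol's collocation-matrix convention used in this module).

    from ocupy.spline_base import spline_base1d

    basis, knots = spline_base1d(100, nr_knots=12, spline_order=3)
    print(basis.shape)                     # (100, number_of_coefficients)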
nwilming/ocupy
ocupy/spline_base.py
spline_base2d
def spline_base2d(width, height, nr_knots_x = 20.0, nr_knots_y = 20.0, spline_order = 5, marginal_x = None, marginal_y = None): """Computes a set of 2D spline basis functions. The basis functions cover the entire space in height*width and can for example be used to create fixation density maps. Input: width: int width of each basis height: int height of each basis nr_knots_x: int of knots in x (width) direction. nr_knots_y: int of knots in y (height) direction. spline_order: int Order of the spline. marginal_x: array, optional Estimate of marginal distribution of the input to be fitted along the x-direction (width). If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. marginal_y: array, optional Marginal distribution along the y-direction (height). If given, it is used to determine the positioning of knots. Each knot will cover the same amount of probability mass. Output: basis: Matrix Matrix of size n*(width*height) that contains in each row one vectorized basis. knots: Tuple (x,y) are knot arrays that show the placement of knots. """ if not (nr_knots_x<width and nr_knots_y<height): raise RuntimeError("Too many knots for size of the base") if marginal_x is None: knots_x = augknt(np.linspace(0,width+1,nr_knots_x), spline_order) else: knots_x = knots_from_marginal(marginal_x, nr_knots_x, spline_order) if marginal_y is None: knots_y = augknt(np.linspace(0,height+1, nr_knots_y), spline_order) else: knots_y = knots_from_marginal(marginal_y, nr_knots_y, spline_order) x_eval = np.arange(1,width+1).astype(float) y_eval = np.arange(1,height+1).astype(float) spline_setx = spcol(x_eval, knots_x, spline_order) spline_sety = spcol(y_eval, knots_y, spline_order) nr_coeff = [spline_sety.shape[1], spline_setx.shape[1]] dim_bspline = [nr_coeff[0]*nr_coeff[1], len(x_eval)*len(y_eval)] # construct 2D B-splines nr_basis = 0 bspline = np.zeros(dim_bspline) for IDX1 in range(0,nr_coeff[0]): for IDX2 in range(0, nr_coeff[1]): rand_coeff = np.zeros((nr_coeff[0] , nr_coeff[1])) rand_coeff[IDX1,IDX2] = 1 tmp = np.dot(spline_sety,rand_coeff) bspline[nr_basis,:] = np.dot(tmp,spline_setx.T).reshape((1,-1)) nr_basis = nr_basis+1 return bspline, (knots_x, knots_y)
python
def spline_base2d(width, height, nr_knots_x = 20.0, nr_knots_y = 20.0, spline_order = 5, marginal_x = None, marginal_y = None): """Computes a set of 2D spline basis functions. The basis functions cover the entire space in height*width and can for example be used to create fixation density maps. Input: width: int width of each basis height: int height of each basis nr_knots_x: int of knots in x (width) direction. nr_knots_y: int of knots in y (height) direction. spline_order: int Order of the spline. marginal_x: array, optional Estimate of marginal distribution of the input to be fitted along the x-direction (width). If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. marginal_y: array, optional Marginal distribution along the y-direction (height). If given, it is used to determine the positioning of knots. Each knot will cover the same amount of probability mass. Output: basis: Matrix Matrix of size n*(width*height) that contains in each row one vectorized basis. knots: Tuple (x,y) are knot arrays that show the placement of knots. """ if not (nr_knots_x<width and nr_knots_y<height): raise RuntimeError("Too many knots for size of the base") if marginal_x is None: knots_x = augknt(np.linspace(0,width+1,nr_knots_x), spline_order) else: knots_x = knots_from_marginal(marginal_x, nr_knots_x, spline_order) if marginal_y is None: knots_y = augknt(np.linspace(0,height+1, nr_knots_y), spline_order) else: knots_y = knots_from_marginal(marginal_y, nr_knots_y, spline_order) x_eval = np.arange(1,width+1).astype(float) y_eval = np.arange(1,height+1).astype(float) spline_setx = spcol(x_eval, knots_x, spline_order) spline_sety = spcol(y_eval, knots_y, spline_order) nr_coeff = [spline_sety.shape[1], spline_setx.shape[1]] dim_bspline = [nr_coeff[0]*nr_coeff[1], len(x_eval)*len(y_eval)] # construct 2D B-splines nr_basis = 0 bspline = np.zeros(dim_bspline) for IDX1 in range(0,nr_coeff[0]): for IDX2 in range(0, nr_coeff[1]): rand_coeff = np.zeros((nr_coeff[0] , nr_coeff[1])) rand_coeff[IDX1,IDX2] = 1 tmp = np.dot(spline_sety,rand_coeff) bspline[nr_basis,:] = np.dot(tmp,spline_setx.T).reshape((1,-1)) nr_basis = nr_basis+1 return bspline, (knots_x, knots_y)
[ "def", "spline_base2d", "(", "width", ",", "height", ",", "nr_knots_x", "=", "20.0", ",", "nr_knots_y", "=", "20.0", ",", "spline_order", "=", "5", ",", "marginal_x", "=", "None", ",", "marginal_y", "=", "None", ")", ":", "if", "not", "(", "nr_knots_x", "<", "width", "and", "nr_knots_y", "<", "height", ")", ":", "raise", "RuntimeError", "(", "\"Too many knots for size of the base\"", ")", "if", "marginal_x", "is", "None", ":", "knots_x", "=", "augknt", "(", "np", ".", "linspace", "(", "0", ",", "width", "+", "1", ",", "nr_knots_x", ")", ",", "spline_order", ")", "else", ":", "knots_x", "=", "knots_from_marginal", "(", "marginal_x", ",", "nr_knots_x", ",", "spline_order", ")", "if", "marginal_y", "is", "None", ":", "knots_y", "=", "augknt", "(", "np", ".", "linspace", "(", "0", ",", "height", "+", "1", ",", "nr_knots_y", ")", ",", "spline_order", ")", "else", ":", "knots_y", "=", "knots_from_marginal", "(", "marginal_y", ",", "nr_knots_y", ",", "spline_order", ")", "x_eval", "=", "np", ".", "arange", "(", "1", ",", "width", "+", "1", ")", ".", "astype", "(", "float", ")", "y_eval", "=", "np", ".", "arange", "(", "1", ",", "height", "+", "1", ")", ".", "astype", "(", "float", ")", "spline_setx", "=", "spcol", "(", "x_eval", ",", "knots_x", ",", "spline_order", ")", "spline_sety", "=", "spcol", "(", "y_eval", ",", "knots_y", ",", "spline_order", ")", "nr_coeff", "=", "[", "spline_sety", ".", "shape", "[", "1", "]", ",", "spline_setx", ".", "shape", "[", "1", "]", "]", "dim_bspline", "=", "[", "nr_coeff", "[", "0", "]", "*", "nr_coeff", "[", "1", "]", ",", "len", "(", "x_eval", ")", "*", "len", "(", "y_eval", ")", "]", "# construct 2D B-splines ", "nr_basis", "=", "0", "bspline", "=", "np", ".", "zeros", "(", "dim_bspline", ")", "for", "IDX1", "in", "range", "(", "0", ",", "nr_coeff", "[", "0", "]", ")", ":", "for", "IDX2", "in", "range", "(", "0", ",", "nr_coeff", "[", "1", "]", ")", ":", "rand_coeff", "=", "np", ".", "zeros", "(", "(", "nr_coeff", "[", "0", "]", ",", "nr_coeff", "[", "1", "]", ")", ")", "rand_coeff", "[", "IDX1", ",", "IDX2", "]", "=", "1", "tmp", "=", "np", ".", "dot", "(", "spline_sety", ",", "rand_coeff", ")", "bspline", "[", "nr_basis", ",", ":", "]", "=", "np", ".", "dot", "(", "tmp", ",", "spline_setx", ".", "T", ")", ".", "reshape", "(", "(", "1", ",", "-", "1", ")", ")", "nr_basis", "=", "nr_basis", "+", "1", "return", "bspline", ",", "(", "knots_x", ",", "knots_y", ")" ]
Computes a set of 2D spline basis functions.

The basis functions cover the entire space in height*width and can
for example be used to create fixation density maps.

Input:
    width: int
        width of each basis
    height: int
        height of each basis
    nr_knots_x: int
        Number of knots in x (width) direction.
    nr_knots_y: int
        Number of knots in y (height) direction.
    spline_order: int
        Order of the spline.
    marginal_x: array, optional
        Estimate of marginal distribution of the input to be fitted
        along the x-direction (width). If given, it is used to determine
        the positioning of knots; each knot will cover the same amount
        of probability mass. If not given, knots are equally spaced.
    marginal_y: array, optional
        Marginal distribution along the y-direction (height). If given,
        it is used to determine the positioning of knots. Each knot
        will cover the same amount of probability mass.
Output:
    basis: Matrix
        Matrix of size n*(width*height) that contains in each row
        one vectorized basis.
    knots: Tuple
        (x,y) are knot arrays that show the placement of knots.
[ "Computes", "a", "set", "of", "2D", "spline", "basis", "functions", ".", "The", "basis", "functions", "cover", "the", "entire", "space", "in", "height", "*", "width", "and", "can", "for", "example", "be", "used", "to", "create", "fixation", "density", "maps", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L197-L257
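Hedged usage sketch for the function above (not part of the dataset record): it assumes `ocupy.spline_base` is importable as suggested by func_path_in_repository, and the sizes and knot counts are made up.

import numpy as np
from ocupy import spline_base

# Build a coarse cubic basis over a 64x48 (width x height) grid.
basis, (knots_x, knots_y) = spline_base.spline_base2d(
    64, 48, nr_knots_x=8, nr_knots_y=8, spline_order=3)
print(basis.shape)                 # (n_bases, 48*64): one vectorized basis per row
single = basis[0].reshape(48, 64)  # a single basis back in (height, width) form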
nwilming/ocupy
ocupy/spline_base.py
spline_base3d
def spline_base3d( width, height, depth, nr_knots_x = 10.0, nr_knots_y = 10.0, nr_knots_z=10, spline_order = 3, marginal_x = None, marginal_y = None, marginal_z = None): """Computes a set of 3D spline basis functions. For a description of the parameters see spline_base2d. """ if not nr_knots_z < depth: raise RuntimeError("Too many knots for size of the base") basis2d, (knots_x, knots_y) = spline_base2d(height, width, nr_knots_x, nr_knots_y, spline_order, marginal_x, marginal_y) if marginal_z is not None: knots_z = knots_from_marginal(marginal_z, nr_knots_z, spline_order) else: knots_z = augknt(np.linspace(0,depth+1, nr_knots_z), spline_order) z_eval = np.arange(1,depth+1).astype(float) spline_setz = spcol(z_eval, knots_z, spline_order) bspline = np.zeros((basis2d.shape[0]*len(z_eval), height*width*depth)) basis_nr = 0 for spline_a in spline_setz.T: for spline_b in basis2d: spline_b = spline_b.reshape((height, width)) bspline[basis_nr, :] = (spline_b[:,:,np.newaxis] * spline_a[:]).flat basis_nr +=1 return bspline, (knots_x, knots_y, knots_z)
python
def spline_base3d( width, height, depth, nr_knots_x = 10.0, nr_knots_y = 10.0, nr_knots_z=10, spline_order = 3, marginal_x = None, marginal_y = None, marginal_z = None): """Computes a set of 3D spline basis functions. For a description of the parameters see spline_base2d. """ if not nr_knots_z < depth: raise RuntimeError("Too many knots for size of the base") basis2d, (knots_x, knots_y) = spline_base2d(height, width, nr_knots_x, nr_knots_y, spline_order, marginal_x, marginal_y) if marginal_z is not None: knots_z = knots_from_marginal(marginal_z, nr_knots_z, spline_order) else: knots_z = augknt(np.linspace(0,depth+1, nr_knots_z), spline_order) z_eval = np.arange(1,depth+1).astype(float) spline_setz = spcol(z_eval, knots_z, spline_order) bspline = np.zeros((basis2d.shape[0]*len(z_eval), height*width*depth)) basis_nr = 0 for spline_a in spline_setz.T: for spline_b in basis2d: spline_b = spline_b.reshape((height, width)) bspline[basis_nr, :] = (spline_b[:,:,np.newaxis] * spline_a[:]).flat basis_nr +=1 return bspline, (knots_x, knots_y, knots_z)
[ "def", "spline_base3d", "(", "width", ",", "height", ",", "depth", ",", "nr_knots_x", "=", "10.0", ",", "nr_knots_y", "=", "10.0", ",", "nr_knots_z", "=", "10", ",", "spline_order", "=", "3", ",", "marginal_x", "=", "None", ",", "marginal_y", "=", "None", ",", "marginal_z", "=", "None", ")", ":", "if", "not", "nr_knots_z", "<", "depth", ":", "raise", "RuntimeError", "(", "\"Too many knots for size of the base\"", ")", "basis2d", ",", "(", "knots_x", ",", "knots_y", ")", "=", "spline_base2d", "(", "height", ",", "width", ",", "nr_knots_x", ",", "nr_knots_y", ",", "spline_order", ",", "marginal_x", ",", "marginal_y", ")", "if", "marginal_z", "is", "not", "None", ":", "knots_z", "=", "knots_from_marginal", "(", "marginal_z", ",", "nr_knots_z", ",", "spline_order", ")", "else", ":", "knots_z", "=", "augknt", "(", "np", ".", "linspace", "(", "0", ",", "depth", "+", "1", ",", "nr_knots_z", ")", ",", "spline_order", ")", "z_eval", "=", "np", ".", "arange", "(", "1", ",", "depth", "+", "1", ")", ".", "astype", "(", "float", ")", "spline_setz", "=", "spcol", "(", "z_eval", ",", "knots_z", ",", "spline_order", ")", "bspline", "=", "np", ".", "zeros", "(", "(", "basis2d", ".", "shape", "[", "0", "]", "*", "len", "(", "z_eval", ")", ",", "height", "*", "width", "*", "depth", ")", ")", "basis_nr", "=", "0", "for", "spline_a", "in", "spline_setz", ".", "T", ":", "for", "spline_b", "in", "basis2d", ":", "spline_b", "=", "spline_b", ".", "reshape", "(", "(", "height", ",", "width", ")", ")", "bspline", "[", "basis_nr", ",", ":", "]", "=", "(", "spline_b", "[", ":", ",", ":", ",", "np", ".", "newaxis", "]", "*", "spline_a", "[", ":", "]", ")", ".", "flat", "basis_nr", "+=", "1", "return", "bspline", ",", "(", "knots_x", ",", "knots_y", ",", "knots_z", ")" ]
Computes a set of 3D spline basis functions. For a description of the parameters see spline_base2d.
[ "Computes", "a", "set", "of", "3D", "spline", "basis", "functions", ".", "For", "a", "description", "of", "the", "parameters", "see", "spline_base2d", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L259-L283
nwilming/ocupy
ocupy/spline_base.py
spline
def spline(x,knots,p,i=0.0):
    """Evaluates the ith spline basis given by knots on points in x"""
    assert(p+1<len(knots))
    return np.array([N(float(u),int(i),int(p),knots) for u in x])
python
def spline(x,knots,p,i=0.0):
    """Evaluates the ith spline basis given by knots on points in x"""
    assert(p+1<len(knots))
    return np.array([N(float(u),int(i),int(p),knots) for u in x])
[ "def", "spline", "(", "x", ",", "knots", ",", "p", ",", "i", "=", "0.0", ")", ":", "assert", "(", "p", "+", "1", "<", "len", "(", "knots", ")", ")", "return", "np", ".", "array", "(", "[", "N", "(", "float", "(", "u", ")", ",", "float", "(", "i", ")", ",", "float", "(", "p", ")", ",", "knots", ")", "for", "u", "in", "x", "]", ")" ]
Evaluates the ith spline basis given by knots on points in x
[ "Evaluates", "the", "ith", "spline", "basis", "given", "by", "knots", "on", "points", "in", "x" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L285-L288
nwilming/ocupy
ocupy/spline_base.py
spcol
def spcol(x,knots,spline_order):
    """Computes the spline collocation matrix for knots in x.

    The spline collocation matrix contains all m-p-1 bases
    defined by knots. Specifically it contains the ith basis
    in the ith column.

    Input:
        x: vector to evaluate the bases on
        knots: vector of knots
        spline_order: order of the spline
    Output:
        colmat: m x m-p matrix
            The collocation matrix has size m x m-p where m
            denotes the number of points the basis is evaluated
            on and p is the spline order. The columns contain
            the ith basis of knots evaluated on x.
    """
    colmat = np.nan*np.ones((len(x),len(knots) - spline_order-1))
    for i in range(0,len(knots) - spline_order -1):
        colmat[:,i] = spline(x,knots,spline_order,i)
    return colmat
python
def spcol(x,knots,spline_order):
    """Computes the spline collocation matrix for knots in x.

    The spline collocation matrix contains all m-p-1 bases
    defined by knots. Specifically it contains the ith basis
    in the ith column.

    Input:
        x: vector to evaluate the bases on
        knots: vector of knots
        spline_order: order of the spline
    Output:
        colmat: m x m-p matrix
            The collocation matrix has size m x m-p where m
            denotes the number of points the basis is evaluated
            on and p is the spline order. The columns contain
            the ith basis of knots evaluated on x.
    """
    colmat = np.nan*np.ones((len(x),len(knots) - spline_order-1))
    for i in range(0,len(knots) - spline_order -1):
        colmat[:,i] = spline(x,knots,spline_order,i)
    return colmat
[ "def", "spcol", "(", "x", ",", "knots", ",", "spline_order", ")", ":", "colmat", "=", "np", ".", "nan", "*", "np", ".", "ones", "(", "(", "len", "(", "x", ")", ",", "len", "(", "knots", ")", "-", "spline_order", "-", "1", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "knots", ")", "-", "spline_order", "-", "1", ")", ":", "colmat", "[", ":", ",", "i", "]", "=", "spline", "(", "x", ",", "knots", ",", "spline_order", ",", "i", ")", "return", "colmat" ]
Computes the spline collocation matrix for knots in x.

The spline collocation matrix contains all m-p-1 bases
defined by knots. Specifically it contains the ith basis
in the ith column.

Input:
    x: vector to evaluate the bases on
    knots: vector of knots
    spline_order: order of the spline
Output:
    colmat: m x m-p matrix
        The collocation matrix has size m x m-p where m
        denotes the number of points the basis is evaluated
        on and p is the spline order. The columns contain
        the ith basis of knots evaluated on x.
[ "Computes", "the", "spline", "colocation", "matrix", "for", "knots", "in", "x", ".", "The", "spline", "collocation", "matrix", "contains", "all", "m", "-", "p", "-", "1", "bases", "defined", "by", "knots", ".", "Specifically", "it", "contains", "the", "ith", "basis", "in", "the", "ith", "column", ".", "Input", ":", "x", ":", "vector", "to", "evaluate", "the", "bases", "on", "knots", ":", "vector", "of", "knots", "spline_order", ":", "order", "of", "the", "spline", "Output", ":", "colmat", ":", "m", "x", "m", "-", "p", "matrix", "The", "colocation", "matrix", "has", "size", "m", "x", "m", "-", "p", "where", "m", "denotes", "the", "number", "of", "points", "the", "basis", "is", "evaluated", "on", "and", "p", "is", "the", "spline", "order", ".", "The", "colums", "contain", "the", "ith", "basis", "of", "knots", "evaluated", "on", "x", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L290-L311
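A small sketch of the collocation matrix in use, assuming the same module import path; the knot grid and evaluation points are illustrative.

import numpy as np
from ocupy.spline_base import augknt, spcol

knots = augknt(np.linspace(0.0, 10.0, 6), 3)   # clamped knot vector for cubic splines
x = np.linspace(0.5, 9.5, 20)                  # evaluation points inside the span
colmat = spcol(x, knots, 3)
print(colmat.shape)        # (20, len(knots)-3-1): one column per basis
print(colmat.sum(axis=1))  # rows sum to ~1 inside the span (partition of unity)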
nwilming/ocupy
ocupy/spline_base.py
augknt
def augknt(knots,order): """Augment knot sequence such that some boundary conditions are met.""" a = [] [a.append(knots[0]) for t in range(0,order)] [a.append(k) for k in knots] [a.append(knots[-1]) for t in range(0,order)] return np.array(a)
python
def augknt(knots,order): """Augment knot sequence such that some boundary conditions are met.""" a = [] [a.append(knots[0]) for t in range(0,order)] [a.append(k) for k in knots] [a.append(knots[-1]) for t in range(0,order)] return np.array(a)
[ "def", "augknt", "(", "knots", ",", "order", ")", ":", "a", "=", "[", "]", "[", "a", ".", "append", "(", "knots", "[", "0", "]", ")", "for", "t", "in", "range", "(", "0", ",", "order", ")", "]", "[", "a", ".", "append", "(", "k", ")", "for", "k", "in", "knots", "]", "[", "a", ".", "append", "(", "knots", "[", "-", "1", "]", ")", "for", "t", "in", "range", "(", "0", ",", "order", ")", "]", "return", "np", ".", "array", "(", "a", ")" ]
Augment knot sequence such that some boundary conditions are met.
[ "Augment", "knot", "sequence", "such", "that", "some", "boundary", "conditions", "are", "met", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L313-L320
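A quick illustration of what the augmentation produces (same assumed import path):

import numpy as np
from ocupy.spline_base import augknt

print(augknt(np.array([0.0, 1.0, 2.0]), 2))
# -> [0. 0. 0. 1. 2. 2. 2.]: the end knots are repeated `order` extra times,
# which clamps the spline at the boundaries.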
nwilming/ocupy
ocupy/spline_base.py
N
def N(u,i,p,knots): """Compute Spline Basis Evaluates the spline basis of order p defined by knots at knot i and point u. """ if p == 0: if knots[i] < u and u <=knots[i+1]: return 1.0 else: return 0.0 else: try: k = (( float((u-knots[i]))/float((knots[i+p] - knots[i]) )) * N(u,i,p-1,knots)) except ZeroDivisionError: k = 0.0 try: q = (( float((knots[i+p+1] - u))/float((knots[i+p+1] - knots[i+1]))) * N(u,i+1,p-1,knots)) except ZeroDivisionError: q = 0.0 return float(k + q)
python
def N(u,i,p,knots): """Compute Spline Basis Evaluates the spline basis of order p defined by knots at knot i and point u. """ if p == 0: if knots[i] < u and u <=knots[i+1]: return 1.0 else: return 0.0 else: try: k = (( float((u-knots[i]))/float((knots[i+p] - knots[i]) )) * N(u,i,p-1,knots)) except ZeroDivisionError: k = 0.0 try: q = (( float((knots[i+p+1] - u))/float((knots[i+p+1] - knots[i+1]))) * N(u,i+1,p-1,knots)) except ZeroDivisionError: q = 0.0 return float(k + q)
[ "def", "N", "(", "u", ",", "i", ",", "p", ",", "knots", ")", ":", "if", "p", "==", "0", ":", "if", "knots", "[", "i", "]", "<", "u", "and", "u", "<=", "knots", "[", "i", "+", "1", "]", ":", "return", "1.0", "else", ":", "return", "0.0", "else", ":", "try", ":", "k", "=", "(", "(", "float", "(", "(", "u", "-", "knots", "[", "i", "]", ")", ")", "/", "float", "(", "(", "knots", "[", "i", "+", "p", "]", "-", "knots", "[", "i", "]", ")", ")", ")", "*", "N", "(", "u", ",", "i", ",", "p", "-", "1", ",", "knots", ")", ")", "except", "ZeroDivisionError", ":", "k", "=", "0.0", "try", ":", "q", "=", "(", "(", "float", "(", "(", "knots", "[", "i", "+", "p", "+", "1", "]", "-", "u", ")", ")", "/", "float", "(", "(", "knots", "[", "i", "+", "p", "+", "1", "]", "-", "knots", "[", "i", "+", "1", "]", ")", ")", ")", "*", "N", "(", "u", ",", "i", "+", "1", ",", "p", "-", "1", ",", "knots", ")", ")", "except", "ZeroDivisionError", ":", "q", "=", "0.0", "return", "float", "(", "k", "+", "q", ")" ]
Compute Spline Basis Evaluates the spline basis of order p defined by knots at knot i and point u.
[ "Compute", "Spline", "Basis", "Evaluates", "the", "spline", "basis", "of", "order", "p", "defined", "by", "knots", "at", "knot", "i", "and", "point", "u", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/spline_base.py#L322-L344
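A hedged check of the Cox-de Boor recursion above: for a clamped linear knot vector, basis 1 is the hat function that peaks at the interior knot (import path assumed as before).

from ocupy.spline_base import N, augknt

knots = augknt([0.0, 1.0, 2.0], 1)   # -> [0, 0, 1, 2, 2]
print(N(0.5, 1, 1, knots))           # 0.5: rising edge of the hat
print(N(1.0, 1, 1, knots))           # 1.0: the peak at the interior knot
print(N(1.5, 1, 1, knots))           # 0.5: falling edge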
nwilming/ocupy
ocupy/measures.py
prediction_scores
def prediction_scores(prediction, fm, **kw):
    """
    Evaluates a prediction against fixations in a fixmat with different measures.

    The default measures which are used are AUC, NSS and KL-divergence. This
    can be changed by setting the list of measures with set_scores.

    As different measures need potentially different parameters, the kw
    dictionary can be used to pass arguments to measures. Every named
    argument (except fm and prediction) of a measure that is included in
    kw.keys() will be filled with the value stored in kw.

    Example:

    >>> prediction_scores(P, FM, ctr_loc = (y,x))

    In this case the AUC will be computed with control points (y,x), because
    the measure 'roc_model' has 'ctr_loc' as named argument.

    Input:
        prediction : 2D numpy array
            The prediction that should be evaluated
        fm : Fixmat
            The eyetracking data to evaluate against
    Output:
        Tuple of prediction scores. The order of the scores is determined
        by order of measures.scores.
    """
    if prediction is None:
        return [np.NaN for measure in scores]
    results = []
    for measure in scores:
        (args, _, _, _) = inspect.getargspec(measure)
        if len(args)>2:
            # Filter dictionary, such that only the keys that are
            # expected by the measure are in it
            mdict = {}
            [mdict.update({key:value}) for (key, value) in list(kw.items())
                if key in args]
            score = measure(prediction, fm, **mdict)
        else:
            score = measure(prediction, fm)
        results.append(score)
    return results
python
def prediction_scores(prediction, fm, **kw):
    """
    Evaluates a prediction against fixations in a fixmat with different measures.

    The default measures which are used are AUC, NSS and KL-divergence. This
    can be changed by setting the list of measures with set_scores.

    As different measures need potentially different parameters, the kw
    dictionary can be used to pass arguments to measures. Every named
    argument (except fm and prediction) of a measure that is included in
    kw.keys() will be filled with the value stored in kw.

    Example:

    >>> prediction_scores(P, FM, ctr_loc = (y,x))

    In this case the AUC will be computed with control points (y,x), because
    the measure 'roc_model' has 'ctr_loc' as named argument.

    Input:
        prediction : 2D numpy array
            The prediction that should be evaluated
        fm : Fixmat
            The eyetracking data to evaluate against
    Output:
        Tuple of prediction scores. The order of the scores is determined
        by order of measures.scores.
    """
    if prediction is None:
        return [np.NaN for measure in scores]
    results = []
    for measure in scores:
        (args, _, _, _) = inspect.getargspec(measure)
        if len(args)>2:
            # Filter dictionary, such that only the keys that are
            # expected by the measure are in it
            mdict = {}
            [mdict.update({key:value}) for (key, value) in list(kw.items())
                if key in args]
            score = measure(prediction, fm, **mdict)
        else:
            score = measure(prediction, fm)
        results.append(score)
    return results
[ "def", "prediction_scores", "(", "prediction", ",", "fm", ",", "*", "*", "kw", ")", ":", "if", "prediction", "==", "None", ":", "return", "[", "np", ".", "NaN", "for", "measure", "in", "scores", "]", "results", "=", "[", "]", "for", "measure", "in", "scores", ":", "(", "args", ",", "_", ",", "_", ",", "_", ")", "=", "inspect", ".", "getargspec", "(", "measure", ")", "if", "len", "(", "args", ")", ">", "2", ":", "# Filter dictionary, such that only the keys that are", "# expected by the measure are in it", "mdict", "=", "{", "}", "[", "mdict", ".", "update", "(", "{", "key", ":", "value", "}", ")", "for", "(", "key", ",", "value", ")", "in", "list", "(", "kw", ".", "items", "(", ")", ")", "if", "key", "in", "args", "]", "score", "=", "measure", "(", "prediction", ",", "fm", ",", "*", "*", "mdict", ")", "else", ":", "score", "=", "measure", "(", "prediction", ",", "fm", ")", "results", ".", "append", "(", "score", ")", "return", "results" ]
Evaluates a prediction against fixations in a fixmat with different measures. The default measures which are used are AUC, NSS and KL-divergence. This can be changed by setting the list of measures with set_scores. As different measures need potentially different parameters, the kw dictionary can be used to pass arguments to measures. Every named argument (except fm and prediction) of a measure that is included in kw.keys() will be filled with the value stored in kw. Example: >>> prediction_scores(P, FM, ctr_loc = (y,x)) In this case the AUC will be computed with control points (y,x), because the measure 'roc_model' has 'ctr_loc' as named argument. Input: prediction : 2D numpy array The prediction that should be evaluated fm : Fixmat The eyetracking data to evaluate against Output: Tuple of prediction scores. The order of the scores is determined by order of measures.scores.
[ "Evaluates", "a", "prediction", "against", "fixations", "in", "a", "fixmat", "with", "different", "measures", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L23-L64
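A standalone sketch of the keyword-filtering idea in the function above; `roc_like` is a hypothetical stand-in for a real measure, and getfullargspec replaces the long-deprecated getargspec.

import inspect

def roc_like(prediction, fm, ctr_loc=None):   # hypothetical measure
    return 0.5

kw = {'ctr_loc': (3, 7), 'irrelevant': 42}
args = inspect.getfullargspec(roc_like).args
mdict = {k: v for k, v in kw.items() if k in args}
print(mdict)   # {'ctr_loc': (3, 7)}: only arguments the measure accepts survive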
nwilming/ocupy
ocupy/measures.py
kldiv_model
def kldiv_model(prediction, fm): """ wraps kldiv functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Should be filtered for the image corresponding to the prediction """ (_, r_x) = calc_resize_factor(prediction, fm.image_size) q = np.array(prediction, copy=True) q -= np.min(q.flatten()) q /= np.sum(q.flatten()) return kldiv(None, q, distp = fm, scale_factor = r_x)
python
def kldiv_model(prediction, fm): """ wraps kldiv functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Should be filtered for the image corresponding to the prediction """ (_, r_x) = calc_resize_factor(prediction, fm.image_size) q = np.array(prediction, copy=True) q -= np.min(q.flatten()) q /= np.sum(q.flatten()) return kldiv(None, q, distp = fm, scale_factor = r_x)
[ "def", "kldiv_model", "(", "prediction", ",", "fm", ")", ":", "(", "_", ",", "r_x", ")", "=", "calc_resize_factor", "(", "prediction", ",", "fm", ".", "image_size", ")", "q", "=", "np", ".", "array", "(", "prediction", ",", "copy", "=", "True", ")", "q", "-=", "np", ".", "min", "(", "q", ".", "flatten", "(", ")", ")", "q", "/=", "np", ".", "sum", "(", "q", ".", "flatten", "(", ")", ")", "return", "kldiv", "(", "None", ",", "q", ",", "distp", "=", "fm", ",", "scale_factor", "=", "r_x", ")" ]
wraps kldiv functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Should be filtered for the image corresponding to the prediction
[ "wraps", "kldiv", "functionality", "for", "model", "evaluation" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L80-L94
nwilming/ocupy
ocupy/measures.py
kldiv
def kldiv(p, q, distp = None, distq = None, scale_factor = 1):
    """
    Computes the Kullback-Leibler divergence between two distributions.

    Parameters
        p : Matrix
            The first probability distribution
        q : Matrix
            The second probability distribution
        distp : fixmat
            If p is None, distp is used to compute a FDM which
            is then taken as 1st probability distribution.
        distq : fixmat
            If q is None, distq is used to compute a FDM which is
            then taken as 2nd probability distribution.
        scale_factor : double
            Determines the size of FDM computed from distq or distp.

    """
    assert q is not None or distq is not None, "Either q or distq have to be given"
    assert p is not None or distp is not None, "Either p or distp have to be given"
    try:
        if p is None:
            p = compute_fdm(distp, scale_factor = scale_factor)
        if q is None:
            q = compute_fdm(distq, scale_factor = scale_factor)
    except RuntimeError:
        return np.NaN

    q += np.finfo(q.dtype).eps
    p += np.finfo(p.dtype).eps
    kl = np.sum( p * (np.log2(p / q)))
    return kl
python
def kldiv(p, q, distp = None, distq = None, scale_factor = 1):
    """
    Computes the Kullback-Leibler divergence between two distributions.

    Parameters
        p : Matrix
            The first probability distribution
        q : Matrix
            The second probability distribution
        distp : fixmat
            If p is None, distp is used to compute a FDM which
            is then taken as 1st probability distribution.
        distq : fixmat
            If q is None, distq is used to compute a FDM which is
            then taken as 2nd probability distribution.
        scale_factor : double
            Determines the size of FDM computed from distq or distp.

    """
    assert q is not None or distq is not None, "Either q or distq have to be given"
    assert p is not None or distp is not None, "Either p or distp have to be given"
    try:
        if p is None:
            p = compute_fdm(distp, scale_factor = scale_factor)
        if q is None:
            q = compute_fdm(distq, scale_factor = scale_factor)
    except RuntimeError:
        return np.NaN

    q += np.finfo(q.dtype).eps
    p += np.finfo(p.dtype).eps
    kl = np.sum( p * (np.log2(p / q)))
    return kl
[ "def", "kldiv", "(", "p", ",", "q", ",", "distp", "=", "None", ",", "distq", "=", "None", ",", "scale_factor", "=", "1", ")", ":", "assert", "q", "!=", "None", "or", "distq", "!=", "None", ",", "\"Either q or distq have to be given\"", "assert", "p", "!=", "None", "or", "distp", "!=", "None", ",", "\"Either p or distp have to be given\"", "try", ":", "if", "p", "==", "None", ":", "p", "=", "compute_fdm", "(", "distp", ",", "scale_factor", "=", "scale_factor", ")", "if", "q", "==", "None", ":", "q", "=", "compute_fdm", "(", "distq", ",", "scale_factor", "=", "scale_factor", ")", "except", "RuntimeError", ":", "return", "np", ".", "NaN", "q", "+=", "np", ".", "finfo", "(", "q", ".", "dtype", ")", ".", "eps", "p", "+=", "np", ".", "finfo", "(", "p", ".", "dtype", ")", ".", "eps", "kl", "=", "np", ".", "sum", "(", "p", "*", "(", "np", ".", "log2", "(", "p", "/", "q", ")", ")", ")", "return", "kl" ]
Computes the Kullback-Leibler divergence between two distributions.

Parameters
    p : Matrix
        The first probability distribution
    q : Matrix
        The second probability distribution
    distp : fixmat
        If p is None, distp is used to compute a FDM which
        is then taken as 1st probability distribution.
    distq : fixmat
        If q is None, distq is used to compute a FDM which is
        then taken as 2nd probability distribution.
    scale_factor : double
        Determines the size of FDM computed from distq or distp.
[ "Computes", "the", "Kullback", "-", "Leibler", "divergence", "between", "two", "distributions", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L96-L129
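A worked instance of the formula above on two made-up distributions (base-2 logarithm, so the result is in bits):

import numpy as np

p = np.array([0.5, 0.25, 0.25])
q = np.array([0.25, 0.5, 0.25])
print(np.sum(p * np.log2(p / q)))   # 0.5*1 + 0.25*(-1) + 0.25*0 = 0.25 bits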
nwilming/ocupy
ocupy/measures.py
kldiv_cs_model
def kldiv_cs_model(prediction, fm): """ Computes Chao-Shen corrected KL-divergence between prediction and fdm made from fixations in fm. Parameters : prediction : np.ndarray a fixation density map fm : FixMat object """ # compute histogram of fixations needed for ChaoShen corrected kl-div # image category must exist (>-1) and image_size must be non-empty assert(len(fm.image_size) == 2 and (fm.image_size[0] > 0) and (fm.image_size[1] > 0)) assert(-1 not in fm.category) # check whether fixmat contains fixations if len(fm.x) == 0: return np.NaN (scale_factor, _) = calc_resize_factor(prediction, fm.image_size) # this specifies left edges of the histogram bins, i.e. fixations between # ]0 binedge[0]] are included. --> fixations are ceiled e_y = np.arange(0, np.round(scale_factor*fm.image_size[0]+1)) e_x = np.arange(0, np.round(scale_factor*fm.image_size[1]+1)) samples = np.array(list(zip((scale_factor*fm.y), (scale_factor*fm.x)))) (fdm, _) = np.histogramdd(samples, (e_y, e_x)) # compute ChaoShen corrected kl-div q = np.array(prediction, copy = True) q[q == 0] = np.finfo(q.dtype).eps q /= np.sum(q) (H, pa, la) = chao_shen(fdm) q = q[fdm > 0] cross_entropy = -np.sum((pa * np.log2(q)) / la) return (cross_entropy - H)
python
def kldiv_cs_model(prediction, fm): """ Computes Chao-Shen corrected KL-divergence between prediction and fdm made from fixations in fm. Parameters : prediction : np.ndarray a fixation density map fm : FixMat object """ # compute histogram of fixations needed for ChaoShen corrected kl-div # image category must exist (>-1) and image_size must be non-empty assert(len(fm.image_size) == 2 and (fm.image_size[0] > 0) and (fm.image_size[1] > 0)) assert(-1 not in fm.category) # check whether fixmat contains fixations if len(fm.x) == 0: return np.NaN (scale_factor, _) = calc_resize_factor(prediction, fm.image_size) # this specifies left edges of the histogram bins, i.e. fixations between # ]0 binedge[0]] are included. --> fixations are ceiled e_y = np.arange(0, np.round(scale_factor*fm.image_size[0]+1)) e_x = np.arange(0, np.round(scale_factor*fm.image_size[1]+1)) samples = np.array(list(zip((scale_factor*fm.y), (scale_factor*fm.x)))) (fdm, _) = np.histogramdd(samples, (e_y, e_x)) # compute ChaoShen corrected kl-div q = np.array(prediction, copy = True) q[q == 0] = np.finfo(q.dtype).eps q /= np.sum(q) (H, pa, la) = chao_shen(fdm) q = q[fdm > 0] cross_entropy = -np.sum((pa * np.log2(q)) / la) return (cross_entropy - H)
[ "def", "kldiv_cs_model", "(", "prediction", ",", "fm", ")", ":", "# compute histogram of fixations needed for ChaoShen corrected kl-div", "# image category must exist (>-1) and image_size must be non-empty", "assert", "(", "len", "(", "fm", ".", "image_size", ")", "==", "2", "and", "(", "fm", ".", "image_size", "[", "0", "]", ">", "0", ")", "and", "(", "fm", ".", "image_size", "[", "1", "]", ">", "0", ")", ")", "assert", "(", "-", "1", "not", "in", "fm", ".", "category", ")", "# check whether fixmat contains fixations", "if", "len", "(", "fm", ".", "x", ")", "==", "0", ":", "return", "np", ".", "NaN", "(", "scale_factor", ",", "_", ")", "=", "calc_resize_factor", "(", "prediction", ",", "fm", ".", "image_size", ")", "# this specifies left edges of the histogram bins, i.e. fixations between", "# ]0 binedge[0]] are included. --> fixations are ceiled", "e_y", "=", "np", ".", "arange", "(", "0", ",", "np", ".", "round", "(", "scale_factor", "*", "fm", ".", "image_size", "[", "0", "]", "+", "1", ")", ")", "e_x", "=", "np", ".", "arange", "(", "0", ",", "np", ".", "round", "(", "scale_factor", "*", "fm", ".", "image_size", "[", "1", "]", "+", "1", ")", ")", "samples", "=", "np", ".", "array", "(", "list", "(", "zip", "(", "(", "scale_factor", "*", "fm", ".", "y", ")", ",", "(", "scale_factor", "*", "fm", ".", "x", ")", ")", ")", ")", "(", "fdm", ",", "_", ")", "=", "np", ".", "histogramdd", "(", "samples", ",", "(", "e_y", ",", "e_x", ")", ")", "# compute ChaoShen corrected kl-div", "q", "=", "np", ".", "array", "(", "prediction", ",", "copy", "=", "True", ")", "q", "[", "q", "==", "0", "]", "=", "np", ".", "finfo", "(", "q", ".", "dtype", ")", ".", "eps", "q", "/=", "np", ".", "sum", "(", "q", ")", "(", "H", ",", "pa", ",", "la", ")", "=", "chao_shen", "(", "fdm", ")", "q", "=", "q", "[", "fdm", ">", "0", "]", "cross_entropy", "=", "-", "np", ".", "sum", "(", "(", "pa", "*", "np", ".", "log2", "(", "q", ")", ")", "/", "la", ")", "return", "(", "cross_entropy", "-", "H", ")" ]
Computes Chao-Shen corrected KL-divergence between prediction and fdm made from fixations in fm. Parameters : prediction : np.ndarray a fixation density map fm : FixMat object
[ "Computes", "Chao", "-", "Shen", "corrected", "KL", "-", "divergence", "between", "prediction", "and", "fdm", "made", "from", "fixations", "in", "fm", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L131-L164
nwilming/ocupy
ocupy/measures.py
chao_shen
def chao_shen(q): """ Computes some terms needed for the Chao-Shen KL correction. """ yx = q[q > 0] # remove bins with zero counts n = np.sum(yx) p = yx.astype(float)/n f1 = np.sum(yx == 1) # number of singletons in the sample if f1 == n: # avoid C == 0 f1 -= 1 C = 1 - (f1/n) # estimated coverage of the sample pa = C * p # coverage adjusted empirical frequencies la = (1 - (1 - pa) ** n) # probability to see a bin (species) in the sample H = -np.sum((pa * np.log2(pa)) / la) return (H, pa, la)
python
def chao_shen(q): """ Computes some terms needed for the Chao-Shen KL correction. """ yx = q[q > 0] # remove bins with zero counts n = np.sum(yx) p = yx.astype(float)/n f1 = np.sum(yx == 1) # number of singletons in the sample if f1 == n: # avoid C == 0 f1 -= 1 C = 1 - (f1/n) # estimated coverage of the sample pa = C * p # coverage adjusted empirical frequencies la = (1 - (1 - pa) ** n) # probability to see a bin (species) in the sample H = -np.sum((pa * np.log2(pa)) / la) return (H, pa, la)
[ "def", "chao_shen", "(", "q", ")", ":", "yx", "=", "q", "[", "q", ">", "0", "]", "# remove bins with zero counts", "n", "=", "np", ".", "sum", "(", "yx", ")", "p", "=", "yx", ".", "astype", "(", "float", ")", "/", "n", "f1", "=", "np", ".", "sum", "(", "yx", "==", "1", ")", "# number of singletons in the sample", "if", "f1", "==", "n", ":", "# avoid C == 0", "f1", "-=", "1", "C", "=", "1", "-", "(", "f1", "/", "n", ")", "# estimated coverage of the sample", "pa", "=", "C", "*", "p", "# coverage adjusted empirical frequencies", "la", "=", "(", "1", "-", "(", "1", "-", "pa", ")", "**", "n", ")", "# probability to see a bin (species) in the sample", "H", "=", "-", "np", ".", "sum", "(", "(", "pa", "*", "np", ".", "log2", "(", "pa", ")", ")", "/", "la", ")", "return", "(", "H", ",", "pa", ",", "la", ")" ]
Computes some terms needed for the Chao-Shen KL correction.
[ "Computes", "some", "terms", "needed", "for", "the", "Chao", "-", "Shen", "KL", "correction", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L167-L181
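A worked example of the coverage correction above on a made-up count vector:

import numpy as np

yx = np.array([5., 3., 1., 1.])   # two singletons among n = 10 observations
n, f1 = yx.sum(), (yx == 1).sum()
C = 1 - f1 / n                    # estimated coverage: 1 - 2/10 = 0.8
pa = C * (yx / n)                 # adjusted frequencies: [0.4, 0.24, 0.08, 0.08]
la = 1 - (1 - pa) ** n            # probability of observing each bin at all
print(-np.sum(pa * np.log2(pa) / la))   # coverage-corrected entropy estimate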
nwilming/ocupy
ocupy/measures.py
correlation_model
def correlation_model(prediction, fm): """ wraps numpy.corrcoef functionality for model evaluation input: prediction: 2D Matrix the model salience map fm: fixmat Used to compute a FDM to which the prediction is compared. """ (_, r_x) = calc_resize_factor(prediction, fm.image_size) fdm = compute_fdm(fm, scale_factor = r_x) return np.corrcoef(fdm.flatten(), prediction.flatten())[0,1]
python
def correlation_model(prediction, fm): """ wraps numpy.corrcoef functionality for model evaluation input: prediction: 2D Matrix the model salience map fm: fixmat Used to compute a FDM to which the prediction is compared. """ (_, r_x) = calc_resize_factor(prediction, fm.image_size) fdm = compute_fdm(fm, scale_factor = r_x) return np.corrcoef(fdm.flatten(), prediction.flatten())[0,1]
[ "def", "correlation_model", "(", "prediction", ",", "fm", ")", ":", "(", "_", ",", "r_x", ")", "=", "calc_resize_factor", "(", "prediction", ",", "fm", ".", "image_size", ")", "fdm", "=", "compute_fdm", "(", "fm", ",", "scale_factor", "=", "r_x", ")", "return", "np", ".", "corrcoef", "(", "fdm", ".", "flatten", "(", ")", ",", "prediction", ".", "flatten", "(", ")", ")", "[", "0", ",", "1", "]" ]
wraps numpy.corrcoef functionality for model evaluation input: prediction: 2D Matrix the model salience map fm: fixmat Used to compute a FDM to which the prediction is compared.
[ "wraps", "numpy", ".", "corrcoef", "functionality", "for", "model", "evaluation" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L184-L196
nwilming/ocupy
ocupy/measures.py
nss_model
def nss_model(prediction, fm): """ wraps nss functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Fixations that define the actuals """ (r_y, r_x) = calc_resize_factor(prediction, fm.image_size) fix = ((np.array(fm.y-1)*r_y).astype(int), (np.array(fm.x-1)*r_x).astype(int)) return nss(prediction, fix)
python
def nss_model(prediction, fm): """ wraps nss functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Fixations that define the actuals """ (r_y, r_x) = calc_resize_factor(prediction, fm.image_size) fix = ((np.array(fm.y-1)*r_y).astype(int), (np.array(fm.x-1)*r_x).astype(int)) return nss(prediction, fix)
[ "def", "nss_model", "(", "prediction", ",", "fm", ")", ":", "(", "r_y", ",", "r_x", ")", "=", "calc_resize_factor", "(", "prediction", ",", "fm", ".", "image_size", ")", "fix", "=", "(", "(", "np", ".", "array", "(", "fm", ".", "y", "-", "1", ")", "*", "r_y", ")", ".", "astype", "(", "int", ")", ",", "(", "np", ".", "array", "(", "fm", ".", "x", "-", "1", ")", "*", "r_x", ")", ".", "astype", "(", "int", ")", ")", "return", "nss", "(", "prediction", ",", "fix", ")" ]
wraps nss functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Fixations that define the actuals
[ "wraps", "nss", "functionality", "for", "model", "evaluation" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L199-L212
nwilming/ocupy
ocupy/measures.py
nss
def nss(prediction, fix): """ Compute the normalized scanpath salience input: fix : list, l[0] contains y, l[1] contains x """ prediction = prediction - np.mean(prediction) prediction = prediction / np.std(prediction) return np.mean(prediction[fix[0], fix[1]])
python
def nss(prediction, fix): """ Compute the normalized scanpath salience input: fix : list, l[0] contains y, l[1] contains x """ prediction = prediction - np.mean(prediction) prediction = prediction / np.std(prediction) return np.mean(prediction[fix[0], fix[1]])
[ "def", "nss", "(", "prediction", ",", "fix", ")", ":", "prediction", "=", "prediction", "-", "np", ".", "mean", "(", "prediction", ")", "prediction", "=", "prediction", "/", "np", ".", "std", "(", "prediction", ")", "return", "np", ".", "mean", "(", "prediction", "[", "fix", "[", "0", "]", ",", "fix", "[", "1", "]", "]", ")" ]
Compute the normalized scanpath salience input: fix : list, l[0] contains y, l[1] contains x
[ "Compute", "the", "normalized", "scanpath", "salience" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L215-L225
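A worked NSS example with made-up values: z-score a tiny map and average it at the fixated pixels.

import numpy as np

pred = np.array([[0., 1.], [2., 3.]])
z = (pred - pred.mean()) / pred.std()
fix = ([1, 1], [0, 1])              # fixations at (y=1,x=0) and (y=1,x=1)
print(np.mean(z[fix[0], fix[1]]))   # ~0.894: fixations land on above-average salience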
nwilming/ocupy
ocupy/measures.py
roc_model
def roc_model(prediction, fm, ctr_loc = None, ctr_size = None):
    """
    wraps roc functionality for model evaluation

    Parameters:
        prediction: 2D array
            the model salience map
        fm : fixmat
            Fixations that define locations of the actuals
        ctr_loc : tuple of (y,x) coordinates, optional
            Allows to specify control points for spatial
            bias correction
        ctr_size : two element tuple, optional
            Specifies the assumed image size of the control locations,
            defaults to fm.image_size
    """
    # check if prediction is a valid numpy array
    assert type(prediction) == np.ndarray
    # check whether scaling preserved aspect ratio
    (r_y, r_x) = calc_resize_factor(prediction, fm.image_size)
    # read out values in the fdm at actual fixation locations
    # .astype(int) floors numbers in np.array
    y_index = (r_y * np.array(fm.y-1)).astype(int)
    x_index = (r_x * np.array(fm.x-1)).astype(int)
    actuals = prediction[y_index, x_index]
    if not ctr_loc:
        xc = np.random.randint(0, prediction.shape[1], 1000)
        yc = np.random.randint(0, prediction.shape[0], 1000)
        ctr_loc = (yc.astype(int), xc.astype(int))
    else:
        if not ctr_size:
            ctr_size = fm.image_size
        else:
            (r_y, r_x) = calc_resize_factor(prediction, ctr_size)
            ctr_loc = ((r_y * np.array(ctr_loc[0])).astype(int),
                       (r_x * np.array(ctr_loc[1])).astype(int))
    controls = prediction[ctr_loc[0], ctr_loc[1]]
    return fast_roc(actuals, controls)[0]
python
def roc_model(prediction, fm, ctr_loc = None, ctr_size = None):
    """
    wraps roc functionality for model evaluation

    Parameters:
        prediction: 2D array
            the model salience map
        fm : fixmat
            Fixations that define locations of the actuals
        ctr_loc : tuple of (y,x) coordinates, optional
            Allows to specify control points for spatial
            bias correction
        ctr_size : two element tuple, optional
            Specifies the assumed image size of the control locations,
            defaults to fm.image_size
    """
    # check if prediction is a valid numpy array
    assert type(prediction) == np.ndarray
    # check whether scaling preserved aspect ratio
    (r_y, r_x) = calc_resize_factor(prediction, fm.image_size)
    # read out values in the fdm at actual fixation locations
    # .astype(int) floors numbers in np.array
    y_index = (r_y * np.array(fm.y-1)).astype(int)
    x_index = (r_x * np.array(fm.x-1)).astype(int)
    actuals = prediction[y_index, x_index]
    if not ctr_loc:
        xc = np.random.randint(0, prediction.shape[1], 1000)
        yc = np.random.randint(0, prediction.shape[0], 1000)
        ctr_loc = (yc.astype(int), xc.astype(int))
    else:
        if not ctr_size:
            ctr_size = fm.image_size
        else:
            (r_y, r_x) = calc_resize_factor(prediction, ctr_size)
            ctr_loc = ((r_y * np.array(ctr_loc[0])).astype(int),
                       (r_x * np.array(ctr_loc[1])).astype(int))
    controls = prediction[ctr_loc[0], ctr_loc[1]]
    return fast_roc(actuals, controls)[0]
[ "def", "roc_model", "(", "prediction", ",", "fm", ",", "ctr_loc", "=", "None", ",", "ctr_size", "=", "None", ")", ":", "# check if prediction is a valid numpy array", "assert", "type", "(", "prediction", ")", "==", "np", ".", "ndarray", "# check whether scaling preserved aspect ratio", "(", "r_y", ",", "r_x", ")", "=", "calc_resize_factor", "(", "prediction", ",", "fm", ".", "image_size", ")", "# read out values in the fdm at actual fixation locations", "# .astype(int) floors numbers in np.array", "y_index", "=", "(", "r_y", "*", "np", ".", "array", "(", "fm", ".", "y", "-", "1", ")", ")", ".", "astype", "(", "int", ")", "x_index", "=", "(", "r_x", "*", "np", ".", "array", "(", "fm", ".", "x", "-", "1", ")", ")", ".", "astype", "(", "int", ")", "actuals", "=", "prediction", "[", "y_index", ",", "x_index", "]", "if", "not", "ctr_loc", ":", "xc", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "prediction", ".", "shape", "[", "1", "]", ",", "1000", ")", "yc", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "prediction", ".", "shape", "[", "0", "]", ",", "1000", ")", "ctr_loc", "=", "(", "yc", ".", "astype", "(", "int", ")", ",", "xc", ".", "astype", "(", "int", ")", ")", "else", ":", "if", "not", "ctr_size", ":", "ctr_size", "=", "fm", ".", "image_size", "else", ":", "(", "r_y", ",", "r_x", ")", "=", "calc_resize_factor", "(", "prediction", ",", "ctr_size", ")", "ctr_loc", "=", "(", "(", "r_y", "*", "np", ".", "array", "(", "ctr_loc", "[", "0", "]", ")", ")", ".", "astype", "(", "int", ")", ",", "(", "r_x", "*", "np", ".", "array", "(", "ctr_loc", "[", "1", "]", ")", ")", ".", "astype", "(", "int", ")", ")", "controls", "=", "prediction", "[", "ctr_loc", "[", "0", "]", ",", "ctr_loc", "[", "1", "]", "]", "return", "fast_roc", "(", "actuals", ",", "controls", ")", "[", "0", "]" ]
wraps roc functionality for model evaluation

Parameters:
    prediction: 2D array
        the model salience map
    fm : fixmat
        Fixations that define locations of the actuals
    ctr_loc : tuple of (y,x) coordinates, optional
        Allows to specify control points for spatial
        bias correction
    ctr_size : two element tuple, optional
        Specifies the assumed image size of the control locations,
        defaults to fm.image_size
[ "wraps", "roc", "functionality", "for", "model", "evaluation" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L228-L266
nwilming/ocupy
ocupy/measures.py
fast_roc
def fast_roc(actuals, controls): """ approximates the area under the roc curve for sets of actuals and controls. Uses all values appearing in actuals as thresholds and lower sum interpolation. Also returns arrays of the true positive rate and the false positive rate that can be used for plotting the roc curve. Parameters: actuals : list A list of numeric values for positive observations. controls : list A list of numeric values for negative observations. """ assert(type(actuals) is np.ndarray) assert(type(controls) is np.ndarray) actuals = np.ravel(actuals) controls = np.ravel(controls) if np.isnan(actuals).any(): raise RuntimeError('NaN found in actuals') if np.isnan(controls).any(): raise RuntimeError('NaN found in controls') thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf])[::-1] true_pos_rate = np.empty(thresholds.size) false_pos_rate = np.empty(thresholds.size) num_act = float(len(actuals)) num_ctr = float(len(controls)) for i, value in enumerate(thresholds): true_pos_rate[i] = (actuals >= value).sum() / num_act false_pos_rate[i] = (controls >= value).sum() / num_ctr auc = np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1]) # treat cases where TPR of one is not reached before FPR of one # by using trapezoidal integration for the last segment # (add the missing triangle) if false_pos_rate[-2] == 1: auc += ((1-true_pos_rate[-3])*.5*(1-false_pos_rate[-3])) return (auc, true_pos_rate, false_pos_rate)
python
def fast_roc(actuals, controls): """ approximates the area under the roc curve for sets of actuals and controls. Uses all values appearing in actuals as thresholds and lower sum interpolation. Also returns arrays of the true positive rate and the false positive rate that can be used for plotting the roc curve. Parameters: actuals : list A list of numeric values for positive observations. controls : list A list of numeric values for negative observations. """ assert(type(actuals) is np.ndarray) assert(type(controls) is np.ndarray) actuals = np.ravel(actuals) controls = np.ravel(controls) if np.isnan(actuals).any(): raise RuntimeError('NaN found in actuals') if np.isnan(controls).any(): raise RuntimeError('NaN found in controls') thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf])[::-1] true_pos_rate = np.empty(thresholds.size) false_pos_rate = np.empty(thresholds.size) num_act = float(len(actuals)) num_ctr = float(len(controls)) for i, value in enumerate(thresholds): true_pos_rate[i] = (actuals >= value).sum() / num_act false_pos_rate[i] = (controls >= value).sum() / num_ctr auc = np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1]) # treat cases where TPR of one is not reached before FPR of one # by using trapezoidal integration for the last segment # (add the missing triangle) if false_pos_rate[-2] == 1: auc += ((1-true_pos_rate[-3])*.5*(1-false_pos_rate[-3])) return (auc, true_pos_rate, false_pos_rate)
[ "def", "fast_roc", "(", "actuals", ",", "controls", ")", ":", "assert", "(", "type", "(", "actuals", ")", "is", "np", ".", "ndarray", ")", "assert", "(", "type", "(", "controls", ")", "is", "np", ".", "ndarray", ")", "actuals", "=", "np", ".", "ravel", "(", "actuals", ")", "controls", "=", "np", ".", "ravel", "(", "controls", ")", "if", "np", ".", "isnan", "(", "actuals", ")", ".", "any", "(", ")", ":", "raise", "RuntimeError", "(", "'NaN found in actuals'", ")", "if", "np", ".", "isnan", "(", "controls", ")", ".", "any", "(", ")", ":", "raise", "RuntimeError", "(", "'NaN found in controls'", ")", "thresholds", "=", "np", ".", "hstack", "(", "[", "-", "np", ".", "inf", ",", "np", ".", "unique", "(", "actuals", ")", ",", "np", ".", "inf", "]", ")", "[", ":", ":", "-", "1", "]", "true_pos_rate", "=", "np", ".", "empty", "(", "thresholds", ".", "size", ")", "false_pos_rate", "=", "np", ".", "empty", "(", "thresholds", ".", "size", ")", "num_act", "=", "float", "(", "len", "(", "actuals", ")", ")", "num_ctr", "=", "float", "(", "len", "(", "controls", ")", ")", "for", "i", ",", "value", "in", "enumerate", "(", "thresholds", ")", ":", "true_pos_rate", "[", "i", "]", "=", "(", "actuals", ">=", "value", ")", ".", "sum", "(", ")", "/", "num_act", "false_pos_rate", "[", "i", "]", "=", "(", "controls", ">=", "value", ")", ".", "sum", "(", ")", "/", "num_ctr", "auc", "=", "np", ".", "dot", "(", "np", ".", "diff", "(", "false_pos_rate", ")", ",", "true_pos_rate", "[", "0", ":", "-", "1", "]", ")", "# treat cases where TPR of one is not reached before FPR of one", "# by using trapezoidal integration for the last segment", "# (add the missing triangle)", "if", "false_pos_rate", "[", "-", "2", "]", "==", "1", ":", "auc", "+=", "(", "(", "1", "-", "true_pos_rate", "[", "-", "3", "]", ")", "*", ".5", "*", "(", "1", "-", "false_pos_rate", "[", "-", "3", "]", ")", ")", "return", "(", "auc", ",", "true_pos_rate", ",", "false_pos_rate", ")" ]
approximates the area under the roc curve for sets of actuals and controls. Uses all values appearing in actuals as thresholds and lower sum interpolation. Also returns arrays of the true positive rate and the false positive rate that can be used for plotting the roc curve. Parameters: actuals : list A list of numeric values for positive observations. controls : list A list of numeric values for negative observations.
[ "approximates", "the", "area", "under", "the", "roc", "curve", "for", "sets", "of", "actuals", "and", "controls", ".", "Uses", "all", "values", "appearing", "in", "actuals", "as", "thresholds", "and", "lower", "sum", "interpolation", ".", "Also", "returns", "arrays", "of", "the", "true", "positive", "rate", "and", "the", "false", "positive", "rate", "that", "can", "be", "used", "for", "plotting", "the", "roc", "curve", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L269-L307
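On a small, tie-free example the lower-sum AUC above equals the fraction of correctly ordered (actual, control) pairs; the import path is assumed.

import numpy as np
from ocupy.measures import fast_roc

actuals = np.array([0.9, 0.8, 0.4])
controls = np.array([0.7, 0.3, 0.2])
auc, tpr, fpr = fast_roc(actuals, controls)
print(auc)   # 8 of the 9 pairs are ordered correctly -> 8/9 ~ 0.889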
nwilming/ocupy
ocupy/measures.py
faster_roc
def faster_roc(actuals, controls):
    """
    Histogram based implementation of AUC under the ROC curve.

    Parameters:
        actuals : list
            A list of numeric values for positive observations.
        controls : list
            A list of numeric values for negative observations.
    """
    assert(type(actuals) is np.ndarray)
    assert(type(controls) is np.ndarray)
    if len(actuals)<500:
        raise RuntimeError('This method might be incorrect when '+
            'not enough actuals are present. Needs to be checked before '+
            'proceeding. Stopping here for you to do so.')
    actuals = np.ravel(actuals)
    controls = np.ravel(controls)
    if np.isnan(actuals).any():
        raise RuntimeError('NaN found in actuals')
    if np.isnan(controls).any():
        raise RuntimeError('NaN found in controls')
    thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf])+np.finfo(float).eps
    true_pos_rate = np.nan*np.empty(thresholds.size-1)
    false_pos_rate = np.nan*np.empty(thresholds.size-1)
    num_act = float(len(actuals))
    num_ctr = float(len(controls))
    actuals = 1-(np.cumsum(np.histogram(actuals, thresholds)[0])/num_act)
    controls = 1-(np.cumsum(np.histogram(controls, thresholds)[0])/num_ctr)
    true_pos_rate = actuals
    false_pos_rate = controls
    #true_pos_rate = np.concatenate(([0], true_pos_rate, [1]))
    false_pos_rate = false_pos_rate
    auc = -1*np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1])
    # treat cases where TPR of one is not reached before FPR of one
    # by using trapezoidal integration for the last segment
    # (add the missing triangle)
    if false_pos_rate[-2] == 1:
        auc += ((1-true_pos_rate[-3])*.5*(1-false_pos_rate[-3]))
    return (auc, true_pos_rate, false_pos_rate)
python
def faster_roc(actuals, controls):
    """
    Histogram based implementation of AUC under the ROC curve.

    Parameters:
        actuals : list
            A list of numeric values for positive observations.
        controls : list
            A list of numeric values for negative observations.
    """
    assert(type(actuals) is np.ndarray)
    assert(type(controls) is np.ndarray)
    if len(actuals)<500:
        raise RuntimeError('This method might be incorrect when '+
            'not enough actuals are present. Needs to be checked before '+
            'proceeding. Stopping here for you to do so.')
    actuals = np.ravel(actuals)
    controls = np.ravel(controls)
    if np.isnan(actuals).any():
        raise RuntimeError('NaN found in actuals')
    if np.isnan(controls).any():
        raise RuntimeError('NaN found in controls')
    thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf])+np.finfo(float).eps
    true_pos_rate = np.nan*np.empty(thresholds.size-1)
    false_pos_rate = np.nan*np.empty(thresholds.size-1)
    num_act = float(len(actuals))
    num_ctr = float(len(controls))
    actuals = 1-(np.cumsum(np.histogram(actuals, thresholds)[0])/num_act)
    controls = 1-(np.cumsum(np.histogram(controls, thresholds)[0])/num_ctr)
    true_pos_rate = actuals
    false_pos_rate = controls
    #true_pos_rate = np.concatenate(([0], true_pos_rate, [1]))
    false_pos_rate = false_pos_rate
    auc = -1*np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1])
    # treat cases where TPR of one is not reached before FPR of one
    # by using trapezoidal integration for the last segment
    # (add the missing triangle)
    if false_pos_rate[-2] == 1:
        auc += ((1-true_pos_rate[-3])*.5*(1-false_pos_rate[-3]))
    return (auc, true_pos_rate, false_pos_rate)
[ "def", "faster_roc", "(", "actuals", ",", "controls", ")", ":", "assert", "(", "type", "(", "actuals", ")", "is", "np", ".", "ndarray", ")", "assert", "(", "type", "(", "controls", ")", "is", "np", ".", "ndarray", ")", "if", "len", "(", "actuals", ")", "<", "500", ":", "raise", "RuntimeError", "(", "'This method might be incorrect when '", "+", "'not enough actuals are present. Needs to be checked before '", "+", "'proceeding. Stopping here for you to do so.'", ")", "actuals", "=", "np", ".", "ravel", "(", "actuals", ")", "controls", "=", "np", ".", "ravel", "(", "controls", ")", "if", "np", ".", "isnan", "(", "actuals", ")", ".", "any", "(", ")", ":", "raise", "RuntimeError", "(", "'NaN found in actuals'", ")", "if", "np", ".", "isnan", "(", "controls", ")", ".", "any", "(", ")", ":", "raise", "RuntimeError", "(", "'NaN found in controls'", ")", "thresholds", "=", "np", ".", "hstack", "(", "[", "-", "np", ".", "inf", ",", "np", ".", "unique", "(", "actuals", ")", ",", "np", ".", "inf", "]", ")", "+", "np", ".", "finfo", "(", "float", ")", ".", "eps", "true_pos_rate", "=", "np", ".", "nan", "*", "np", ".", "empty", "(", "thresholds", ".", "size", "-", "1", ")", "false_pos_rate", "=", "np", ".", "nan", "*", "np", ".", "empty", "(", "thresholds", ".", "size", "-", "1", ")", "num_act", "=", "float", "(", "len", "(", "actuals", ")", ")", "num_ctr", "=", "float", "(", "len", "(", "controls", ")", ")", "actuals", "=", "1", "-", "(", "np", ".", "cumsum", "(", "np", ".", "histogram", "(", "actuals", ",", "thresholds", ")", "[", "0", "]", ")", "/", "num_act", ")", "controls", "=", "1", "-", "(", "np", ".", "cumsum", "(", "np", ".", "histogram", "(", "controls", ",", "thresholds", ")", "[", "0", "]", ")", "/", "num_ctr", ")", "true_pos_rate", "=", "actuals", "false_pos_rate", "=", "controls", "#true_pos_rate = np.concatenate(([0], true_pos_rate, [1]))", "false_pos_rate", "=", "false_pos_rate", "auc", "=", "-", "1", "*", "np", ".", "dot", "(", "np", ".", "diff", "(", "false_pos_rate", ")", ",", "true_pos_rate", "[", "0", ":", "-", "1", "]", ")", "# treat cases where TPR of one is not reached before FPR of one", "# by using trapezoidal integration for the last segment", "# (add the missing triangle)", "if", "false_pos_rate", "[", "-", "2", "]", "==", "1", ":", "auc", "+=", "(", "(", "1", "-", "true_pos_rate", "[", "-", "3", "]", ")", "*", ".5", "*", "(", "1", "-", "false_pos_rate", "[", "-", "3", "]", ")", ")", "return", "(", "auc", ",", "true_pos_rate", ",", "false_pos_rate", ")" ]
Histogram based implementation of AUC under the ROC curve.

Parameters:
    actuals : list
        A list of numeric values for positive observations.
    controls : list
        A list of numeric values for negative observations.
[ "Histogram", "based", "implementation", "of", "AUC", "unde", "ROC", "curve", ".", "Parameters", ":", "actuals", ":", "list", "A", "list", "of", "numeric", "values", "for", "positive", "observations", ".", "controls", ":", "list", "A", "list", "of", "numeric", "values", "for", "negative", "observations", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L309-L351
nwilming/ocupy
ocupy/measures.py
emd_model
def emd_model(prediction, fm): """ wraps emd functionality for model evaluation requires: OpenCV python bindings input: prediction: the model salience map fm : fixmat filtered for the image corresponding to the prediction """ (_, r_x) = calc_resize_factor(prediction, fm.image_size) gt = fixmat.compute_fdm(fm, scale_factor = r_x) return emd(prediction, gt)
python
def emd_model(prediction, fm): """ wraps emd functionality for model evaluation requires: OpenCV python bindings input: prediction: the model salience map fm : fixmat filtered for the image corresponding to the prediction """ (_, r_x) = calc_resize_factor(prediction, fm.image_size) gt = fixmat.compute_fdm(fm, scale_factor = r_x) return emd(prediction, gt)
[ "def", "emd_model", "(", "prediction", ",", "fm", ")", ":", "(", "_", ",", "r_x", ")", "=", "calc_resize_factor", "(", "prediction", ",", "fm", ".", "image_size", ")", "gt", "=", "fixmat", ".", "compute_fdm", "(", "fm", ",", "scale_factor", "=", "r_x", ")", "return", "emd", "(", "prediction", ",", "gt", ")" ]
wraps emd functionality for model evaluation requires: OpenCV python bindings input: prediction: the model salience map fm : fixmat filtered for the image corresponding to the prediction
[ "wraps", "emd", "functionality", "for", "model", "evaluation" ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L382-L395
nwilming/ocupy
ocupy/measures.py
emd
def emd(prediction, ground_truth):
    """
    Compute the Earth Movers Distance between prediction and model.

    This implementation uses opencv for doing the actual work.
    Unfortunately, at the time of implementation only the SWIG
    bindings were available and the numpy arrays have to be converted
    by hand. This changes with opencv 2.1.
    """
    import opencv
    if not (prediction.shape == ground_truth.shape):
        raise RuntimeError('Shapes of prediction and ground truth have' +
            ' to be equal. They are: %s, %s'
            %(str(prediction.shape), str(ground_truth.shape)))
    (x, y) = np.meshgrid(list(range(0, prediction.shape[1])),
                        list(range(0, prediction.shape[0])))
    s1 = np.array([x.flatten(), y.flatten(), prediction.flatten()]).T
    s2 = np.array([x.flatten(), y.flatten(), ground_truth.flatten()]).T
    s1m = opencv.cvCreateMat(s1.shape[0], s2.shape[1], opencv.CV_32FC1)
    s2m = opencv.cvCreateMat(s1.shape[0], s2.shape[1], opencv.CV_32FC1)
    for r in range(0, s1.shape[0]):
        for c in range(0, s1.shape[1]):
            s1m[r, c] = float(s1[r, c])
            s2m[r, c] = float(s2[r, c])
    d = opencv.cvCalcEMD2(s1m, s2m, opencv.CV_DIST_L2)
    return d
python
def emd(prediction, ground_truth):
    """
    Compute the Earth Movers Distance between prediction and model.

    This implementation uses opencv for doing the actual work.
    Unfortunately, at the time of implementation only the SWIG
    bindings were available and the numpy arrays have to be converted
    by hand. This changes with opencv 2.1.
    """
    import opencv
    if not (prediction.shape == ground_truth.shape):
        raise RuntimeError('Shapes of prediction and ground truth have' +
            ' to be equal. They are: %s, %s'
            %(str(prediction.shape), str(ground_truth.shape)))
    (x, y) = np.meshgrid(list(range(0, prediction.shape[1])),
                        list(range(0, prediction.shape[0])))
    s1 = np.array([x.flatten(), y.flatten(), prediction.flatten()]).T
    s2 = np.array([x.flatten(), y.flatten(), ground_truth.flatten()]).T
    s1m = opencv.cvCreateMat(s1.shape[0], s2.shape[1], opencv.CV_32FC1)
    s2m = opencv.cvCreateMat(s1.shape[0], s2.shape[1], opencv.CV_32FC1)
    for r in range(0, s1.shape[0]):
        for c in range(0, s1.shape[1]):
            s1m[r, c] = float(s1[r, c])
            s2m[r, c] = float(s2[r, c])
    d = opencv.cvCalcEMD2(s1m, s2m, opencv.CV_DIST_L2)
    return d
[ "def", "emd", "(", "prediction", ",", "ground_truth", ")", ":", "import", "opencv", "if", "not", "(", "prediction", ".", "shape", "==", "ground_truth", ".", "shape", ")", ":", "raise", "RuntimeError", "(", "'Shapes of prediction and ground truth have'", "+", "' to be equal. They are: %s, %s'", "%", "(", "str", "(", "prediction", ".", "shape", ")", ",", "str", "(", "ground_truth", ".", "shape", ")", ")", ")", "(", "x", ",", "y", ")", "=", "np", ".", "meshgrid", "(", "list", "(", "range", "(", "0", ",", "prediction", ".", "shape", "[", "1", "]", ")", ")", ",", "list", "(", "range", "(", "0", ",", "prediction", ".", "shape", "[", "0", "]", ")", ")", ")", "s1", "=", "np", ".", "array", "(", "[", "x", ".", "flatten", "(", ")", ",", "y", ".", "flatten", "(", ")", ",", "prediction", ".", "flatten", "(", ")", "]", ")", ".", "T", "s2", "=", "np", ".", "array", "(", "[", "x", ".", "flatten", "(", ")", ",", "y", ".", "flatten", "(", ")", ",", "ground_truth", ".", "flatten", "(", ")", "]", ")", ".", "T", "s1m", "=", "opencv", ".", "cvCreateMat", "(", "s1", ".", "shape", "[", "0", "]", ",", "s2", ".", "shape", "[", "1", "]", ",", "opencv", ".", "CV_32FC1", ")", "s2m", "=", "opencv", ".", "cvCreateMat", "(", "s1", ".", "shape", "[", "0", "]", ",", "s2", ".", "shape", "[", "1", "]", ",", "opencv", ".", "CV_32FC1", ")", "for", "r", "in", "range", "(", "0", ",", "s1", ".", "shape", "[", "0", "]", ")", ":", "for", "c", "in", "range", "(", "0", ",", "s1", ".", "shape", "[", "1", "]", ")", ":", "s1m", "[", "r", ",", "c", "]", "=", "float", "(", "s1", "[", "r", ",", "c", "]", ")", "s2m", "[", "r", ",", "c", "]", "=", "float", "(", "s2", "[", "r", ",", "c", "]", ")", "d", "=", "opencv", ".", "cvCalcEMD2", "(", "s1m", ",", "s2m", ",", "opencv", ".", "CV_DIST_L2", ")", "return", "d" ]
Compute the Earth Movers Distance between prediction and ground truth. This implementation uses opencv for doing the actual work. Unfortunately, at the time of implementation only the SWIG bindings were available and the numpy arrays have to be converted by hand. This changes with opencv 2.1.
[ "Compute", "the", "Eart", "Movers", "Distance", "between", "prediction", "and", "model", "." ]
train
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/measures.py#L398-L423
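For context, here is a minimal sketch of the same computation against the modern cv2 module (OpenCV >= 2.4), which replaced the SWIG bindings the docstring mentions. Note the assumed signature layout: cv2.EMD expects float32 signatures whose first column is the weight, followed by the point coordinates.

import numpy as np
import cv2

def emd_cv2(prediction, ground_truth):
    # Build (weight, x, y) signatures for each histogram; the weight comes
    # first in the cv2 API, unlike the SWIG-era layout used above.
    y, x = np.mgrid[0:prediction.shape[0], 0:prediction.shape[1]]
    s1 = np.column_stack([prediction.ravel(), x.ravel(), y.ravel()]).astype(np.float32)
    s2 = np.column_stack([ground_truth.ravel(), x.ravel(), y.ravel()]).astype(np.float32)
    # cv2.EMD returns (distance, lower_bound, flow); only the distance is needed here.
    distance, _, _ = cv2.EMD(s1, s2, cv2.DIST_L2)
    return distance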
kurtmckee/listparser
listparser/dates.py
_rfc822
def _rfc822(date): """Parse RFC 822 dates and times http://tools.ietf.org/html/rfc822#section-5 There are some formatting differences that are accounted for: 1. Years may be two or four digits. 2. The month and day can be swapped. 3. Additional timezone names are supported. 4. A default time and timezone are assumed if only a date is present. 5. Timezones may be prefixed with 'Etc/'. """ daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']) months = { 'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12, } timezonenames = { 'ut': 0, 'gmt': 0, 'z': 0, 'adt': -3, 'ast': -4, 'at': -4, 'edt': -4, 'est': -5, 'et': -5, 'cdt': -5, 'cst': -6, 'ct': -6, 'mdt': -6, 'mst': -7, 'mt': -7, 'pdt': -7, 'pst': -8, 'pt': -8, 'a': -1, 'n': 1, 'm': -12, 'y': 12, } parts = date.lower().split() if len(parts) < 5: # Assume that the time and timezone are missing parts.extend(('00:00:00', '0000')) # Remove the day name if parts[0][:3] in daynames: parts = parts[1:] if len(parts) < 5: # If there are still fewer than five parts, there's not enough # information to interpret this return None try: day = int(parts[0]) except ValueError: # Check if the day and month are swapped if months.get(parts[0][:3]): try: day = int(parts[1]) except ValueError: return None else: parts[1] = parts[0] else: return None month = months.get(parts[1][:3]) if not month: return None try: year = int(parts[2]) except ValueError: return None # Normalize two-digit years: # Anything in the 90's is interpreted as 1990 and on # Anything 89 or less is interpreted as 2089 or before if len(parts[2]) <= 2: year += (1900, 2000)[year < 90] timeparts = parts[3].split(':') timeparts = timeparts + ([0] * (3 - len(timeparts))) try: (hour, minute, second) = map(int, timeparts) except ValueError: return None tzhour = 0 tzmin = 0 # Strip 'Etc/' from the timezone if parts[4].startswith('etc/'): parts[4] = parts[4][4:] # Normalize timezones that start with 'gmt': # GMT-05:00 => -0500 # GMT => GMT if parts[4].startswith('gmt'): parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt' # Handle timezones like '-0500', '+0500', and 'EST' if parts[4] and parts[4][0] in ('-', '+'): try: tzhour = int(parts[4][1:3]) tzmin = int(parts[4][3:]) except ValueError: return None if parts[4].startswith('-'): tzhour = tzhour * -1 tzmin = tzmin * -1 else: tzhour = timezonenames.get(parts[4], 0) # Create the datetime object and timezone delta objects try: stamp = datetime.datetime(year, month, day, hour, minute, second) except ValueError: return None delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour) # Return the datetime normalized to UTC try: return stamp - delta except OverflowError: return None
python
def _rfc822(date): """Parse RFC 822 dates and times http://tools.ietf.org/html/rfc822#section-5 There are some formatting differences that are accounted for: 1. Years may be two or four digits. 2. The month and day can be swapped. 3. Additional timezone names are supported. 4. A default time and timezone are assumed if only a date is present. 5. Timezones may be prefixed with 'Etc/'. """ daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']) months = { 'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12, } timezonenames = { 'ut': 0, 'gmt': 0, 'z': 0, 'adt': -3, 'ast': -4, 'at': -4, 'edt': -4, 'est': -5, 'et': -5, 'cdt': -5, 'cst': -6, 'ct': -6, 'mdt': -6, 'mst': -7, 'mt': -7, 'pdt': -7, 'pst': -8, 'pt': -8, 'a': -1, 'n': 1, 'm': -12, 'y': 12, } parts = date.lower().split() if len(parts) < 5: # Assume that the time and timezone are missing parts.extend(('00:00:00', '0000')) # Remove the day name if parts[0][:3] in daynames: parts = parts[1:] if len(parts) < 5: # If there are still fewer than five parts, there's not enough # information to interpret this return None try: day = int(parts[0]) except ValueError: # Check if the day and month are swapped if months.get(parts[0][:3]): try: day = int(parts[1]) except ValueError: return None else: parts[1] = parts[0] else: return None month = months.get(parts[1][:3]) if not month: return None try: year = int(parts[2]) except ValueError: return None # Normalize two-digit years: # Anything in the 90's is interpreted as 1990 and on # Anything 89 or less is interpreted as 2089 or before if len(parts[2]) <= 2: year += (1900, 2000)[year < 90] timeparts = parts[3].split(':') timeparts = timeparts + ([0] * (3 - len(timeparts))) try: (hour, minute, second) = map(int, timeparts) except ValueError: return None tzhour = 0 tzmin = 0 # Strip 'Etc/' from the timezone if parts[4].startswith('etc/'): parts[4] = parts[4][4:] # Normalize timezones that start with 'gmt': # GMT-05:00 => -0500 # GMT => GMT if parts[4].startswith('gmt'): parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt' # Handle timezones like '-0500', '+0500', and 'EST' if parts[4] and parts[4][0] in ('-', '+'): try: tzhour = int(parts[4][1:3]) tzmin = int(parts[4][3:]) except ValueError: return None if parts[4].startswith('-'): tzhour = tzhour * -1 tzmin = tzmin * -1 else: tzhour = timezonenames.get(parts[4], 0) # Create the datetime object and timezone delta objects try: stamp = datetime.datetime(year, month, day, hour, minute, second) except ValueError: return None delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour) # Return the datetime normalized to UTC try: return stamp - delta except OverflowError: return None
[ "def", "_rfc822", "(", "date", ")", ":", "daynames", "=", "set", "(", "[", "'mon'", ",", "'tue'", ",", "'wed'", ",", "'thu'", ",", "'fri'", ",", "'sat'", ",", "'sun'", "]", ")", "months", "=", "{", "'jan'", ":", "1", ",", "'feb'", ":", "2", ",", "'mar'", ":", "3", ",", "'apr'", ":", "4", ",", "'may'", ":", "5", ",", "'jun'", ":", "6", ",", "'jul'", ":", "7", ",", "'aug'", ":", "8", ",", "'sep'", ":", "9", ",", "'oct'", ":", "10", ",", "'nov'", ":", "11", ",", "'dec'", ":", "12", ",", "}", "timezonenames", "=", "{", "'ut'", ":", "0", ",", "'gmt'", ":", "0", ",", "'z'", ":", "0", ",", "'adt'", ":", "-", "3", ",", "'ast'", ":", "-", "4", ",", "'at'", ":", "-", "4", ",", "'edt'", ":", "-", "4", ",", "'est'", ":", "-", "5", ",", "'et'", ":", "-", "5", ",", "'cdt'", ":", "-", "5", ",", "'cst'", ":", "-", "6", ",", "'ct'", ":", "-", "6", ",", "'mdt'", ":", "-", "6", ",", "'mst'", ":", "-", "7", ",", "'mt'", ":", "-", "7", ",", "'pdt'", ":", "-", "7", ",", "'pst'", ":", "-", "8", ",", "'pt'", ":", "-", "8", ",", "'a'", ":", "-", "1", ",", "'n'", ":", "1", ",", "'m'", ":", "-", "12", ",", "'y'", ":", "12", ",", "}", "parts", "=", "date", ".", "lower", "(", ")", ".", "split", "(", ")", "if", "len", "(", "parts", ")", "<", "5", ":", "# Assume that the time and timezone are missing", "parts", ".", "extend", "(", "(", "'00:00:00'", ",", "'0000'", ")", ")", "# Remove the day name", "if", "parts", "[", "0", "]", "[", ":", "3", "]", "in", "daynames", ":", "parts", "=", "parts", "[", "1", ":", "]", "if", "len", "(", "parts", ")", "<", "5", ":", "# If there are still fewer than five parts, there's not enough", "# information to interpret this", "return", "None", "try", ":", "day", "=", "int", "(", "parts", "[", "0", "]", ")", "except", "ValueError", ":", "# Check if the day and month are swapped", "if", "months", ".", "get", "(", "parts", "[", "0", "]", "[", ":", "3", "]", ")", ":", "try", ":", "day", "=", "int", "(", "parts", "[", "1", "]", ")", "except", "ValueError", ":", "return", "None", "else", ":", "parts", "[", "1", "]", "=", "parts", "[", "0", "]", "else", ":", "return", "None", "month", "=", "months", ".", "get", "(", "parts", "[", "1", "]", "[", ":", "3", "]", ")", "if", "not", "month", ":", "return", "None", "try", ":", "year", "=", "int", "(", "parts", "[", "2", "]", ")", "except", "ValueError", ":", "return", "None", "# Normalize two-digit years:", "# Anything in the 90's is interpreted as 1990 and on", "# Anything 89 or less is interpreted as 2089 or before", "if", "len", "(", "parts", "[", "2", "]", ")", "<=", "2", ":", "year", "+=", "(", "1900", ",", "2000", ")", "[", "year", "<", "90", "]", "timeparts", "=", "parts", "[", "3", "]", ".", "split", "(", "':'", ")", "timeparts", "=", "timeparts", "+", "(", "[", "0", "]", "*", "(", "3", "-", "len", "(", "timeparts", ")", ")", ")", "try", ":", "(", "hour", ",", "minute", ",", "second", ")", "=", "map", "(", "int", ",", "timeparts", ")", "except", "ValueError", ":", "return", "None", "tzhour", "=", "0", "tzmin", "=", "0", "# Strip 'Etc/' from the timezone", "if", "parts", "[", "4", "]", ".", "startswith", "(", "'etc/'", ")", ":", "parts", "[", "4", "]", "=", "parts", "[", "4", "]", "[", "4", ":", "]", "# Normalize timezones that start with 'gmt':", "# GMT-05:00 => -0500", "# GMT => GMT", "if", "parts", "[", "4", "]", ".", "startswith", "(", "'gmt'", ")", ":", "parts", "[", "4", "]", "=", "''", ".", "join", "(", "parts", "[", "4", "]", "[", "3", ":", "]", ".", "split", "(", "':'", ")", ")", "or", "'gmt'", "# Handle timezones like '-0500', 
'+0500', and 'EST'", "if", "parts", "[", "4", "]", "and", "parts", "[", "4", "]", "[", "0", "]", "in", "(", "'-'", ",", "'+'", ")", ":", "try", ":", "tzhour", "=", "int", "(", "parts", "[", "4", "]", "[", "1", ":", "3", "]", ")", "tzmin", "=", "int", "(", "parts", "[", "4", "]", "[", "3", ":", "]", ")", "except", "ValueError", ":", "return", "None", "if", "parts", "[", "4", "]", ".", "startswith", "(", "'-'", ")", ":", "tzhour", "=", "tzhour", "*", "-", "1", "tzmin", "=", "tzmin", "*", "-", "1", "else", ":", "tzhour", "=", "timezonenames", ".", "get", "(", "parts", "[", "4", "]", ",", "0", ")", "# Create the datetime object and timezone delta objects", "try", ":", "stamp", "=", "datetime", ".", "datetime", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ",", "second", ")", "except", "ValueError", ":", "return", "None", "delta", "=", "datetime", ".", "timedelta", "(", "0", ",", "0", ",", "0", ",", "0", ",", "tzmin", ",", "tzhour", ")", "# Return the date and timestamp in a UTC 9-tuple", "try", ":", "return", "stamp", "-", "delta", "except", "OverflowError", ":", "return", "None" ]
Parse RFC 822 dates and times http://tools.ietf.org/html/rfc822#section-5 There are some formatting differences that are accounted for: 1. Years may be two or four digits. 2. The month and day can be swapped. 3. Additional timezone names are supported. 4. A default time and timezone are assumed if only a date is present. 5. Timezones may be prefixed with 'Etc/'.
[ "Parse", "RFC", "822", "dates", "and", "times", "http", ":", "//", "tools", ".", "ietf", ".", "org", "/", "html", "/", "rfc822#section", "-", "5" ]
train
https://github.com/kurtmckee/listparser/blob/f9bc310a0ce567cd0611fea68be99974021f53c7/listparser/dates.py#L25-L126
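A few illustrative calls, derived from the code above (module path assumed from the record; the function returns a naive datetime normalized to UTC, or None when the value cannot be interpreted):

from listparser.dates import _rfc822

_rfc822('Sun, 14 Jun 2009 11:47:32 GMT')  # -> datetime.datetime(2009, 6, 14, 11, 47, 32)
_rfc822('14 Jun 09 11:47:32 -0500')       # two-digit year; -0500 is shifted to 16:47:32 UTC
_rfc822('14 Jun 2009')                    # date only: midnight UTC is assumed
_rfc822('not a date')                     # -> None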
kurtmckee/listparser
listparser/dates.py
_to_rfc822
def _to_rfc822(date): """_to_rfc822(datetime.datetime) -> str The datetime `strftime` method is subject to locale-specific day and month names, so this function hardcodes the conversion.""" months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] fmt = '{day}, {d:02} {month} {y:04} {h:02}:{m:02}:{s:02} GMT' return fmt.format( day=days[date.weekday()], d=date.day, month=months[date.month - 1], y=date.year, h=date.hour, m=date.minute, s=date.second, )
python
def _to_rfc822(date): """_to_rfc822(datetime.datetime) -> str The datetime `strftime` method is subject to locale-specific day and month names, so this function hardcodes the conversion.""" months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] fmt = '{day}, {d:02} {month} {y:04} {h:02}:{m:02}:{s:02} GMT' return fmt.format( day=days[date.weekday()], d=date.day, month=months[date.month - 1], y=date.year, h=date.hour, m=date.minute, s=date.second, )
[ "def", "_to_rfc822", "(", "date", ")", ":", "months", "=", "[", "'Jan'", ",", "'Feb'", ",", "'Mar'", ",", "'Apr'", ",", "'May'", ",", "'Jun'", ",", "'Jul'", ",", "'Aug'", ",", "'Sep'", ",", "'Oct'", ",", "'Nov'", ",", "'Dec'", "]", "days", "=", "[", "'Mon'", ",", "'Tue'", ",", "'Wed'", ",", "'Thu'", ",", "'Fri'", ",", "'Sat'", ",", "'Sun'", "]", "fmt", "=", "'{day}, {d:02} {month} {y:04} {h:02}:{m:02}:{s:02} GMT'", "return", "fmt", ".", "format", "(", "day", "=", "days", "[", "date", ".", "weekday", "(", ")", "]", ",", "d", "=", "date", ".", "day", ",", "month", "=", "months", "[", "date", ".", "month", "-", "1", "]", ",", "y", "=", "date", ".", "year", ",", "h", "=", "date", ".", "hour", ",", "m", "=", "date", ".", "minute", ",", "s", "=", "date", ".", "second", ",", ")" ]
_to_rfc822(datetime.datetime) -> str The datetime `strftime` method is subject to locale-specific day and month names, so this function hardcodes the conversion.
[ "_to_rfc822", "(", "datetime", ".", "datetime", ")", "-", ">", "str", "The", "datetime", "strftime", "method", "is", "subject", "to", "locale", "-", "specific", "day", "and", "month", "names", "so", "this", "function", "hardcodes", "the", "conversion", "." ]
train
https://github.com/kurtmckee/listparser/blob/f9bc310a0ce567cd0611fea68be99974021f53c7/listparser/dates.py#L129-L145
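Round-tripping the previous example (a sketch; June 14, 2009 was a Sunday, so weekday() is 6 and the 'Sun' entry is picked from the hardcoded day names):

import datetime
from listparser.dates import _to_rfc822  # module path assumed from the record

stamp = datetime.datetime(2009, 6, 14, 11, 47, 32)
_to_rfc822(stamp)  # -> 'Sun, 14 Jun 2009 11:47:32 GMT'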
cpburnz/python-sql-parameters
sqlparams/__init__.py
SQLParams.format
def format(self, sql, params): """ Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and the ordinal parameters (|list|). """ if isinstance(sql, unicode): string_type = unicode elif isinstance(sql, bytes): string_type = bytes sql = sql.decode(_BYTES_ENCODING) else: raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql)) if self.named == 'numeric': if isinstance(params, collections.Mapping): params = {string_type(idx): val for idx, val in iteritems(params)} elif isinstance(params, collections.Sequence) and not isinstance(params, (unicode, bytes)): params = {string_type(idx): val for idx, val in enumerate(params, 1)} if not isinstance(params, collections.Mapping): raise TypeError("params:{!r} is not a dict.".format(params)) # Find named parameters. names = self.match.findall(sql) # Map named parameters to ordinals. ord_params = [] name_to_ords = {} for name in names: value = params[name] if isinstance(value, tuple): ord_params.extend(value) if name not in name_to_ords: name_to_ords[name] = '(' + ','.join((self.replace,) * len(value)) + ')' else: ord_params.append(value) if name not in name_to_ords: name_to_ords[name] = self.replace # Replace named parameters with ordinals. sql = self.match.sub(lambda m: name_to_ords[m.group(1)], sql) # Make sure the query is returned as the proper string type. if string_type is bytes: sql = sql.encode(_BYTES_ENCODING) # Return formatted SQL and new ordinal parameters. return sql, ord_params
python
def format(self, sql, params): """ Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and the ordinal parameters (|list|). """ if isinstance(sql, unicode): string_type = unicode elif isinstance(sql, bytes): string_type = bytes sql = sql.decode(_BYTES_ENCODING) else: raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql)) if self.named == 'numeric': if isinstance(params, collections.Mapping): params = {string_type(idx): val for idx, val in iteritems(params)} elif isinstance(params, collections.Sequence) and not isinstance(params, (unicode, bytes)): params = {string_type(idx): val for idx, val in enumerate(params, 1)} if not isinstance(params, collections.Mapping): raise TypeError("params:{!r} is not a dict.".format(params)) # Find named parameters. names = self.match.findall(sql) # Map named parameters to ordinals. ord_params = [] name_to_ords = {} for name in names: value = params[name] if isinstance(value, tuple): ord_params.extend(value) if name not in name_to_ords: name_to_ords[name] = '(' + ','.join((self.replace,) * len(value)) + ')' else: ord_params.append(value) if name not in name_to_ords: name_to_ords[name] = self.replace # Replace named parameters with ordinals. sql = self.match.sub(lambda m: name_to_ords[m.group(1)], sql) # Make sure the query is returned as the proper string type. if string_type is bytes: sql = sql.encode(_BYTES_ENCODING) # Return formatted SQL and new ordinal parameters. return sql, ord_params
[ "def", "format", "(", "self", ",", "sql", ",", "params", ")", ":", "if", "isinstance", "(", "sql", ",", "unicode", ")", ":", "string_type", "=", "unicode", "elif", "isinstance", "(", "sql", ",", "bytes", ")", ":", "string_type", "=", "bytes", "sql", "=", "sql", ".", "decode", "(", "_BYTES_ENCODING", ")", "else", ":", "raise", "TypeError", "(", "\"sql:{!r} is not a unicode or byte string.\"", ".", "format", "(", "sql", ")", ")", "if", "self", ".", "named", "==", "'numeric'", ":", "if", "isinstance", "(", "params", ",", "collections", ".", "Mapping", ")", ":", "params", "=", "{", "string_type", "(", "idx", ")", ":", "val", "for", "idx", ",", "val", "in", "iteritems", "(", "params", ")", "}", "elif", "isinstance", "(", "params", ",", "collections", ".", "Sequence", ")", "and", "not", "isinstance", "(", "params", ",", "(", "unicode", ",", "bytes", ")", ")", ":", "params", "=", "{", "string_type", "(", "idx", ")", ":", "val", "for", "idx", ",", "val", "in", "enumerate", "(", "params", ",", "1", ")", "}", "if", "not", "isinstance", "(", "params", ",", "collections", ".", "Mapping", ")", ":", "raise", "TypeError", "(", "\"params:{!r} is not a dict.\"", ".", "format", "(", "params", ")", ")", "# Find named parameters.", "names", "=", "self", ".", "match", ".", "findall", "(", "sql", ")", "# Map named parameters to ordinals.", "ord_params", "=", "[", "]", "name_to_ords", "=", "{", "}", "for", "name", "in", "names", ":", "value", "=", "params", "[", "name", "]", "if", "isinstance", "(", "value", ",", "tuple", ")", ":", "ord_params", ".", "extend", "(", "value", ")", "if", "name", "not", "in", "name_to_ords", ":", "name_to_ords", "[", "name", "]", "=", "'('", "+", "','", ".", "join", "(", "(", "self", ".", "replace", ",", ")", "*", "len", "(", "value", ")", ")", "+", "')'", "else", ":", "ord_params", ".", "append", "(", "value", ")", "if", "name", "not", "in", "name_to_ords", ":", "name_to_ords", "[", "name", "]", "=", "self", ".", "replace", "# Replace named parameters with ordinals.", "sql", "=", "self", ".", "match", ".", "sub", "(", "lambda", "m", ":", "name_to_ords", "[", "m", ".", "group", "(", "1", ")", "]", ",", "sql", ")", "# Make sure the query is returned as the proper string type.", "if", "string_type", "is", "bytes", ":", "sql", "=", "sql", ".", "encode", "(", "_BYTES_ENCODING", ")", "# Return formatted SQL and new ordinal parameters.", "return", "sql", ",", "ord_params" ]
Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and the ordinal parameters (|list|).
[ "Formats", "the", "SQL", "query", "to", "use", "ordinal", "parameters", "instead", "of", "named", "parameters", "." ]
train
https://github.com/cpburnz/python-sql-parameters/blob/828088a54b84aa904ec54c50af60f8aecc62a5f4/sqlparams/__init__.py#L245-L301
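A usage sketch, assuming the SQLParams constructor takes the named style to read and the ordinal style to emit (as the self.named and self.replace attributes above suggest). Tuple values are expanded into parenthesized placeholder lists, which is what the isinstance(value, tuple) branch implements:

import sqlparams

query = sqlparams.SQLParams('named', 'qmark')
sql, params = query.format(
    'SELECT * FROM users WHERE id = :id AND name = :name;',
    {'id': 1, 'name': 'Dwayne'})
# sql    -> 'SELECT * FROM users WHERE id = ? AND name = ?;'
# params -> [1, 'Dwayne']

sql, params = query.format(
    'SELECT * FROM users WHERE id IN :ids;', {'ids': (1, 2, 3)})
# sql    -> 'SELECT * FROM users WHERE id IN (?,?,?);'
# params -> [1, 2, 3]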
cpburnz/python-sql-parameters
sqlparams/__init__.py
SQLParams.formatmany
def formatmany(self, sql, many_params): """ Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *many_params* (|iterable|) contains each *params* to format. - *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and a |list| containing each ordinal parameters (|list|). """ if isinstance(sql, unicode): string_type = unicode elif isinstance(sql, bytes): string_type = bytes sql = sql.decode(_BYTES_ENCODING) else: raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql)) if not isinstance(many_params, collections.Iterable) or isinstance(many_params, (unicode, bytes)): raise TypeError("many_params:{!r} is not iterable.".format(many_params)) # Find named parameters. names = self.match.findall(sql) name_set = set(names) # Map named parameters to ordinals. many_ord_params = [] name_to_ords = {} name_to_len = {} repl_str = self.replace repl_tuple = (repl_str,) for i, params in enumerate(many_params): if self.named == 'numeric': if isinstance(params, collections.Mapping): params = {string_type(idx): val for idx, val in iteritems(params)} elif isinstance(params, collections.Sequence) and not isinstance(params, (unicode, bytes)): params = {string_type(idx): val for idx, val in enumerate(params, 1)} if not isinstance(params, collections.Mapping): raise TypeError("many_params[{}]:{!r} is not a dict.".format(i, params)) if not i: # first # Map names to ordinals, and determine what names are tuples and # what their lengths are. for name in name_set: value = params[name] if isinstance(value, tuple): tuple_len = len(value) name_to_ords[name] = '(' + ','.join(repl_tuple * tuple_len) + ')' name_to_len[name] = tuple_len else: name_to_ords[name] = repl_str name_to_len[name] = None # Make sure tuples match up and collapse tuples into ordinals. ord_params = [] for name in names: value = params[name] tuple_len = name_to_len[name] if tuple_len is not None: if not isinstance(value, tuple): raise TypeError("many_params[{}][{!r}]:{!r} was expected to be a tuple.".format(i, name, value)) elif len(value) != tuple_len: raise ValueError("many_params[{}][{!r}]:{!r} length was expected to be {}.".format(i, name, value, tuple_len)) ord_params.extend(value) else: ord_params.append(value) many_ord_params.append(ord_params) # Replace named parameters with ordinals. sql = self.match.sub(lambda m: name_to_ords[m.group(1)], sql) # Make sure the query is returned as the proper string type. if string_type is bytes: sql = sql.encode(_BYTES_ENCODING) # Return formatted SQL and new ordinal parameters. return sql, many_ord_params
python
def formatmany(self, sql, many_params): """ Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *many_params* (|iterable|) contains each *params* to format. - *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and a |list| containing each ordinal parameters (|list|). """ if isinstance(sql, unicode): string_type = unicode elif isinstance(sql, bytes): string_type = bytes sql = sql.decode(_BYTES_ENCODING) else: raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql)) if not isinstance(many_params, collections.Iterable) or isinstance(many_params, (unicode, bytes)): raise TypeError("many_params:{!r} is not iterable.".format(many_params)) # Find named parameters. names = self.match.findall(sql) name_set = set(names) # Map named parameters to ordinals. many_ord_params = [] name_to_ords = {} name_to_len = {} repl_str = self.replace repl_tuple = (repl_str,) for i, params in enumerate(many_params): if self.named == 'numeric': if isinstance(params, collections.Mapping): params = {string_type(idx): val for idx, val in iteritems(params)} elif isinstance(params, collections.Sequence) and not isinstance(params, (unicode, bytes)): params = {string_type(idx): val for idx, val in enumerate(params, 1)} if not isinstance(params, collections.Mapping): raise TypeError("many_params[{}]:{!r} is not a dict.".format(i, params)) if not i: # first # Map names to ordinals, and determine what names are tuples and # what their lengths are. for name in name_set: value = params[name] if isinstance(value, tuple): tuple_len = len(value) name_to_ords[name] = '(' + ','.join(repl_tuple * tuple_len) + ')' name_to_len[name] = tuple_len else: name_to_ords[name] = repl_str name_to_len[name] = None # Make sure tuples match up and collapse tuples into ordinals. ord_params = [] for name in names: value = params[name] tuple_len = name_to_len[name] if tuple_len is not None: if not isinstance(value, tuple): raise TypeError("many_params[{}][{!r}]:{!r} was expected to be a tuple.".format(i, name, value)) elif len(value) != tuple_len: raise ValueError("many_params[{}][{!r}]:{!r} length was expected to be {}.".format(i, name, value, tuple_len)) ord_params.extend(value) else: ord_params.append(value) many_ord_params.append(ord_params) # Replace named parameters with ordinals. sql = self.match.sub(lambda m: name_to_ords[m.group(1)], sql) # Make sure the query is returned as the proper string type. if string_type is bytes: sql = sql.encode(_BYTES_ENCODING) # Return formatted SQL and new ordinal parameters. return sql, many_ord_params
[ "def", "formatmany", "(", "self", ",", "sql", ",", "many_params", ")", ":", "if", "isinstance", "(", "sql", ",", "unicode", ")", ":", "string_type", "=", "unicode", "elif", "isinstance", "(", "sql", ",", "bytes", ")", ":", "string_type", "=", "bytes", "sql", "=", "sql", ".", "decode", "(", "_BYTES_ENCODING", ")", "else", ":", "raise", "TypeError", "(", "\"sql:{!r} is not a unicode or byte string.\"", ".", "format", "(", "sql", ")", ")", "if", "not", "isinstance", "(", "many_params", ",", "collections", ".", "Iterable", ")", "or", "isinstance", "(", "many_params", ",", "(", "unicode", ",", "bytes", ")", ")", ":", "raise", "TypeError", "(", "\"many_params:{!r} is not iterable.\"", ".", "format", "(", "many_params", ")", ")", "# Find named parameters.", "names", "=", "self", ".", "match", ".", "findall", "(", "sql", ")", "name_set", "=", "set", "(", "names", ")", "# Map named parameters to ordinals.", "many_ord_params", "=", "[", "]", "name_to_ords", "=", "{", "}", "name_to_len", "=", "{", "}", "repl_str", "=", "self", ".", "replace", "repl_tuple", "=", "(", "repl_str", ",", ")", "for", "i", ",", "params", "in", "enumerate", "(", "many_params", ")", ":", "if", "self", ".", "named", "==", "'numeric'", ":", "if", "isinstance", "(", "params", ",", "collections", ".", "Mapping", ")", ":", "params", "=", "{", "string_type", "(", "idx", ")", ":", "val", "for", "idx", ",", "val", "in", "iteritems", "(", "params", ")", "}", "elif", "isinstance", "(", "params", ",", "collections", ".", "Sequence", ")", "and", "not", "isinstance", "(", "params", ",", "(", "unicode", ",", "bytes", ")", ")", ":", "params", "=", "{", "string_type", "(", "idx", ")", ":", "val", "for", "idx", ",", "val", "in", "enumerate", "(", "params", ",", "1", ")", "}", "if", "not", "isinstance", "(", "params", ",", "collections", ".", "Mapping", ")", ":", "raise", "TypeError", "(", "\"many_params[{}]:{!r} is not a dict.\"", ".", "format", "(", "i", ",", "params", ")", ")", "if", "not", "i", ":", "# first", "# Map names to ordinals, and determine what names are tuples and", "# what their lengths are.", "for", "name", "in", "name_set", ":", "value", "=", "params", "[", "name", "]", "if", "isinstance", "(", "value", ",", "tuple", ")", ":", "tuple_len", "=", "len", "(", "value", ")", "name_to_ords", "[", "name", "]", "=", "'('", "+", "','", ".", "join", "(", "repl_tuple", "*", "tuple_len", ")", "+", "')'", "name_to_len", "[", "name", "]", "=", "tuple_len", "else", ":", "name_to_ords", "[", "name", "]", "=", "repl_str", "name_to_len", "[", "name", "]", "=", "None", "# Make sure tuples match up and collapse tuples into ordinals.", "ord_params", "=", "[", "]", "for", "name", "in", "names", ":", "value", "=", "params", "[", "name", "]", "tuple_len", "=", "name_to_len", "[", "name", "]", "if", "tuple_len", "is", "not", "None", ":", "if", "not", "isinstance", "(", "value", ",", "tuple", ")", ":", "raise", "TypeError", "(", "\"many_params[{}][{!r}]:{!r} was expected to be a tuple.\"", ".", "format", "(", "i", ",", "name", ",", "value", ")", ")", "elif", "len", "(", "value", ")", "!=", "tuple_len", ":", "raise", "ValueError", "(", "\"many_params[{}][{!r}]:{!r} length was expected to be {}.\"", ".", "format", "(", "i", ",", "name", ",", "value", ",", "tuple_len", ")", ")", "ord_params", ".", "extend", "(", "value", ")", "else", ":", "ord_params", ".", "append", "(", "value", ")", "many_ord_params", ".", "append", "(", "ord_params", ")", "# Replace named parameters with ordinals.", "sql", "=", "self", ".", "match", ".", "sub", "(", "lambda", "m", 
":", "name_to_ords", "[", "m", ".", "group", "(", "1", ")", "]", ",", "sql", ")", "# Make sure the query is returned as the proper string type.", "if", "string_type", "is", "bytes", ":", "sql", "=", "sql", ".", "encode", "(", "_BYTES_ENCODING", ")", "# Return formatted SQL and new ordinal parameters.", "return", "sql", ",", "many_ord_params" ]
Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *many_params* (|iterable|) contains each *params* to format. - *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and a |list| containing each ordinal parameters (|list|).
[ "Formats", "the", "SQL", "query", "to", "use", "ordinal", "parameters", "instead", "of", "named", "parameters", "." ]
train
https://github.com/cpburnz/python-sql-parameters/blob/828088a54b84aa904ec54c50af60f8aecc62a5f4/sqlparams/__init__.py#L303-L386
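The executemany-style counterpart; per the tuple checks above, every row must use the same tuple lengths as the first row. A sketch under the same constructor assumption as before:

import sqlparams

query = sqlparams.SQLParams('named', 'qmark')
sql, many_params = query.formatmany(
    'UPDATE users SET name = :name WHERE id = :id;',
    [{'id': 1, 'name': 'Dwayne'}, {'id': 2, 'name': 'John'}])
# sql         -> 'UPDATE users SET name = ? WHERE id = ?;'
# many_params -> [['Dwayne', 1], ['John', 2]]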
hecrj/reticular
reticular.py
_get_parser
def _get_parser(f): """ Gets the parser for the command f; if it does not exist, a new one is created """ _COMMAND_GROUPS[f.__module__].load() if f.__name__ not in _COMMAND_GROUPS[f.__module__].parsers: parser = _COMMAND_GROUPS[f.__module__].parser_generator.add_parser(f.__name__, help=f.__doc__, description=f.__doc__) parser.set_defaults(func=f) _COMMAND_GROUPS[f.__module__].parsers[f.__name__] = parser return _COMMAND_GROUPS[f.__module__].parsers[f.__name__]
python
def _get_parser(f): """ Gets the parser for the command f; if it does not exist, a new one is created """ _COMMAND_GROUPS[f.__module__].load() if f.__name__ not in _COMMAND_GROUPS[f.__module__].parsers: parser = _COMMAND_GROUPS[f.__module__].parser_generator.add_parser(f.__name__, help=f.__doc__, description=f.__doc__) parser.set_defaults(func=f) _COMMAND_GROUPS[f.__module__].parsers[f.__name__] = parser return _COMMAND_GROUPS[f.__module__].parsers[f.__name__]
[ "def", "_get_parser", "(", "f", ")", ":", "_COMMAND_GROUPS", "[", "f", ".", "__module__", "]", ".", "load", "(", ")", "if", "f", ".", "__name__", "not", "in", "_COMMAND_GROUPS", "[", "f", ".", "__module__", "]", ".", "parsers", ":", "parser", "=", "_COMMAND_GROUPS", "[", "f", ".", "__module__", "]", ".", "parser_generator", ".", "add_parser", "(", "f", ".", "__name__", ",", "help", "=", "f", ".", "__doc__", ",", "description", "=", "f", ".", "__doc__", ")", "parser", ".", "set_defaults", "(", "func", "=", "f", ")", "_COMMAND_GROUPS", "[", "f", ".", "__module__", "]", ".", "parsers", "[", "f", ".", "__name__", "]", "=", "parser", "return", "_COMMAND_GROUPS", "[", "f", ".", "__module__", "]", ".", "parsers", "[", "f", ".", "__name__", "]" ]
Gets the parser for the command f; if it does not exist, a new one is created
[ "Gets", "the", "parser", "for", "the", "command", "f", "if", "it", "not", "exists", "it", "creates", "a", "new", "one" ]
train
https://github.com/hecrj/reticular/blob/b83c632459b3215284124373931fcaef76bc64e8/reticular.py#L180-L193
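The pattern being wrapped here is standard argparse sub-command registration. A self-contained sketch of the same idea, with a hypothetical command that is not part of reticular:

import argparse

def greet(args):
    """Prints a greeting."""
    print('Hello, %s!' % args.name)

root = argparse.ArgumentParser()
subparsers = root.add_subparsers()
# Reuse the docstring for help/description, as _get_parser does above.
parser = subparsers.add_parser('greet', help=greet.__doc__, description=greet.__doc__)
parser.add_argument('name')
parser.set_defaults(func=greet)

args = root.parse_args(['greet', 'world'])
args.func(args)  # -> Hello, world!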
bear/ronkyuu
ronkyuu/webmention.py
findMentions
def findMentions(sourceURL, targetURL=None, exclude_domains=[], content=None, test_urls=True, headers={}, timeout=None): """Find all <a /> elements in the given html for a post. Only scan html element matching all criteria in look_in. Optionally the content to be scanned can be given as an argument. If any have an href attribute that is not from one of the items in exclude_domains, append it to our lists. :param sourceURL: the URL for the post we are scanning :param targetURL: optional URL to restrict the search to :param exclude_domains: a list of domains to exclude from the search :type exclude_domains: list :param content: the content to be scanned for mentions :param look_in: dictionary with name, id and class_. only element matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout: float :rtype: dictionary of Mentions """ __doc__ = None if test_urls: URLValidator(message='invalid source URL')(sourceURL) if content: result = {'status': requests.codes.ok, 'headers': None, } else: r = requests.get(sourceURL, verify=True, headers=headers, timeout=timeout) result = {'status': r.status_code, 'headers': r.headers } # Check for character encodings and use 'correct' data if 'charset' in r.headers.get('content-type', ''): content = r.text else: content = r.content result.update({'refs': set(), 'post-url': sourceURL}) if result['status'] == requests.codes.ok: # Allow passing BS doc as content if isinstance(content, BeautifulSoup): __doc__ = content # result.update({'content': unicode(__doc__)}) result.update({'content': str(__doc__)}) else: __doc__ = BeautifulSoup(content, _html_parser) result.update({'content': content}) # try to find first h-entry else use full document entry = __doc__.find(class_="h-entry") or __doc__ # Allow finding particular URL if targetURL: # find only targetURL all_links = entry.find_all('a', href=targetURL) else: # find all links with a href all_links = entry.find_all('a', href=True) for link in all_links: href = link.get('href', None) if href: url = urlparse(href) if url.scheme in ('http', 'https'): if url.hostname and url.hostname not in exclude_domains: result['refs'].add(href) return result
python
def findMentions(sourceURL, targetURL=None, exclude_domains=[], content=None, test_urls=True, headers={}, timeout=None): """Find all <a /> elements in the given html for a post. Only scan html element matching all criteria in look_in. Optionally the content to be scanned can be given as an argument. If any have an href attribute that is not from one of the items in exclude_domains, append it to our lists. :param sourceURL: the URL for the post we are scanning :param targetURL: optional URL to restrict the search to :param exclude_domains: a list of domains to exclude from the search :type exclude_domains: list :param content: the content to be scanned for mentions :param look_in: dictionary with name, id and class_. only element matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout: float :rtype: dictionary of Mentions """ __doc__ = None if test_urls: URLValidator(message='invalid source URL')(sourceURL) if content: result = {'status': requests.codes.ok, 'headers': None, } else: r = requests.get(sourceURL, verify=True, headers=headers, timeout=timeout) result = {'status': r.status_code, 'headers': r.headers } # Check for character encodings and use 'correct' data if 'charset' in r.headers.get('content-type', ''): content = r.text else: content = r.content result.update({'refs': set(), 'post-url': sourceURL}) if result['status'] == requests.codes.ok: # Allow passing BS doc as content if isinstance(content, BeautifulSoup): __doc__ = content # result.update({'content': unicode(__doc__)}) result.update({'content': str(__doc__)}) else: __doc__ = BeautifulSoup(content, _html_parser) result.update({'content': content}) # try to find first h-entry else use full document entry = __doc__.find(class_="h-entry") or __doc__ # Allow finding particular URL if targetURL: # find only targetURL all_links = entry.find_all('a', href=targetURL) else: # find all links with a href all_links = entry.find_all('a', href=True) for link in all_links: href = link.get('href', None) if href: url = urlparse(href) if url.scheme in ('http', 'https'): if url.hostname and url.hostname not in exclude_domains: result['refs'].add(href) return result
[ "def", "findMentions", "(", "sourceURL", ",", "targetURL", "=", "None", ",", "exclude_domains", "=", "[", "]", ",", "content", "=", "None", ",", "test_urls", "=", "True", ",", "headers", "=", "{", "}", ",", "timeout", "=", "None", ")", ":", "__doc__", "=", "None", "if", "test_urls", ":", "URLValidator", "(", "message", "=", "'invalid source URL'", ")", "(", "sourceURL", ")", "if", "content", ":", "result", "=", "{", "'status'", ":", "requests", ".", "codes", ".", "ok", ",", "'headers'", ":", "None", ",", "}", "else", ":", "r", "=", "requests", ".", "get", "(", "sourceURL", ",", "verify", "=", "True", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ")", "result", "=", "{", "'status'", ":", "r", ".", "status_code", ",", "'headers'", ":", "r", ".", "headers", "}", "# Check for character encodings and use 'correct' data", "if", "'charset'", "in", "r", ".", "headers", ".", "get", "(", "'content-type'", ",", "''", ")", ":", "content", "=", "r", ".", "text", "else", ":", "content", "=", "r", ".", "content", "result", ".", "update", "(", "{", "'refs'", ":", "set", "(", ")", ",", "'post-url'", ":", "sourceURL", "}", ")", "if", "result", "[", "'status'", "]", "==", "requests", ".", "codes", ".", "ok", ":", "# Allow passing BS doc as content", "if", "isinstance", "(", "content", ",", "BeautifulSoup", ")", ":", "__doc__", "=", "content", "# result.update({'content': unicode(__doc__)})", "result", ".", "update", "(", "{", "'content'", ":", "str", "(", "__doc__", ")", "}", ")", "else", ":", "__doc__", "=", "BeautifulSoup", "(", "content", ",", "_html_parser", ")", "result", ".", "update", "(", "{", "'content'", ":", "content", "}", ")", "# try to find first h-entry else use full document", "entry", "=", "__doc__", ".", "find", "(", "class_", "=", "\"h-entry\"", ")", "or", "__doc__", "# Allow finding particular URL", "if", "targetURL", ":", "# find only targetURL", "all_links", "=", "entry", ".", "find_all", "(", "'a'", ",", "href", "=", "targetURL", ")", "else", ":", "# find all links with a href", "all_links", "=", "entry", ".", "find_all", "(", "'a'", ",", "href", "=", "True", ")", "for", "link", "in", "all_links", ":", "href", "=", "link", ".", "get", "(", "'href'", ",", "None", ")", "if", "href", ":", "url", "=", "urlparse", "(", "href", ")", "if", "url", ".", "scheme", "in", "(", "'http'", ",", "'https'", ")", ":", "if", "url", ".", "hostname", "and", "url", ".", "hostname", "not", "in", "exclude_domains", ":", "result", "[", "'refs'", "]", ".", "add", "(", "href", ")", "return", "result" ]
Find all <a /> elements in the given html for a post. Only scan html element matching all criteria in look_in. Optionally the content to be scanned can be given as an argument. If any have an href attribute that is not from one of the items in exclude_domains, append it to our lists. :param sourceURL: the URL for the post we are scanning :param targetURL: optional URL to restrict the search to :param exclude_domains: a list of domains to exclude from the search :type exclude_domains: list :param content: the content to be scanned for mentions :param look_in: dictionary with name, id and class_. only element matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout: float :rtype: dictionary of Mentions
[ "Find", "all", "<a", "/", ">", "elements", "in", "the", "given", "html", "for", "a", "post", ".", "Only", "scan", "html", "element", "matching", "all", "criteria", "in", "look_in", "." ]
train
https://github.com/bear/ronkyuu/blob/91a05fbe220b661760467b7b3d3d780a9ba28afa/ronkyuu/webmention.py#L41-L112
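A usage sketch with hypothetical URLs (the import path assumes the package re-exports the function; otherwise import from ronkyuu.webmention). The returned dict carries the scan status plus a set of discovered hrefs:

from ronkyuu.webmention import findMentions

result = findMentions('https://example.com/a-post',
                      exclude_domains=['example.com'],
                      timeout=5)
if result['status'] == 200:
    for href in result['refs']:
        print(href)  # every external http(s) link found in the h-entry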
bear/ronkyuu
ronkyuu/webmention.py
findEndpoint
def findEndpoint(html): """Search the given html content for all <link /> elements and return any discovered WebMention URL. :param html: html content :rtype: WebMention URL """ poss_rels = ['webmention', 'http://webmention.org', 'http://webmention.org/', 'https://webmention.org', 'https://webmention.org/'] # find elements with correct rels and a href value all_links = BeautifulSoup(html, _html_parser).find_all(rel=poss_rels, href=True) for link in all_links: s = link.get('href', None) if s is not None: return s return None
python
def findEndpoint(html): """Search the given html content for all <link /> elements and return any discovered WebMention URL. :param html: html content :rtype: WebMention URL """ poss_rels = ['webmention', 'http://webmention.org', 'http://webmention.org/', 'https://webmention.org', 'https://webmention.org/'] # find elements with correct rels and a href value all_links = BeautifulSoup(html, _html_parser).find_all(rel=poss_rels, href=True) for link in all_links: s = link.get('href', None) if s is not None: return s return None
[ "def", "findEndpoint", "(", "html", ")", ":", "poss_rels", "=", "[", "'webmention'", ",", "'http://webmention.org'", ",", "'http://webmention.org/'", ",", "'https://webmention.org'", ",", "'https://webmention.org/'", "]", "# find elements with correct rels and a href value", "all_links", "=", "BeautifulSoup", "(", "html", ",", "_html_parser", ")", ".", "find_all", "(", "rel", "=", "poss_rels", ",", "href", "=", "True", ")", "for", "link", "in", "all_links", ":", "s", "=", "link", ".", "get", "(", "'href'", ",", "None", ")", "if", "s", "is", "not", "None", ":", "return", "s", "return", "None" ]
Search the given html content for all <link /> elements and return any discovered WebMention URL. :param html: html content :rtype: WebMention URL
[ "Search", "the", "given", "html", "content", "for", "all", "<link", "/", ">", "elements", "and", "return", "any", "discovered", "WebMention", "URL", "." ]
train
https://github.com/bear/ronkyuu/blob/91a05fbe220b661760467b7b3d3d780a9ba28afa/ronkyuu/webmention.py#L115-L131
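A quick check against inline HTML (a sketch; the hrefs are hypothetical):

from ronkyuu.webmention import findEndpoint

html = ('<html><head>'
        '<link rel="webmention" href="https://example.com/webmention" />'
        '</head><body></body></html>')
findEndpoint(html)  # -> 'https://example.com/webmention'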
bear/ronkyuu
ronkyuu/webmention.py
discoverEndpoint
def discoverEndpoint(url, test_urls=True, headers={}, timeout=None, request=None, debug=False): """Discover any WebMention endpoint for a given URL. :param url: URL to discover WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout: float :param request: optional Requests request object to avoid another GET :rtype: tuple (status_code, URL, [debug]) """ if test_urls: URLValidator(message='invalid URL')(url) # status, webmention endpointURL = None debugOutput = [] try: if request is not None: targetRequest = request else: targetRequest = requests.get(url, verify=False, headers=headers, timeout=timeout) returnCode = targetRequest.status_code debugOutput.append('%s %s' % (returnCode, url)) if returnCode == requests.codes.ok: try: linkHeader = parse_link_header(targetRequest.headers['link']) endpointURL = linkHeader.get('webmention', '') or \ linkHeader.get('http://webmention.org', '') or \ linkHeader.get('http://webmention.org/', '') or \ linkHeader.get('https://webmention.org', '') or \ linkHeader.get('https://webmention.org/', '') # force searching in the HTML if not found if not endpointURL: raise AttributeError debugOutput.append('found in link headers') except (KeyError, AttributeError): endpointURL = findEndpoint(targetRequest.text) debugOutput.append('found in body') if endpointURL is not None: endpointURL = urljoin(url, endpointURL) except (requests.exceptions.RequestException, requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.URLRequired, requests.exceptions.TooManyRedirects, requests.exceptions.Timeout): debugOutput.append('exception during GET request') returnCode = 500 debugOutput.append('endpointURL: %s %s' % (returnCode, endpointURL)) if debug: return (returnCode, endpointURL, debugOutput) else: return (returnCode, endpointURL)
python
def discoverEndpoint(url, test_urls=True, headers={}, timeout=None, request=None, debug=False): """Discover any WebMention endpoint for a given URL. :param url: URL to discover WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout: float :param request: optional Requests request object to avoid another GET :rtype: tuple (status_code, URL, [debug]) """ if test_urls: URLValidator(message='invalid URL')(url) # status, webmention endpointURL = None debugOutput = [] try: if request is not None: targetRequest = request else: targetRequest = requests.get(url, verify=False, headers=headers, timeout=timeout) returnCode = targetRequest.status_code debugOutput.append('%s %s' % (returnCode, url)) if returnCode == requests.codes.ok: try: linkHeader = parse_link_header(targetRequest.headers['link']) endpointURL = linkHeader.get('webmention', '') or \ linkHeader.get('http://webmention.org', '') or \ linkHeader.get('http://webmention.org/', '') or \ linkHeader.get('https://webmention.org', '') or \ linkHeader.get('https://webmention.org/', '') # force searching in the HTML if not found if not endpointURL: raise AttributeError debugOutput.append('found in link headers') except (KeyError, AttributeError): endpointURL = findEndpoint(targetRequest.text) debugOutput.append('found in body') if endpointURL is not None: endpointURL = urljoin(url, endpointURL) except (requests.exceptions.RequestException, requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.URLRequired, requests.exceptions.TooManyRedirects, requests.exceptions.Timeout): debugOutput.append('exception during GET request') returnCode = 500 debugOutput.append('endpointURL: %s %s' % (returnCode, endpointURL)) if debug: return (returnCode, endpointURL, debugOutput) else: return (returnCode, endpointURL)
[ "def", "discoverEndpoint", "(", "url", ",", "test_urls", "=", "True", ",", "headers", "=", "{", "}", ",", "timeout", "=", "None", ",", "request", "=", "None", ",", "debug", "=", "False", ")", ":", "if", "test_urls", ":", "URLValidator", "(", "message", "=", "'invalid URL'", ")", "(", "url", ")", "# status, webmention", "endpointURL", "=", "None", "debugOutput", "=", "[", "]", "try", ":", "if", "request", "is", "not", "None", ":", "targetRequest", "=", "request", "else", ":", "targetRequest", "=", "requests", ".", "get", "(", "url", ",", "verify", "=", "False", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ")", "returnCode", "=", "targetRequest", ".", "status_code", "debugOutput", ".", "append", "(", "'%s %s'", "%", "(", "returnCode", ",", "url", ")", ")", "if", "returnCode", "==", "requests", ".", "codes", ".", "ok", ":", "try", ":", "linkHeader", "=", "parse_link_header", "(", "targetRequest", ".", "headers", "[", "'link'", "]", ")", "endpointURL", "=", "linkHeader", ".", "get", "(", "'webmention'", ",", "''", ")", "or", "linkHeader", ".", "get", "(", "'http://webmention.org'", ",", "''", ")", "or", "linkHeader", ".", "get", "(", "'http://webmention.org/'", ",", "''", ")", "or", "linkHeader", ".", "get", "(", "'https://webmention.org'", ",", "''", ")", "or", "linkHeader", ".", "get", "(", "'https://webmention.org/'", ",", "''", ")", "# force searching in the HTML if not found", "if", "not", "endpointURL", ":", "raise", "AttributeError", "debugOutput", ".", "append", "(", "'found in link headers'", ")", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "endpointURL", "=", "findEndpoint", "(", "targetRequest", ".", "text", ")", "debugOutput", ".", "append", "(", "'found in body'", ")", "if", "endpointURL", "is", "not", "None", ":", "endpointURL", "=", "urljoin", "(", "url", ",", "endpointURL", ")", "except", "(", "requests", ".", "exceptions", ".", "RequestException", ",", "requests", ".", "exceptions", ".", "ConnectionError", ",", "requests", ".", "exceptions", ".", "HTTPError", ",", "requests", ".", "exceptions", ".", "URLRequired", ",", "requests", ".", "exceptions", ".", "TooManyRedirects", ",", "requests", ".", "exceptions", ".", "Timeout", ")", ":", "debugOutput", ".", "append", "(", "'exception during GET request'", ")", "returnCode", "=", "500", "debugOutput", ".", "append", "(", "'endpointURL: %s %s'", "%", "(", "returnCode", ",", "endpointURL", ")", ")", "if", "debug", ":", "return", "(", "returnCode", ",", "endpointURL", ",", "debugOutput", ")", "else", ":", "return", "(", "returnCode", ",", "endpointURL", ")" ]
Discover any WebMention endpoint for a given URL. :param url: URL to discover WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout: float :param request: optional Requests request object to avoid another GET :rtype: tuple (status_code, URL, [debug])
[ "Discover", "any", "WebMention", "endpoint", "for", "a", "given", "URL", "." ]
train
https://github.com/bear/ronkyuu/blob/91a05fbe220b661760467b7b3d3d780a9ba28afa/ronkyuu/webmention.py#L134-L185
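Typical usage with a hypothetical URL; note that a relative endpoint is resolved against the page URL via urljoin before being returned:

from ronkyuu.webmention import discoverEndpoint

status, endpoint = discoverEndpoint('https://example.com/a-post', timeout=5)
if status == 200 and endpoint is not None:
    print('WebMention endpoint:', endpoint)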
bear/ronkyuu
ronkyuu/webmention.py
sendWebmention
def sendWebmention(sourceURL, targetURL, webmention=None, test_urls=True, vouchDomain=None, headers={}, timeout=None, debug=False): """Send to the :targetURL: a WebMention for the :sourceURL: The WebMention will be discovered if not given in the :webmention: parameter. :param sourceURL: URL that is referencing :targetURL: :param targetURL: URL of mentioned post :param webmention: optional WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout: float :rtype: requests Response object if the WebMention endpoint was valid, otherwise None """ if test_urls: v = URLValidator() v(sourceURL) v(targetURL) debugOutput = [] originalURL = targetURL result = None try: targetRequest = requests.get(targetURL) if targetRequest.status_code == requests.codes.ok: if len(targetRequest.history) > 0: redirect = targetRequest.history[-1] if (redirect.status_code == 301 or redirect.status_code == 302) and 'Location' in redirect.headers: targetURL = urljoin(targetURL, redirect.headers['Location']) debugOutput.append('targetURL redirected: %s' % targetURL) if webmention is None: wStatus, wUrl = discoverEndpoint(targetURL, headers=headers, timeout=timeout, request=targetRequest) else: wStatus = 200 wUrl = webmention debugOutput.append('endpointURL: %s %s' % (wStatus, wUrl)) if wStatus == requests.codes.ok and wUrl is not None: if test_urls: v(wUrl) payload = {'source': sourceURL, 'target': originalURL} if vouchDomain is not None: payload['vouch'] = vouchDomain try: result = requests.post(wUrl, data=payload, headers=headers, timeout=timeout) debugOutput.append('POST %s -- %s' % (wUrl, result.status_code)) if result.status_code == 405 and len(result.history) > 0: redirect = result.history[-1] if redirect.status_code == 301 and 'Location' in redirect.headers: result = requests.post(redirect.headers['Location'], data=payload, headers=headers, timeout=timeout) debugOutput.append('redirected POST %s -- %s' % (redirect.headers['Location'], result.status_code)) except Exception as e: result = None except (requests.exceptions.RequestException, requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.URLRequired, requests.exceptions.TooManyRedirects, requests.exceptions.Timeout): debugOutput.append('exception during GET request') result = None return result
python
def sendWebmention(sourceURL, targetURL, webmention=None, test_urls=True, vouchDomain=None, headers={}, timeout=None, debug=False): """Send to the :targetURL: a WebMention for the :sourceURL: The WebMention will be discovered if not given in the :webmention: parameter. :param sourceURL: URL that is referencing :targetURL: :param targetURL: URL of mentioned post :param webmention: optional WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout: float :rtype: requests Response object if the WebMention endpoint was valid, otherwise None """ if test_urls: v = URLValidator() v(sourceURL) v(targetURL) debugOutput = [] originalURL = targetURL result = None try: targetRequest = requests.get(targetURL) if targetRequest.status_code == requests.codes.ok: if len(targetRequest.history) > 0: redirect = targetRequest.history[-1] if (redirect.status_code == 301 or redirect.status_code == 302) and 'Location' in redirect.headers: targetURL = urljoin(targetURL, redirect.headers['Location']) debugOutput.append('targetURL redirected: %s' % targetURL) if webmention is None: wStatus, wUrl = discoverEndpoint(targetURL, headers=headers, timeout=timeout, request=targetRequest) else: wStatus = 200 wUrl = webmention debugOutput.append('endpointURL: %s %s' % (wStatus, wUrl)) if wStatus == requests.codes.ok and wUrl is not None: if test_urls: v(wUrl) payload = {'source': sourceURL, 'target': originalURL} if vouchDomain is not None: payload['vouch'] = vouchDomain try: result = requests.post(wUrl, data=payload, headers=headers, timeout=timeout) debugOutput.append('POST %s -- %s' % (wUrl, result.status_code)) if result.status_code == 405 and len(result.history) > 0: redirect = result.history[-1] if redirect.status_code == 301 and 'Location' in redirect.headers: result = requests.post(redirect.headers['Location'], data=payload, headers=headers, timeout=timeout) debugOutput.append('redirected POST %s -- %s' % (redirect.headers['Location'], result.status_code)) except Exception as e: result = None except (requests.exceptions.RequestException, requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.URLRequired, requests.exceptions.TooManyRedirects, requests.exceptions.Timeout): debugOutput.append('exception during GET request') result = None return result
[ "def", "sendWebmention", "(", "sourceURL", ",", "targetURL", ",", "webmention", "=", "None", ",", "test_urls", "=", "True", ",", "vouchDomain", "=", "None", ",", "headers", "=", "{", "}", ",", "timeout", "=", "None", ",", "debug", "=", "False", ")", ":", "if", "test_urls", ":", "v", "=", "URLValidator", "(", ")", "v", "(", "sourceURL", ")", "v", "(", "targetURL", ")", "debugOutput", "=", "[", "]", "originalURL", "=", "targetURL", "try", ":", "targetRequest", "=", "requests", ".", "get", "(", "targetURL", ")", "if", "targetRequest", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "if", "len", "(", "targetRequest", ".", "history", ")", ">", "0", ":", "redirect", "=", "targetRequest", ".", "history", "[", "-", "1", "]", "if", "(", "redirect", ".", "status_code", "==", "301", "or", "redirect", ".", "status_code", "==", "302", ")", "and", "'Location'", "in", "redirect", ".", "headers", ":", "targetURL", "=", "urljoin", "(", "targetURL", ",", "redirect", ".", "headers", "[", "'Location'", "]", ")", "debugOutput", ".", "append", "(", "'targetURL redirected: %s'", "%", "targetURL", ")", "if", "webmention", "is", "None", ":", "wStatus", ",", "wUrl", "=", "discoverEndpoint", "(", "targetURL", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ",", "request", "=", "targetRequest", ")", "else", ":", "wStatus", "=", "200", "wUrl", "=", "webmention", "debugOutput", ".", "append", "(", "'endpointURL: %s %s'", "%", "(", "wStatus", ",", "wUrl", ")", ")", "if", "wStatus", "==", "requests", ".", "codes", ".", "ok", "and", "wUrl", "is", "not", "None", ":", "if", "test_urls", ":", "v", "(", "wUrl", ")", "payload", "=", "{", "'source'", ":", "sourceURL", ",", "'target'", ":", "originalURL", "}", "if", "vouchDomain", "is", "not", "None", ":", "payload", "[", "'vouch'", "]", "=", "vouchDomain", "try", ":", "result", "=", "requests", ".", "post", "(", "wUrl", ",", "data", "=", "payload", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ")", "debugOutput", ".", "append", "(", "'POST %s -- %s'", "%", "(", "wUrl", ",", "result", ".", "status_code", ")", ")", "if", "result", ".", "status_code", "==", "405", "and", "len", "(", "result", ".", "history", ")", ">", "0", ":", "redirect", "=", "result", ".", "history", "[", "-", "1", "]", "if", "redirect", ".", "status_code", "==", "301", "and", "'Location'", "in", "redirect", ".", "headers", ":", "result", "=", "requests", ".", "post", "(", "redirect", ".", "headers", "[", "'Location'", "]", ",", "data", "=", "payload", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ")", "debugOutput", ".", "append", "(", "'redirected POST %s -- %s'", "%", "(", "redirect", ".", "headers", "[", "'Location'", "]", ",", "result", ".", "status_code", ")", ")", "except", "Exception", "as", "e", ":", "result", "=", "None", "except", "(", "requests", ".", "exceptions", ".", "RequestException", ",", "requests", ".", "exceptions", ".", "ConnectionError", ",", "requests", ".", "exceptions", ".", "HTTPError", ",", "requests", ".", "exceptions", ".", "URLRequired", ",", "requests", ".", "exceptions", ".", "TooManyRedirects", ",", "requests", ".", "exceptions", ".", "Timeout", ")", ":", "debugOutput", ".", "append", "(", "'exception during GET request'", ")", "result", "=", "None", "return", "result" ]
Send a WebMention for the :sourceURL: to the :targetURL:. The WebMention endpoint will be discovered if not given in the :webmention: parameter. :param sourceURL: URL that is referencing :targetURL: :param targetURL: URL of mentioned post :param webmention: optional WebMention endpoint :param test_urls: optional flag to test URLs for validation :param vouchDomain: optional domain to include as the Vouch value in the payload :param headers: optional headers to send with any web requests :type headers dict :param timeout: optional timeout for web requests :type timeout float :rtype: HTTP response object if the WebMention endpoint was valid, otherwise None
[ "Send", "to", "the", ":", "targetURL", ":", "a", "WebMention", "for", "the", ":", "sourceURL", ":" ]
train
https://github.com/bear/ronkyuu/blob/91a05fbe220b661760467b7b3d3d780a9ba28afa/ronkyuu/webmention.py#L187-L249
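A minimal usage sketch for sendWebmention; the URLs below are hypothetical placeholders, and the import path is taken from the record's func_code_url (ronkyuu/webmention.py):

from ronkyuu.webmention import sendWebmention

# Hypothetical URLs -- replace with a real source post and mention target.
response = sendWebmention('https://example.com/my-post',
                          'https://other.example/their-post',
                          timeout=10)
if response is not None and response.status_code in (200, 201, 202):
    print('WebMention accepted:', response.status_code)
else:
    print('WebMention failed or no endpoint was discovered')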
bear/ronkyuu
ronkyuu/tools.py
parse_link_header
def parse_link_header(link): """takes the link header as a string and returns a dictionary with rel values as keys and urls as values :param link: link header as a string :rtype: dictionary {rel_name: rel_value} """ rel_dict = {} for rels in link.split(','): rel_break = quoted_split(rels, ';') try: rel_url = re.search('<(.+?)>', rel_break[0]).group(1) rel_names = quoted_split(rel_break[1], '=')[-1] if rel_names.startswith('"') and rel_names.endswith('"'): rel_names = rel_names[1:-1] for name in rel_names.split(): rel_dict[name] = rel_url except (AttributeError, IndexError): pass return rel_dict
python
def parse_link_header(link): """takes the link header as a string and returns a dictionary with rel values as keys and urls as values :param link: link header as a string :rtype: dictionary {rel_name: rel_value} """ rel_dict = {} for rels in link.split(','): rel_break = quoted_split(rels, ';') try: rel_url = re.search('<(.+?)>', rel_break[0]).group(1) rel_names = quoted_split(rel_break[1], '=')[-1] if rel_names.startswith('"') and rel_names.endswith('"'): rel_names = rel_names[1:-1] for name in rel_names.split(): rel_dict[name] = rel_url except (AttributeError, IndexError): pass return rel_dict
[ "def", "parse_link_header", "(", "link", ")", ":", "rel_dict", "=", "{", "}", "for", "rels", "in", "link", ".", "split", "(", "','", ")", ":", "rel_break", "=", "quoted_split", "(", "rels", ",", "';'", ")", "try", ":", "rel_url", "=", "re", ".", "search", "(", "'<(.+?)>'", ",", "rel_break", "[", "0", "]", ")", ".", "group", "(", "1", ")", "rel_names", "=", "quoted_split", "(", "rel_break", "[", "1", "]", ",", "'='", ")", "[", "-", "1", "]", "if", "rel_names", ".", "startswith", "(", "'\"'", ")", "and", "rel_names", ".", "endswith", "(", "'\"'", ")", ":", "rel_names", "=", "rel_names", "[", "1", ":", "-", "1", "]", "for", "name", "in", "rel_names", ".", "split", "(", ")", ":", "rel_dict", "[", "name", "]", "=", "rel_url", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "pass", "return", "rel_dict" ]
Takes the Link header as a string and returns a dictionary with rel values as keys and URLs as values. :param link: Link header as a string :rtype: dictionary {rel_name: rel_value}
[ "takes", "the", "link", "header", "as", "a", "string", "and", "returns", "a", "dictionary", "with", "rel", "values", "as", "keys", "and", "urls", "as", "values", ":", "param", "link", ":", "link", "header", "as", "a", "string", ":", "rtype", ":", "dictionary", "{", "rel_name", ":", "rel_value", "}" ]
train
https://github.com/bear/ronkyuu/blob/91a05fbe220b661760467b7b3d3d780a9ba28afa/ronkyuu/tools.py#L86-L104
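A quick sketch of parse_link_header on a typical Link header; the endpoint URL is made up:

from ronkyuu.tools import parse_link_header

header = '<https://example.com/wm>; rel="webmention", <https://example.com/>; rel="self"'
rels = parse_link_header(header)
# rels == {'webmention': 'https://example.com/wm', 'self': 'https://example.com/'}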
bear/ronkyuu
ronkyuu/relme.py
findRelMe
def findRelMe(sourceURL): """Find all <a /> elements in the given html for a post. If any have an href attribute that is rel="me" then include it in the result. :param sourceURL: the URL for the post we are scanning :rtype: dictionary of RelMe references """ r = requests.get(sourceURL) result = {'status': r.status_code, 'headers': r.headers, 'history': r.history, 'content': r.text, 'relme': [], 'url': sourceURL } if r.status_code == requests.codes.ok: dom = BeautifulSoup(r.text, _html_parser) for link in dom.find_all('a', rel='me'): rel = link.get('rel') href = link.get('href') if rel is not None and href is not None: url = urlparse(href) if url is not None and url.scheme in ('http', 'https'): result['relme'].append(cleanURL(href)) return result
python
def findRelMe(sourceURL): """Find all <a /> elements in the given html for a post. If any have an href attribute that is rel="me" then include it in the result. :param sourceURL: the URL for the post we are scanning :rtype: dictionary of RelMe references """ r = requests.get(sourceURL) result = {'status': r.status_code, 'headers': r.headers, 'history': r.history, 'content': r.text, 'relme': [], 'url': sourceURL } if r.status_code == requests.codes.ok: dom = BeautifulSoup(r.text, _html_parser) for link in dom.find_all('a', rel='me'): rel = link.get('rel') href = link.get('href') if rel is not None and href is not None: url = urlparse(href) if url is not None and url.scheme in ('http', 'https'): result['relme'].append(cleanURL(href)) return result
[ "def", "findRelMe", "(", "sourceURL", ")", ":", "r", "=", "requests", ".", "get", "(", "sourceURL", ")", "result", "=", "{", "'status'", ":", "r", ".", "status_code", ",", "'headers'", ":", "r", ".", "headers", ",", "'history'", ":", "r", ".", "history", ",", "'content'", ":", "r", ".", "text", ",", "'relme'", ":", "[", "]", ",", "'url'", ":", "sourceURL", "}", "if", "r", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "dom", "=", "BeautifulSoup", "(", "r", ".", "text", ",", "_html_parser", ")", "for", "link", "in", "dom", ".", "find_all", "(", "'a'", ",", "rel", "=", "'me'", ")", ":", "rel", "=", "link", ".", "get", "(", "'rel'", ")", "href", "=", "link", ".", "get", "(", "'href'", ")", "if", "rel", "is", "not", "None", "and", "href", "is", "not", "None", ":", "url", "=", "urlparse", "(", "href", ")", "if", "url", "is", "not", "None", "and", "url", ".", "scheme", "in", "(", "'http'", ",", "'https'", ")", ":", "result", "[", "'relme'", "]", ".", "append", "(", "cleanURL", "(", "href", ")", ")", "return", "result" ]
Find all <a /> elements in the HTML of the given post. If any of them has an href attribute and rel="me", include its URL in the result. :param sourceURL: the URL for the post we are scanning :rtype: dictionary of rel="me" references
[ "Find", "all", "<a", "/", ">", "elements", "in", "the", "given", "html", "for", "a", "post", "." ]
train
https://github.com/bear/ronkyuu/blob/91a05fbe220b661760467b7b3d3d780a9ba28afa/ronkyuu/relme.py#L63-L89
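A usage sketch for findRelMe; the profile URL is a placeholder and the call performs a live HTTP GET:

from ronkyuu.relme import findRelMe

result = findRelMe('https://example.com/profile')  # hypothetical URL, fetched live
if result['status'] == 200:
    for url in result['relme']:
        print(url)  # each http(s) rel="me" link found in the page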
bear/ronkyuu
ronkyuu/relme.py
confirmRelMe
def confirmRelMe(profileURL, resourceURL, profileRelMes=None, resourceRelMes=None): """Determine if a given :resourceURL: is authoritative for the :profileURL: TODO add https/http filtering for those who wish to limit/restrict urls to match fully TODO add code to ensure that each item in the redirect chain is authoritative :param profileURL: URL of the user :param resourceURL: URL of the resource to validate :param profileRelMes: optional list of rel="me" links within the profile URL :param resourceRelMes: optional list of rel="me" links found within resource URL :rtype: True if confirmed """ result = False profile = normalizeURL(profileURL) if profileRelMes is None: profileRelMe = findRelMe(profileURL) profileRelMes = profileRelMe['relme'] if resourceRelMes is None: resourceRelMe = findRelMe(resourceURL) resourceRelMes = resourceRelMe['relme'] for url in resourceRelMes: if profile in (url, normalizeURL(url)): result = True break return result
python
def confirmRelMe(profileURL, resourceURL, profileRelMes=None, resourceRelMes=None): """Determine if a given :resourceURL: is authoritative for the :profileURL: TODO add https/http filtering for those who wish to limit/restrict urls to match fully TODO add code to ensure that each item in the redirect chain is authoritative :param profileURL: URL of the user :param resourceURL: URL of the resource to validate :param profileRelMes: optional list of rel="me" links within the profile URL :param resourceRelMes: optional list of rel="me" links found within resource URL :rtype: True if confirmed """ result = False profile = normalizeURL(profileURL) if profileRelMes is None: profileRelMe = findRelMe(profileURL) profileRelMes = profileRelMe['relme'] if resourceRelMes is None: resourceRelMe = findRelMe(resourceURL) resourceRelMes = resourceRelMe['relme'] for url in resourceRelMes: if profile in (url, normalizeURL(url)): result = True break return result
[ "def", "confirmRelMe", "(", "profileURL", ",", "resourceURL", ",", "profileRelMes", "=", "None", ",", "resourceRelMes", "=", "None", ")", ":", "result", "=", "False", "profile", "=", "normalizeURL", "(", "profileURL", ")", "if", "profileRelMes", "is", "None", ":", "profileRelMe", "=", "findRelMe", "(", "profileURL", ")", "profileRelMes", "=", "profileRelMe", "[", "'relme'", "]", "if", "resourceRelMes", "is", "None", ":", "resourceRelMe", "=", "findRelMe", "(", "resourceURL", ")", "resourceRelMes", "=", "resourceRelMe", "[", "'relme'", "]", "for", "url", "in", "resourceRelMes", ":", "if", "profile", "in", "(", "url", ",", "normalizeURL", "(", "url", ")", ")", ":", "result", "=", "True", "break", "return", "result" ]
Determine if a given :resourceURL: is authoritative for the :profileURL: TODO add https/http filtering for those who wish to limit/restrict urls to match fully TODO add code to ensure that each item in the redirect chain is authoritative :param profileURL: URL of the user :param resourceURL: URL of the resource to validate :param profileRelMes: optional list of rel="me" links within the profile URL :param resourceRelMes: optional list of rel="me" links found within resource URL :rtype: True if confirmed
[ "Determine", "if", "a", "given", ":", "resourceURL", ":", "is", "authoritative", "for", "the", ":", "profileURL", ":" ]
train
https://github.com/bear/ronkyuu/blob/91a05fbe220b661760467b7b3d3d780a9ba28afa/ronkyuu/relme.py#L92-L119
alexras/bread
bread/utils.py
indent_text
def indent_text(string, indent_level=2): """Indent every line of text in a newline-delimited string""" indented_lines = [] indent_spaces = ' ' * indent_level for line in string.split('\n'): indented_lines.append(indent_spaces + line) return '\n'.join(indented_lines)
python
def indent_text(string, indent_level=2): """Indent every line of text in a newline-delimited string""" indented_lines = [] indent_spaces = ' ' * indent_level for line in string.split('\n'): indented_lines.append(indent_spaces + line) return '\n'.join(indented_lines)
[ "def", "indent_text", "(", "string", ",", "indent_level", "=", "2", ")", ":", "indented_lines", "=", "[", "]", "indent_spaces", "=", "' '", "*", "indent_level", "for", "line", "in", "string", ".", "split", "(", "'\\n'", ")", ":", "indented_lines", ".", "append", "(", "indent_spaces", "+", "line", ")", "return", "'\\n'", ".", "join", "(", "indented_lines", ")" ]
Indent every line of text in a newline-delimited string
[ "Indent", "every", "line", "of", "text", "in", "a", "newline", "-", "delimited", "string" ]
train
https://github.com/alexras/bread/blob/2e131380878c07500167fc12685e7bff1df258a4/bread/utils.py#L1-L10
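indent_text in action:

from bread.utils import indent_text

print(indent_text('first\nsecond', indent_level=4))
# Output:
#     first
#     second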
takluyver/requests_download
requests_download.py
download
def download(url, target, headers=None, trackers=()): """Download a file using requests. This is like urllib.request.urlretrieve, but: - requests validates SSL certificates by default - you can pass tracker objects to e.g. display a progress bar or calculate a file hash. """ if headers is None: headers = {} headers.setdefault('user-agent', 'requests_download/'+__version__) r = requests.get(url, headers=headers, stream=True) r.raise_for_status() for t in trackers: t.on_start(r) with open(target, 'wb') as f: for chunk in r.iter_content(chunk_size=8192): if chunk: f.write(chunk) for t in trackers: t.on_chunk(chunk) for t in trackers: t.on_finish()
python
def download(url, target, headers=None, trackers=()): """Download a file using requests. This is like urllib.request.urlretrieve, but: - requests validates SSL certificates by default - you can pass tracker objects to e.g. display a progress bar or calculate a file hash. """ if headers is None: headers = {} headers.setdefault('user-agent', 'requests_download/'+__version__) r = requests.get(url, headers=headers, stream=True) r.raise_for_status() for t in trackers: t.on_start(r) with open(target, 'wb') as f: for chunk in r.iter_content(chunk_size=8192): if chunk: f.write(chunk) for t in trackers: t.on_chunk(chunk) for t in trackers: t.on_finish()
[ "def", "download", "(", "url", ",", "target", ",", "headers", "=", "None", ",", "trackers", "=", "(", ")", ")", ":", "if", "headers", "is", "None", ":", "headers", "=", "{", "}", "headers", ".", "setdefault", "(", "'user-agent'", ",", "'requests_download/'", "+", "__version__", ")", "r", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ",", "stream", "=", "True", ")", "r", ".", "raise_for_status", "(", ")", "for", "t", "in", "trackers", ":", "t", ".", "on_start", "(", "r", ")", "with", "open", "(", "target", ",", "'wb'", ")", "as", "f", ":", "for", "chunk", "in", "r", ".", "iter_content", "(", "chunk_size", "=", "8192", ")", ":", "if", "chunk", ":", "f", ".", "write", "(", "chunk", ")", "for", "t", "in", "trackers", ":", "t", ".", "on_chunk", "(", "chunk", ")", "for", "t", "in", "trackers", ":", "t", ".", "on_finish", "(", ")" ]
Download a file using requests. This is like urllib.request.urlretrieve, but: - requests validates SSL certificates by default - you can pass tracker objects to e.g. display a progress bar or calculate a file hash.
[ "Download", "a", "file", "using", "requests", "." ]
train
https://github.com/takluyver/requests_download/blob/bc0412bb930051af21ab1959402abab883c78e76/requests_download.py#L65-L91
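The tracker protocol is implicit in the loop above: on_start(response), on_chunk(chunk), on_finish(). A minimal custom tracker, with a hypothetical download URL:

from requests_download import download

class ByteCounter:
    """Counts bytes as they arrive, using the three hooks download() calls."""
    def __init__(self):
        self.total = 0

    def on_start(self, response):
        self.total = 0  # reset when the transfer begins

    def on_chunk(self, chunk):
        self.total += len(chunk)

    def on_finish(self):
        print('downloaded %d bytes' % self.total)

download('https://example.com/file.bin', 'file.bin', trackers=(ByteCounter(),))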
alexras/bread
bread/lifecycle.py
write
def write(parsed_obj, spec=None, filename=None): """Writes an object created by `parse` to either a file or a bytearray. If the object doesn't end on a byte boundary, zeroes are appended to it until it does. """ if not isinstance(parsed_obj, BreadStruct): raise ValueError( 'Object to write must be a structure created ' 'by bread.parse') if filename is not None: with open(filename, 'wb') as fp: parsed_obj._data_bits[:parsed_obj._length].tofile(fp) else: return bytearray(parsed_obj._data_bits[:parsed_obj._length].tobytes())
python
def write(parsed_obj, spec=None, filename=None): """Writes an object created by `parse` to either a file or a bytearray. If the object doesn't end on a byte boundary, zeroes are appended to it until it does. """ if not isinstance(parsed_obj, BreadStruct): raise ValueError( 'Object to write must be a structure created ' 'by bread.parse') if filename is not None: with open(filename, 'wb') as fp: parsed_obj._data_bits[:parsed_obj._length].tofile(fp) else: return bytearray(parsed_obj._data_bits[:parsed_obj._length].tobytes())
[ "def", "write", "(", "parsed_obj", ",", "spec", "=", "None", ",", "filename", "=", "None", ")", ":", "if", "not", "isinstance", "(", "parsed_obj", ",", "BreadStruct", ")", ":", "raise", "ValueError", "(", "'Object to write must be a structure created '", "'by bread.parse'", ")", "if", "filename", "is", "not", "None", ":", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "fp", ":", "parsed_obj", ".", "_data_bits", "[", ":", "parsed_obj", ".", "_length", "]", ".", "tofile", "(", "fp", ")", "else", ":", "return", "bytearray", "(", "parsed_obj", ".", "_data_bits", "[", ":", "parsed_obj", ".", "_length", "]", ".", "tobytes", "(", ")", ")" ]
Writes an object created by `parse` to either a file or a bytearray. If the object doesn't end on a byte boundary, zeroes are appended to it until it does.
[ "Writes", "an", "object", "created", "by", "parse", "to", "either", "a", "file", "or", "a", "bytearray", "." ]
train
https://github.com/alexras/bread/blob/2e131380878c07500167fc12685e7bff1df258a4/bread/lifecycle.py#L41-L56
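A hedged round-trip sketch. The spec DSL below (named uint8 fields) is an assumption based on bread's documented style, not taken from this record; verify it against the library before relying on it:

import bread as b

spec = [('version', b.uint8), ('count', b.uint8)]  # assumed spec style
data = b'\x01\x02'                                 # two bytes matching the spec

parsed = b.parse(data, spec)            # counterpart named in the docstring above
raw = b.write(parsed)                   # no filename: returns a bytearray
b.write(parsed, filename='out.bin')     # with filename: writes to disk instead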
audreyr/alotofeffort
alotofeffort/send.py
deploy_file
def deploy_file(file_path, bucket): """ Uploads a file to an S3 bucket, as a public file. """ # Paths look like: # index.html # css/bootstrap.min.css logger.info("Deploying {0}".format(file_path)) # Upload the actual file to file_path k = Key(bucket) k.key = file_path try: k.set_contents_from_filename(file_path) k.set_acl('public-read') except socket.error: logger.warning("Caught socket.error while trying to upload {0}".format( file_path)) msg = "Please file an issue with alotofeffort if you see this," logger.warning(msg) logger.warning("providing as much info as you can.")
python
def deploy_file(file_path, bucket): """ Uploads a file to an S3 bucket, as a public file. """ # Paths look like: # index.html # css/bootstrap.min.css logger.info("Deploying {0}".format(file_path)) # Upload the actual file to file_path k = Key(bucket) k.key = file_path try: k.set_contents_from_filename(file_path) k.set_acl('public-read') except socket.error: logger.warning("Caught socket.error while trying to upload {0}".format( file_path)) msg = "Please file an issue with alotofeffort if you see this," logger.warning(msg) logger.warning("providing as much info as you can.")
[ "def", "deploy_file", "(", "file_path", ",", "bucket", ")", ":", "# Paths look like:", "# index.html", "# css/bootstrap.min.css", "logger", ".", "info", "(", "\"Deploying {0}\"", ".", "format", "(", "file_path", ")", ")", "# Upload the actual file to file_path", "k", "=", "Key", "(", "bucket", ")", "k", ".", "key", "=", "file_path", "try", ":", "k", ".", "set_contents_from_filename", "(", "file_path", ")", "k", ".", "set_acl", "(", "'public-read'", ")", "except", "socket", ".", "error", ":", "logger", ".", "warning", "(", "\"Caught socket.error while trying to upload {0}\"", ".", "format", "(", "file_path", ")", ")", "msg", "=", "\"Please file an issue with alotofeffort if you see this,\"", "logger", ".", "warning", "(", "msg", ")", "logger", ".", "warning", "(", "\"providing as much info as you can.\"", ")" ]
Uploads a file to an S3 bucket as a publicly readable file.
[ "Uploads", "a", "file", "to", "an", "S3", "bucket", "as", "a", "public", "file", "." ]
train
https://github.com/audreyr/alotofeffort/blob/06deca82a70fa9896496fd44c8c6f24707396c50/alotofeffort/send.py#L15-L35
audreyr/alotofeffort
alotofeffort/send.py
deploy
def deploy(www_dir, bucket_name): """ Deploy to the configured S3 bucket. """ # Set up the connection to an S3 bucket. conn = boto.connect_s3() bucket = conn.get_bucket(bucket_name) # Deploy each changed file in www_dir os.chdir(www_dir) for root, dirs, files in os.walk('.'): for f in files: # Use full relative path. Normalize to remove dot. file_path = os.path.normpath(os.path.join(root, f)) if has_changed_since_last_deploy(file_path, bucket): deploy_file(file_path, bucket) else: logger.info("Skipping {0}".format(file_path)) # Make the whole bucket public bucket.set_acl('public-read') # Configure it to be a website bucket.configure_website('index.html', 'error.html') # Print the endpoint, so you know the URL msg = "Your website is now live at {0}".format( bucket.get_website_endpoint()) logger.info(msg) logger.info("If you haven't done so yet, point your domain name there!")
python
def deploy(www_dir, bucket_name): """ Deploy to the configured S3 bucket. """ # Set up the connection to an S3 bucket. conn = boto.connect_s3() bucket = conn.get_bucket(bucket_name) # Deploy each changed file in www_dir os.chdir(www_dir) for root, dirs, files in os.walk('.'): for f in files: # Use full relative path. Normalize to remove dot. file_path = os.path.normpath(os.path.join(root, f)) if has_changed_since_last_deploy(file_path, bucket): deploy_file(file_path, bucket) else: logger.info("Skipping {0}".format(file_path)) # Make the whole bucket public bucket.set_acl('public-read') # Configure it to be a website bucket.configure_website('index.html', 'error.html') # Print the endpoint, so you know the URL msg = "Your website is now live at {0}".format( bucket.get_website_endpoint()) logger.info(msg) logger.info("If you haven't done so yet, point your domain name there!")
[ "def", "deploy", "(", "www_dir", ",", "bucket_name", ")", ":", "# Set up the connection to an S3 bucket.", "conn", "=", "boto", ".", "connect_s3", "(", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "# Deploy each changed file in www_dir", "os", ".", "chdir", "(", "www_dir", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "'.'", ")", ":", "for", "f", "in", "files", ":", "# Use full relative path. Normalize to remove dot.", "file_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", ")", "if", "has_changed_since_last_deploy", "(", "file_path", ",", "bucket", ")", ":", "deploy_file", "(", "file_path", ",", "bucket", ")", "else", ":", "logger", ".", "info", "(", "\"Skipping {0}\"", ".", "format", "(", "file_path", ")", ")", "# Make the whole bucket public", "bucket", ".", "set_acl", "(", "'public-read'", ")", "# Configure it to be a website", "bucket", ".", "configure_website", "(", "'index.html'", ",", "'error.html'", ")", "# Print the endpoint, so you know the URL", "msg", "=", "\"Your website is now live at {0}\"", ".", "format", "(", "bucket", ".", "get_website_endpoint", "(", ")", ")", "logger", ".", "info", "(", "msg", ")", "logger", ".", "info", "(", "\"If you haven't done so yet, point your domain name there!\"", ")" ]
Deploy to the configured S3 bucket.
[ "Deploy", "to", "the", "configured", "S3", "bucket", "." ]
train
https://github.com/audreyr/alotofeffort/blob/06deca82a70fa9896496fd44c8c6f24707396c50/alotofeffort/send.py#L38-L67
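Calling deploy directly; this is a sketch assuming boto can find AWS credentials in the environment, with a made-up directory and bucket name:

import logging
from alotofeffort.send import deploy  # module path taken from the record above

logging.basicConfig(level=logging.INFO)
deploy('www', 'my-example-bucket')  # directory and bucket name are placeholders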
audreyr/alotofeffort
alotofeffort/send.py
has_changed_since_last_deploy
def has_changed_since_last_deploy(file_path, bucket): """ Checks if a file has changed since the last time it was deployed. :param file_path: Path to file which should be checked. Should be relative from root of bucket. :param bucket_name: Name of S3 bucket to check against. :returns: True if the file has changed, else False. """ msg = "Checking if {0} has changed since last deploy.".format(file_path) logger.debug(msg) with open(file_path) as f: data = f.read() file_md5 = hashlib.md5(data.encode('utf-8')).hexdigest() logger.debug("file_md5 is {0}".format(file_md5)) key = bucket.get_key(file_path) # HACK: Boto's md5 property does not work when the file hasn't been # downloaded. The etag works but will break for multi-part uploaded files. # http://stackoverflow.com/questions/16872679/how-to-programmatically- # get-the-md5-checksum-of-amazon-s3-file-using-boto/17607096#17607096 # Also the double quotes around it must be stripped. Sketchy...boto's fault if key: key_md5 = key.etag.replace('"', '').strip() logger.debug("key_md5 is {0}".format(key_md5)) else: logger.debug("File does not exist in bucket") return True if file_md5 == key_md5: logger.debug("File has not changed.") return False logger.debug("File has changed.") return True
python
def has_changed_since_last_deploy(file_path, bucket): """ Checks if a file has changed since the last time it was deployed. :param file_path: Path to file which should be checked. Should be relative from root of bucket. :param bucket_name: Name of S3 bucket to check against. :returns: True if the file has changed, else False. """ msg = "Checking if {0} has changed since last deploy.".format(file_path) logger.debug(msg) with open(file_path) as f: data = f.read() file_md5 = hashlib.md5(data.encode('utf-8')).hexdigest() logger.debug("file_md5 is {0}".format(file_md5)) key = bucket.get_key(file_path) # HACK: Boto's md5 property does not work when the file hasn't been # downloaded. The etag works but will break for multi-part uploaded files. # http://stackoverflow.com/questions/16872679/how-to-programmatically- # get-the-md5-checksum-of-amazon-s3-file-using-boto/17607096#17607096 # Also the double quotes around it must be stripped. Sketchy...boto's fault if key: key_md5 = key.etag.replace('"', '').strip() logger.debug("key_md5 is {0}".format(key_md5)) else: logger.debug("File does not exist in bucket") return True if file_md5 == key_md5: logger.debug("File has not changed.") return False logger.debug("File has changed.") return True
[ "def", "has_changed_since_last_deploy", "(", "file_path", ",", "bucket", ")", ":", "msg", "=", "\"Checking if {0} has changed since last deploy.\"", ".", "format", "(", "file_path", ")", "logger", ".", "debug", "(", "msg", ")", "with", "open", "(", "file_path", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "file_md5", "=", "hashlib", ".", "md5", "(", "data", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "logger", ".", "debug", "(", "\"file_md5 is {0}\"", ".", "format", "(", "file_md5", ")", ")", "key", "=", "bucket", ".", "get_key", "(", "file_path", ")", "# HACK: Boto's md5 property does not work when the file hasn't been", "# downloaded. The etag works but will break for multi-part uploaded files.", "# http://stackoverflow.com/questions/16872679/how-to-programmatically-", "# get-the-md5-checksum-of-amazon-s3-file-using-boto/17607096#17607096", "# Also the double quotes around it must be stripped. Sketchy...boto's fault", "if", "key", ":", "key_md5", "=", "key", ".", "etag", ".", "replace", "(", "'\"'", ",", "''", ")", ".", "strip", "(", ")", "logger", ".", "debug", "(", "\"key_md5 is {0}\"", ".", "format", "(", "key_md5", ")", ")", "else", ":", "logger", ".", "debug", "(", "\"File does not exist in bucket\"", ")", "return", "True", "if", "file_md5", "==", "key_md5", ":", "logger", ".", "debug", "(", "\"File has not changed.\"", ")", "return", "False", "logger", ".", "debug", "(", "\"File has changed.\"", ")", "return", "True" ]
Checks if a file has changed since the last time it was deployed. :param file_path: Path to file which should be checked. Should be relative from root of bucket. :param bucket: Boto S3 bucket object to check against. :returns: True if the file has changed, else False.
[ "Checks", "if", "a", "file", "has", "changed", "since", "the", "last", "time", "it", "was", "deployed", "." ]
train
https://github.com/audreyr/alotofeffort/blob/06deca82a70fa9896496fd44c8c6f24707396c50/alotofeffort/send.py#L70-L105
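The ETag comparison above relies on single-part uploads, where S3's ETag is the file's MD5 hex digest wrapped in quotes. A standalone sketch of the same check; the file path and ETag value are examples:

import hashlib

def local_md5(path):
    # Mirrors the hashing above: read text, encode, hexdigest.
    with open(path) as f:
        return hashlib.md5(f.read().encode('utf-8')).hexdigest()

etag = '"9e107d9d372bb6826bd81d3542a419d6"'  # example ETag returned by S3
changed = local_md5('index.html') != etag.replace('"', '').strip()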
audreyr/alotofeffort
alotofeffort/main.py
main
def main(): """ Entry point for the package, as defined in setup.py. """ # Log info and above to console logging.basicConfig( format='%(levelname)s: %(message)s', level=logging.INFO) # Get command line input/output arguments msg = 'Instantly deploy static HTML sites to S3 at the command line.' parser = argparse.ArgumentParser(description=msg) parser.add_argument( 'www_dir', help='Directory containing the HTML files for your website.' ) parser.add_argument( 'bucket_name', help='Name of S3 bucket to deploy to, e.g. mybucket.' ) args = parser.parse_args() # Deploy the site to S3! deploy(args.www_dir, args.bucket_name)
python
def main(): """ Entry point for the package, as defined in setup.py. """ # Log info and above to console logging.basicConfig( format='%(levelname)s: %(message)s', level=logging.INFO) # Get command line input/output arguments msg = 'Instantly deploy static HTML sites to S3 at the command line.' parser = argparse.ArgumentParser(description=msg) parser.add_argument( 'www_dir', help='Directory containing the HTML files for your website.' ) parser.add_argument( 'bucket_name', help='Name of S3 bucket to deploy to, e.g. mybucket.' ) args = parser.parse_args() # Deploy the site to S3! deploy(args.www_dir, args.bucket_name)
[ "def", "main", "(", ")", ":", "# Log info and above to console", "logging", ".", "basicConfig", "(", "format", "=", "'%(levelname)s: %(message)s'", ",", "level", "=", "logging", ".", "INFO", ")", "# Get command line input/output arguments", "msg", "=", "'Instantly deploy static HTML sites to S3 at the command line.'", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "msg", ")", "parser", ".", "add_argument", "(", "'www_dir'", ",", "help", "=", "'Directory containing the HTML files for your website.'", ")", "parser", ".", "add_argument", "(", "'bucket_name'", ",", "help", "=", "'Name of S3 bucket to deploy to, e.g. mybucket.'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# Deploy the site to S3!", "deploy", "(", "args", ".", "www_dir", ",", "args", ".", "bucket_name", ")" ]
Entry point for the package, as defined in setup.py.
[ "Entry", "point", "for", "the", "package", "as", "defined", "in", "setup", ".", "py", "." ]
train
https://github.com/audreyr/alotofeffort/blob/06deca82a70fa9896496fd44c8c6f24707396c50/alotofeffort/main.py#L9-L30
rainmanwy/robotframework-SikuliLibrary
src/python/sikuli.py
SikuliLibrary.start_sikuli_process
def start_sikuli_process(self, port=None): """ This keyword is used to start sikuli java process. If library is inited with mode "OLD", sikuli java process is started automatically. If library is inited with mode "NEW", this keyword should be used. :param port: port of sikuli java process, if value is None or 0, a random free port will be used :return: None """ if port is None or int(port) == 0: port = self._get_free_tcp_port() self.port = port start_retries = 0 started = False while start_retries < 5: try: self._start_sikuli_java_process() except RuntimeError as err: print('error........%s' % err) if self.process: self.process.terminate_process() self.port = self._get_free_tcp_port() start_retries += 1 continue started = True break if not started: raise RuntimeError('Start sikuli java process failed!') self.remote = self._connect_remote_library()
python
def start_sikuli_process(self, port=None): """ This keyword is used to start sikuli java process. If library is inited with mode "OLD", sikuli java process is started automatically. If library is inited with mode "NEW", this keyword should be used. :param port: port of sikuli java process, if value is None or 0, a random free port will be used :return: None """ if port is None or int(port) == 0: port = self._get_free_tcp_port() self.port = port start_retries = 0 started = False while start_retries < 5: try: self._start_sikuli_java_process() except RuntimeError as err: print('error........%s' % err) if self.process: self.process.terminate_process() self.port = self._get_free_tcp_port() start_retries += 1 continue started = True break if not started: raise RuntimeError('Start sikuli java process failed!') self.remote = self._connect_remote_library()
[ "def", "start_sikuli_process", "(", "self", ",", "port", "=", "None", ")", ":", "if", "port", "is", "None", "or", "int", "(", "port", ")", "==", "0", ":", "port", "=", "self", ".", "_get_free_tcp_port", "(", ")", "self", ".", "port", "=", "port", "start_retries", "=", "0", "started", "=", "False", "while", "start_retries", "<", "5", ":", "try", ":", "self", ".", "_start_sikuli_java_process", "(", ")", "except", "RuntimeError", "as", "err", ":", "print", "(", "'error........%s'", "%", "err", ")", "if", "self", ".", "process", ":", "self", ".", "process", ".", "terminate_process", "(", ")", "self", ".", "port", "=", "self", ".", "_get_free_tcp_port", "(", ")", "start_retries", "+=", "1", "continue", "started", "=", "True", "break", "if", "not", "started", ":", "raise", "RuntimeError", "(", "'Start sikuli java process failed!'", ")", "self", ".", "remote", "=", "self", ".", "_connect_remote_library", "(", ")" ]
This keyword is used to start the sikuli java process. If the library is initialized with mode "OLD", the sikuli java process is started automatically. If the library is initialized with mode "NEW", this keyword should be used. :param port: port of the sikuli java process; if the value is None or 0, a random free port will be used :return: None
[ "This", "keyword", "is", "used", "to", "start", "sikuli", "java", "process", ".", "If", "library", "is", "inited", "with", "mode", "OLD", "sikuli", "java", "process", "is", "started", "automatically", ".", "If", "library", "is", "inited", "with", "mode", "NEW", "this", "keyword", "should", "be", "used", "." ]
train
https://github.com/rainmanwy/robotframework-SikuliLibrary/blob/992874dd96b139246a62fb07ec763e0a4caffad8/src/python/sikuli.py#L65-L93
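_get_free_tcp_port is not shown in this record; a common way to implement it (an assumption, not necessarily the library's actual helper) is to bind to port 0 and let the OS pick:

import socket

def get_free_tcp_port():
    # Bind to port 0 so the OS assigns an ephemeral free port, then release it.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('127.0.0.1', 0))
        return s.getsockname()[1]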
JamesRitchie/django-rest-framework-expiring-tokens
rest_framework_expiring_authtoken/views.py
ObtainExpiringAuthToken.post
def post(self, request): """Respond to POSTed username/password with token.""" serializer = AuthTokenSerializer(data=request.data) if serializer.is_valid(): token, _ = ExpiringToken.objects.get_or_create( user=serializer.validated_data['user'] ) if token.expired(): # If the token is expired, generate a new one. token.delete() token = ExpiringToken.objects.create( user=serializer.validated_data['user'] ) data = {'token': token.key} return Response(data) return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
python
def post(self, request): """Respond to POSTed username/password with token.""" serializer = AuthTokenSerializer(data=request.data) if serializer.is_valid(): token, _ = ExpiringToken.objects.get_or_create( user=serializer.validated_data['user'] ) if token.expired(): # If the token is expired, generate a new one. token.delete() token = ExpiringToken.objects.create( user=serializer.validated_data['user'] ) data = {'token': token.key} return Response(data) return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
[ "def", "post", "(", "self", ",", "request", ")", ":", "serializer", "=", "AuthTokenSerializer", "(", "data", "=", "request", ".", "data", ")", "if", "serializer", ".", "is_valid", "(", ")", ":", "token", ",", "_", "=", "ExpiringToken", ".", "objects", ".", "get_or_create", "(", "user", "=", "serializer", ".", "validated_data", "[", "'user'", "]", ")", "if", "token", ".", "expired", "(", ")", ":", "# If the token is expired, generate a new one.", "token", ".", "delete", "(", ")", "token", "=", "ExpiringToken", ".", "objects", ".", "create", "(", "user", "=", "serializer", ".", "validated_data", "[", "'user'", "]", ")", "data", "=", "{", "'token'", ":", "token", ".", "key", "}", "return", "Response", "(", "data", ")", "return", "Response", "(", "serializer", ".", "errors", ",", "status", "=", "HTTP_400_BAD_REQUEST", ")" ]
Respond to POSTed username/password with token.
[ "Respond", "to", "POSTed", "username", "/", "password", "with", "token", "." ]
train
https://github.com/JamesRitchie/django-rest-framework-expiring-tokens/blob/e62f1f92a621575174172e970da624d367ac0cf6/rest_framework_expiring_authtoken/views.py#L20-L39
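Exercising the view from a client; the endpoint path depends on how ObtainExpiringAuthToken is routed in urls.py, so the URL and credentials below are placeholders:

import requests

resp = requests.post('https://api.example.com/auth/token/',
                     data={'username': 'alice', 'password': 'secret'})
if resp.status_code == 200:
    token = resp.json()['token']  # key name matches the view's response payload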
JamesRitchie/django-rest-framework-expiring-tokens
rest_framework_expiring_authtoken/settings.py
TokenSettings.EXPIRING_TOKEN_LIFESPAN
def EXPIRING_TOKEN_LIFESPAN(self): """ Return the allowed lifespan of a token as a TimeDelta object. Defaults to 30 days. """ try: val = settings.EXPIRING_TOKEN_LIFESPAN except AttributeError: val = timedelta(days=30) return val
python
def EXPIRING_TOKEN_LIFESPAN(self): """ Return the allowed lifespan of a token as a TimeDelta object. Defaults to 30 days. """ try: val = settings.EXPIRING_TOKEN_LIFESPAN except AttributeError: val = timedelta(days=30) return val
[ "def", "EXPIRING_TOKEN_LIFESPAN", "(", "self", ")", ":", "try", ":", "val", "=", "settings", ".", "EXPIRING_TOKEN_LIFESPAN", "except", "AttributeError", ":", "val", "=", "timedelta", "(", "days", "=", "30", ")", "return", "val" ]
Return the allowed lifespan of a token as a datetime.timedelta object. Defaults to 30 days.
[ "Return", "the", "allowed", "lifespan", "of", "a", "token", "as", "a", "TimeDelta", "object", "." ]
train
https://github.com/JamesRitchie/django-rest-framework-expiring-tokens/blob/e62f1f92a621575174172e970da624d367ac0cf6/rest_framework_expiring_authtoken/settings.py#L16-L27
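Overriding the default in a Django settings module:

# settings.py -- shorten the token lifespan from the 30-day default to 2 hours.
from datetime import timedelta

EXPIRING_TOKEN_LIFESPAN = timedelta(hours=2)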
JamesRitchie/django-rest-framework-expiring-tokens
rest_framework_expiring_authtoken/models.py
ExpiringToken.expired
def expired(self): """Return boolean indicating token expiration.""" now = timezone.now() if self.created < now - token_settings.EXPIRING_TOKEN_LIFESPAN: return True return False
python
def expired(self): """Return boolean indicating token expiration.""" now = timezone.now() if self.created < now - token_settings.EXPIRING_TOKEN_LIFESPAN: return True return False
[ "def", "expired", "(", "self", ")", ":", "now", "=", "timezone", ".", "now", "(", ")", "if", "self", ".", "created", "<", "now", "-", "token_settings", ".", "EXPIRING_TOKEN_LIFESPAN", ":", "return", "True", "return", "False" ]
Return boolean indicating token expiration.
[ "Return", "boolean", "indicating", "token", "expiration", "." ]
train
https://github.com/JamesRitchie/django-rest-framework-expiring-tokens/blob/e62f1f92a621575174172e970da624d367ac0cf6/rest_framework_expiring_authtoken/models.py#L21-L26
commonsense/metanl
metanl/extprocess.py
unicode_is_punctuation
def unicode_is_punctuation(text): """ Test if a token is made entirely of Unicode characters of the following classes: - P: punctuation - S: symbols - Z: separators - M: combining marks - C: control characters >>> unicode_is_punctuation('word') False >>> unicode_is_punctuation('。') True >>> unicode_is_punctuation('-') True >>> unicode_is_punctuation('-3') False >>> unicode_is_punctuation('あ') False """ for char in str_func(text): category = unicodedata.category(char)[0] if category not in 'PSZMC': return False return True
python
def unicode_is_punctuation(text): """ Test if a token is made entirely of Unicode characters of the following classes: - P: punctuation - S: symbols - Z: separators - M: combining marks - C: control characters >>> unicode_is_punctuation('word') False >>> unicode_is_punctuation('。') True >>> unicode_is_punctuation('-') True >>> unicode_is_punctuation('-3') False >>> unicode_is_punctuation('あ') False """ for char in str_func(text): category = unicodedata.category(char)[0] if category not in 'PSZMC': return False return True
[ "def", "unicode_is_punctuation", "(", "text", ")", ":", "for", "char", "in", "str_func", "(", "text", ")", ":", "category", "=", "unicodedata", ".", "category", "(", "char", ")", "[", "0", "]", "if", "category", "not", "in", "'PSZMC'", ":", "return", "False", "return", "True" ]
Test if a token is made entirely of Unicode characters of the following classes: - P: punctuation - S: symbols - Z: separators - M: combining marks - C: control characters >>> unicode_is_punctuation('word') False >>> unicode_is_punctuation('。') True >>> unicode_is_punctuation('-') True >>> unicode_is_punctuation('-3') False >>> unicode_is_punctuation('あ') False
[ "Test", "if", "a", "token", "is", "made", "entirely", "of", "Unicode", "characters", "of", "the", "following", "classes", ":" ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L246-L272
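The function keys on the first letter of each Unicode category; unicodedata makes the doctest cases easy to inspect:

import unicodedata

for ch in '。-3あ':
    print(ch, unicodedata.category(ch))
# 。 Po   (punctuation)  -> counts as punctuation
# -  Pd   (punctuation)  -> counts as punctuation
# 3  Nd   (number)       -> fails the 'PSZMC' test
# あ Lo   (letter)       -> fails the 'PSZMC' test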
commonsense/metanl
metanl/extprocess.py
ProcessWrapper.process
def process(self): """ Store the actual process in _process. If it doesn't exist yet, create it. """ if hasattr(self, '_process'): return self._process else: self._process = self._get_process() return self._process
python
def process(self): """ Store the actual process in _process. If it doesn't exist yet, create it. """ if hasattr(self, '_process'): return self._process else: self._process = self._get_process() return self._process
[ "def", "process", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_process'", ")", ":", "return", "self", ".", "_process", "else", ":", "self", ".", "_process", "=", "self", ".", "_get_process", "(", ")", "return", "self", ".", "_process" ]
Return the wrapped process stored in _process, creating it on first access if it doesn't exist yet.
[ "Store", "the", "actual", "process", "in", "_process", ".", "If", "it", "doesn", "t", "exist", "yet", "create", "it", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L52-L61
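The hand-rolled caching above is equivalent to functools.cached_property on Python 3.8+; a sketch of the same pattern:

import functools

class ProcessHolder:
    def _get_process(self):
        return object()  # stand-in for the real subprocess creation

    @functools.cached_property
    def process(self):
        # Computed once on first access, then cached on the instance.
        return self._get_process()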
commonsense/metanl
metanl/extprocess.py
ProcessWrapper._get_process
def _get_process(self): """ Create the process by running the specified command. """ command = self._get_command() return subprocess.Popen(command, bufsize=-1, close_fds=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
python
def _get_process(self): """ Create the process by running the specified command. """ command = self._get_command() return subprocess.Popen(command, bufsize=-1, close_fds=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
[ "def", "_get_process", "(", "self", ")", ":", "command", "=", "self", ".", "_get_command", "(", ")", "return", "subprocess", ".", "Popen", "(", "command", ",", "bufsize", "=", "-", "1", ",", "close_fds", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "subprocess", ".", "PIPE", ")" ]
Create the process by running the specified command.
[ "Create", "the", "process", "by", "running", "the", "specified", "command", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L70-L77
commonsense/metanl
metanl/extprocess.py
ProcessWrapper.tokenize_list
def tokenize_list(self, text): """ Split a text into separate words. """ return [self.get_record_token(record) for record in self.analyze(text)]
python
def tokenize_list(self, text): """ Split a text into separate words. """ return [self.get_record_token(record) for record in self.analyze(text)]
[ "def", "tokenize_list", "(", "self", ",", "text", ")", ":", "return", "[", "self", ".", "get_record_token", "(", "record", ")", "for", "record", "in", "self", ".", "analyze", "(", "text", ")", "]" ]
Split a text into separate words.
[ "Split", "a", "text", "into", "separate", "words", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L117-L121
commonsense/metanl
metanl/extprocess.py
ProcessWrapper.is_stopword
def is_stopword(self, text): """ Determine whether a single word is a stopword, or whether a short phrase is made entirely of stopwords, disregarding context. Use of this function should be avoided; it's better to give the text in context and let the process determine which words are the stopwords. """ found_content_word = False for record in self.analyze(text): if not self.is_stopword_record(record): found_content_word = True break return not found_content_word
python
def is_stopword(self, text): """ Determine whether a single word is a stopword, or whether a short phrase is made entirely of stopwords, disregarding context. Use of this function should be avoided; it's better to give the text in context and let the process determine which words are the stopwords. """ found_content_word = False for record in self.analyze(text): if not self.is_stopword_record(record): found_content_word = True break return not found_content_word
[ "def", "is_stopword", "(", "self", ",", "text", ")", ":", "found_content_word", "=", "False", "for", "record", "in", "self", ".", "analyze", "(", "text", ")", ":", "if", "not", "self", ".", "is_stopword_record", "(", "record", ")", ":", "found_content_word", "=", "True", "break", "return", "not", "found_content_word" ]
Determine whether a single word is a stopword, or whether a short phrase is made entirely of stopwords, disregarding context. Use of this function should be avoided; it's better to give the text in context and let the process determine which words are the stopwords.
[ "Determine", "whether", "a", "single", "word", "is", "a", "stopword", "or", "whether", "a", "short", "phrase", "is", "made", "entirely", "of", "stopwords", "disregarding", "context", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L144-L157
commonsense/metanl
metanl/extprocess.py
ProcessWrapper.normalize_list
def normalize_list(self, text, cache=None): """ Get a canonical list representation of text, with words separated and reduced to their base forms. TODO: use the cache. """ words = [] analysis = self.analyze(text) for record in analysis: if not self.is_stopword_record(record): words.append(self.get_record_root(record)) if not words: # Don't discard stopwords if that's all you've got words = [self.get_record_token(record) for record in analysis] return words
python
def normalize_list(self, text, cache=None): """ Get a canonical list representation of text, with words separated and reduced to their base forms. TODO: use the cache. """ words = [] analysis = self.analyze(text) for record in analysis: if not self.is_stopword_record(record): words.append(self.get_record_root(record)) if not words: # Don't discard stopwords if that's all you've got words = [self.get_record_token(record) for record in analysis] return words
[ "def", "normalize_list", "(", "self", ",", "text", ",", "cache", "=", "None", ")", ":", "words", "=", "[", "]", "analysis", "=", "self", ".", "analyze", "(", "text", ")", "for", "record", "in", "analysis", ":", "if", "not", "self", ".", "is_stopword_record", "(", "record", ")", ":", "words", ".", "append", "(", "self", ".", "get_record_root", "(", "record", ")", ")", "if", "not", "words", ":", "# Don't discard stopwords if that's all you've got", "words", "=", "[", "self", ".", "get_record_token", "(", "record", ")", "for", "record", "in", "analysis", "]", "return", "words" ]
Get a canonical list representation of text, with words separated and reduced to their base forms. TODO: use the cache.
[ "Get", "a", "canonical", "list", "representation", "of", "text", "with", "words", "separated", "and", "reduced", "to", "their", "base", "forms", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L171-L186