code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
# whether the script is visible self.grid_script._widget.setVisible(self.button_script.get_value()) # whether we should be able to edit it. if not self.combo_autoscript.get_index()==0: self.script.disable() else: self.script.enable()
def _synchronize_controls(self)
Updates the gui based on button configs.
11.460652
10.378679
1.10425
# multi plot, right number of plots and curves = great! if self.button_multi.is_checked() \ and len(self._curves) == len(self.plot_widgets) \ and len(self._curves) == n: return # single plot, right number of curves = great! if not self.button_multi.is_checked() \ and len(self.plot_widgets) == 1 \ and len(self._curves) == n: return # time to rebuild! # don't show the plots as they are built self.grid_plot.block_events() # make sure the number of curves is on target while len(self._curves) > n: self._curves.pop(-1) while len(self._curves) < n: self._curves.append(_g.PlotCurveItem(pen = (len(self._curves), n))) # figure out the target number of plots if self.button_multi.is_checked(): n_plots = n else: n_plots = min(n,1) # clear the plots while len(self.plot_widgets): # pop the last plot widget and remove all items p = self.plot_widgets.pop(-1) p.clear() # remove it from the grid self.grid_plot.remove_object(p) # add new plots for i in range(n_plots): self.plot_widgets.append(self.grid_plot.place_object(_g.PlotWidget(), 0, i, alignment=0)) # loop over the curves and add them to the plots for i in range(n): self.plot_widgets[min(i,len(self.plot_widgets)-1)].addItem(self._curves[i]) # loop over the ROI's and add them if self.ROIs is not None: for i in range(len(self.ROIs)): # get the ROIs for this plot ROIs = self.ROIs[i] if not _spinmob.fun.is_iterable(ROIs): ROIs = [ROIs] # loop over the ROIs for this plot for ROI in ROIs: # determine which plot to add the ROI to m = min(i, len(self.plot_widgets)-1) # add the ROI to the appropriate plot if m>=0: self.plot_widgets[m].addItem(ROI) # show the plots self.grid_plot.unblock_events()
def _set_number_of_plots(self, n)
Adjusts number of plots & curves to the desired value the gui.
3.331403
3.28873
1.012976
# no axes to link! if len(self.plot_widgets) <= 1: return # get the first plotItem a = self.plot_widgets[0].plotItem.getViewBox() # now loop through all the axes and link / unlink the axes for n in range(1,len(self.plot_widgets)): # Get one of the others b = self.plot_widgets[n].plotItem.getViewBox() # link the axis, but only if it isn't already if self.button_link_x.is_checked() and b.linkedView(b.XAxis) == None: b.linkView(b.XAxis, a) # Otherwise, unlink the guy, but only if it's linked to begin with elif not self.button_link_x.is_checked() and not b.linkedView(b.XAxis) == None: b.linkView(b.XAxis, None)
def _update_linked_axes(self)
Loops over the axes and links / unlinks them.
4.430912
4.23739
1.04567
if name in self.const_specs: return self.const_specs[name].link(self) if '.' in name: include_name, component = name.split('.', 1) if include_name in self.included_scopes: return self.included_scopes[include_name].resolve_const_spec( component, lineno ) raise ThriftCompilerError( 'Unknown constant "%s" referenced at line %d%s' % ( name, lineno, self.__in_path() ) )
def resolve_const_spec(self, name, lineno)
Finds and links the ConstSpec with the given name.
3.665476
3.385738
1.082622
if name in self.type_specs: return self.type_specs[name].link(self) if '.' in name: include_name, component = name.split('.', 1) if include_name in self.included_scopes: return self.included_scopes[include_name].resolve_type_spec( component, lineno ) raise ThriftCompilerError( 'Unknown type "%s" referenced at line %d%s' % ( name, lineno, self.__in_path() ) )
def resolve_type_spec(self, name, lineno)
Finds and links the TypeSpec with the given name.
3.62125
3.440767
1.052454
if name in self.service_specs: return self.service_specs[name].link(self) if '.' in name: include_name, component = name.split('.', 2) if include_name in self.included_scopes: return self.included_scopes[ include_name ].resolve_service_spec(component, lineno) raise ThriftCompilerError( 'Unknown service "%s" referenced at line %d%s' % ( name, lineno, self.__in_path() ) )
def resolve_service_spec(self, name, lineno)
Finds and links the ServiceSpec with the given name.
3.982422
3.736913
1.065698
# The compiler already ensures this. If we still get here with a # conflict, that's a bug. assert name not in self.included_scopes self.included_scopes[name] = included_scope self.add_surface(name, module)
def add_include(self, name, included_scope, module)
Register an imported module into this scope. Raises ``ThriftCompilerError`` if the name has already been used.
7.223856
8.144806
0.886928
assert service_spec is not None if service_spec.name in self.service_specs: raise ThriftCompilerError( 'Cannot define service "%s". That name is already taken.' % service_spec.name ) self.service_specs[service_spec.name] = service_spec
def add_service_spec(self, service_spec)
Registers the given ``ServiceSpec`` into the scope. Raises ``ThriftCompilerError`` if the name has already been used.
3.432147
2.804315
1.22388
if const_spec.name in self.const_specs: raise ThriftCompilerError( 'Cannot define constant "%s". That name is already taken.' % const_spec.name ) self.const_specs[const_spec.name] = const_spec
def add_const_spec(self, const_spec)
Adds a ConstSpec to the compliation scope. If the ConstSpec's ``save`` attribute is True, the constant will be added to the module at the top-level.
3.33458
3.61728
0.921847
assert surface is not None if hasattr(self.module, name): raise ThriftCompilerError( 'Cannot define "%s". The name has already been used.' % name ) setattr(self.module, name, surface)
def add_surface(self, name, surface)
Adds a top-level attribute with the given name to the module.
6.330403
5.149217
1.229391
assert type is not None if name in self.type_specs: raise ThriftCompilerError( 'Cannot define type "%s" at line %d. ' 'Another type with that name already exists.' % (name, lineno) ) self.type_specs[name] = spec
def add_type_spec(self, name, spec, lineno)
Adds the given type to the scope. :param str name: Name of the new type :param spec: ``TypeSpec`` object containing information on the type, or a ``TypeReference`` if this is meant to be resolved during the ``link`` stage. :param lineno: Line number on which this type is defined.
3.58088
4.256819
0.84121
if a is None: return None # make sure it's a numpy array a = _n.array(a) # quickest option if level in [0,1,False]: return a # otherwise assemble the python code to execute code = 'a.reshape(-1, level).'+method+'(axis=1)' # execute, making sure the array can be reshaped! try: return eval(code, dict(a=a[0:int(len(a)/level)*level], level=level)) except: print("ERROR: Could not coarsen array with method "+repr(method)) return a
def coarsen_array(a, level=2, method='mean')
Returns a coarsened (binned) version of the data. Currently supports any of the numpy array operations, e.g. min, max, mean, std, ... level=2 means every two data points will be binned. level=0 or 1 just returns a copy of the array
6.612615
6.926072
0.954743
# Normal coarsening if not exponential: # Coarsen the data xc = coarsen_array(x, level, 'mean') yc = coarsen_array(y, level, 'mean') # Coarsen the y error in quadrature if not ey is None: if not is_iterable(ey): ey = [ey]*len(y) eyc = _n.sqrt(coarsen_array(_n.power(ey,2)/level, level, 'mean')) # Coarsen the x error in quadrature if not ex is None: if not is_iterable(ey): ex = [ex]*len(x) exc = _n.sqrt(coarsen_array(_n.power(ex,2)/level, level, 'mean')) # Exponential coarsen else: # Make sure the data are arrays x = _n.array(x) y = _n.array(y) # Create the new arrays to fill xc = [] yc = [] if not ey is None: if not is_iterable(ey): ey = _n.array([ey]*len(y)) eyc = [] if not ex is None: if not is_iterable(ex): ex = _n.array([ex]*len(x)) exc = [] # Find the first element that is greater than zero x0 = x[x>0][0] # Now loop over the exponential bins n = 0 while x0*level**n < x[-1]: # Get all the points between x[n] and x[n]*r mask = _n.logical_and(x0*level**n <= x, x < x0*level**(n+1)) # Only do something if points exist from this range! if len(x[mask]): # Take the average x value xc.append(_n.average(x[mask])) yc.append(_n.average(y[mask])) # do the errors in quadrature if not ey is None: eyc.append(_n.sqrt(_n.average((ey**2)[mask])/len(ey[mask]))) if not ex is None: exc.append(_n.sqrt(_n.average((ex**2)[mask])/len(ex[mask]))) # Increment the counter n += 1 # Done exponential loop # Done coarsening # Return depending on situation if ey is None and ex is None: return _n.array(xc), _n.array(yc) elif ex is None : return _n.array(xc), _n.array(yc), _n.array(eyc) elif ey is None : return _n.array(xc), _n.array(yc), _n.array(exc) else : return _n.array(xc), _n.array(yc), _n.array(eyc), _n.array(exc)
def coarsen_data(x, y, ey=None, ex=None, level=2, exponential=False)
Coarsens the supplied data set. Returns coarsened arrays of x, y, along with quadrature-coarsened arrays of ey and ex if specified. Parameters ---------- x, y Data arrays. Can be lists (will convert to numpy arrays). These are coarsened by taking an average. ey=None, ex=None y and x uncertainties. Accepts arrays, lists, or numbers. These are coarsened by averaging in quadrature. level=2 For linear coarsening (default, see below), every n=level points will be averaged together (in quadrature for errors). For exponential coarsening, bins will be spaced by the specified scaling=level factor; for example, level=1.4 will group points within 40% of each other's x values. This is a great option for log-x plots, as the outcome will be evenly spaced. exponential=False If False, coarsen using linear spacing. If True, the bins will be exponentially spaced by the specified level.
2.512928
2.514738
0.99928
# coarsen x if not ylevel: Z_coarsened = Z else: temp = [] for z in Z: temp.append(coarsen_array(z, ylevel, method)) Z_coarsened = _n.array(temp) # coarsen y if xlevel: Z_coarsened = Z_coarsened.transpose() temp = [] for z in Z_coarsened: temp.append(coarsen_array(z, xlevel, method)) Z_coarsened = _n.array(temp).transpose() return Z_coarsened # first coarsen the columns (if necessary) if ylevel: Z_ycoarsened = [] for c in Z: Z_ycoarsened.append(coarsen_array(c, ylevel, method)) Z_ycoarsened = _n.array(Z_ycoarsened) # now coarsen the rows if xlevel: return coarsen_array(Z_ycoarsened, xlevel, method) else: return _n.array(Z_ycoarsened)
def coarsen_matrix(Z, xlevel=0, ylevel=0, method='average')
This returns a coarsened numpy matrix. method can be 'average', 'maximum', or 'minimum'
2.16849
2.193958
0.988392
if start == 0: print("Nothing you multiply zero by gives you anything but zero. Try picking something small.") return None if end == 0: print("It takes an infinite number of steps to get to zero. Try a small number?") return None # figure out our multiplication scale x = (1.0*end/start)**(1.0/(steps-1)) # now generate the array ns = _n.array(list(range(0,steps))) a = start*_n.power(x,ns) # tidy up the last element (there's often roundoff error) a[-1] = end return a
def erange(start, end, steps)
Returns a numpy array over the specified range taking geometric steps. See also numpy.logspace()
8.268836
7.988797
1.035054
if _s.fun.is_iterable(s) and not type(s) == str: return False try: float(s) return 1 except: try: complex(s) return 2 except: try: complex(s.replace('(','').replace(')','').replace('i','j')) return 2 except: return False
def is_a_number(s)
This takes an object and determines whether it's a number or a string representing a number.
4.343009
4.399415
0.987179
new_a = _n.array(a) if n==0: return new_a fill_array = _n.array([]) fill_array.resize(_n.abs(n)) # fill up the fill array before we do the shift if fill is "average": fill_array = 0.0*fill_array + _n.average(a) elif fill is "wrap" and n >= 0: for i in range(0,n): fill_array[i] = a[i-n] elif fill is "wrap" and n < 0: for i in range(0,-n): fill_array[i] = a[i] else: fill_array = 0.0*fill_array + fill # shift and fill if n > 0: for i in range(n, len(a)): new_a[i] = a[i-n] for i in range(0, n): new_a[i] = fill_array[i] else: for i in range(0, len(a)+n): new_a[i] = a[i-n] for i in range(0, -n): new_a[-i-1] = fill_array[-i-1] return new_a
def array_shift(a, n, fill="average")
This will return an array with all the elements shifted forward in index by n. a is the array n is the amount by which to shift (can be positive or negative) fill="average" fill the new empty elements with the average of the array fill="wrap" fill the new empty elements with the lopped-off elements fill=37.2 fill the new empty elements with the value 37.2
2.477232
2.393803
1.034852
covariance = [] for n in range(0, len(error)): covariance.append([]) for m in range(0, len(error)): covariance[n].append(correlation[n][m]*error[n]*error[m]) return _n.array(covariance)
def assemble_covariance(error, correlation)
This takes an error vector and a correlation matrix and assembles the covariance
2.795051
2.677628
1.043854
c = {} for key in list(b.keys()): c[key]=b[key] for key in list(a.keys()): c[key]=a[key] return c
def combine_dictionaries(a, b)
returns the combined dictionary. a's values preferentially chosen
2.736476
2.423359
1.129208
# make it a kickass copy of the original c = _n.array(c) # first get the error vector e = [] for n in range(0, len(c[0])): e.append(_n.sqrt(c[n][n])) # now cycle through the matrix, dividing by e[1]*e[2] for n in range(0, len(c[0])): for m in range(0, len(c[0])): c[n][m] = c[n][m] / (e[n]*e[m]) return [_n.array(e), _n.array(c)]
def decompose_covariance(c)
This decomposes a covariance matrix into an error vector and a correlation matrix
3.812724
3.386137
1.125981
D_ydata = [] D_xdata = [] for n in range(1, len(xdata)-1): D_xdata.append(xdata[n]) D_ydata.append((ydata[n+1]-ydata[n-1])/(xdata[n+1]-xdata[n-1])) return [D_xdata, D_ydata]
def derivative(xdata, ydata)
performs d(ydata)/d(xdata) with nearest-neighbor slopes must be well-ordered, returns new arrays [xdata, dydx_data] neighbors:
1.903239
1.953176
0.974433
x = [] dydx = [] nmax = len(xdata)-1 for n in range(nmax+1): # get the indices of the data to fit i1 = max(0, n-neighbors) i2 = min(nmax, n+neighbors) # get the sub data to fit xmini = _n.array(xdata[i1:i2+1]) ymini = _n.array(ydata[i1:i2+1]) slope, intercept = fit_linear(xmini, ymini) # make x the average of the xmini x.append(float(sum(xmini))/len(xmini)) dydx.append(slope) return _n.array(x), _n.array(dydx)
def derivative_fit(xdata, ydata, neighbors=1)
loops over the data points, performing a least-squares linear fit of the nearest neighbors at each point. Returns an array of x-values and slopes. xdata should probably be well-ordered. neighbors How many data point on the left and right to include.
2.905946
3.161386
0.9192
Z = _n.array(Z) X = _n.array(X) points = len(Z)*subsample # define a function for searching def zero_me(new_x): return f(new_x)-target_old_x # do a simple search to find the new_x that gives old_x = min(X) target_old_x = min(X) new_xmin = find_zero_bisect(zero_me, new_xmin, new_xmax, _n.abs(new_xmax-new_xmin)*0.0001) target_old_x = max(X) new_xmax = find_zero_bisect(zero_me, new_xmin, new_xmax, _n.abs(new_xmax-new_xmin)*0.0001) # now loop over all the new x values new_X = [] new_Z = [] bin_width = float(new_xmax-new_xmin)/(points) for new_x in frange(new_xmin, new_xmax, bin_width): # make sure we're in the range of X if f(new_x) <= max(X) and f(new_x) >= min(X): # add this guy to the array new_X.append(new_x) # get the interpolated column new_Z.append( interpolate(X,Z,f(new_x)) ) return _n.array(new_Z), _n.array(new_X)
def distort_matrix_X(Z, X, f, new_xmin, new_xmax, subsample=3)
Applies a distortion (remapping) to the matrix Z (and x-values X) using function f. returns new_Z, new_X f is an INVERSE function old_x(new_x) Z is a matrix. X is an array where X[n] is the x-value associated with the array Z[n]. new_xmin, new_xmax is the possible range of the distorted x-variable for generating Z points is how many elements the stretched Z should have. "auto" means use the same number of bins
3.29623
3.286994
1.00281
# just use the same methodology as before by transposing, distorting X, then # transposing back new_Z, new_Y = distort_matrix_X(Z.transpose(), Y, f, new_ymin, new_ymax, subsample) return new_Z.transpose(), new_Y
def distort_matrix_Y(Z, Y, f, new_ymin, new_ymax, subsample=3)
Applies a distortion (remapping) to the matrix Z (and y-values Y) using function f. returns new_Z, new_Y f is a function old_y(new_y) Z is a matrix. Y is an array where Y[n] is the y-value associated with the array Z[:,n]. new_ymin, new_ymax is the range of the distorted x-variable for generating Z points is how many elements the stretched Z should have. "auto" means use the same number of bins
4.877916
4.857413
1.004221
prev = f(xmin) this = f(xmin+xstep) for x in frange(xmin+xstep,xmax,xstep): next = f(x+xstep) # see if we're on top if this < prev and this < next: return x, this prev = this this = next return x, this
def dumbguy_minimize(f, xmin, xmax, xstep)
This just steps x and looks for a peak returns x, f(x)
4.260284
4.056373
1.050269
# empty case if len(array) == 0: return 0 output_value = 1 for x in array: # test it and die if it's not a number test = is_a_number(x) if not test: return False # mention if it's complex output_value = max(output_value,test) return output_value
def elements_are_numbers(array)
Tests whether the elements of the supplied array are numbers.
7.020308
6.849551
1.02493
if not _s.fun.is_iterable(a): a = [a] a = list(a) while len(a)>len(b): a.pop(-1) while len(a)<len(b): a.append(a[-1]) return a
def equalize_list_lengths(a,b)
Modifies the length of list a to match b. Returns a. a can also not be a list (will convert it to one). a will not be modified.
3.363735
3.055763
1.100784
if recursion<0: return None # get an initial guess as to the baseline ymin = min(array) ymax = max(array) for n in range(max_iterations): # bisect the range to estimate the baseline y1 = (ymin+ymax)/2.0 # now see how many peaks this finds. p could have 40 for all we know p, s, i = find_peaks(array, y1, True) # now loop over the subarrays and make sure there aren't two peaks in any of them for n in range(len(i)): # search the subarray for two peaks, iterating 3 times (75% selectivity) p2 = find_N_peaks(s[n], 2, rec_max_iterations, rec_max_iterations=rec_max_iterations, recursion=recursion-1) # if we found a double-peak if not p2 is None: # push these non-duplicate values into the master array for x in p2: # if this point is not already in p, push it on if not x in p: p.append(x+i[n]) # don't forget the offset, since subarrays start at 0 # if we nailed it, finish up if len(p) == N: return p # if we have too many peaks, we need to increase the baseline if len(p) > N: ymin = y1 # too few? decrease the baseline else: ymax = y1 return None
def find_N_peaks(array, N=4, max_iterations=100, rec_max_iterations=3, recursion=1)
This will run the find_peaks algorythm, adjusting the baseline until exactly N peaks are found.
6.034048
5.991753
1.007059
peaks = [] if return_subarrays: subarray_values = [] subarray_indices = [] # loop over the data n = 0 while n < len(array): # see if we're above baseline, then start the "we're in a peak" loop if array[n] > baseline: # start keeping track of the subarray here if return_subarrays: subarray_values.append([]) subarray_indices.append(n) # find the max ymax=baseline nmax = n while n < len(array) and array[n] > baseline: # add this value to the subarray if return_subarrays: subarray_values[-1].append(array[n]) if array[n] > ymax: ymax = array[n] nmax = n n = n+1 # store the max peaks.append(nmax) else: n = n+1 if return_subarrays: return peaks, subarray_values, subarray_indices else: return peaks
def find_peaks(array, baseline=0.1, return_subarrays=False)
This will try to identify the indices of the peaks in array, returning a list of indices in ascending order. Runs along the data set until it jumps above baseline. Then it considers all the subsequent data above the baseline as part of the peak, and records the maximum of this data as one peak value.
2.943504
2.891572
1.01796
if f(xmax)*f(xmin) > 0: print("find_zero_bisect(): no zero on the range",xmin,"to",xmax) return None temp = min(xmin,xmax) xmax = max(xmin,xmax) xmin = temp xmid = (xmin+xmax)*0.5 while xmax-xmin > xprecision: y = f(xmid) # pick the direction with one guy above and one guy below zero if y > 0: # move left or right? if f(xmin) < 0: xmax=xmid else: xmin=xmid # f(xmid) is below zero elif y < 0: # move left or right? if f(xmin) > 0: xmax=xmid else: xmin=xmid # yeah, right else: return xmid # bisect again xmid = (xmin+xmax)*0.5 return xmid
def find_zero_bisect(f, xmin, xmax, xprecision)
This will bisect the range and zero in on zero.
3.549504
3.428303
1.035353
x = _n.array(xdata) y = _n.array(ydata) ax = _n.average(x) ay = _n.average(y) axx = _n.average(x*x) ayx = _n.average(y*x) slope = (ayx - ay*ax) / (axx - ax*ax) intercept = ay - slope*ax return slope, intercept
def fit_linear(xdata, ydata)
Returns slope and intercept of line of best fit: y = a*x + b through the supplied data. Parameters ---------- xdata, ydata: Arrays of x data and y data (having matching lengths).
2.374418
2.934097
0.80925
start = 1.0*start end = 1.0*end inc = 1.0*inc # if we got a dumb increment if not inc: return _n.array([start,end]) # if the increment is going the wrong direction if 1.0*(end-start)/inc < 0.0: inc = -inc # get the integer steps ns = _n.array(list(range(0, int(1.0*(end-start)/inc)+1))) return start + ns*inc
def frange(start, end, inc=1.0)
A range function, that accepts float increments and reversed direction. See also numpy.linspace()
4.157352
4.447421
0.934778
# Make a fitter object, which handily interprets string functions # The "+0*x" is a trick to ensure the function takes x as an argument # (makes it a little more idiot proof). fitty = _s.data.fitter().set_functions(f+"+0*x",'') # Make sure both errors are arrays of the right length if not _s.fun.is_iterable(ex): ex = _n.array([ex]*len(x)) if not _s.fun.is_iterable(ey): ey = _n.array([ey]*len(x)) # Get the x and y exact values first, then randomize x = _n.array(x) y = fitty.f[0](x) x = _n.random.normal(_n.array(x),ex) y = _n.random.normal(y, ey) # make a databox d = _s.data.databox() d['x'] = x d['y'] = y if include_errors: d['ey'] = ey d['ex'] = ex d.h(reality=f, ey=ey[0], ex=ex[0]) return d
def generate_fake_data(f='2*x-5', x=_n.linspace(-5,5,11), ey=1, ex=0, include_errors=False, **kwargs)
Generates a set of fake data from the underlying "reality" (or mean behavior) function f. Parameters ---------- f: Underlying "reality" function or mean behavior. This can be any python-evaluable string, and will have access to all the numpy functions (e.g., cos), scipy's special functions (e.g., erf), and any other variables defined by keyword arguments ex, ey: Uncertainty "strength" for x and y data. This can be a constant or an array of values. If the distributions (below) are normal, this corresponds to the standard deviation. include_errors=True Whether the databox should include a column for ex and ey. Keyword arguments are used as additional globals in the function evaluation. Returns a databox containing the data and other relevant information in the header.
5.892694
5.438829
1.083449
# try for ipython if 'get_ipython' in globals(): a = list(get_ipython().history_manager.input_hist_raw) a.reverse() return a elif 'SPYDER_SHELL_ID' in _os.environ: try: p = _os.path.join(_settings.path_user, ".spyder2", "history.py") a = read_lines(p) a.reverse() return a except: pass # otherwise try pyshell or pycrust (requires wx) else: try: import wx for x in wx.GetTopLevelWindows(): if type(x) in [wx.py.shell.ShellFrame, wx.py.crust.CrustFrame]: a = x.shell.GetText().split(">>>") a.reverse() return a except: pass return ['shell history not available']
def get_shell_history()
This only works with some shells.
5.115251
5.034451
1.01605
i = array.searchsorted(value) if i == len(array): return -1 else: return i
def index(value, array)
Array search that behaves like I want it to. Totally dumb, I know.
5.37859
5.244314
1.025604
a = (array-value)**2 return index(a.min(), a)
def index_nearest(value, array)
expects a _n.array returns the global minimum of (value-array)^2
10.253634
9.051103
1.13286
for n in range(starting_index, len(array)-1): if (value-array[n] )*direction >= 0 \ and (value-array[n+1])*direction < 0: return n # no crossing found return -1
def index_next_crossing(value, array, starting_index=0, direction=1)
starts at starting_index, and walks through the array until it finds a crossing point with value set direction=-1 for down crossing
4.815204
4.976423
0.967603
index = 0 # search for the last array item that value is larger than for n in range(0,len(array)): if value >= array[n]: index = n+1 array.insert(index, value) return index
def insert_ordered(value, array)
This will insert the value into the array, keeping it sorted, and returning the index where it was inserted
4.728665
5.131335
0.921527
# sort the arrays and make sure they're numpy arrays [xdata, ydata] = sort_matrix([xdata,ydata],0) xdata = _n.array(xdata) ydata = _n.array(ydata) if xmin is None: xmin = min(xdata) if xmax is None: xmax = max(xdata) # find the index range imin = xdata.searchsorted(xmin) imax = xdata.searchsorted(xmax) xint = [xdata[imin]] yint = [0] # get the autozero if autozero >= 1: zero = _n.average(ydata[imin:imin+int(autozero)]) ydata = ydata-zero for n in range(imin+1,imax): if len(yint): xint.append(xdata[n]) yint.append(yint[-1]+0.5*(xdata[n]-xdata[n-1])*(ydata[n]+ydata[n-1])) else: xint.append(xdata[n]) yint.append(0.5*(xdata[n]-xdata[n-1])*(ydata[n]+ydata[n-1])) return _n.array(xint), _n.array(yint)
def integrate_data(xdata, ydata, xmin=None, xmax=None, autozero=0)
Numerically integrates up the ydata using the trapezoid approximation. estimate the bin width (scaled by the specified amount). Returns (xdata, integrated ydata). autozero is the number of data points to use as an estimate of the background (then subtracted before integrating).
2.22508
2.185979
1.017887
for n in range(max_iterations): # start at the middle x = 0.5*(xmin+xmax) df = f(x)-f0 if _n.fabs(df) < tolerance: return x # if we're high, set xmin to x etc... if df > 0: xmin=x else: xmax=x print("Couldn't find value!") return 0.5*(xmin+xmax)
def invert_increasing_function(f, f0, xmin, xmax, tolerance, max_iterations=100)
This will try try to qickly find a point on the f(x) curve between xmin and xmax that is equal to f0 within tolerance.
5.324298
5.27145
1.010025
# make sure they're numpy arrays, and make copies to avoid the referencing error y = _n.array(y) t = _n.array(t) # if we're doing the power of 2, do it if pow2: keep = 2**int(_n.log2(len(y))) # now resize the data y.resize(keep) t.resize(keep) # Window the data if not window in [None, False, 0]: try: # Get the windowing array w = eval("_n."+window, dict(_n=_n))(len(y)) # Store the original variance v0 = _n.average(abs(y)**2) # window the time domain data y = y * w # Rescale by the variance ratio if rescale: y = y * _n.sqrt(v0 / _n.average(abs(y)**2)) except: print("ERROR: Bad window!") return # do the actual fft, and normalize Y = _n.fft.fftshift( _n.fft.fft(y) / len(t) ) f = _n.fft.fftshift( _n.fft.fftfreq(len(t), t[1]-t[0]) ) return f, Y
def fft(t, y, pow2=False, window=None, rescale=False)
FFT of y, assuming complex or real-valued inputs. This goes through the numpy fourier transform process, assembling and returning (frequencies, complex fft) given time and signal data y. Parameters ---------- t,y Time (t) and signal (y) arrays with which to perform the fft. Note the t array is assumed to be evenly spaced. pow2 = False Set this to true if you only want to keep the first 2^n data points (speeds up the FFT substantially) window = None Can be set to any of the windowing functions in numpy that require only the number of points as the argument, e.g. window='hanning'. rescale = False If True, the FFT will be rescaled by the square root of the ratio of variances before and after windowing, such that the sum of component amplitudes squared is equal to the actual variance.
4.249974
4.091009
1.038857
# do the actual fft f, Y = fft(t,y,pow2,window,rescale) # take twice the negative frequency branch, because it contains the # extra frequency point when the number of points is odd. f = _n.abs(f[int(len(f)/2)::-1]) P = _n.abs(Y[int(len(Y)/2)::-1])**2 / (f[1]-f[0]) # Since this is the same as the positive frequency branch, double the # appropriate frequencies. For even number of points, there is one # extra negative frequency to avoid doubling. For odd, you only need to # avoid the DC value. # For the even if len(t)%2 == 0: P[1:len(P)-1] = P[1:len(P)-1]*2 else: P[1:] = P[1:]*2 return f, P
def psd(t, y, pow2=False, window=None, rescale=False)
Single-sided power spectral density, assuming real valued inputs. This goes through the numpy fourier transform process, assembling and returning (frequencies, psd) given time and signal data y. Note it is defined such that sum(psd)*df, where df is the frequency spacing, is the variance of the original signal for any range of frequencies. This includes the DC and Nyquist components: sqrt(psd[0]*df) = average value of original time trace sqrt(psd[-1]*df) = amplitude of Nyquist component (for even # points) Parameters ---------- t,y Time (t) and signal (y) arrays with which to perform the PSD. Note the t array is assumed to be evenly spaced. pow2 = False Set this to true if you only want to keep the first 2^n data points (speeds up the FFT substantially) window = None can be set to any of the windowing functions in numpy, e.g. window='hanning'. rescale = False If True, the FFT will be rescaled by the square root of the ratio of variances before and after windowing, such that the integral sum(PSD)*df is the variance of the *original* time-domain data. returns frequencies, psd (y^2/Hz)
6.539889
6.168815
1.060153
# have the user select some files if paths==None: paths = _s.dialogs.MultipleFiles('DIS AND DAT|*.*') if paths == []: return for path in paths: lines = read_lines(path) if depth: N=min(len(lines),depth) else: N=len(lines) for n in range(0,N): if lines[n].find(search) >= 0: lines[n] = lines[n].replace(search,replace) print(path.split(_os.path.pathsep)[-1]+ ': "'+lines[n]+'"') # only write if we're not confirming if not confirm: _os.rename(path, path+".backup") write_to_file(path, join(lines, '')) if confirm: if input("yes? ")=="yes": replace_in_files(search,replace,depth,paths,False) return
def replace_in_files(search, replace, depth=0, paths=None, confirm=True)
Does a line-by-line search and replace, but only up to the "depth" line.
5.279463
5.059477
1.04348
# have the user select some files paths = _s.dialogs.MultipleFiles('DIS AND DAT|*.*') if paths == []: return for path in paths: _shutil.copy(path, path+".backup") lines = read_lines(path) for n in range(0,len(lines)): if lines[n].find(search_string) >= 0: print(lines[n]) lines[n] = replacement_line.strip() + "\n" write_to_file(path, join(lines, '')) return
def replace_lines_in_files(search_string, replacement_line)
Finds lines containing the search string and replaces the whole line with the specified replacement string.
6.587368
6.606096
0.997165
l = list(array) l.reverse() return _n.array(l)
def reverse(array)
returns a reversed numpy array
7.434413
6.092354
1.220286
iterable = is_iterable(x) if not iterable: x = [x] # make a copy to be safe x = _n.array(x) # loop over the elements for i in range(len(x)): # Handle the weird cases if not x[i] in [None, _n.inf, _n.nan]: sig_figs = -int(_n.floor(_n.log10(abs(x[i]))))+n-1 x[i] = _n.round(x[i], sig_figs) if iterable: return x else: return x[0]
def round_sigfigs(x, n=2)
Rounds the number to the specified significant figures. x can also be a list or array of numbers (in these cases, a numpy array is returned).
3.469812
3.516241
0.986796
def shift_feature_to_x0(xdata, ydata, x0=0, feature=imax):
    """Locate a feature in ydata and shift xdata so that feature sits at x0.

    Returns shifted xdata, ydata. `feature` is a function taking an
    array/list and returning the index of the feature (default imax).
    """
    index = feature(ydata)
    return xdata - xdata[index] + x0, ydata
def shift_feature_to_x0(xdata, ydata, x0=0, feature=imax)
Finds a feature in the the ydata and shifts xdata so the feature is centered at x0. Returns shifted xdata, ydata. Try me with plot.tweaks.manipulate_shown_data()! xdata,ydata data set x0=0 where to shift the peak feature=imax function taking an array/list and returning the index of said feature
7.024232
6.517807
1.077699
def smooth_data(xdata, ydata, yerror, amount=1):
    """Return smoothed [xdata, ydata, yerror] without modifying the inputs."""
    # work on copies so the caller's arrays are untouched
    sx = smooth_array(_n.array(xdata), amount)
    sy = smooth_array(_n.array(ydata), amount)
    se = None if yerror is None else smooth_array(_n.array(yerror), amount)
    return [sx, sy, se]
def smooth_data(xdata, ydata, yerror, amount=1)
Returns smoothed [xdata, ydata, yerror]. Does not destroy the input arrays.
2.266601
2.153764
1.052391
def sort_matrix(a, n=0):
    """Reorder the columns of `a` so that row `n` is ascending; every other
    row is permuted the same way. Returns a new numpy array."""
    data = _n.array(a)
    order = data[n, :].argsort()
    return data[:, order]
def sort_matrix(a,n=0)
This will rearrange the array a[n] from lowest to highest, and rearrange the rest of a[i]'s in the same way. It is dumb and slow. Returns a numpy array.
9.102055
11.86659
0.767032
def submatrix(matrix, i1, i2, j1, j2):
    """Return the submatrix with rows i1..i2 and columns j1..j2
    (endpoints included) as a numpy array."""
    rows = [matrix[i][j1:j2 + 1] for i in range(i1, i2 + 1)]
    return _n.array(rows)
def submatrix(matrix,i1,i2,j1,j2)
returns the submatrix defined by the index bounds i1-i2 and j1-j2 Endpoints included!
3.43235
3.193951
1.074641
def trim_data(xmin, xmax, xdata, *args):
    """Keep only the points where xmin <= xdata <= xmax (None = no bound).

    Non-destructive; any extra arrays supplied via args (same shape as
    xdata) are trimmed with the same mask. Returns a list of arrays.
    """
    if not isinstance(xdata, _n.ndarray): xdata = _n.array(xdata)

    # default bounds: the data's own extremes
    if xmin is None: xmin = min(xdata)
    if xmax is None: xmax = max(xdata)

    # indices of the points inside the window
    keep = _n.argwhere((xdata >= xmin) & (xdata <= xmax)).transpose()[0]

    trimmed = [xdata[keep]]
    for extra in args:
        if not isinstance(extra, _n.ndarray): extra = _n.array(extra)
        trimmed.append(extra[keep])
    return trimmed
def trim_data(xmin, xmax, xdata, *args)
Removes all the data except that in which xdata is between xmin and xmax. This does not mutilate the input arrays, and additional arrays can be supplied via args (provided they match xdata in shape) xmin and xmax can be None
2.68845
2.639595
1.018508
def trim_data_uber(arrays, conditions):
    """Non-destructively keep only the elements (in every array) for which
    ALL boolean condition arrays hold. None entries in `arrays` pass
    through as None. Returns a list of (shorter) arrays."""
    # degenerate inputs
    if len(conditions) == 0: return arrays
    if len(arrays) == 0: return []

    # AND the conditions together
    keep = conditions[0]
    for condition in conditions[1:]:
        keep = keep & condition
    ns = _n.argwhere(keep).transpose()[0]

    return [None if a is None else a[ns] for a in arrays]
def trim_data_uber(arrays, conditions)
Non-destructively selects data from the supplied list of arrays based on the supplied list of conditions. Importantly, if any of the conditions are not met for the n'th data point, the n'th data point is rejected for all supplied arrays. Example ------- x = numpy.linspace(0,10,20) y = numpy.sin(x) trim_data_uber([x,y], [x>3,x<9,y<0.7]) This will keep only the x-y pairs in which 3<x<9 and y<0.7, returning a list of shorter arrays (all having the same length, of course).
4.286561
4.71758
0.908636
def _fetch(self, searchtype, fields, **kwargs):
    '''Fetch a response from the Geocoding API.'''
    # required query parameters
    fields['vintage'] = self.vintage
    fields['benchmark'] = self.benchmark
    fields['format'] = 'json'
    if 'layers' in kwargs:
        fields['layers'] = kwargs['layers']

    returntype = kwargs.get('returntype', 'geographies')
    url = self._geturl(searchtype, returntype)

    try:
        with requests.get(url, params=fields, timeout=kwargs.get('timeout')) as resp:
            payload = resp.json()
            result = payload.get('result', {})
            if "addressMatches" in result:
                return AddressResult(payload)
            if "geographies" in result:
                return GeographyResult(payload)
            # neither result shape recognized
            raise ValueError()
    except (ValueError, KeyError):
        raise ValueError("Unable to parse response from Census")
    except RequestException as e:
        raise e
def _fetch(self, searchtype, fields, **kwargs)
Fetch a response from the Geocoding API.
4.093833
3.882907
1.054322
def coordinates(self, x, y, **kwargs):
    '''Geocode a (lon, lat) coordinate.'''
    # coordinate lookups only support the geographies return type
    kwargs['returntype'] = 'geographies'
    return self._fetch('coordinates', {'x': x, 'y': y}, **kwargs)
def coordinates(self, x, y, **kwargs)
Geocode a (lon, lat) coordinate.
7.459626
5.426737
1.374606
def address(self, street, city=None, state=None, zipcode=None, **kwargs):
    '''Geocode an address.'''
    query = {
        'street': street,
        'city': city,
        'state': state,
        'zip': zipcode,
    }
    return self._fetch('address', query, **kwargs)
def address(self, street, city=None, state=None, zipcode=None, **kwargs)
Geocode an address.
3.415413
3.529228
0.967751
def onelineaddress(self, address, **kwargs):
    '''Geocode an address passed as one string.
    e.g. "4600 Silver Hill Rd, Suitland, MD 20746"
    '''
    return self._fetch('onelineaddress', {'address': address}, **kwargs)
def onelineaddress(self, address, **kwargs)
Geocode an address passed as one string. e.g. "4600 Silver Hill Rd, Suitland, MD 20746"
8.416977
2.508384
3.355538
def addressbatch(self, data, **kwargs):
    '''Send either a CSV file or data to the addressbatch API.

    According to the Census, "there is currently an upper limit of 1000
    records per batch file." A file must have no header and fields
    id,street,city,state,zip. Data should be a list of dicts with the
    above fields (ID optional).
    '''
    # file-like object?
    if hasattr(data, 'read'):
        return self._post_batch(f=data, **kwargs)

    # a path string?
    if isinstance(data, string_types):
        with open(data, 'rb') as f:
            return self._post_batch(f=f, **kwargs)

    # otherwise assume a list of dicts
    return self._post_batch(data=data, **kwargs)
def addressbatch(self, data, **kwargs)
Send either a CSV file or data to the addressbatch API. According to the Census, "there is currently an upper limit of 1000 records per batch file." If a file, must have no header and fields id,street,city,state,zip If data, should be a list of dicts with the above fields (although ID is optional)
5.416944
2.110293
2.566916
def load_colormap(self, name=None):
    """Load the colormap of the supplied name (None = internal name; see
    get_name()). Creates the file from the current state if it is missing."""
    if name is None: name = self.get_name()
    if name == "" or not type(name) == str: return "Error: Bad name."

    # assemble the path to the colormap
    path = _os.path.join(_settings.path_home, "colormaps", name + ".cmap")

    # if it doesn't exist yet, create it from the current colorpoints
    if not _os.path.exists(path):
        print("load_colormap(): Colormap '" + name + "' does not exist. Creating.")
        self.save_colormap(name)
        return

    # BUGFIX: use a context manager so the file is closed even on error
    with open(path, 'r') as f:
        contents = f.read()

    # NOTE: eval of file contents -- trusted local settings directory only
    try:
        self._colorpoint_list = eval(contents)
    except Exception:  # was a bare except; narrowed so Ctrl-C still works
        print("Invalid colormap. Overwriting.")
        self.save_colormap()

    # update the image
    self.update_image()
    return self
def load_colormap(self, name=None)
Loads a colormap of the supplied name. None means used the internal name. (See self.get_name())
3.826314
3.622331
1.056313
def save_colormap(self, name=None):
    """Save the colormap under the specified name (None = internal name;
    see get_name())."""
    if name is None: name = self.get_name()
    if name == "" or not type(name) == str: return "Error: invalid name."

    # make sure we have the colormaps directory
    colormaps = _os.path.join(_settings.path_home, 'colormaps')
    _settings.MakeDir(colormaps)

    # assemble the path to the colormap
    path = _os.path.join(_settings.path_home, 'colormaps', name + ".cmap")

    # BUGFIX: context manager guarantees the file is flushed and closed
    with open(path, 'w') as f:
        f.write(str(self._colorpoint_list))

    return self
def save_colormap(self, name=None)
Saves the colormap with the specified name. None means use internal name. (See get_name())
3.804145
3.554427
1.070256
def delete_colormap(self, name=None):
    """Delete the colormap with the specified name (None = internal name;
    see get_name())."""
    if name is None: name = self.get_name()
    if name == "" or not type(name) == str: return "Error: invalid name."

    # remove the file from the colormaps directory
    target = _os.path.join(_settings.path_home, 'colormaps', name + ".cmap")
    _os.unlink(target)
    return self
def delete_colormap(self, name=None)
Deletes the colormap with the specified name. None means use the internal name (see get_name())
5.814429
5.451941
1.066488
def set_name(self, name="My Colormap"):
    """Set the colormap name. Use something your OS could name a file."""
    if type(name) is not str:
        print("set_name(): Name must be a string.")
        return
    self._name = name
    return self
def set_name(self, name="My Colormap")
Sets the name. Make sure the name is something your OS could name a file.
4.834431
5.164208
0.936142
def set_image(self, image='auto'):
    """Set which pylab image to tweak ('auto' = first image on the current axes)."""
    if image == "auto":
        image = _pylab.gca().images[0]
    self._image = image
    self.update_image()
def set_image(self, image='auto')
Set which pylab image to tweak.
6.226748
4.742185
1.313055
def update_image(self):
    """Apply the current cmap to the image, if one is set."""
    if not self._image:
        return
    self._image.set_cmap(self.get_cmap())
    _pylab.draw()
def update_image(self)
Set's the image's cmap.
7.553135
4.26065
1.772766
def pop_colorpoint(self, n=0):
    """Remove and return colorpoint n, always leaving at least two behind."""
    # refuse to shrink below two points; just return the indexed item
    if len(self._colorpoint_list) <= 2:
        return self[n]

    popped = self._colorpoint_list.pop(n)

    # re-pin the endpoints to 0 and 1
    self._colorpoint_list[0][0] = 0.0
    self._colorpoint_list[-1][0] = 1.0

    # update the image
    self.update_image()
    return popped
def pop_colorpoint(self, n=0)
Removes and returns the specified colorpoint. Will always leave two behind.
4.344876
4.229735
1.027222
def insert_colorpoint(self, position=0.5, color1=[1.0,1.0,0.0], color2=[1.0,1.0,0.0]):
    """Insert the specified color pair into the colorpoint list at `position`."""
    points = self._colorpoint_list

    # positions at/beyond the ends go straight to the ends
    if position <= 0.0:
        points.insert(0, [0.0, color1, color2])
    elif position >= 1.0:
        points.append([1.0, color1, color2])
    else:
        # find the first existing point at or beyond the new position
        # and insert just before it
        for n in range(len(points)):
            if position <= points[n + 1][0]:
                points.insert(n + 1, [position, color1, color2])
                break

    # update the image with the new cmap
    self.update_image()
    return self
def insert_colorpoint(self, position=0.5, color1=[1.0,1.0,0.0], color2=[1.0,1.0,0.0])
Inserts the specified color into the list.
3.667506
3.668417
0.999752
def modify_colorpoint(self, n, position=0.5, color1=[1.0,1.0,1.0], color2=[1.0,1.0,1.0]):
    """Change colorpoint n, then update the colormap. Endpoints stay pinned
    at 0 and 1; interior points cannot move left of their neighbor."""
    if n == 0:
        position = 0.0
    elif n == len(self._colorpoint_list) - 1:
        position = 1.0
    else:
        position = max(self._colorpoint_list[n - 1][0], position)

    self._colorpoint_list[n] = [position, color1, color2]
    self.update_image()
    self.save_colormap("Last Used")
def modify_colorpoint(self, n, position=0.5, color1=[1.0,1.0,1.0], color2=[1.0,1.0,1.0])
Changes the values of an existing colorpoint, then updates the colormap.
4.192662
4.088211
1.025549
def get_cmap(self):
    """Build a matplotlib LinearSegmentedColormap from the colorpoint data."""
    channels = {'red': [], 'green': [], 'blue': []}

    # each colorpoint is [position, color_below, color_above]
    for position, below, above in self._colorpoint_list:
        channels['red'  ].append((position, below[0] * 1.0, above[0] * 1.0))
        channels['green'].append((position, below[1] * 1.0, above[1] * 1.0))
        channels['blue' ].append((position, below[2] * 1.0, above[2] * 1.0))

    return _mpl.colors.LinearSegmentedColormap('custom', channels)
def get_cmap(self)
Generates a pylab cmap object from the colorpoint data.
3.076939
2.754695
1.11698
def _signal_load(self):
    """Load the selected cmap and rebuild the interface."""
    # take the name from the combo box
    selected = str(self._combobox_cmaps.currentText())
    self.set_name(selected)

    # load it and rebuild the interface around it
    self.load_colormap()
    self._build_gui()
    self._button_save.setEnabled(False)
def _signal_load(self)
Load the selected cmap.
6.854298
5.216774
1.313896
def _button_save_clicked(self):
    """Save the current cmap under the selected name."""
    selected = str(self._combobox_cmaps.currentText())
    self.set_name(selected)
    self.save_colormap()
    self._button_save.setEnabled(False)
    self._load_cmap_list()
def _button_save_clicked(self)
Save the selected cmap.
6.507658
4.117705
1.580409
def _button_delete_clicked(self):
    """Delete the selected cmap and refresh the list."""
    selected = str(self._combobox_cmaps.currentText())
    self.delete_colormap(selected)
    self._combobox_cmaps.setEditText("")
    self._load_cmap_list()
def _button_delete_clicked(self)
Delete the selected cmap.
5.796655
3.780089
1.53347
def _color_dialog_changed(self, n, top, c):
    """Update colorpoint n from dialog color c; `top` selects which of the
    two colors changes unless the link checkbox ties them together."""
    self._button_save.setEnabled(True)
    cp = self._colorpoint_list[n]

    # normalized rgb for the colorpoint data
    rgb = [c.red() / 255.0, c.green() / 255.0, c.blue() / 255.0]

    # BUGFIX: the stylesheet's blue channel previously used c.green()
    style = ("background-color: rgb(" + str(c.red()) + "," + str(c.green())
             + "," + str(c.blue()) + "); border-radius: 3px;")

    if self._checkboxes[n].isChecked():
        # linked: both colors get the new value
        self.modify_colorpoint(n, cp[0], rgb, rgb)
        self._buttons_top_color[n].setStyleSheet(style)
        self._buttons_bottom_color[n].setStyleSheet(style)
    elif top:
        self.modify_colorpoint(n, cp[0], cp[1], rgb)
        self._buttons_top_color[n].setStyleSheet(style)
    else:
        self.modify_colorpoint(n, cp[0], rgb, cp[2])
        self._buttons_bottom_color[n].setStyleSheet(style)
def _color_dialog_changed(self, n, top, c)
Updates the color of the slider.
1.756934
1.74771
1.005278
def _button_plus_clicked(self, n):
    """Create a new colorpoint by duplicating colorpoint n."""
    self._button_save.setEnabled(True)
    point = self._colorpoint_list[n]
    self.insert_colorpoint(point[0], point[1], point[2])
    self._build_gui()
def _button_plus_clicked(self, n)
Create a new colorpoint.
4.586721
3.233644
1.418437
def _button_minus_clicked(self, n):
    """Remove colorpoint n and rebuild the gui."""
    self._button_save.setEnabled(True)
    self.pop_colorpoint(n)
    self._build_gui()
def _button_minus_clicked(self, n)
Remove an existing colorpoint.
17.112541
7.54962
2.266676
def _slider_changed(self, n):
    """Move colorpoint n to the slider position and update the colormap."""
    self._button_save.setEnabled(True)
    point = self._colorpoint_list[n]
    # slider value is in thousandths of the 0..1 position range
    self.modify_colorpoint(n, self._sliders[n].value() * 0.001, point[1], point[2])
def _slider_changed(self, n)
updates the colormap / plot
5.606382
5.359916
1.045983
def _color_button_clicked(self, n, top):
    """Open the top or bottom color dialog for colorpoint n."""
    self._button_save.setEnabled(True)
    dialogs = self._color_dialogs_top if top else self._color_dialogs_bottom
    dialogs[n].open()
def _color_button_clicked(self, n,top)
Opens the dialog.
5.54341
4.633984
1.196252
def _load_cmap_list(self):
    """Scan the colormaps directory and populate the combo box."""
    # remember the current selection
    current = self.get_name()

    # rebuild the list without firing change signals
    self._combobox_cmaps.blockSignals(True)
    self._combobox_cmaps.clear()

    # one entry per file on disk, extension stripped
    for path in _settings.ListDir('colormaps'):
        self._combobox_cmaps.addItem(_os.path.splitext(path)[0])

    # try to restore the selection
    self._combobox_cmaps.setCurrentIndex(self._combobox_cmaps.findText(current))
    self._combobox_cmaps.blockSignals(False)
def _load_cmap_list(self)
Searches the colormaps directory for all files, populates the list.
3.229313
3.023291
1.068145
def save(filters='*.*', text='Save THIS, facehead!', default_directory='default_directory', force_extension=None):
    """Pop up a save dialog; return the selected path (str) or None on cancel.

    Parameters
    ----------
    filters : str
        File types to show in the dialog.
    text : str
        Dialog title.
    default_directory : str
        spinmob.settings key remembering the last-used directory.
    force_extension : str or None
        e.g. 'txt' — appended if the chosen name lacks it.
    """
    # make sure the filters contain "*.*" as an option!
    if not '*' in filters.split(';'):
        filters = filters + ";;All files (*)"

    # start in the remembered directory for this key, if any
    if default_directory in _settings.keys(): default = _settings[default_directory]
    else:                                     default = ""

    # pop up the dialog
    result = _qtw.QFileDialog.getSaveFileName(None, text, default, filters)

    # Qt5 returns (path, filter); take the path
    if _s._qt.VERSION_INFO[0:5] == "PyQt5": result = result[0]
    result = str(result)

    # BUGFIX: bail out on cancel BEFORE enforcing the extension; previously
    # a cancelled dialog returned '.ext' instead of None.
    if result == '': return None

    if force_extension is not None:
        # accept "txt", ".txt" or "*.txt"
        force_extension = force_extension.replace('*','').replace('.','')
        if not _os.path.splitext(result)[-1][1:] == force_extension:
            result = result + '.' + force_extension

    # remember the directory for next time
    _settings[default_directory] = _os.path.split(result)[0]
    return result
def save(filters='*.*', text='Save THIS, facehead!', default_directory='default_directory', force_extension=None)
Pops up a save dialog and returns the string path of the selected file. Parameters ---------- filters='*.*' Which file types should appear in the dialog. text='Save THIS, facehead!' Title text for the dialog. default_directory='default_directory' Key for the spinmob.settings default directory. If you use a name, e.g. 'my_defaultypoo', for one call of this function, the next time you use the same name, it will start in the last dialog's directory by default. force_extension=None Setting this to a string, e.g. 'txt', will enforce that the filename will have this extension.
4.961846
5.054291
0.98171
def load(filters="*.*", text='Select a file, FACEFACE!', default_directory='default_directory'):
    """Pop up a single-file open dialog; return the path (str) or None."""
    # make sure the filters contain "*.*" as an option!
    if '*' not in filters.split(';'):
        filters += ";;All files (*)"

    # start in the remembered directory, if any
    default = _settings[default_directory] if default_directory in _settings.keys() else ""

    # pop up the dialog
    result = _qtw.QFileDialog.getOpenFileName(None, text, default, filters)

    # Qt5 returns (path, filter); take the path
    if _s._qt.VERSION_INFO[0:5] == "PyQt5":
        result = result[0]
    result = str(result)

    if result == '':
        return None
    # remember the directory for next time
    _settings[default_directory] = _os.path.split(result)[0]
    return result
def load(filters="*.*", text='Select a file, FACEFACE!', default_directory='default_directory')
Pops up a dialog for opening a single file. Returns a string path or None.
5.854009
5.865815
0.997987
def load_multiple(filters="*.*", text='Select some files, FACEFACE!', default_directory='default_directory'):
    """Pop up a multi-file open dialog; return a list of string paths or None."""
    # make sure the filters contain "*.*" as an option!
    if '*' not in filters.split(';'):
        filters += ";;All files (*)"

    # start in the remembered directory, if any
    default = _settings[default_directory] if default_directory in _settings.keys() else ""

    # pop up the dialog
    results = _qtw.QFileDialog.getOpenFileNames(None, text, default, filters)

    # Qt5 returns (paths, filter); take the paths
    if _s._qt.VERSION_INFO[0:5] == "PyQt5":
        results = results[0]

    result = [str(r) for r in results]
    if len(result) == 0:
        return
    # remember the directory for next time
    _settings[default_directory] = _os.path.split(result[0])[0]
    return result
def load_multiple(filters="*.*", text='Select some files, FACEFACE!', default_directory='default_directory')
Pops up a dialog for opening more than one file. Returns a list of string paths or None.
5.740606
5.744807
0.999269
def Dump(self):
    """Write all current prefs to the preferences.txt file.

    BUGFIX/cleanup: the original rebuilt list(self.prefs.items()) three
    times per iteration (quadratic), guarded on a condition that is
    always true for (key, value) tuples, and never closed the file on
    error. One pass with a context manager writes the same output.
    """
    with open(self.prefs_path, 'w') as prefs_file:
        for key, value in self.prefs.items():
            prefs_file.write(str(key) + ' = ' + str(value) + '\n')
def Dump(self)
Dumps the current prefs to the preferences.txt file
2.486096
2.159074
1.151464
def link(self):
    """Resolve and link all types in the scope."""
    linked = {}
    surfaces = []

    for name, spec in self.scope.type_specs.items():
        spec = spec.link(self.scope)
        linked[name] = spec
        # expose linked types with a surface on the module surface
        if spec.surface is not None:
            self.scope.add_surface(name, spec.surface)
            surfaces.append(spec.surface)

    self.scope.type_specs = linked
    self.scope.add_surface('__types__', tuple(surfaces))
def link(self)
Resolve and link all types in the scope.
3.0677
2.692972
1.13915
def install(path, name=None):
    """Compile a Thrift file and install it as a submodule of the caller.

    :param path: Path of the Thrift file (absolute, or relative to the
        calling module's file).
    :param str name: Submodule name; defaults to the Thrift file's basename.
    :returns: The compiled module.
    """
    if name is None:
        name = os.path.splitext(os.path.basename(path))[0]

    # qualify the name relative to the module that called us
    caller_module = inspect.getmodule(inspect.stack()[1][0])
    name = '%s.%s' % (caller_module.__name__, name)

    # already installed?
    if name in sys.modules:
        return sys.modules[name]

    # resolve relative paths against the caller's file
    if not os.path.isabs(path):
        caller_file = caller_module.__file__
        path = os.path.normpath(
            os.path.join(os.path.dirname(caller_file), path)
        )

    mod = load(path, name=name)
    sys.modules[name] = mod
    return mod
def install(path, name=None)
Compiles a Thrift file and installs it as a submodule of the caller. Given a tree organized like so:: foo/ __init__.py bar.py my_service.thrift You would do, .. code-block:: python my_service = thriftrw.install('my_service.thrift') To install ``my_service`` as a submodule of the module from which you made the call. If the call was made in ``foo/bar.py``, the compiled Thrift file will be installed as ``foo.bar.my_service``. If the call was made in ``foo/__init__.py``, the compiled Thrift file will be installed as ``foo.my_service``. This allows other modules to import ``from`` the compiled module like so, .. code-block:: python from foo.my_service import MyService .. versionadded:: 0.2 :param path: Path of the Thrift file. This may be an absolute path, or a path relative to the Python module making the call. :param str name: Name of the submodule. Defaults to the basename of the Thrift file. :returns: The compiled module
2.250866
2.587824
0.869791
def loads(self, name, document):
    """Parse and compile the given Thrift document.

    :param str name: Name of the Thrift document.
    :param str document: The Thrift IDL as a string.
    """
    program = self.compiler.compile(name, document)
    return program.link().surface
def loads(self, name, document)
Parse and compile the given Thrift document. :param str name: Name of the Thrift document. :param str document: The Thrift IDL as a string.
36.123722
52.908096
0.682764
def load(self, path, name=None):
    """Load and compile the given Thrift file.

    :param str path: Path to the ``.thrift`` file.
    :param str name: Name of the generated module; defaults to the file's
        base name.
    :returns: The compiled module.
    """
    if name is None:
        name = os.path.splitext(os.path.basename(path))[0]

    # TODO do we care if the file extension is .thrift?
    with open(path, 'r') as f:
        source = f.read()

    return self.compiler.compile(name, source, path).link().surface
def load(self, path, name=None)
Load and compile the given Thrift file. :param str path: Path to the ``.thrift`` file. :param str name: Name of the generated module. Defaults to the base name of the file. :returns: The compiled module.
5.133122
5.232479
0.981012
def get_url_implicit_flow_user(client_id, scope, redirect_uri='https://oauth.vk.com/blank.html', display='page', response_type='token', version=None, state=None, revoke=1):
    """Build the VK implicit-flow authorization url.
    https://vk.com/dev/implicit_flow_user

    :return: url
    """
    base = "https://oauth.vk.com/authorize"
    query = {
        "client_id": client_id,
        "scope": scope,
        "redirect_uri": redirect_uri,
        "display": display,
        "response_type": response_type,
        "version": version,
        "state": state,
        "revoke": revoke,
    }
    # drop parameters that were not supplied
    query = {key: value for key, value in query.items() if value is not None}
    return u"{url}?{params}".format(url=base, params=urlencode(query))
def get_url_implicit_flow_user(client_id, scope, redirect_uri='https://oauth.vk.com/blank.html', display='page', response_type='token', version=None, state=None, revoke=1)
https://vk.com/dev/implicit_flow_user :return: url
1.521235
1.619195
0.939501
def get_url_authcode_flow_user(client_id, redirect_uri, display="page", scope=None, state=None):
    """Build the VK Authorization-Code-Flow url for a user access token.

    Args:
        client_id (int): Application id.
        redirect_uri (str): Address to redirect the user to after authorization.
        display (str): Authorization page appearance: {`page`, `popup`, `mobile`}.
        scope (str, optional): Permissions bit mask (https://vk.com/dev/permissions).
        state (str, optional): Arbitrary string returned with the result.

    Returns:
        str: Url

    .. _Docs: https://vk.com/dev/authcode_flow_user
    """
    base = "https://oauth.vk.com/authorize"
    query = {
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "display": display,
        "response_type": "code",
    }
    if scope:
        query['scope'] = scope
    if state:
        query['state'] = state
    return u"{url}?{params}".format(url=base, params=urlencode(query))
def get_url_authcode_flow_user(client_id, redirect_uri, display="page", scope=None, state=None)
Authorization Code Flow for User Access Token Use Authorization Code Flow to run VK API methods from the server side of an application. Access token received this way is not bound to an ip address but set of permissions that can be granted is limited for security reasons. Args: client_id (int): Application id. redirect_uri (str): Address to redirect user after authorization. display (str): Sets authorization page appearance. Sets: {`page`, `popup`, `mobile`} Defaults to `page` scope (:obj:`str`, optional): Permissions bit mask, to check on authorization and request if necessary. More scope: https://vk.com/dev/permissions state (:obj:`str`, optional): An arbitrary string that will be returned together with authorization result. Returns: str: Url Examples: >>> vk.get_url_authcode_flow_user(1, 'http://example.com/', scope="wall,email") 'https://oauth.vk.com/authorize?client_id=1&display=page&redirect_uri=http://example.com/&scope=wall,email&response_type=code .. _Docs: https://vk.com/dev/authcode_flow_user
1.699646
2.054208
0.827397
def get_fake_data(*a):
    """Called whenever someone presses the "fire" button: fill the databox
    with a noisy cosine and push it to the plot curve."""
    # add columns of data to the databox
    d['x'] = _n.linspace(0, 10, 100)
    d['y'] = _n.cos(d['x']) + 0.1 * _n.random.rand(100)

    # push the new data to the curve
    c.setData(d['x'], d['y'])
def get_fake_data(*a)
Called whenever someone presses the "fire" button.
6.041722
5.357213
1.127773
def setOpts(self, **opts):
    """Change the behavior of the SpinBox. Accepts most of the arguments
    allowed in :func:`__init__ <pyqtgraph.SpinBox.__init__>`."""
    for key in opts:
        if key == 'bounds':
            self.setMinimum(opts[key][0], update=False)
            self.setMaximum(opts[key][1], update=False)
        elif key in ['step', 'minStep']:
            self.opts[key] = D(asUnicode(opts[key]))
        elif key == 'value':
            pass  # defer until bounds have been applied
        else:
            self.opts[key] = opts[key]

    if 'value' in opts:
        self.setValue(opts['value'])

    # if bounds changed without an explicit value, re-clip the current value
    if 'bounds' in opts and 'value' not in opts:
        self.setValue()

    # sanity checks for integer mode
    if self.opts['int']:
        if 'step' in opts:
            step = opts['step']
        else:
            self.opts['step'] = int(self.opts['step'])

        if 'minStep' in opts:
            step = opts['minStep']
            if int(step) != step:
                raise Exception('Integer SpinBox must have integer minStep size.')
        else:
            ms = int(self.opts.get('minStep', 1))
            if ms < 1:
                ms = 1
            self.opts['minStep'] = ms

    if 'delay' in opts:
        self.proxy.setDelay(opts['delay'])

    self.updateText()
def setOpts(self, **opts)
Changes the behavior of the SpinBox. Accepts most of the arguments allowed in :func:`__init__ <pyqtgraph.SpinBox.__init__>`.
2.992662
2.879174
1.039417
def setMaximum(self, m, update=True):
    """Set the maximum allowed value (None = no limit)."""
    if m is not None:
        m = D(asUnicode(m))
    self.opts['bounds'][1] = m
    if update:
        self.setValue()  # re-clip the current value against the new bound
def setMaximum(self, m, update=True)
Set the maximum allowed value (or None for no limit)
9.141963
8.970525
1.019111
def selectNumber(self):
    """Select the numeric portion of the text so the user can type over it."""
    edit = self.lineEdit()
    text = asUnicode(edit.text())

    # no suffix: select everything
    if self.opts['suffix'] == '':
        edit.setSelection(0, len(text))
        return

    # with a suffix, select up to the separating space
    try:
        split_at = text.index(' ')
    except ValueError:
        return
    edit.setSelection(0, split_at)
def selectNumber(self)
Select the numerical portion of the text to allow quick editing by the user.
5.559425
4.816124
1.154336
def value(self):
    """Return the value of this SpinBox, as int or float per its mode."""
    cast = int if self.opts['int'] else float
    return cast(self.val)
def value(self)
Return the value of this SpinBox.
5.553987
4.442578
1.250172
def setValue(self, value=None, update=True, delaySignal=False):
    """Set the spin's value, clipping to bounds and coercing to int when in
    integer mode. value=None re-applies the current value (e.g. after the
    bounds changed). Returns the value actually set, or None if unchanged."""
    if value is None:
        value = self.value()

    # clip to the configured bounds
    lo, hi = self.opts['bounds']
    if lo is not None and value < lo:
        value = lo
    if hi is not None and value > hi:
        value = hi

    if self.opts['int']:
        value = int(value)

    value = D(asUnicode(value))
    if value == self.val:
        return

    prev = self.val
    self.val = value
    if update:
        self.updateText(prev=prev)

    self.sigValueChanging.emit(self, float(self.val))
    # change will be emitted in 300ms if there are no subsequent changes
    if not delaySignal:
        self.emitChanged()

    return value
def setValue(self, value=None, update=True, delaySignal=False)
Set the value of this spin. If the value is out of bounds, it will be clipped to the nearest boundary. If the spin is integer type, the value will be coerced to int. Returns the actual value set. If value is None, then the current value is used (this is for resetting the value after bounds, etc. have changed)
3.813915
3.671256
1.038858
def interpret(self):
    """Return the numeric value of the current text, or False if the text
    is invalid (wrong suffix or unparseable number)."""
    text = self.lineEdit().text()
    suffix = self.opts['suffix']

    # the text must end with the configured suffix, which is then stripped
    if len(suffix) > 0:
        if text[-len(suffix):] != suffix:
            return False
        text = text[:-len(suffix)]

    try:
        return fn.siEval(text)
    except:
        return False
def interpret(self)
Return value of text. Return False if text is invalid, raise exception if text is intermediate
5.605177
5.029209
1.114525
def editingFinishedEvent(self):
    """Edit has finished; commit the new value if it actually changed."""
    # text unchanged since last commit
    if asUnicode(self.lineEdit().text()) == self.lastText:
        return

    try:
        val = self.interpret()
    except:
        return

    if val is False:
        return  # invalid text
    if val == self.val:
        return  # no value change

    self.setValue(val, delaySignal=False)
def editingFinishedEvent(self)
Edit has finished; set value.
5.836359
5.47796
1.065426
def make_factory(self, cls, count):
    """Get the generators from the Scaffolding class within the model.

    Returns (fields, finalizer): a dict mapping field name to generator,
    and the scaffold's finalize callable (or None).
    """
    fields = {}
    descriptions = []
    finalizer = None
    scaffold = scaffolding.scaffold_for_model(cls)

    for field_name in cls._meta.get_all_field_names():
        generator = getattr(scaffold, field_name, None)
        if generator:
            # let the generator prepare itself if it supports set_up
            if hasattr(generator, 'set_up'):
                generator.set_up(cls, count)
            fields[field_name] = generator
            descriptions.append(u'%s: %s; ' % (field_name, fields[field_name]))

    try:
        self.stdout.write(u'Generator for %s: %s\n' % (cls, u''.join(descriptions)))
    except models.ObjectDoesNotExist:
        self.stdout.write(u'Generator for %s\n' % u''.join(descriptions))

    if hasattr(scaffold, 'finalize') and hasattr(scaffold.finalize, '__call__'):
        finalizer = scaffold.finalize

    return fields, finalizer
def make_factory(self, cls, count)
Get the generators from the Scaffolding class within the model.
3.171947
2.894125
1.095995
def compute_indel_length(fs_df):
    """Compute indel length, accounting for whether each row is an
    insertion ('-' reference) or deletion ('-' tumor allele).

    Parameters
    ----------
    fs_df : pd.DataFrame
        mutation input containing only indel mutations

    Returns
    -------
    pd.Series
        length of each indel (0 where neither allele is '-')
    """
    is_ins = fs_df['Reference_Allele'] == '-'
    is_del = fs_df['Tumor_Allele'] == '-'

    indel_len = pd.Series(index=fs_df.index)
    indel_len[is_ins] = fs_df['Tumor_Allele'][is_ins].str.len()
    indel_len[is_del] = fs_df['Reference_Allele'][is_del].str.len()

    return indel_len.fillna(0).astype(int)
def compute_indel_length(fs_df)
Computes the indel length accounting for wether it is an insertion or deletion. Parameters ---------- fs_df : pd.DataFrame mutation input as dataframe only containing indel mutations Returns ------- indel_len : pd.Series length of indels
1.7697
1.952548
0.906354
def keep_indels(mut_df, indel_len_col=True, indel_type_col=True):
    """Filter out every mutation that is not an indel ('-' in either the
    reference or tumor allele column); optionally annotate length and type.

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input in standard format
    indel_len_col : bool
        add an 'indel len' column
    indel_type_col : bool
        add an 'indel type' column ('INS'/'DEL')

    Returns
    -------
    pd.DataFrame
        only the indel mutations
    """
    # restrict to rows annotated as indels
    mut_df = mut_df[is_indel_annotation(mut_df)]

    if indel_len_col:
        mut_df.loc[:, 'indel len'] = compute_indel_length(mut_df)

    if indel_type_col:
        ins_mask = mut_df['Reference_Allele'] == '-'
        del_mask = mut_df['Tumor_Allele'] == '-'
        mut_df['indel type'] = ''
        mut_df.loc[ins_mask, 'indel type'] = 'INS'
        mut_df.loc[del_mask, 'indel type'] = 'DEL'

    return mut_df
def keep_indels(mut_df, indel_len_col=True, indel_type_col=True)
Filters out all mutations that are not indels. Requires that one of the alleles have '-' indicating either an insertion or deletion depending if found in reference allele or somatic allele columns, respectively. Parameters ---------- mut_df : pd.DataFrame mutation input file as a dataframe in standard format indel_len_col : bool whether or not to add a column indicating the length of the indel Returns ------- mut_df : pd.DataFrame mutations with only frameshift mutations kept
2.6098
2.560558
1.019231
def keep_frameshifts(mut_df, indel_len_col=True):
    """Filter out every mutation that is not a frameshift indel; optionally
    annotate the frameshift length.

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input in standard format
    indel_len_col : bool
        add an 'indel len' column

    Returns
    -------
    pd.DataFrame
        only the frameshift mutations
    """
    # keep only frameshift-annotated rows
    mut_df = mut_df[is_frameshift_annotation(mut_df)]
    if indel_len_col:
        mut_df.loc[:, 'indel len'] = compute_indel_length(mut_df)
    return mut_df
def keep_frameshifts(mut_df, indel_len_col=True)
Filters out all mutations that are not frameshift indels. Requires that one of the alleles have '-' indicating either an insertion or deletion depending if found in reference allele or somatic allele columns, respectively. Parameters ---------- mut_df : pd.DataFrame mutation input file as a dataframe in standard format indel_len_col : bool whether or not to add a column indicating the length of the frameshift Returns ------- mut_df : pd.DataFrame mutations with only frameshift mutations kept
3.564994
4.714593
0.756162
def is_frameshift_len(mut_df):
    """Return a boolean Series marking frameshift mutations, judged purely
    by indel length (not a multiple of 3). May be fooled by frameshifts at
    exon-intron boundaries and similar odd cases.

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input in standard format

    Returns
    -------
    pd.Series
        True where the mutation is a frameshift
    """
    # reuse a precomputed length column if present
    if 'indel len' in mut_df.columns:
        indel_len = mut_df['indel len']
    else:
        indel_len = compute_indel_length(mut_df)

    # frameshifts are indels whose length is not a multiple of 3
    is_fs = (indel_len % 3) > 0

    # exclude substitutions (neither allele is '-')
    is_indel = (mut_df['Reference_Allele'] == '-') | (mut_df['Tumor_Allele'] == '-')
    is_fs[~is_indel] = False
    return is_fs
def is_frameshift_len(mut_df)
Simply returns a series indicating whether each corresponding mutation is a frameshift. This is based on the length of the indel. Thus may be fooled by frameshifts at exon-intron boundaries or other odd cases. Parameters ---------- mut_df : pd.DataFrame mutation input file as a dataframe in standard format Returns ------- is_fs : pd.Series pandas series indicating if mutaitons are frameshifts
3.685383
3.749058
0.983016
def get_frameshift_lengths(num_bins):
    """Return the first `num_bins` indel lengths that cause a frameshift
    (i.e. lengths that are not multiples of 3)."""
    lengths = []
    candidate = 1
    while len(lengths) < num_bins:
        if candidate % 3:
            lengths.append(candidate)
        candidate += 1
    return lengths
def get_frameshift_lengths(num_bins)
Simple function that returns the lengths for each frameshift category if `num_bins` number of frameshift categories are requested.
3.731237
3.789567
0.984608