Dataset schema:
  _id               string (2-7 chars)
  title             string (1-88 chars)
  partition         string (3 classes)
  text              string (75-19.8k chars)
  language          string (1 class)
  meta_information  dict
q40400
ExpCM_empirical_phi._compute_empirical_phi
train
def _compute_empirical_phi(self, beta):
    """Returns empirical `phi` at the given value of `beta`.

    Does **not** set `phi` attribute, simply returns what should be
    value of `phi` given the current `g` and `pi_codon` attributes,
    plus the passed value of `beta`. Note that it uses the passed
    value of `beta`, **not** the current `beta` attribute.

    Initial guess is current value of `phi` attribute."""

    def F(phishort):
        """Difference between `g` and expected `g` given `phishort`."""
        phifull = scipy.append(phishort, 1 - phishort.sum())
        phiprod = scipy.ones(N_CODON, dtype='float')
        for w in range(N_NT):
            phiprod *= phifull[w]**CODON_NT_COUNT[w]
        frx_phiprod = frx * phiprod
        frx_phiprod_codonsum = frx_phiprod.sum(axis=1)
        gexpect = []
        for w in range(N_NT - 1):
            gexpect.append(
                ((CODON_NT_COUNT[w] * frx_phiprod).sum(axis=1) /
                 frx_phiprod_codonsum).sum() / (3 * self.nsites))
        gexpect = scipy.array(gexpect, dtype='float')
        return self.g[:-1] - gexpect

    frx = self.pi_codon**beta
    with scipy.errstate(invalid='ignore'):
        result = scipy.optimize.root(F, self.phi[:-1].copy(), tol=1e-8)
    assert result.success, "Failed: {0}".format(result)
    phishort = result.x
    return scipy.append(phishort, 1 - phishort.sum())
python
{ "resource": "" }
q40401
ExpCM_empirical_phi._update_dPrxy
train
def _update_dPrxy(self):
    """Update `dPrxy`, accounting for dependence of `phi` on `beta`."""
    super(ExpCM_empirical_phi, self)._update_dPrxy()
    if 'beta' in self.freeparams:
        self.dQxy_dbeta = scipy.zeros((N_CODON, N_CODON), dtype='float')
        for w in range(N_NT):
            scipy.copyto(self.dQxy_dbeta, self.dphi_dbeta[w],
                         where=CODON_NT_MUT[w])
        self.dQxy_dbeta[CODON_TRANSITION] *= self.kappa
        self.dPrxy['beta'] += self.Frxy * self.dQxy_dbeta
        _fill_diagonals(self.dPrxy['beta'], self._diag_indices)
python
{ "resource": "" }
q40402
ExpCM_empirical_phi._update_dprx
train
def _update_dprx(self):
    """Update `dprx`, accounting for dependence of `phi` on `beta`."""
    super(ExpCM_empirical_phi, self)._update_dprx()
    if 'beta' in self.freeparams:
        dphi_over_phi = scipy.zeros(N_CODON, dtype='float')
        for j in range(3):
            dphi_over_phi += (self.dphi_dbeta / self.phi)[CODON_NT_INDEX[j]]
        for r in range(self.nsites):
            self.dprx['beta'][r] += self.prx[r] * (
                dphi_over_phi - scipy.dot(dphi_over_phi, self.prx[r]))
python
{ "resource": "" }
q40403
ExpCM_empirical_phi_divpressure._update_dPrxy
train
def _update_dPrxy(self):
    """Update `dPrxy`, accounting for dependence of `Prxy` on `omega2`."""
    super(ExpCM_empirical_phi_divpressure, self)._update_dPrxy()
    if 'omega2' in self.freeparams:
        with scipy.errstate(divide='raise', under='raise', over='raise',
                            invalid='ignore'):
            scipy.copyto(self.dPrxy['omega2'],
                         -self.ln_piAx_piAy_beta * self.Qxy * self.omega /
                         (1 - self.piAx_piAy_beta),
                         where=CODON_NONSYN)
        scipy.copyto(self.dPrxy['omega2'], self.Qxy * self.omega,
                     where=scipy.logical_and(CODON_NONSYN,
                         scipy.fabs(1 - self.piAx_piAy_beta) < ALMOST_ZERO))
        for r in range(self.nsites):
            self.dPrxy['omega2'][r] *= self.deltar[r]
        _fill_diagonals(self.dPrxy['omega2'], self._diag_indices)
python
{ "resource": "" }
q40404
ExpCM_empirical_phi_divpressure._update_Frxy
train
def _update_Frxy(self):
    """Update `Frxy` from `piAx_piAy_beta`, `omega`, `omega2`, and `beta`."""
    self.Frxy.fill(1.0)
    self.Frxy_no_omega.fill(1.0)
    with scipy.errstate(divide='raise', under='raise', over='raise',
                        invalid='ignore'):
        scipy.copyto(self.Frxy_no_omega,
                     -self.ln_piAx_piAy_beta / (1 - self.piAx_piAy_beta),
                     where=scipy.logical_and(CODON_NONSYN,
                         scipy.fabs(1 - self.piAx_piAy_beta) > ALMOST_ZERO))
    for r in range(self.nsites):
        scipy.copyto(self.Frxy_no_omega[r],
                     self.Frxy_no_omega[r] * (1 + self.omega2 * self.deltar[r]),
                     where=CODON_NONSYN)
    scipy.copyto(self.Frxy, self.Frxy_no_omega * self.omega,
                 where=CODON_NONSYN)
python
{ "resource": "" }
q40405
YNGKP_M0._calculate_correctedF3X4
train
def _calculate_correctedF3X4(self):
    '''Calculate `phi` based on the empirical `e_pw` values.'''
    def F(phi):
        phi_reshape = phi.reshape((3, N_NT))
        functionList = []
        stop_frequency = []

        for x in range(N_STOP):
            codonFrequency = STOP_CODON_TO_NT_INDICES[x] * phi_reshape
            codonFrequency = scipy.prod(codonFrequency.sum(axis=1))
            stop_frequency.append(codonFrequency)
        C = scipy.sum(stop_frequency)

        for p in range(3):
            for w in range(N_NT):
                s = 0
                for x in range(N_STOP):
                    if STOP_CODON_TO_NT_INDICES[x][p][w] == 1:
                        s += stop_frequency[x]
                functionList.append(
                    (phi_reshape[p][w] - s) / (1 - C) - self.e_pw[p][w])
        return functionList

    phi = self.e_pw.copy().flatten()
    with scipy.errstate(invalid='ignore'):
        result = scipy.optimize.root(F, phi, tol=1e-8)
    assert result.success, "Failed: {0}".format(result)
    return result.x.reshape((3, N_NT))
python
{ "resource": "" }
q40406
YNGKP_M0._update_Pxy
train
def _update_Pxy(self):
    """Update `Pxy` using current `omega`, `kappa`, and `Phi_x`."""
    scipy.copyto(self.Pxy_no_omega, self.Phi_x.transpose(),
                 where=CODON_SINGLEMUT)
    self.Pxy_no_omega[0][CODON_TRANSITION] *= self.kappa
    self.Pxy = self.Pxy_no_omega.copy()
    self.Pxy[0][CODON_NONSYN] *= self.omega
    _fill_diagonals(self.Pxy, self._diag_indices)
python
{ "resource": "" }
q40407
YNGKP_M0._update_dPxy
train
def _update_dPxy(self):
    """Update `dPxy`."""
    if 'kappa' in self.freeparams:
        scipy.copyto(self.dPxy['kappa'], self.Pxy / self.kappa,
                     where=CODON_TRANSITION)
        _fill_diagonals(self.dPxy['kappa'], self._diag_indices)
    if 'omega' in self.freeparams:
        scipy.copyto(self.dPxy['omega'], self.Pxy_no_omega,
                     where=CODON_NONSYN)
        _fill_diagonals(self.dPxy['omega'], self._diag_indices)
python
{ "resource": "" }
q40408
YNGKP_M0._update_Pxy_diag
train
def _update_Pxy_diag(self):
    """Update `D`, `A`, `Ainv` from `Pxy`, `Phi_x`."""
    for r in range(1):
        Phi_x_half = self.Phi_x**0.5
        Phi_x_neghalf = self.Phi_x**-0.5
        # symm_p = scipy.dot(scipy.diag(Phi_x_half),
        #                    scipy.dot(self.Pxy[r], scipy.diag(Phi_x_neghalf)))
        symm_p = (Phi_x_half * (self.Pxy[r] * Phi_x_neghalf).transpose()
                  ).transpose()
        # assert scipy.allclose(symm_p, symm_p.transpose())
        (evals, evecs) = scipy.linalg.eigh(symm_p)
        # assert scipy.allclose(scipy.linalg.inv(evecs), evecs.transpose())
        # assert scipy.allclose(symm_pr, scipy.dot(evecs,
        #         scipy.dot(scipy.diag(evals), evecs.transpose())))
        self.D[r] = evals
        self.Ainv[r] = evecs.transpose() * Phi_x_half
        self.A[r] = (Phi_x_neghalf * evecs.transpose()).transpose()
python
{ "resource": "" }
q40409
GammaDistributedModel.dlogprior
train
def dlogprior(self, param):
    """Equal to value of `basemodel.dlogprior`."""
    assert param in self.freeparams, "Invalid param: {0}".format(param)
    if param in self.distributionparams:
        return 0.0
    else:
        return self._models[0].dlogprior(param)
python
{ "resource": "" }
q40410
step_impl04
train
def step_impl04(context):
    """Compare behavior of singleton vs. non-singleton.

    :param context: test context.
    """
    single = context.singleStore
    general = context.generalStore
    key = 13
    item = 42
    assert single.request(key) == general.request(key)
    single.add_item(key, item)
    general.add_item(key, item)
    assert single.request(key) == general.request(key)
python
{ "resource": "" }
q40411
step_impl06
train
def step_impl06(context):
    """Prepare test for singleton property.

    :param context: test context.
    """
    store = context.SingleStore
    context.st_1 = store()
    context.st_2 = store()
    context.st_3 = store()
python
{ "resource": "" }
q40412
step_impl07
train
def step_impl07(context):
    """Test for singleton property.

    :param context: test context.
    """
    assert context.st_1 is context.st_2
    assert context.st_2 is context.st_3
python
{ "resource": "" }
q40413
Dbf.open
train
def open(cls, dbfile, encoding=None, fieldnames_lower=True,
         case_sensitive=True):
    """Context manager. Allows opening a .dbf file.

    .. code-block::

        with Dbf.open('some.dbf') as dbf:
            ...

    :param str|unicode|file dbfile: .dbf filepath or a file-like object.

    :param str|unicode encoding: Encoding used by DB.
        This will be used if there's no encoding information in the DB itself.

    :param bool fieldnames_lower: Lowercase field names.

    :param bool case_sensitive: Whether DB filename is case sensitive.

    :rtype: Dbf
    """
    if not case_sensitive:
        if isinstance(dbfile, string_types):
            dbfile = pick_name(dbfile, listdir(path.dirname(dbfile)))

    with open(dbfile, 'rb') as f:
        yield cls(f, encoding=encoding, fieldnames_lower=fieldnames_lower)
python
{ "resource": "" }
q40414
Dbf.open_zip
train
def open_zip(cls, dbname, zipped, encoding=None, fieldnames_lower=True,
             case_sensitive=True):
    """Context manager. Allows opening a .dbf file from zip archive.

    .. code-block::

        with Dbf.open_zip('some.dbf', 'myarch.zip') as dbf:
            ...

    :param str|unicode dbname: .dbf file name

    :param str|unicode|file zipped: .zip file path or a file-like object.

    :param str|unicode encoding: Encoding used by DB.
        This will be used if there's no encoding information in the DB itself.

    :param bool fieldnames_lower: Lowercase field names.

    :param bool case_sensitive: Whether DB filename is case sensitive.

    :rtype: Dbf
    """
    with ZipFile(zipped, 'r') as zip_:
        if not case_sensitive:
            dbname = pick_name(dbname, zip_.namelist())

        with zip_.open(dbname) as f:
            yield cls(f, encoding=encoding, fieldnames_lower=fieldnames_lower)
python
{ "resource": "" }
q40415
Dbf.iter_rows
train
def iter_rows(self):
    """Generator reading .dbf row one by one.

    Yields named tuple Row object.

    :rtype: Row
    """
    fileobj = self._fileobj
    cls_row = self.cls_row
    fields = self.fields

    for idx in range(self.prolog.records_count):
        data = fileobj.read(1)
        marker = struct.unpack('<1s', data)[0]
        is_deleted = marker == b'*'

        if is_deleted:
            # Consume the deleted record's data bytes too, so that
            # subsequent reads stay aligned with record boundaries.
            fileobj.read(sum(field.len for field in fields))
            continue

        row_values = []
        for field in fields:
            val = field.cast(fileobj.read(field.len))
            row_values.append(val)

        yield cls_row(*row_values)
python
{ "resource": "" }
q40416
FigColorbar.setup_colorbars
train
def setup_colorbars(self, plot_call_sign):
    """Setup colorbars for each type of plot.

    Takes all of the options prepared during the ``__init__`` method
    and makes the colorbar.

    Args:
        plot_call_sign (obj): Plot instance of ax.contourf with
            colormapping to add as a colorbar.
    """
    self.fig.colorbar(plot_call_sign, cax=self.cbar_ax,
                      ticks=self.cbar_ticks,
                      orientation=self.cbar_orientation)

    # setup colorbar ticks
    (getattr(self.cbar_ax, 'set_' + self.cbar_var + 'ticklabels')
        (self.cbar_tick_labels, fontsize=self.cbar_ticks_fontsize))
    (getattr(self.cbar_ax, 'set_' + self.cbar_var + 'label')
        (self.cbar_label, fontsize=self.cbar_label_fontsize,
         labelpad=self.cbar_label_pad))
    return
python
{ "resource": "" }
q40417
setup_environ
train
def setup_environ(manage_file, settings=None, more_pythonic=False):
    """Sets up a Django app within a manage.py file.

    Keyword Arguments

    **settings**
        An imported settings module. Without this, playdoh tries to import
        these modules (in order): DJANGO_SETTINGS_MODULE, settings

    **more_pythonic**
        When True, does not do any path hackery besides adding the vendor
        dirs. This requires a newer Playdoh layout without top level apps,
        lib, etc.
    """
    # sys is global to avoid undefined local
    global sys, current_settings, execute_from_command_line, ROOT
    ROOT = os.path.dirname(os.path.abspath(manage_file))

    # Adjust the python path and put local packages in front.
    prev_sys_path = list(sys.path)

    # Make root application importable without the need for
    # python setup.py install|develop
    sys.path.append(ROOT)

    if not more_pythonic:
        warnings.warn("You're using an old-style Playdoh layout with a top "
                      "level __init__.py and apps directories. This is error "
                      "prone and fights the Zen of Python. "
                      "See http://playdoh.readthedocs.org/en/latest/"
                      "getting-started/upgrading.html")
        # Give precedence to your app's parent dir, which contains __init__.py
        sys.path.append(os.path.abspath(os.path.join(ROOT, os.pardir)))

        site.addsitedir(path('apps'))
        site.addsitedir(path('lib'))

    # Local (project) vendor library
    site.addsitedir(path('vendor-local'))
    site.addsitedir(path('vendor-local/lib/python'))

    # Global (upstream) vendor library
    site.addsitedir(path('vendor'))
    site.addsitedir(path('vendor/lib/python'))

    # Move the new items to the front of sys.path. (via virtualenv)
    new_sys_path = []
    for item in list(sys.path):
        if item not in prev_sys_path:
            new_sys_path.append(item)
            sys.path.remove(item)
    sys.path[:0] = new_sys_path

    from django.core.management import execute_from_command_line  # noqa

    if not settings:
        if 'DJANGO_SETTINGS_MODULE' in os.environ:
            settings = import_mod_by_name(os.environ['DJANGO_SETTINGS_MODULE'])
        elif os.path.isfile(os.path.join(ROOT, 'settings_local.py')):
            import settings_local as settings
            warnings.warn("Using settings_local.py is deprecated. See "
                          "http://playdoh.readthedocs.org/en/latest/"
                          "upgrading.html",
                          DeprecationWarning)
        else:
            import settings
    current_settings = settings

    validate_settings(settings)
python
{ "resource": "" }
q40418
validate_settings
train
def validate_settings(settings):
    """
    Raise an error in prod if we see any insecure settings.

    This used to warn during development but that was changed in
    71718bec324c2561da6cc3990c927ee87362f0f7
    """
    from django.core.exceptions import ImproperlyConfigured
    if settings.SECRET_KEY == '':
        msg = 'settings.SECRET_KEY cannot be blank! Check your local settings'
        if not settings.DEBUG:
            raise ImproperlyConfigured(msg)

    if getattr(settings, 'SESSION_COOKIE_SECURE', None) is None:
        msg = ('settings.SESSION_COOKIE_SECURE should be set to True; '
               'otherwise, your session ids can be intercepted over HTTP!')
        if not settings.DEBUG:
            raise ImproperlyConfigured(msg)

    hmac = getattr(settings, 'HMAC_KEYS', {})
    if not len(hmac.keys()):
        msg = 'settings.HMAC_KEYS cannot be empty! Check your local settings'
        if not settings.DEBUG:
            raise ImproperlyConfigured(msg)
python
{ "resource": "" }
q40419
incremental_neighbor_graph
train
def incremental_neighbor_graph(X, precomputed=False, k=None, epsilon=None,
                               weighting='none'):
    '''See neighbor_graph.'''
    assert (k is not None) or (epsilon is not None), (
        "Must provide `k` or `epsilon`")
    assert _issequence(k) ^ _issequence(epsilon), (
        "Exactly one of `k` or `epsilon` must be a sequence.")
    assert weighting in ('binary', 'none'), (
        "Invalid weighting param: " + weighting)
    is_weighted = weighting == 'none'

    if precomputed:
        D = X
    else:
        D = pairwise_distances(X, metric='euclidean')

    # pre-sort for efficiency
    order = np.argsort(D)[:, 1:]
    if k is None:
        k = D.shape[0]

    # generate the sequence of graphs
    # TODO: convert the core of these loops to Cython for speed
    W = np.zeros_like(D)
    I = np.arange(D.shape[0])
    if _issequence(k):
        # varied k, fixed epsilon
        if epsilon is not None:
            D[D > epsilon] = 0
        old_k = 0
        for new_k in k:
            idx = order[:, old_k:new_k]
            dist = D[I, idx.T]
            W[I, idx.T] = dist if is_weighted else 1
            yield Graph.from_adj_matrix(W)
            old_k = new_k
    else:
        # varied epsilon, fixed k
        idx = order[:, :k]
        dist = D[I, idx.T].T
        old_i = np.zeros(D.shape[0], dtype=int)
        for eps in epsilon:
            for i, row in enumerate(dist):
                oi = old_i[i]
                ni = oi + np.searchsorted(row[oi:], eps)
                rr = row[oi:ni]
                W[i, idx[i, oi:ni]] = rr if is_weighted else 1
                old_i[i] = ni
            yield Graph.from_adj_matrix(W)
python
{ "resource": "" }
q40420
Versions
train
def Versions():
    """Returns a string with version information.

    You would call this function if you want a string giving detailed
    information on the version of ``phydms`` and the associated packages
    that it uses.
    """
    s = [
        'Version information:',
        '\tTime and date: %s' % time.asctime(),
        '\tPlatform: %s' % platform.platform(),
        '\tPython version: %s' % sys.version.replace('\n', ' '),
        '\tphydms version: %s' % phydmslib.__version__,
    ]
    for modname in ['Bio', 'cython', 'numpy', 'scipy', 'matplotlib',
                    'natsort', 'sympy', 'six', 'pandas', 'pyvolve',
                    'statsmodels', 'weblogolib', 'PyPDF2']:
        try:
            v = importlib.import_module(modname).__version__
            s.append('\t%s version: %s' % (modname, v))
        except ImportError:
            s.append('\t%s cannot be imported into Python' % modname)
    return '\n'.join(s)
python
{ "resource": "" }
q40421
readDivPressure
train
def readDivPressure(fileName):
    """Reads in diversifying pressures from a file.

    Scales diversifying pressure values so the absolute value of the
    max value is 1, unless all values are zero.

    Args:
        `fileName` (string or readable file-like object)
            File holding diversifying pressure values. Can be comma-,
            space-, or tab-separated. The first column is the site
            (consecutively numbered, starting with one) and the second
            column is the diversifying pressure value.

    Returns:
        `divPressure` (dict keyed by ints)
            `divPressure[r]` is the diversifying pressure value of site `r`.
    """
    try:
        df = pandas.read_csv(fileName, sep=None, engine='python')
    except ValueError:
        # re-raise with a clearer message; the original code only set a
        # flag here and would then fail with a NameError on `df`
        raise ValueError("Cannot parse diversifying pressures "
                         "in {0}".format(fileName))
    df.columns = ['site', 'divPressureValue']
    scaleFactor = max(df["divPressureValue"].abs())
    if scaleFactor > 0:
        df["divPressureValue"] = [x / scaleFactor
                                  for x in df["divPressureValue"]]
    assert len(df['site'].tolist()) == len(set(df['site'].tolist())), (
        "There is at least one non-unique site in {0}".format(fileName))
    assert max(df["divPressureValue"].abs()) <= 1, (
        "The scaling produced a diversifying pressure value with an "
        "absolute value greater than one.")
    sites = df['site'].tolist()
    divPressure = {}
    for r in sites:
        divPressure[r] = df[df['site'] == r]["divPressureValue"].tolist()[0]
    return divPressure
python
{ "resource": "" }
q40422
load_configuration
train
def load_configuration(conf_path):
    """Load and validate test configuration.

    :param conf_path: path to YAML configuration file.
    :return: configuration as dict.
    """
    with open(conf_path) as f:
        # safe_load avoids constructing arbitrary Python objects from YAML
        conf_dict = yaml.safe_load(f)
    validate_config(conf_dict)
    return conf_dict
python
{ "resource": "" }
q40423
main
train
def main():
    """Read configuration and execute test runs."""
    parser = argparse.ArgumentParser(description='Stress test applications.')
    parser.add_argument('config_path', help='Path to configuration file.')
    args = parser.parse_args()
    try:
        configuration = load_configuration(args.config_path)
    except InvalidConfigurationError:
        print("\nConfiguration is not valid.")
        print('Example:\n{}'.format(help_configuration))
        return 1

    print("Starting up ...")
    futures = []
    with ProcessPoolExecutor(configuration[PROCESSORS]) as executor:
        for _ in range(configuration[PROCESSES]):
            futures.append(executor.submit(execute_test, configuration))
    print("... finished")

    test_stats = combine_test_stats([f.result() for f in futures])
    show_test_stats(test_stats)
    return 0
python
{ "resource": "" }
q40424
ctox
train
def ctox(arguments, toxinidir):
    """Sets up conda environments, and sets up and runs each environment
    based on the project's tox.ini configuration file.

    Returns 1 if either the build or running the commands failed, or 0
    if all commands ran successfully.
    """
    if arguments is None:
        arguments = []
    if toxinidir is None:
        toxinidir = os.getcwd()

    args, options = parse_args(arguments)

    if args.version:
        print(version)
        return 0

    # if no conda trigger OSError
    try:
        with open(os.devnull, "w") as fnull:
            check_output(['conda', '--version'], stderr=fnull)
    except OSError:
        cprint("conda not found, you need to install it to use ctox.\n"
               "The recommended way is to download miniconda,\n"
               "Do not install conda via pip.", 'err')
        return 1

    toxinifile = os.path.join(toxinidir, "tox.ini")

    from ctox.config import read_config, get_envlist
    config = read_config(toxinifile)
    if args.e == 'ALL':
        envlist = get_envlist(config)
    else:
        envlist = args.e.split(',')

    # TODO configure with option
    toxdir = os.path.join(toxinidir, ".tox")

    # create a zip file for the project
    from ctox.pkg import make_dist, package_name
    cprint("GLOB sdist-make: %s" % os.path.join(toxinidir, "setup.py"))
    package = package_name(toxinidir)
    if not make_dist(toxinidir, toxdir, package):
        cprint("    setup.py sdist failed", 'err')
        return 1

    # setup each environment and run ctox
    failing = {}
    for env_name in envlist:
        env = Env(name=env_name, config=config, options=options,
                  toxdir=toxdir, toxinidir=toxinidir, package=package)
        failing[env_name] = env.ctox()

    # print summary of the outcomes of ctox for each environment
    cprint('Summary')
    print("-" * 23)
    for env_name in envlist:
        n = failing[env_name]
        outcome = ('succeeded', 'failed', 'skipped')[n]
        status = ('ok', 'err', 'warn')[n]
        cprint("%s commands %s" % (env_name, outcome), status)

    return any(1 == v for v in failing.values())
python
{ "resource": "" }
q40425
positional_args
train
def positional_args(arguments):
    """Generator for positional arguments.

    Example
    -------
    >>> list(positional_args(["arg1", "arg2", "--kwarg"]))
    ["arg1", "arg2"]

    >>> list(positional_args(["--", "arg1", "--kwarg"]))
    ["arg1", "--kwarg"]
    """
    # TODO this behaviour probably isn't quite right.
    if arguments and arguments[0] == '--':
        for a in arguments[1:]:
            yield a
    else:
        for a in arguments:
            if a.startswith('-'):
                break
            yield a
python
{ "resource": "" }
q40426
Env.ctox
train
def ctox(self):
    """Main method for the environment.

    Parse the tox.ini config, install the dependencies and run the
    commands. The output of the commands is printed.

    Returns 0 if they ran successfully, 1 if there was an error (either
    in setup or whilst running the commands), 2 if the build was skipped.
    """
    # TODO make this less of a hack e.g. using basepython from config
    # if it exists (and use an attribute directly).
    if self.name[:4] not in SUPPORTED_ENVS:
        from colorama import Style
        cprint(Style.BRIGHT + "Skipping unsupported python version %s\n"
               % self.name, 'warn')
        return 2

    # TODO don't remove env if there's a dependency mis-match
    # rather "clean" it to the empty state (the hope being to keep
    # the dist build around - so not all files need to be rebuilt)
    # TODO extract this as a method (for readability)
    if not self.env_exists() or self.reusableable():
        cprint("%s create: %s" % (self.name, self.envdir))
        self.create_env(force_remove=True)

        cprint("%s installdeps: %s" % (self.name, ', '.join(self.deps)))
        if not self.install_deps():
            cprint("    deps installation failed, aborted.\n", 'err')
            return 1
    else:
        cprint("%s cached (deps unchanged): %s" % (self.name, self.envdir))

    # install the project from the zipped file
    # TODO think more carefully about where it should be installed;
    # specifically we want this to be able to include the test files (which
    # are not always unpacked when installed, so as to run the tests there).
    # If there are build files (e.g. cython) then tests must run where
    # the build was. Also, reinstalling should not overwrite the builds,
    # e.g. setup.py will skip rebuilding cython files if they are unchanged.
    cprint("%s inst: %s" % (self.name, self.envdistdir))
    if not self.install_dist():
        cprint("    install failed.\n", 'err')
        return 1

    cprint("%s runtests" % self.name)
    # return False if all commands were successfully run
    # otherwise returns True if at least one command exited badly
    return self.run_commands()
python
{ "resource": "" }
q40427
Tokenizer.is_blankspace
train
def is_blankspace(self, char):
    """
    Test if a character is a blankspace.

    Parameters
    ----------
    char : str
        The character to test.

    Returns
    -------
    ret : bool
        True if character is a blankspace, False otherwise.
    """
    if len(char) > 1:
        raise TypeError("Expected a char.")
    if char in self.blankspaces:
        return True
    return False
python
{ "resource": "" }
q40428
Tokenizer.is_separator
train
def is_separator(self, char):
    """
    Test if a character is a separator.

    Parameters
    ----------
    char : str
        The character to test.

    Returns
    -------
    ret : bool
        True if character is a separator, False otherwise.
    """
    if len(char) > 1:
        raise TypeError("Expected a char.")
    if char in self.separators:
        return True
    return False
python
{ "resource": "" }
q40429
jitChol
train
def jitChol(A, maxTries=10, warning=True):
    """Do a Cholesky decomposition with jitter.

    Description:

    U, jitter = jitChol(A, maxTries, warning) attempts a Cholesky
    decomposition on the given matrix; if the matrix isn't positive
    definite the function adds 'jitter' and tries again. Thereafter the
    amount of jitter is multiplied by 10 each time it is added again.
    This is continued for a maximum of 10 times. The amount of jitter
    added is returned.

    Returns:
      U - the Cholesky decomposition for the matrix.
      jitter - the amount of jitter that was added to the matrix.

    Arguments:
      A - the matrix for which the Cholesky decomposition is required.
      maxTries - the maximum number of times that jitter is added
        before giving up (default 10).
      warning - whether to give a warning for adding jitter
        (default is True)

    See also CHOL, PDINV, LOGDET

    Copyright (c) 2005, 2006 Neil D. Lawrence
    """
    jitter = 0
    i = 0
    while True:
        try:
            # Try --- need to check A is positive definite
            if jitter == 0:
                jitter = abs(SP.trace(A)) / A.shape[0] * 1e-6
                LC = linalg.cholesky(A, lower=True)
                return LC.T, 0.0
            else:
                if warning:
                    # pdb.set_trace()
                    # plt.figure()
                    # plt.imshow(A, interpolation="nearest")
                    # plt.colorbar()
                    # plt.show()
                    logging.error("Adding jitter of %f in jitChol()."
                                  % jitter)
                LC = linalg.cholesky(A + jitter * SP.eye(A.shape[0]),
                                     lower=True)
                return LC.T, jitter
        except linalg.LinAlgError:
            # Seems to have been non-positive definite.
            if i < maxTries:
                jitter = jitter * 10
            else:
                raise linalg.LinAlgError(
                    "Matrix non positive definite, jitter of " + str(jitter)
                    + " added but failed after " + str(i) + " trials.")
        i += 1
    return LC
python
{ "resource": "" }
q40430
jitEigh
train
def jitEigh(A, maxTries=10, warning=True):
    """
    Do an eigenvalue decomposition with jitter; works like jitChol.
    """
    # (the original also reassigned warning = True here, which silently
    # ignored the caller's argument; that override has been dropped)
    jitter = 0
    i = 0
    while True:
        if jitter == 0:
            jitter = abs(SP.trace(A)) / A.shape[0] * 1e-6
            S, U = linalg.eigh(A)
        else:
            if warning:
                # pdb.set_trace()
                # plt.figure()
                # plt.imshow(A, interpolation="nearest")
                # plt.colorbar()
                # plt.show()
                logging.error("Adding jitter of %f in jitEigh()." % jitter)
            S, U = linalg.eigh(A + jitter * SP.eye(A.shape[0]))

        if S.min() > 1E-10:
            return S, U

        if i < maxTries:
            jitter = jitter * 10
        else:
            raise linalg.LinAlgError(
                "Matrix non positive definite, jitter of " + str(jitter)
                + " added but failed after " + str(i) + " trials.")
        i += 1
python
{ "resource": "" }
q40431
modelComparisonDataFrame
train
def modelComparisonDataFrame(modelcomparisonfile, splitparams):
    """Converts ``modelcomparison.md`` file to `pandas` DataFrame.

    Running ``phydms_comprehensive`` creates a file with the suffix
    ``modelcomparison.md``. This function converts that file into a
    DataFrame that is easy to handle for downstream analysis.

    Args:
        `modelcomparisonfile` (str)
            The name of the ``modelcomparison.md`` file.
        `splitparams` (bool)
            If `True`, create a new column for each model param in the
            `ParamValues` column, with values of `NaN` if that model
            does not have such a parameter.

    Returns:
        A `pandas` DataFrame with the information in the model
        comparison file.

    >>> with tempfile.NamedTemporaryFile(mode='w') as f:
    ...     _ = f.write('\\n'.join([
    ...         '| Model | deltaAIC | LogLikelihood | nParams | ParamValues |',
    ...         '|-------|----------|---------------|---------|--------------|',
    ...         '| ExpCM | 0.00 | -1000.00 | 7 | x=1.0, y=2.0 |',
    ...         '| YNGKP | 10.2 | -1005.10 | 7 | x=1.3, z=0.1 |',
    ...         ]))
    ...     f.flush()
    ...     df_split = modelComparisonDataFrame(f.name, splitparams=True)
    ...     df_nosplit = modelComparisonDataFrame(f.name, splitparams=False)

    >>> df_nosplit.equals(pandas.DataFrame.from_records(
    ...         [['ExpCM', 0, -1000, 7, 'x=1.0, y=2.0'],
    ...          ['YNGKP', 10.2, -1005.1, 7, 'x=1.3, z=0.1']],
    ...         columns=['Model', 'deltaAIC', 'LogLikelihood',
    ...                  'nParams', 'ParamValues']))
    True

    >>> df_split.equals(pandas.DataFrame.from_records(
    ...         [['ExpCM', 0, -1000, 7, 1.0, 2.0, numpy.nan],
    ...          ['YNGKP', 10.2, -1005.1, 7, 1.3, numpy.nan, 0.1]],
    ...         columns=['Model', 'deltaAIC', 'LogLikelihood',
    ...                  'nParams', 'x', 'y', 'z']))
    True
    """
    df = (pandas.read_csv(modelcomparisonfile, sep='|', skiprows=[1])
          .select(lambda x: 'Unnamed' not in x, axis=1)
          )

    # strip whitespace
    df.columns = df.columns.str.strip()
    for col in df.columns:
        if pandas.api.types.is_string_dtype(df[col]):
            df[col] = df[col].str.strip()

    paramsdict = {}
    if splitparams:
        for (i, paramstr) in df['ParamValues'].iteritems():
            paramsdict[i] = dict(map(lambda tup: (tup[0], float(tup[1])),
                                     [param.strip().split('=') for param
                                      in paramstr.split(',')]))
        params_df = pandas.DataFrame.from_dict(paramsdict, orient='index')
        params_df = params_df[sorted(params_df.columns)]
        df = (df.join(params_df)
              .drop('ParamValues', axis=1)
              )

    return df
python
{ "resource": "" }
q40432
BenjaminiHochbergCorrection
train
def BenjaminiHochbergCorrection(pvals, fdr):
    """Benjamini-Hochberg procedure to control false discovery rate.

    Calling arguments:

    *pvals* : a list of tuples of *(label, p)* where *label* is some
    label assigned to each data point, and *p* is the corresponding
    *P-value*.

    *fdr* : the desired false discovery rate

    The return value is the 2-tuple *(pcutoff, significantlabels)*.
    After applying the algorithm, all data points with *p <= pcutoff*
    are declared significant. The labels for these data points are in
    *significantlabels*. If there are no significant sites, *pcutoff*
    is returned as the maximum P-value that would have made a single
    point significant.
    """
    num_tests = len(pvals)

    # sort by p-value
    sorted_tests = sorted(pvals, key=lambda tup: tup[1])

    # find maximum rank for which p <= (rank / num_tests) * FDR
    max_rank = 0
    pcutoff = None
    for (rank, (label, p)) in enumerate(sorted_tests):
        rank = rank + 1  # rank beginning with 1 for smallest p-value
        bh_threshold = fdr * float(rank) / num_tests
        if p <= bh_threshold:
            assert rank > max_rank
            max_rank = rank
            pcutoff = bh_threshold

    # pcutoff to have one significant site if there are none
    if pcutoff is None:
        pcutoff = 1.0 / num_tests * fdr

    # collect significant ranks:
    significantlabels = []
    for (rank, (label, p)) in enumerate(sorted_tests):
        rank = rank + 1  # rank beginning with 1 for smallest p-value
        if rank <= max_rank:
            assert p <= pcutoff
            significantlabels.append(label)

    return (pcutoff, significantlabels)
python
{ "resource": "" }
q40433
param_dict_to_list
train
def param_dict_to_list(dict, skeys=None):
    """Convert from a parameter dictionary to a flat parameter list."""
    # concatenate the flattened arrays in the order given by skeys
    RV = SP.concatenate([dict[key].flatten() for key in skeys])
    return RV
python
{ "resource": "" }
q40434
checkgrad
train
def checkgrad(f, fprime, x, *args, **kw_args):
    """
    Check an analytical gradient against a central-difference
    (3-point) numerical approximation.
    """
    LG.debug("Checking gradient ...")
    import numpy as np

    # using machine precision to choose h
    eps = np.finfo(float).eps
    step = np.sqrt(eps) * (x.min())

    # shake things up a bit by taking random steps for each x dimension
    h = step * np.sign(np.random.uniform(-1, 1, x.size))

    f_ph = f(x + h, *args, **kw_args)
    f_mh = f(x - h, *args, **kw_args)
    numerical_gradient = (f_ph - f_mh) / (2 * h)
    analytical_gradient = fprime(x, *args, **kw_args)
    ratio = (f_ph - f_mh) / (2 * np.dot(h, analytical_gradient))

    h = np.zeros_like(x)
    for i in range(len(x)):
        # (a stray pdb.set_trace() debug call was removed here)
        h[i] = step
        f_ph = f(x + h, *args, **kw_args)
        f_mh = f(x - h, *args, **kw_args)
        numerical_gradient = (f_ph - f_mh) / (2 * step)
        analytical_gradient = fprime(x, *args, **kw_args)[i]
        ratio = (f_ph - f_mh) / (2 * step * analytical_gradient)
        h[i] = 0
        LG.debug("[%d] numerical: %f, analytical: %f, ratio: %f" %
                 (i, numerical_gradient, analytical_gradient, ratio))
python
{ "resource": "" }
q40435
Atom.chival
train
def chival(self, bonds):
    """Compute the chiral value around an atom given a list of bonds."""
    # XXX I'm not sure how this works?
    order = [bond.xatom(self) for bond in bonds]
    return self._chirality(order)
python
{ "resource": "" }
q40436
Atom.setchival
train
def setchival(self, bondorder, rotation):
    """Compute chiral ordering of surrounding atoms."""
    rotation = [None, "@", "@@"][(rotation % 2)]
    # check to see if the bonds are attached
    if not bondorder:
        # use the default xatoms
        if len(self.oatoms) < 3 and self.explicit_hcount != 1:
            raise PinkyError("Need to have an explicit hydrogen when "
                             "specifying chirality with less than three "
                             "bonds")
        self._chirality = chirality.T(self.oatoms, rotation)
        return

    if len(bondorder) != len(self.bonds):
        raise AtomError("The order of all bonds must be specified")

    for bond in bondorder:
        if bond not in self.bonds:
            raise AtomError("Specified bonds to assign chirality are not "
                            "attached to atom")

    # (fixed: the original iterated over an undefined name `bonds` here)
    order = [bond.xatom(self) for bond in bondorder]
    self._chirality = chirality.T(order, rotation)
python
{ "resource": "" }
q40437
FreedDisambiguate.disambiguate
train
def disambiguate(self, symclasses):
    """Use the connection to the atoms around a given vertex as a
    multiplication function to disambiguate a vertex."""
    offsets = self.offsets
    result = symclasses[:]
    for index in self.range:
        try:
            val = 1
            for offset, bondtype in offsets[index]:
                val *= symclasses[offset] * bondtype
        except OverflowError:
            # Hmm, how often does this occur?
            val = 1L
            for offset, bondtype in offsets[index]:
                val *= symclasses[offset] * bondtype
        result[index] = val
    return result
python
{ "resource": "" }
q40438
FreedDisambiguate.breakRankTies
train
def breakRankTies(self, oldsym, newsym):
    """Break ties to form a new list with the same integer ordering
    from high to low.

    Example
      old = [ 4, 2,  4,  7,  8]   (two ties: 4 and 4)
      new = [60, 2, 61, 90, 99]
      res = [ 4, 0,  3,  1,  2]
              *      *
      This tie is broken in this case.
    """
    stableSort = map(None, oldsym, newsym, range(len(oldsym)))
    stableSort.sort()

    lastOld, lastNew = None, None
    x = -1
    for old, new, index in stableSort:
        if old != lastOld:
            x += 1
            # the last old value was changed, so update both
            lastOld = old
            lastNew = new
        elif new != lastNew:
            # break the tie based on the new info (update lastNew)
            x += 1
            lastNew = new
        newsym[index] = x
python
{ "resource": "" }
q40439
FreedDisambiguate.findLowest
train
def findLowest(self, symorders):
    """Find the position of the first lowest tie in a symorder,
    or -1 if there are no ties."""
    _range = range(len(symorders))
    stableSymorders = map(None, symorders, _range)
    # XXX FIX ME - do I need to sort?
    stableSymorders.sort()

    lowest = None
    for index in _range:
        if stableSymorders[index][0] == lowest:
            return stableSymorders[index - 1][1]
        lowest = stableSymorders[index][0]
    return -1
python
{ "resource": "" }
q40440
FreedDisambiguate.findInvariantPartitioning
train
def findInvariantPartitioning(self):
    """Keep the initial ordering of the symmetry orders but make all
    values unique.

    For example, if there are two symmetry orders equal to 0, convert
    them to 0 and 1 and add 1 to the remaining orders:
    [0, 1, 0, 1] should become [0, 2, 1, 3]."""
    symorders = self.symorders[:]
    _range = range(len(symorders))
    while 1:
        pos = self.findLowest(symorders)
        if pos == -1:
            self.symorders = symorders
            return
        for i in _range:
            symorders[i] = symorders[i] * 2 + 1
        symorders[pos] = symorders[pos] - 1
        symorders = self.findInvariant(symorders)
python
{ "resource": "" }
q40441
LowRankCov.setCovariance
train
def setCovariance(self, cov):
    """Makes a low-rank approximation of `cov`."""
    assert cov.shape[0] == self.dim, 'Dimension mismatch.'
    S, U = la.eigh(cov)
    # keep the top-`rank` eigenpairs (eigh returns ascending order)
    U = U[:, ::-1]
    S = S[::-1]
    _X = U[:, :self.rank] * sp.sqrt(S[:self.rank])
    self.X = _X
python
{ "resource": "" }
q40442
mountain_car_trajectories
train
def mountain_car_trajectories(num_traj):
    '''Collect data using random hard-coded policies on MountainCar.

    num_traj : int, number of trajectories to collect

    Returns (trajectories, traces)
    '''
    domain = MountainCar()
    slopes = np.random.normal(0, 0.01, size=num_traj)
    v0s = np.random.normal(0, 0.005, size=num_traj)
    trajectories = []
    traces = []
    norm = np.array((domain.MAX_POS - domain.MIN_POS,
                     domain.MAX_VEL - domain.MIN_VEL))
    for m, b in zip(slopes, v0s):
        mcar_policy = lambda s: 0 if s[0] * m + s[1] + b > 0 else 2
        start = (np.random.uniform(domain.MIN_POS, domain.MAX_POS),
                 np.random.uniform(domain.MIN_VEL, domain.MAX_VEL))
        samples = _run_episode(mcar_policy, domain, start, max_iters=40)
        # normalize
        samples.state /= norm
        samples.next_state /= norm
        traces.append(samples)
        if samples.reward[-1] == 0:
            # Don't include the warp to the final state.
            trajectories.append(samples.state[:-1])
        else:
            trajectories.append(samples.state)
    return trajectories, traces
python
{ "resource": "" }
q40443
before_all
train
def before_all(context):
    """Setup before all tests.

    Initialize the logger framework.

    :param context: test context.
    """
    lf = LoggerFactory(config_file='../features/resources/test_config.yaml')
    lf.initialize()
    ll = lf.get_instance('environment')
    ll.info('Logger initialized: {}'.format(lf.config))
    ll.info('Initial test context: {}'.format(context))
python
{ "resource": "" }
q40444
shell_escape
train
def shell_escape(text, _safe=re.compile(r"^[-._,+a-zA-Z0-9]+$")):
    """Escape given string according to shell rules."""
    if not text or _safe.match(text):
        return text
    squote = type(text)("'")
    return squote + text.replace(squote, type(text)(r"'\''")) + squote
python
{ "resource": "" }
q40445
get_pattern_mat
train
def get_pattern_mat(oracle, pattern):
    """Output a matrix containing patterns in rows from a vmo.

    :param oracle: input vmo object
    :param pattern: pattern extracted from oracle
    :return: a numpy matrix that could be used to visualize the
        pattern extracted.
    """
    pattern_mat = np.zeros((len(pattern), oracle.n_states - 1))
    for i, p in enumerate(pattern):
        length = p[1]
        for s in p[0]:
            pattern_mat[i][s - length:s - 1] = 1
    return pattern_mat
python
{ "resource": "" }
q40446
cache_image_data
train
def cache_image_data(cache_dir, cache_key, uploader, *args, **kwargs):
    """Call uploader and cache its results."""
    use_cache = True
    if "use_cache" in kwargs:
        use_cache = kwargs["use_cache"]
        del kwargs["use_cache"]

    json_path = None
    if cache_dir:
        json_path = os.path.join(cache_dir, "cached-img-%s.json" % cache_key)
        if use_cache and os.path.exists(json_path):
            LOG.info("Fetching %r from cache..." % (args,))
            try:
                with closing(open(json_path, "r")) as handle:
                    img_data = json.load(handle)
                return parts.Bunch([
                    (key, parts.Bunch(val))
                    for key, val in img_data.items()  # BOGUS pylint: disable=E1103
                ])
            except (EnvironmentError, TypeError, ValueError) as exc:
                LOG.warn("Problem reading cached data from '%s',"
                         " ignoring cache... (%s)" % (json_path, exc))

    LOG.info("Copying %r..." % (args,))
    img_data = uploader(*args, **kwargs)

    if json_path:
        with closing(open(json_path, "w")) as handle:
            json.dump(img_data, handle)

    return img_data
python
{ "resource": "" }
q40447
copy_image_from_url
train
def copy_image_from_url(url, cache_dir=None, use_cache=True):
    """Copy image from given URL and return upload metadata."""
    return cache_image_data(cache_dir, hashlib.sha1(url).hexdigest(),
                            ImgurUploader().upload, url,
                            use_cache=use_cache)
python
{ "resource": "" }
q40448
_parse_fmt
train
def _parse_fmt(fmt, color_key='colors', ls_key='linestyles',
               marker_key='marker'):
    '''Modified from matplotlib's _process_plot_format function.'''
    try:  # Is fmt just a colorspec?
        color = mcolors.colorConverter.to_rgb(fmt)
    except ValueError:
        pass  # No, not just a color.
    else:
        # Either a color or a numeric marker style
        if fmt not in mlines.lineMarkers:
            return {color_key: color}

    result = dict()
    # handle the multi char special cases and strip them from the string
    if fmt.find('--') >= 0:
        result[ls_key] = '--'
        fmt = fmt.replace('--', '')
    if fmt.find('-.') >= 0:
        result[ls_key] = '-.'
        fmt = fmt.replace('-.', '')
    if fmt.find(' ') >= 0:
        result[ls_key] = 'None'
        fmt = fmt.replace(' ', '')

    for c in list(fmt):
        if c in mlines.lineStyles:
            if ls_key in result:
                raise ValueError(
                    'Illegal format string; two linestyle symbols')
            result[ls_key] = c
        elif c in mlines.lineMarkers:
            if marker_key in result:
                raise ValueError(
                    'Illegal format string; two marker symbols')
            result[marker_key] = c
        elif c in mcolors.colorConverter.colors:
            if color_key in result:
                raise ValueError(
                    'Illegal format string; two color symbols')
            result[color_key] = c
        else:
            raise ValueError(
                'Unrecognized character %c in format string' % c)
    return result
python
{ "resource": "" }
q40449
VizMixin.plot
train
def plot(self, coordinates, directed=False, weighted=False, fig='current',
         ax=None, edge_style=None, vertex_style=None, title=None, cmap=None):
    '''Plot the graph using matplotlib in 2 or 3 dimensions.

    coordinates : (n,2) or (n,3) array of vertex coordinates
    directed : if True, edges have arrows indicating direction.
    weighted : if True, edges are colored by their weight.
    fig : a matplotlib Figure to use, or one of {'new','current'}.
          Defaults to 'current', which will call gcf(). Only used when
          ax=None.
    ax : a matplotlib Axes to use. Defaults to gca()
    edge_style : string or dict of styles for edges. Defaults to 'k-'
    vertex_style : string or dict of styles for vertices. Defaults to 'ko'
    title : string to display as the plot title
    cmap : a matplotlib Colormap to use for edge weight coloring
    '''
    X = np.atleast_2d(coordinates)
    assert 0 < X.shape[1] <= 3, 'too many dimensions to plot'
    if X.shape[1] == 1:
        X = np.column_stack((np.arange(X.shape[0]), X))
    is_3d = (X.shape[1] == 3)
    if ax is None:
        ax = _get_axis(is_3d, fig)
    edge_kwargs = dict(colors='k', linestyles='-', linewidths=1, zorder=1)
    vertex_kwargs = dict(marker='o', c='k', s=20, edgecolor='none', zorder=2)
    if edge_style is not None:
        if not isinstance(edge_style, dict):
            edge_style = _parse_fmt(edge_style, color_key='colors')
        edge_kwargs.update(edge_style)
    if vertex_style is not None:
        if not isinstance(vertex_style, dict):
            vertex_style = _parse_fmt(vertex_style, color_key='c')
        vertex_kwargs.update(vertex_style)
    if weighted and self.is_weighted():
        edge_kwargs['array'] = self.edge_weights()
    if directed and self.is_directed():
        _directed_edges(self, X, ax, is_3d, edge_kwargs, cmap)
    else:
        _undirected_edges(self, X, ax, is_3d, edge_kwargs, cmap)
    ax.scatter(*X.T, **vertex_kwargs)
    ax.autoscale_view()
    if title:
        ax.set_title(title)
    return pyplot.show
python
{ "resource": "" }
q40450
normalize_mapping_line
train
def normalize_mapping_line(mapping_line, previous_source_column=0):
    """
    Often times the position will remain stable, such that the naive
    process will end up with many redundant values; this function will
    iterate through the line and remove all extra values.
    """
    if not mapping_line:
        return [], previous_source_column

    # Note that while the local record here is also done as a 4-tuple,
    # element 1 and 2 are never used since they are always provided by
    # the segments in the mapping line; they are defined for consistency
    # reasons.
    def regenerate(segment):
        if len(segment) == 5:
            result = (record[0], segment[1], segment[2], record[3],
                      segment[4])
        else:
            result = (record[0], segment[1], segment[2], record[3])
        # Ideally the exact location should still be kept, but given
        # that the sourcemap format is accumulative and permits a lot
        # of inferred positions, resetting all values to 0 is intended.
        record[:] = [0, 0, 0, 0]
        return result

    # first element of the line; sink column (0th element) is always
    # the absolute value, so always use the provided value sourced from
    # the original mapping_line; the source column (3rd element) is
    # never reset, so if a previous counter exists (which is specified
    # by the optional argument), make use of it to generate the initial
    # normalized segment.
    record = [0, 0, 0, previous_source_column]
    result = []
    regen_next = True

    for segment in mapping_line:
        if not segment:
            # ignore empty records
            continue

        # if the line has not changed, and that the increases of both
        # columns are the same, accumulate the column counter and drop
        # the segment.

        # accumulate the current record first
        record[0] += segment[0]

        if len(segment) == 1:
            # Mark the termination, as 1-tuple determines the end of the
            # previous symbol and denote that whatever follows are not
            # in any previous source files. So if it isn't recorded,
            # make note of this if it wasn't done already.
            if result and len(result[-1]) != 1:
                result.append((record[0],))
                record[0] = 0
            # the next complete segment will require regeneration
            regen_next = True
            # skip the remaining processing.
            continue

        record[3] += segment[3]

        # 5-tuples are always special case with the remapped identifier
        # name element, and to mark the termination the next token must
        # also be explicitly written (in our case, regenerated). If the
        # filename or source line relative position changed (idx 1 and
        # 2), regenerate it too. Finally, if the column offsets differ
        # between source and sink, regenerate.
        if len(segment) == 5 or regen_next or segment[1] or segment[2] or (
                record[0] != record[3]):
            result.append(regenerate(segment))
        regen_next = len(segment) == 5

    # must return the consumed/omitted values.
    return result, record[3]
python
{ "resource": "" }
q40451
write
train
def write(
        stream_fragments, stream, normalize=True,
        book=None, sources=None, names=None, mappings=None):
    """
    Given an iterable of stream fragments, write it to the stream object
    by using its write method. Returns a 3-tuple, where the first
    element is the mapping, second element is the list of sources and
    the third being the original names referenced by the given fragment.

    Arguments:

    stream_fragments
        an iterable that only contains StreamFragments
    stream
        an io.IOBase compatible stream object
    normalize
        the default True setting will result in the mappings that were
        returned be normalized to the minimum form. This will reduce the
        size of the generated source map at the expense of slightly
        lower quality. Also, if any of the subsequent arguments are
        provided (for instance, for the multiple calls to this
        function), the usage of the normalize flag is currently NOT
        supported. If multiple sets of outputs are to be produced, the
        recommended method is to chain all the stream fragments together
        before passing in.

    Advanced usage arguments

    book
        A Book instance; if none is provided an instance will be created
        from the default_book constructor. The Bookkeeper instance is
        used for tracking the positions of rows and columns of the input
        stream.
    sources
        a Names instance for tracking sources; if None is provided, an
        instance will be created for internal use.
    names
        a Names instance for tracking names; if None is provided, an
        instance will be created for internal use.
    mappings
        a previously produced mappings.

    A stream fragment tuple must contain the following

    - The string to write to the stream
    - Original starting line of the string; None if not present
    - Original starting column of the line; None if not present
    - Original string that this fragment represents (i.e. for the case
      where this string fragment was an identifier but got mangled into
      an alternative form); use None if this was not the case.
    - The source of the fragment. If the first fragment is unspecified,
      the INVALID_SOURCE url will be used (i.e. about:invalid). After
      that, a None value will be treated as the implicit value, and if
      NotImplemented is encountered, the INVALID_SOURCE url will be used
      also.

    If a number of stream_fragments are to be provided, common instances
    of Book (constructed via default_book) and Names (for sources and
    names) should be provided if they are not chained together.
    """

    def push_line():
        mappings.append([])
        book.keeper._sink_column = 0

    if names is None:
        names = Names()
    if sources is None:
        sources = Names()
    if book is None:
        book = default_book()
    if not isinstance(mappings, list):
        mappings = []

    # finalize initial states; the most recent list (mappings[-1])
    # is the current line
    push_line()

    for chunk, lineno, colno, original_name, source in stream_fragments:
        # note that lineno/colno are assumed to be both provided or none
        # provided.
        lines = chunk.splitlines(True)
        for line in lines:
            stream.write(line)
            # Two separate checks are done. As per specification, if
            # either lineno or colno are unspecified, it is assumed that
            # the segment is unmapped - append a termination (1-tuple)
            #
            # Otherwise, note that if this segment is the beginning of a
            # line, and that an implied source colno/linecol were
            # provided (i.e. a value of 0), and that the string is
            # empty, it can be safely skipped, since it is an implied
            # and unmapped indentation
            if lineno is None or colno is None:
                mappings[-1].append((book.keeper.sink_column,))
            else:
                name_id = names.update(original_name)
                # this is a bit of a trick: an unspecified value (None)
                # will simply be treated as the implied value, hence 0.
                # However, a NotImplemented will be recorded and be
                # converted to the invalid url at the end.
                source_id = sources.update(source) or 0
                if lineno:
                    # a new lineno is provided, apply it to the book and
                    # use the result as the written value.
                    book.keeper.source_line = lineno
                    source_line = book.keeper.source_line
                else:
                    # no change in offset, do not calculate and assume
                    # the value to be written is unchanged.
                    source_line = 0
                # if the provided colno is to be inferred, calculate it
                # based on the previous line length plus the previous
                # real source column value, otherwise standard value
                # for tracking.
                # the reason for using the previous lengths is simply
                # due to how the bookkeeper class does the calculation
                # on-demand, and that the starting column for the
                # _current_ text fragment can only be calculated using
                # what was written previously, hence the original length
                # value being added if the current colno is to be
                # inferred.
                if colno:
                    book.keeper.source_column = colno
                else:
                    book.keeper.source_column = (
                        book.keeper._source_column + book.original_len)

                if original_name is not None:
                    mappings[-1].append((
                        book.keeper.sink_column, source_id, source_line,
                        book.keeper.source_column, name_id))
                else:
                    mappings[-1].append((
                        book.keeper.sink_column, source_id, source_line,
                        book.keeper.source_column))

            # doing this last to update the position for the next line
            # or chunk for the relative values based on what was added
            if line[-1:] in '\r\n':
                # Note: this HAS to be an edge case and should never
                # happen, but this has the potential to muck things up.
                # Since the parent only provided the start, will need
                # to manually track the chunks internal to here.
                # This normally shouldn't happen with sane parsers
                # and lexers, but this assumes that no further symbols
                # aside from the new lines got inserted.
                colno = (
                    colno if colno in (0, None) else
                    colno + len(line.rstrip()))
                book.original_len = book.written_len = 0
                push_line()
                if line is not lines[-1]:
                    logger.warning(
                        'text in the generated document at line %d may be '
                        'mapped incorrectly due to trailing newline '
                        'character in provided text fragment.',
                        len(mappings))
                    logger.info(
                        'text in stream fragments should not have trailing '
                        'characters after a new line, they should be split '
                        'off into a separate fragment.')
            else:
                book.written_len = len(line)
                book.original_len = (
                    len(original_name) if original_name else
                    book.written_len)
                book.keeper.sink_column = (
                    book.keeper._sink_column + book.written_len)

    # normalize everything
    if normalize:
        # if this _ever_ supports the multiple usage using existing
        # instances of names and book and mappings, it needs to deal
        # with NOT normalizing the existing mappings and somehow reuse
        # the previously stored value, probably in the book. It is
        # most certainly a bad idea to support that use case while also
        # supporting the default normalize flag due to the complex
        # tracking of all the existing values...
        mappings = normalize_mappings(mappings)

    list_sources = [
        INVALID_SOURCE if s == NotImplemented else s
        for s in sources
    ] or [INVALID_SOURCE]

    return mappings, list_sources, list(names)
python
{ "resource": "" }
q40452
encode_sourcemap
train
def encode_sourcemap(filename, mappings, sources, names=[]):
    """
    Take a filename, mappings and names produced from the write function
    and sources. As the write function currently does not handle the
    tracking of source filenames, the sources should be a list of one
    element with the original filename.

    Arguments

    filename
        The target filename that the stream was or to be written to.
        The stream being the argument that was supplied to the write
        function
    mappings
        The raw unencoded mappings produced by write, which is returned
        as its second element.
    sources
        List of original source filenames. When used in conjunction with
        the above write function, it should be a list of one item, being
        the path to the original filename.
    names
        The list of original names generated by write, which is returned
        as its first element.

    Returns a dict which can be JSON encoded into a sourcemap file.

    Example usage:

    >>> from io import StringIO
    >>> from calmjs.parse import es5
    >>> from calmjs.parse.unparsers.es5 import pretty_printer
    >>> from calmjs.parse.sourcemap import write, encode_sourcemap
    >>> program = es5(u"var i = 'hello';")
    >>> stream = StringIO()
    >>> printer = pretty_printer()
    >>> sourcemap = encode_sourcemap(
    ...     'demo.min.js', *write(printer(program), stream))
    """
    return {
        "version": 3,
        "sources": sources,
        "names": names,
        "mappings": encode_mappings(mappings),
        "file": filename,
    }
python
{ "resource": "" }
q40453
repr_compat
train
def repr_compat(s):
    """
    Since Python 2 is annoying with unicode literals, and that we are
    enforcing the usage of unicode, this ensures the repr doesn't spew
    out the unicode literal prefix.
    """
    if unicode and isinstance(s, unicode):
        return repr(s)[1:]
    else:
        return repr(s)
python
{ "resource": "" }
q40454
normrelpath
train
def normrelpath(base, target):
    """
    This function takes the base and target arguments as paths, and
    returns an equivalent relative path from base to the target, if both
    provided paths are absolute.
    """
    if not all(map(isabs, [base, target])):
        return target

    return relpath(normpath(target), dirname(normpath(base)))
python
{ "resource": "" }
q40455
laplacian_reordering
train
def laplacian_reordering(G):
    '''Reorder vertices using the eigenvector of the graph Laplacian
    corresponding to the first positive eigenvalue.'''
    L = G.laplacian()
    vals, vecs = np.linalg.eigh(L)
    min_positive_idx = np.argmax(vals == vals[vals > 0].min())
    vec = vecs[:, min_positive_idx]
    return permute_graph(G, np.argsort(vec))
python
{ "resource": "" }
q40456
node_centroid_hill_climbing
train
def node_centroid_hill_climbing(G, relax=1, num_centerings=20,
                                verbose=False):
    '''Iterative reordering method based on alternating rounds of
    node-centering and hill-climbing search.'''
    # Initialize order with BFS from a random start node.
    order = _breadth_first_order(G)
    for it in range(num_centerings):
        B = permute_graph(G, order).bandwidth()
        nc_order = _node_center(G, order, relax=relax)
        nc_B = permute_graph(G, nc_order).bandwidth()
        if nc_B < B:
            if verbose:  # pragma: no cover
                print('post-center', B, nc_B)
            order = nc_order
        order = _hill_climbing(G, order, verbose=verbose)
    return permute_graph(G, order)
python
{ "resource": "" }
q40457
XSDGenerator.add_column_property_xsd
train
def add_column_property_xsd(self, tb, column_property):
    """Add the XSD for a column property to the ``TreeBuilder``."""
    if len(column_property.columns) != 1:
        raise NotImplementedError  # pragma: no cover
    column = column_property.columns[0]
    if column.primary_key and not self.include_primary_keys:
        return
    if column.foreign_keys and not self.include_foreign_keys:
        if len(column.foreign_keys) != 1:  # pragma: no cover
            # FIXME understand when a column can have multiple
            # foreign keys
            raise NotImplementedError()
        return
    attrs = {'name': column_property.key}
    self.add_column_xsd(tb, column, attrs)
python
{ "resource": "" }
q40458
XSDGenerator.add_class_properties_xsd
train
def add_class_properties_xsd(self, tb, cls):
    """Add the XSD for the class properties to the ``TreeBuilder``,
    and call the user ``sequence_callback``."""
    for p in class_mapper(cls).iterate_properties:
        if isinstance(p, ColumnProperty):
            self.add_column_property_xsd(tb, p)
    if self.sequence_callback:
        self.sequence_callback(tb, cls)
python
{ "resource": "" }
q40459
XSDGenerator.get_class_xsd
train
def get_class_xsd(self, io, cls):
    """Returns the XSD for a mapped class."""
    attrs = {}
    attrs['xmlns:gml'] = 'http://www.opengis.net/gml'
    attrs['xmlns:xsd'] = 'http://www.w3.org/2001/XMLSchema'
    tb = TreeBuilder()
    with tag(tb, 'xsd:schema', attrs) as tb:
        with tag(tb, 'xsd:complexType', {'name': cls.__name__}) as tb:
            with tag(tb, 'xsd:complexContent') as tb:
                with tag(tb, 'xsd:extension',
                         {'base': 'gml:AbstractFeatureType'}) as tb:
                    with tag(tb, 'xsd:sequence') as tb:
                        self.add_class_properties_xsd(tb, cls)
    ElementTree(tb.close()).write(io, encoding='utf-8')
    return io
python
{ "resource": "" }
q40460
load_db_from_url
train
def load_db_from_url(url="https://github.com/OpenExoplanetCatalogue/oec_gzip/raw/master/systems.xml.gz"):
    """Loads the database from a gzipped version of the systems folder,
    by default the one located in the oec_gzip repo in the
    OpenExoplanetCatalogue GitHub group.

    The database is loaded from the url in memory.

    :param url: url to load (must be gzipped version of systems folder)
    :return: OECDatabase object initialised with the latest OEC version
    """
    catalogue = gzip.GzipFile(fileobj=io.BytesIO(requests.get(url).content))
    database = OECDatabase(catalogue, stream=True)
    return database
python
{ "resource": "" }
q40461
OECDatabase.searchPlanet
train
def searchPlanet(self, name):
    """Searches the database for a planet. Input can be the complete
    name, e.g. GJ1214b, alternate name variations, or even just 1214.

    :param name: the name of the planet to search
    :return: dictionary of results as planetname -> planet object
    """
    searchName = compactString(name)
    returnDict = {}
    for altname, planetObj in self._planetSearchDict.iteritems():
        if re.search(searchName, altname):
            returnDict[planetObj.name] = planetObj

    if returnDict:
        if len(returnDict) == 1:
            return returnDict.values()[0]
        else:
            return returnDict.values()
    else:
        return False
python
{ "resource": "" }
q40462
OECDatabase.transitingPlanets
train
def transitingPlanets(self):
    """ Returns a list of transiting planet objects """
    transitingPlanets = []
    for planet in self.planets:
        try:
            if planet.isTransiting:
                transitingPlanets.append(planet)
        except KeyError:
            # No 'discoverymethod' tag - this also filters Solar System planets
            pass
    return transitingPlanets
python
{ "resource": "" }
q40463
OECDatabase._loadDatabase
train
def _loadDatabase(self, databaseLocation, stream=False):
    """ Loads the database from a given file path in the class

    :param databaseLocation: the location on disk or the stream object
    :param stream: if True, treats databaseLocation as a stream object
    """
    # Initialise Database
    self.systems = []
    self.binaries = []
    self.stars = []
    self.planets = []

    if stream:
        tree = ET.parse(databaseLocation)
        for system in tree.findall(".//system"):
            self._loadSystem(system)
    else:
        databaseXML = glob.glob(os.path.join(databaseLocation, '*.xml'))
        if not len(databaseXML):
            raise LoadDataBaseError('could not find the database xml files. Have you given the correct location '
                                    'to the open exoplanet catalogues /systems folder?')
        for filename in databaseXML:
            try:
                with open(filename, 'r') as f:
                    tree = ET.parse(f)
            except ET.ParseError as e:
                # this is sometimes raised rather than the root.tag system check
                raise LoadDataBaseError(e)
            root = tree.getroot()

            # Process the system
            if not root.tag == 'system':
                raise LoadDataBaseError('file {0} does not contain a valid system - could be an error with your version'
                                        ' of the catalogue'.format(filename))
            self._loadSystem(root)
python
{ "resource": "" }
q40464
RandomSlugField.generate_slug
train
def generate_slug(self, model_instance):
    """Returns a unique slug."""
    queryset = model_instance.__class__._default_manager.all()

    # Only count slugs that match current length to prevent issues
    # when pre-existing slugs are a different length.
    lookup = {'%s__regex' % self.attname: r'^.{%s}$' % self.length}
    if queryset.filter(**lookup).count() >= len(self.chars)**self.length:
        raise FieldError("No available slugs remaining.")

    slug = get_random_string(self.length, self.chars)

    # Exclude the current model instance from the queryset used in
    # finding the next valid slug.
    if model_instance.pk:
        queryset = queryset.exclude(pk=model_instance.pk)

    # Form a kwarg dict used to implement any unique_together
    # constraints.
    kwargs = {}
    for params in model_instance._meta.unique_together:
        if self.attname in params:
            for param in params:
                kwargs[param] = getattr(model_instance, param, None)

    kwargs[self.attname] = slug
    while queryset.filter(**kwargs):
        slug = get_random_string(self.length, self.chars)
        kwargs[self.attname] = slug

    return slug
python
{ "resource": "" }
q40465
BaseApi.create
train
def create(cls, session, record, endpoint_override=None, out_type=None,
           **add_params):
    """Create an object on HelpScout.

    Args:
        session (requests.sessions.Session): Authenticated session.
        record (helpscout.BaseModel): The record to be created.
        endpoint_override (str, optional): Override the default
            endpoint using this.
        out_type (helpscout.BaseModel, optional): The type of record to
            output. This should be provided by child classes, by calling
            super.
        **add_params (mixed): Add these to the request parameters.

    Returns:
        helpscout.models.BaseModel: Newly created record. Will be of the
            ``out_type`` that is defined.
    """
    cls._check_implements('create')
    data = record.to_api()
    params = {
        'reload': True,
    }
    params.update(**add_params)
    data.update(params)
    return cls(
        endpoint_override or '/%s.json' % cls.__endpoint__,
        data=data,
        request_type=RequestPaginator.POST,
        singleton=True,
        session=session,
        out_type=out_type,
    )
python
{ "resource": "" }
q40466
BaseApi.get
train
def get(cls, session, record_id, endpoint_override=None):
    """Return a specific record.

    Args:
        session (requests.sessions.Session): Authenticated session.
        record_id (int): The ID of the record to get.
        endpoint_override (str, optional): Override the default
            endpoint using this.

    Returns:
        helpscout.BaseModel: A record singleton, if existing. Otherwise
            ``None``.
    """
    cls._check_implements('get')
    try:
        return cls(
            endpoint_override or '/%s/%d.json' % (
                cls.__endpoint__, record_id,
            ),
            singleton=True,
            session=session,
        )
    except HelpScoutRemoteException as e:
        if e.status_code == 404:
            return None
        else:
            raise
python
{ "resource": "" }
q40467
BaseApi.list
train
def list(cls, session, endpoint_override=None, data=None):
    """Return records in a mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        endpoint_override (str, optional): Override the default
            endpoint using this.
        data (dict, optional): Data to provide as request parameters.

    Returns:
        RequestPaginator(output_type=helpscout.BaseModel): Results
            iterator.
    """
    cls._check_implements('list')
    return cls(
        endpoint_override or '/%s.json' % cls.__endpoint__,
        data=data,
        session=session,
    )
python
{ "resource": "" }
q40468
BaseApi.search
train
def search(cls, session, queries, out_type):
    """Search for a record given a domain.

    Args:
        session (requests.sessions.Session): Authenticated session.
        queries (helpscout.models.Domain or iter): The queries for the
            domain. If a ``Domain`` object is provided, it will simply be
            returned. Otherwise, a ``Domain`` object will be generated
            from the complex queries. In this case, the queries should
            conform to the interface in
            :func:`helpscout.domain.Domain.from_tuple`.
        out_type (helpscout.BaseModel): The type of record to output.
            This should be provided by child classes, by calling super.

    Returns:
        RequestPaginator(output_type=helpscout.BaseModel): Results
            iterator of the ``out_type`` that is defined.
    """
    cls._check_implements('search')
    domain = cls.get_search_domain(queries)
    return cls(
        '/search/%s.json' % cls.__endpoint__,
        data={'query': str(domain)},
        session=session,
        out_type=out_type,
    )
python
{ "resource": "" }
q40469
simple_integrate
train
def simple_integrate(ts, peak_list, base_ts=None, intname='simple'):
    """
    Integrate each peak naively, without regard to overlap.

    This is used as the terminal step by most of the other integrators.
    """
    peaks = []
    for hints in peak_list:
        t0, t1 = hints['t0'], hints['t1']
        hints['int'] = intname
        pk_ts = ts.twin((t0, t1))
        if base_ts is None:
            # make a two-point baseline
            base = Trace([hints.get('y0', pk_ts[0]),
                          hints.get('y1', pk_ts[-1])],
                         [t0, t1], name=ts.name)
        else:
            base = base_ts.twin((t0, t1))
        peaks.append(PeakComponent(hints, pk_ts, base))
    return peaks
python
{ "resource": "" }
q40470
drop_integrate
train
def drop_integrate(ts, peak_list):
    """
    Resolves overlap by breaking at the minimum value.
    """
    peaks = []
    for _, pks in _get_windows(peak_list):
        temp_pks = []
        pks = sorted(pks, key=lambda p: p['t0'])
        if 'y0' in pks[0] and 'y1' in pks[-1]:
            y0, y1 = pks[0]['y0'], pks[-1]['y1']
        else:
            y0 = ts.get_point(pks[0]['t0'])
            y1 = ts.get_point(pks[-1]['t1'])
        ys = np.array([y0, y1])
        xs = np.array([pks[0]['t0'], pks[-1]['t1']])

        # go through the list of peaks to make sure there's no overlap
        for hints in pks:
            t0, t1 = hints['t0'], hints['t1']

            # figure out the y values (using a linear baseline)
            hints['y0'] = np.interp(t0, xs, ys)
            hints['y1'] = np.interp(t1, xs, ys)

            # if this peak totally overlaps with an existing one, don't add
            if sum(1 for p in temp_pks if t1 <= p['t1']) > 0:
                continue

            overlap_pks = [p for p in temp_pks if t0 <= p['t1']]
            if len(overlap_pks) > 0:
                # find the last of the overlapping peaks
                overlap_pk = max(overlap_pks, key=lambda p: p['t0'])

                # get the section of trace and find the lowest point
                over_ts = ts.twin((t0, overlap_pk['t1']))
                min_t = over_ts.index[over_ts.values.argmin()]

                # delete the existing overlapping peak
                for i, p in enumerate(temp_pks):
                    if p == overlap_pk:
                        del temp_pks[i]
                        break

                # interpolate a new y value
                y_val = np.interp(min_t, xs, ys)
                overlap_pk['y1'] = y_val
                hints['y0'] = y_val

                # add the old and new peaks in
                overlap_pk['t1'] = min_t
                temp_pks.append(overlap_pk)
                hints['t0'], hints['t1'] = min_t, t1
                temp_pks.append(hints)
            else:
                hints['t0'], hints['t1'] = t0, t1
                temp_pks.append(hints)

        # none of our peaks should overlap, so we can just use
        # simple_integrate now
        peaks += simple_integrate(ts, temp_pks, intname='drop')
    return peaks
python
{ "resource": "" }
q40471
_integrate_mpwrap
train
def _integrate_mpwrap(ts_and_pks, integrate, fopts):
    """
    Take a zipped timeseries and the peaks found in it and integrate it to
    return peaks. Used to allow multiprocessing support.
    """
    ts, tpks = ts_and_pks
    pks = integrate(ts, tpks, **fopts)
    # for p in pks:
    #     p.info['mz'] = str(ts.name)
    return pks
python
{ "resource": "" }
q40472
get_bytes
train
def get_bytes(num_bytes):
    """
    Returns a random string of num_bytes length.
    """
    # Buffer that will receive the random bytes.
    s = create_string_buffer(num_bytes)

    # Used to keep track of status. 1 = success, 0 = error.
    ok = c_int()

    # Handle to the cryptographic service provider.
    hProv = c_ulong()

    ok = windll.Advapi32.CryptAcquireContextA(byref(hProv), None, None,
                                              PROV_RSA_FULL, 0)
    ok = windll.Advapi32.CryptGenRandom(hProv, wintypes.DWORD(num_bytes),
                                        cast(byref(s), POINTER(c_byte)))
    return s.raw
python
{ "resource": "" }
q40473
get_long
train
def get_long():
    """
    Generates a random long. The length of said long varies by platform.
    """
    # The C long type to populate.
    pbRandomData = c_ulong()

    # Determine the byte size of this machine's long type.
    size_of_long = wintypes.DWORD(sizeof(pbRandomData))

    # Used to keep track of status. 1 = success, 0 = error.
    ok = c_int()

    # Handle to the cryptographic service provider.
    hProv = c_ulong()

    ok = windll.Advapi32.CryptAcquireContextA(byref(hProv), None, None,
                                              PROV_RSA_FULL, 0)
    ok = windll.Advapi32.CryptGenRandom(hProv, size_of_long,
                                        byref(pbRandomData))
    return pbRandomData.value
python
{ "resource": "" }
q40474
auto_instantiate
train
def auto_instantiate(*classes):
    """Creates a decorator that will instantiate objects based on function
    parameter annotations.

    The decorator will check every argument passed into ``f``. If ``f`` has
    an annotation for the specified parameter and the annotation is found in
    ``classes``, the parameter value passed in will be used to construct a
    new instance of the class given as the annotation.

    An example (Python 3):

    .. code-block:: python

       @auto_instantiate(int)
       def foo(a: int, b: float):
           pass

    Any value passed in as ``b`` is left unchanged. Anything passed as the
    parameter for ``a`` will be converted to :class:`int` before calling the
    function.

    Since Python 2 does not support annotations, the
    :func:`~data.decorators.annotate` function can be used instead:

    .. code-block:: python

       @auto_instantiate(int)
       @annotate(a=int)
       def foo(a, b):
           pass

    :param classes: Any number of classes/callables for which
                    auto-instantiation should be performed. If empty,
                    perform for all.
    :note: When dealing with data, it is almost always more convenient to
           use the :func:`~data.decorators.data` decorator instead.
    """
    def decorator(f):
        # collect our argspec
        sig = signature(f)

        @wraps(f)
        def _(*args, **kwargs):
            bvals = sig.bind(*args, **kwargs)
            # replace with instance if desired
            for varname, val in bvals.arguments.items():
                anno = sig.parameters[varname].annotation
                if anno in classes or (len(classes) == 0 and anno != _empty):
                    bvals.arguments[varname] = anno(val)
            return f(*bvals.args, **bvals.kwargs)

        # create another layer by wrapping in a FunctionMaker. this is done
        # to preserve the original signature
        return FunctionMaker.create(
            f, 'return _(%(signature)s)', dict(_=_, __wrapped__=f)
        )
    return decorator
python
{ "resource": "" }
q40475
SNRPlanet
train
def SNRPlanet(SNRStar, starPlanetFlux, Nobs, pixPerbin, NVisits=1):
    r""" Calculate the Signal to Noise Ratio of the planet atmosphere

    .. math::
        \text{SNR}_\text{planet} = \text{SNR}_\text{star} \times \Delta F
        \times \sqrt{N_\text{obs}} \times \sqrt{N_\text{pixPerbin}}
        \times \sqrt{N_\text{visits}}

    Where :math:`\text{SNR}_\star` is the SNR of the star detection,
    :math:`\Delta F` the flux ratio of the terminator to the star,
    :math:`N_\text{obs}` the number of exposures per visit,
    :math:`N_\text{pixPerBin}` the number of pixels per wavelength bin,
    and :math:`N_\text{visits}` the number of visits.

    :return: the signal to noise ratio of the planet atmosphere
    """
    SNRplanet = SNRStar * starPlanetFlux * \
        sqrt(Nobs) * sqrt(pixPerbin) * sqrt(NVisits)
    return SNRplanet
python
{ "resource": "" }
q40476
transitDurationCircular
train
def transitDurationCircular(P, R_s, R_p, a, i):
    r"""Estimation of the primary transit time. Assumes a circular orbit.

    .. math::
        T_\text{dur} = \frac{P}{\pi}\sin^{-1}
        \left[\frac{R_\star}{a}\frac{\sqrt{(1+k)^2 - b^2}}{\sin{i}} \right]

    Where :math:`T_\text{dur}` is the transit duration, P the orbital
    period, :math:`R_\star` the radius of the star, a the semi-major axis,
    k is :math:`\frac{R_p}{R_s}` and b is :math:`\frac{a}{R_\star} \cos{i}`
    (Seager & Mallen-Ornelas 2003)
    """
    if i is nan:
        i = 90 * aq.deg

    i = i.rescale(aq.rad)
    k = R_p / R_s  # lit reference for eclipsing binaries
    b = (a * cos(i)) / R_s

    duration = (P / pi) * arcsin(((R_s * sqrt((1 + k) ** 2 - b ** 2)) /
                                  (a * sin(i))).simplified)
    return duration.rescale(aq.min)
python
{ "resource": "" }
q40477
estimateAbsoluteMagnitude
train
def estimateAbsoluteMagnitude(spectralType):
    """Uses the spectral type to look up an approximate absolute magnitude
    for the star.
    """
    from .astroclasses import SpectralType

    specType = SpectralType(spectralType)

    if specType.classLetter == '':
        return np.nan
    elif specType.classNumber == '':
        specType.classNumber = 5  # approximation using mid magnitude value

    if specType.lumType == '':
        specType.lumType = 'V'  # assume main sequence

    LNum = LClassRef[specType.lumType]
    classNum = specType.classNumber
    classLet = specType.classLetter

    try:
        return absMagDict[classLet][classNum][LNum]
    # value not in table. Assume the number isn't there (Key p2.7, Ind p3+)
    except (KeyError, IndexError):
        try:
            classLookup = absMagDict[classLet]
            # only select the right L Type
            values = np.array(list(classLookup.values()))[:, LNum]
            return np.interp(classNum, list(classLookup.keys()), values)
        except (KeyError, ValueError):
            return np.nan
python
{ "resource": "" }
q40478
is_remote_allowed
train
def is_remote_allowed(remote):
    """
    Check if `remote` is allowed to make a CORS request.
    """
    if settings.debug:
        return True
    if not remote:
        return False
    for domain_pattern in settings.node['cors_whitelist_domains']:
        if domain_pattern.match(remote):
            return True
    return False
python
{ "resource": "" }
q40479
OneHash.generate_challenges
train
def generate_challenges(self, num, root_seed):
    """ Generate the specified number of hash challenges.

    :param num: The number of hash challenges we want to generate.
    :param root_seed: Some value that we use to generate our seeds from.
    """
    # Generate a series of seeds
    seeds = self.generate_seeds(num, root_seed, self.secret)
    blocks = self.pick_blocks(num, root_seed)

    # List of 2-tuples (seed, hash_response)
    self.challenges = []

    # Generate the corresponding hash for each seed
    for i in range(num):
        self.challenges.append(Challenge(blocks[i], seeds[i]))
        response = self.meet_challenge(self.challenges[i])
        self.challenges[i].response = response
python
{ "resource": "" }
q40480
OneHash.meet_challenge
train
def meet_challenge(self, challenge):
    """ Get the SHA256 hash of a specific file block plus the provided
    seed. The default block size is one tenth of the file. If the file is
    larger than 10KB, 1KB is used as the block size.

    :param challenge: challenge as a `Challenge <heartbeat.Challenge>`
        object
    """
    chunk_size = min(1024, self.file_size // 10)
    seed = challenge.seed

    h = hashlib.sha256()
    self.file_object.seek(challenge.block)

    if challenge.block > (self.file_size - chunk_size):
        end_slice = (
            challenge.block - (self.file_size - chunk_size)
        )
        h.update(self.file_object.read(end_slice))
        self.file_object.seek(0)
        h.update(self.file_object.read(chunk_size - end_slice))
    else:
        h.update(self.file_object.read(chunk_size))

    h.update(seed)
    return h.digest()
python
{ "resource": "" }
q40481
OneHash.generate_seeds
train
def generate_seeds(num, root_seed, secret):
    """ Deterministically generate a list of seeds from a root seed.

    :param num: Number of seeds to generate as int
    :param root_seed: Seed to start off with.
    :param secret: Secret bytes mixed into each derived seed.
    :return: seed values as a list of length num
    """
    # Generate a starting seed from the root
    if num < 0:
        raise HeartbeatError('%s is not greater than 0' % num)
    if secret is None:
        raise HeartbeatError('secret can not be of type NoneType')

    seeds = []
    try:
        tmp_seed = hashlib.sha256(root_seed).digest()
    except TypeError:
        tmp_seed = hashlib.sha256(str(root_seed).encode()).digest()

    # Deterministically generate the rest of the seeds
    for x in range(num):
        seeds.append(tmp_seed)
        h = hashlib.sha256(tmp_seed)
        h.update(secret)
        tmp_seed = h.digest()

    return seeds
python
{ "resource": "" }
q40482
OneHash.pick_blocks
train
def pick_blocks(self, num, root_seed):
    """ Pick a set of positions to start reading blocks from in the file
    that challenges are created for. This is a deterministic operation.
    Positions are guaranteed to be within the bounds of the file.

    :param num: Number of blocks to pick
    :param root_seed: Seed with which to begin picking blocks.
    :return: block values as a list
    """
    if num < 0:
        raise HeartbeatError('%s is not greater than 0' % num)

    blocks = []
    random.seed(root_seed)

    for i in range(num):
        blocks.append(random.randint(0, self.file_size - 1))

    return blocks
python
{ "resource": "" }
q40483
OneHash.check_answer
train
def check_answer(self, hash_answer):
    """ Check if the returned hash is in our challenges list.

    :param hash_answer: Hash that we compare to our list of challenges
    :return: boolean indicating if answer is correct, True, or not, False
    """
    for challenge in self.challenges:
        if challenge.response == hash_answer:
            # If we don't discard a used challenge then a node
            # could fake having the file because it already
            # knows the proper response
            self.delete_challenge(hash_answer)
            return True
    return False
python
{ "resource": "" }
q40484
HelpScout._load_apis
train
def _load_apis(self):
    """Find the available APIs and set instance properties to their auth
    proxies."""
    helpscout = __import__('helpscout.apis')
    for class_name in helpscout.apis.__all__:
        if not class_name.startswith('_'):
            cls = getattr(helpscout.apis, class_name)
            api = AuthProxy(self.session, cls)
            setattr(self, class_name, api)
            self.__apis__[class_name] = api
python
{ "resource": "" }
q40485
make_response
train
def make_response(response):
    """Make a response tuple.

    Potential features to be added:
        - parameter validation
    """
    try:
        string_types = (str, unicode)  # Python 2
    except NameError:
        string_types = (str,)          # Python 3
    if isinstance(response, string_types):
        response = (response, 'text/html')
    return response
python
{ "resource": "" }
q40486
fft
train
def fft(ts):
    """
    Perform a fast Fourier transform on a Trace.
    """
    t_step = ts.index[1] - ts.index[0]
    oc = np.abs(np.fft.fftshift(np.fft.fft(ts.values))) / len(ts.values)
    t = np.fft.fftshift(np.fft.fftfreq(len(oc), d=t_step))
    return Trace(oc, t)
python
{ "resource": "" }
q40487
loads
train
def loads(ast_str):
    """
    Create a Trace from a suitably compressed string.
    """
    data = zlib.decompress(ast_str)
    li = struct.unpack('<L', data[0:4])[0]
    lt = struct.unpack('<L', data[4:8])[0]
    n = data[8:8 + li].decode('utf-8')
    t = np.fromstring(data[8 + li:8 + li + lt])
    d = np.fromstring(data[8 + li + lt:])
    return Trace(d, t, name=n)
python
{ "resource": "" }
q40488
dumps
train
def dumps(asts):
    """
    Create a compressed string from a Trace.
    """
    d = asts.values.tostring()
    t = asts.index.values.astype(float).tostring()
    lt = struct.pack('<L', len(t))
    i = asts.name.encode('utf-8')
    li = struct.pack('<L', len(i))
    try:  # python 2
        return buffer(zlib.compress(li + lt + i + t + d))
    except NameError:  # python 3
        return zlib.compress(li + lt + i + t + d)
python
{ "resource": "" }
q40489
ts_func
train
def ts_func(f):
    """
    This wraps a function that would normally only accept an array and
    allows it to operate on a DataFrame. Useful for applying numpy
    functions to DataFrames.
    """
    def wrap_func(df, *args):
        # TODO: should vectorize to apply over all columns?
        return Chromatogram(f(df.values, *args), df.index, df.columns)
    return wrap_func
python
{ "resource": "" }
q40490
desaturate
train
def desaturate(c, k=0):
    """
    Utility function to desaturate a color c by an amount k.
    """
    from matplotlib.colors import ColorConverter
    c = ColorConverter().to_rgb(c)
    intensity = 0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]
    return [intensity * k + i * (1 - k) for i in c]
python
{ "resource": "" }
q40491
find_spectrum_match
train
def find_spectrum_match(spec, spec_lib, method='euclidian'):
    """
    Find the spectrum in spec_lib most similar to spec.
    """
    # filter out any points with abundance below 1 %
    # spec[spec / np.sum(spec) < 0.01] = 0

    # normalize to a maximum of 1
    spec = spec / np.max(spec)

    if method == 'dot':
        d1 = (spec_lib * lil_matrix(spec).T).sum(axis=1).A ** 2
        d2 = np.sum(spec ** 2) * spec_lib.multiply(spec_lib).sum(axis=1).A
        dist = d1 / d2
    elif method == 'euclidian':
        # st_spc = spectrum[np.newaxis, :].repeat(spec_lib.shape[0], axis=0)
        st_spc = dia_matrix((spec, [0]), shape=(len(spec), len(spec)))
        # calculate the residual sum of squares from spectrum to library
        dist_sp = spec_lib.multiply(spec_lib) - 2 * spec_lib.dot(st_spc)
        dist = dist_sp.sum(axis=1).A + np.sum(spec ** 2)
    return (dist.argmin(), dist.min())
python
{ "resource": "" }
q40492
DaemonCLI.get_command
train
def get_command(self, ctx, name):
    """Get a callable command object."""
    if name not in self.daemon_class.list_actions():
        return None

    # The context object is a Daemon object
    daemon = ctx.obj

    def subcommand(debug=False):
        """Call a daemonocle action."""
        if daemon.detach and debug:
            daemon.detach = False
        daemon.do_action(name)

    # Override the docstring for the function so that it shows up
    # correctly in the help output
    subcommand.__doc__ = daemon.get_action(name).__doc__

    if name == 'start':
        # Add a --debug option for start
        subcommand = click.option(
            '--debug', is_flag=True,
            help='Do NOT detach and run in the background.'
        )(subcommand)

    # Make it into a click command
    subcommand = click.command(
        name, options_metavar=self.options_metavar)(subcommand)

    return subcommand
python
{ "resource": "" }
q40493
ConfigurationLoader.cli_help_message
train
def cli_help_message(self, description):
    '''
    Get a user friendly help message that can be dropped in a
    `click.Command`\ 's epilog.

    Parameters
    ----------
    description : str
        Description of the configuration file to include in the message.

    Returns
    -------
    str
        A help message that uses :py:mod:`click`\ 's help formatting
        constructs (e.g. ``\b``).
    '''
    config_files_listing = '\n'.join(' {}. {!s}'.format(i, path)
                                     for i, path in enumerate(self._paths, 1))
    text = dedent('''\
        {config_file}:
        {description}

        {config_file} files are read from the following locations:

        \b
        {config_files_listing}

        Any configuration file can override options set by previous
        configuration files. Some configuration file locations can be
        changed using the XDG standard
        (http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html).
        ''').format(
        config_file='{}.conf'.format(self._configuration_name),
        description=description,
        config_files_listing=config_files_listing
    )
    return text
python
{ "resource": "" }
q40494
Application.start
train
def start(self):
    """
    Start the application, initializing your components.
    """
    current_pedalboard = self.controller(CurrentController).pedalboard

    if current_pedalboard is None:
        self.log('There is no current pedalboard.')
        self.log('Use CurrentController to set the current pedalboard')
    else:
        self.log('Load current pedalboard - "{}"', current_pedalboard.name)
        self.mod_host.pedalboard = current_pedalboard

    for component in self.components:
        component.init()
        self.log('Load component - {}', component.__class__.__name__)
    self.log('Components loaded')

    atexit.register(self.stop)
python
{ "resource": "" }
q40495
Application.stop
train
def stop(self):
    """
    Stop the application, closing your components.
    """
    for component in self.components:
        component.close()
        self.log('Stopping component - {}', component.__class__.__name__)

    for controller in self.controllers.values():
        controller.close()
        self.log('Stopping controller - {}', controller.__class__.__name__)

    atexit.unregister(self.stop)
python
{ "resource": "" }
q40496
get_seconds
train
def get_seconds(value, scale):
    """Convert a time value to seconds.

    Given a value and a scale name (e.g. the scale and value keys of a
    schedule dictionary), convert the value into seconds based on the
    scale.
    """
    scales = {
        'seconds': lambda x: x,
        'minutes': lambda x: x * 60,
        'hours': lambda x: x * 60 * 60,
        'days': lambda x: x * 60 * 60 * 24,
        'weeks': lambda x: x * 60 * 60 * 24 * 7,
        'months': lambda x: x * 60 * 60 * 24 * 30,
        'years': lambda x: x * 60 * 60 * 24 * 365,
    }
    return scales[scale](value)
python
{ "resource": "" }
q40497
_get_col_epsg
train
def _get_col_epsg(mapped_class, geom_attr):
    """Get the EPSG code associated with a geometry attribute.

    Arguments:

    geom_attr
        the key of the geometry property as defined in the SQLAlchemy
        mapper. If you use ``declarative_base`` this is the name of the
        geometry attribute as defined in the mapped class.
    """
    col = class_mapper(mapped_class).get_property(geom_attr).columns[0]
    return col.type.srid
python
{ "resource": "" }
q40498
create_geom_filter
train
def create_geom_filter(request, mapped_class, geom_attr):
    """Create MapFish geometry filter based on the request params. Either
    a box or within or geometry filter, depending on the request params.
    Additional named arguments are passed to the spatial filter.

    Arguments:

    request
        the request.

    mapped_class
        the SQLAlchemy mapped class.

    geom_attr
        the key of the geometry property as defined in the SQLAlchemy
        mapper. If you use ``declarative_base`` this is the name of the
        geometry attribute as defined in the mapped class.
    """
    tolerance = float(request.params.get('tolerance', 0.0))
    epsg = None
    if 'epsg' in request.params:
        epsg = int(request.params['epsg'])
    box = request.params.get('bbox')
    shape = None
    if box is not None:
        box = [float(x) for x in box.split(',')]
        shape = Polygon(((box[0], box[1]), (box[0], box[3]),
                         (box[2], box[3]), (box[2], box[1]),
                         (box[0], box[1])))
    elif 'lon' in request.params and 'lat' in request.params:
        shape = Point(float(request.params['lon']),
                      float(request.params['lat']))
    elif 'geometry' in request.params:
        shape = loads(request.params['geometry'],
                      object_hook=GeoJSON.to_instance)
        shape = asShape(shape)
    if shape is None:
        return None
    column_epsg = _get_col_epsg(mapped_class, geom_attr)
    geom_attr = getattr(mapped_class, geom_attr)
    epsg = column_epsg if epsg is None else epsg
    if epsg != column_epsg:
        geom_attr = func.ST_Transform(geom_attr, epsg)
    geometry = from_shape(shape, srid=epsg)
    return func.ST_DWITHIN(geom_attr, geometry, tolerance)
python
{ "resource": "" }
q40499
create_filter
train
def create_filter(request, mapped_class, geom_attr, **kwargs):
    """ Create MapFish default filter based on the request params.

    Arguments:

    request
        the request.

    mapped_class
        the SQLAlchemy mapped class.

    geom_attr
        the key of the geometry property as defined in the SQLAlchemy
        mapper. If you use ``declarative_base`` this is the name of the
        geometry attribute as defined in the mapped class.

    \\**kwargs
        additional arguments passed to ``create_geom_filter()``.
    """
    attr_filter = create_attr_filter(request, mapped_class)
    geom_filter = create_geom_filter(request, mapped_class, geom_attr,
                                     **kwargs)
    if geom_filter is None and attr_filter is None:
        return None
    if geom_filter is None:
        return attr_filter
    if attr_filter is None:
        return geom_filter
    return and_(geom_filter, attr_filter)
python
{ "resource": "" }