_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q40300
SensitivityInput.set_wd_noise
train
def set_wd_noise(self, wd_noise):
    """Configure White Dwarf (WD) background noise handling.

    Calculations can be done with WD noise, without it, or both ways.

    Args:
        wd_noise (bool or str): ``'yes'``/``True`` for WD-noise-only
            calculations, ``'no'``/``False`` for none, ``'both'`` for
            calculations with and without WD noise.

    Raises:
        ValueError: If the input is not one of the accepted options.

    """
    if isinstance(wd_noise, bool):
        wd_noise = str(wd_noise)

    # Canonical string stored on the sensitivity input.
    canonical = {
        'yes': 'True', 'true': 'True',
        'no': 'False', 'false': 'False',
        'both': 'Both',
    }
    key = wd_noise.lower()
    if key not in canonical:
        raise ValueError('wd_noise must be yes, no, True, False, or Both.')

    self.sensitivity_input.add_wd_noise = canonical[key]
    return
python
{ "resource": "" }
q40301
ParallelInput.set_generation_type
train
def set_generation_type(self, num_processors=-1, num_splits=1000, verbose=-1):
    """Choose between single-core and parallel data generation.

    Args:
        num_processors (int or None, optional): Number of parallel
            processors. ``-1`` uses all available cpus via the
            multiprocessing module; ``None`` selects single-core
            generation. Default is -1.
        num_splits (int, optional): Number of binaries handled per
            process. Default is 1000.
        verbose (int, optional): Cadence of process-completion
            notifications; ``-1`` silences them. Default is -1.

    """
    parallel = self.parallel_input
    parallel.num_processors = num_processors
    parallel.num_splits = num_splits
    parallel.verbose = verbose
    return
python
{ "resource": "" }
q40302
SNRInput.set_signal_type
train
def set_signal_type(self, sig_type):
    """Select the signal phases for which SNR is calculated.

    Args:
        sig_type (str or list of str): ``ins``, ``mrg``, ``rd``, or
            ``all`` for circular PhenomD waveforms; must be ``all``
            when eccentric waveforms are used.

    """
    # Normalize a bare string to a single-element list.
    self.snr_input.signal_type = (
        [sig_type] if isinstance(sig_type, str) else sig_type)
    return
python
{ "resource": "" }
q40303
pretty_print
train
def pretty_print(ast, indent_str='  '):
    """Render an input ES5 Program AST as a pretty-printed string.

    arguments

    ast
        The AST to pretty print
    indent_str
        The string used for indentations.  Defaults to two spaces.
    """
    printer = pretty_printer(indent_str)
    return ''.join(fragment.text for fragment in printer(ast))
python
{ "resource": "" }
q40304
minify_printer
train
def minify_printer(
        obfuscate=False, obfuscate_globals=False, shadow_funcname=False,
        drop_semi=False):
    """Construct a minifying Unparser.

    Arguments

    obfuscate
        If True, rename identifiers nested in each scope to shortened
        names to further reduce output size.  Defaults to False.
    obfuscate_globals
        Apply the same renaming to identifiers on the global scope; do
        not enable unless the not-fully-deterministic renaming of
        globals is guaranteed not to break the generated code or other
        code sharing its execution environment.  Defaults to False for
        that reason.
    drop_semi
        Drop semicolons whenever possible (e.g. the final semicolons
        of a given block).
    """
    active = [rules.minify(drop_semi=drop_semi)]
    if obfuscate:
        # Keyword identifiers must never be produced by the renamer.
        active.append(rules.obfuscate(
            obfuscate_globals=obfuscate_globals,
            shadow_funcname=shadow_funcname,
            reserved_keywords=(Lexer.keywords_dict.keys()),
        ))
    return Unparser(rules=active)
python
{ "resource": "" }
q40305
minify_print
train
def minify_print(
        ast, obfuscate=False, obfuscate_globals=False,
        shadow_funcname=False, drop_semi=False):
    """Minify-print an ES5 program AST into a string.

    Arguments

    ast
        The AST to minify print
    obfuscate
        If True, rename identifiers nested in each scope to shortened
        names to further reduce output size.  Defaults to False.
    obfuscate_globals
        Apply the same renaming to global-scope identifiers; enable
        only when that cannot break the generated code or other code
        in its execution environment.  Defaults to False.
    drop_semi
        Drop semicolons whenever possible (e.g. the final semicolons
        of a given block).
    """
    printer = minify_printer(
        obfuscate, obfuscate_globals, shadow_funcname, drop_semi)
    return ''.join(fragment.text for fragment in printer(ast))
python
{ "resource": "" }
q40306
encode_vlq
train
def encode_vlq(i):
    """Encode integer `i` into a VLQ encoded string."""
    # Fold the sign into the least significant bit.
    value = (-i << 1) + 1 if i < 0 else i << 1
    if value < VLQ_MULTI_CHAR:
        # Single character: no continuation bits required.
        return INT_B64[value]
    units = []
    while value:
        # Emit base bits with the continuation flag set.
        units.append(value & VLQ_BASE_MASK | VLQ_CONT)
        value >>= VLQ_SHIFT
    # The final unit must not signal continuation.
    units[-1] &= VLQ_BASE_MASK
    return ''.join(INT_B64[u] for u in units)
python
{ "resource": "" }
q40307
decode_vlqs
train
def decode_vlqs(s):
    """Decode str `s` into a tuple of integers."""
    values = []
    acc = 0
    shift = 0
    for ch in s:
        unit = B64_INT[ch]
        acc |= (VLQ_BASE_MASK & unit) << shift
        shift += VLQ_SHIFT
        if not (VLQ_CONT & unit):
            # End of one VLQ: low bit is the sign, rest the magnitude.
            values.append(-(acc >> 1) if acc & 1 else acc >> 1)
            acc = 0
            shift = 0
    return tuple(values)
python
{ "resource": "" }
q40308
Primes._findNextPrime
train
def _findNextPrime(self, N):
    """Extend ``self.primes`` until it contains the first N primes.

    Uses trial division against the already-known primes; a candidate
    is prime when no known prime up to its square root divides it.

    Args:
        N (int): Target number of primes in ``self.primes``.
    """
    primes = self.primes
    candidate = primes[-1] + 1
    while len(primes) < N:
        is_prime = True
        for p in primes:
            # No divisor can exceed sqrt(candidate).  The original
            # compared p against candidate**2, a bound no stored prime
            # can reach, so the early exit never fired and every test
            # scanned the full prime list.
            if p * p > candidate:
                break
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(candidate)
        candidate += 1
python
{ "resource": "" }
q40309
vsh
train
def vsh(cmd, *args, **kw):
    """Execute a command installed into the active virtualenv."""
    # Escape embedded double quotes, then join with quote boundaries.
    quoted = '" "'.join(arg.replace('"', r'\"') for arg in args)
    easy.sh('"%s" "%s"' % (venv_bin(cmd), quoted))
python
{ "resource": "" }
q40310
install_tools
train
def install_tools(dependencies):
    """Install any missing required tools before they are used.

    ``dependencies`` can be a distutils requirement, a simple name from
    the ``tools`` task configuration, or a (nested) list of such
    requirements.
    """
    tools = getattr(easy.options, "tools", {})
    for requirement in iterutil.flatten(dependencies):
        # Resolve simple tool names to their configured requirement.
        requirement = tools.get(requirement, requirement)
        try:
            pkg_resources.require(requirement)
        except pkg_resources.DistributionNotFound:
            vsh("pip", "install", "-q", requirement)
            requirement = pkg_resources.require(requirement)
            easy.info("Installed required tool %s" % (requirement,))
python
{ "resource": "" }
q40311
toplevel_packages
train
def toplevel_packages():
    """Get package list, without sub-packages."""
    names = set(easy.options.setup.packages)
    for candidate in sorted(names):
        # Drop everything nested beneath this package.
        prefix = candidate + '.'
        names = {p for p in names if not str(p).startswith(prefix)}
    return sorted(names)
python
{ "resource": "" }
q40312
Menu.widget_status
train
def widget_status(self):
    """Return ``[name, status]`` pairs for every widget in the widget list."""
    return [[widget.name, widget.status] for widget in self.widgetlist]
python
{ "resource": "" }
q40313
Menu.update
train
def update(self, screen, clock): """Event handling loop for the menu""" # If a music file was passed, start playing it on repeat if self.music is not None: pygame.mixer.music.play(-1) while True: clock.tick(30) for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() # Check if any of the buttons were clicked for i in self.buttonlist: if (event.type == pygame.MOUSEBUTTONUP and i.rect.collidepoint(pygame.mouse.get_pos())): if self.music is not None: pygame.mixer.music.stop() if self.widgetlist: return [i(), self.widget_status()] else: return i() # If there is a widget list, check to see if any were clicked if self.widgetlist: for i in self.widgetlist: if (event.type == pygame.MOUSEBUTTONDOWN and i.rect.collidepoint(pygame.mouse.get_pos())): # Call the widget and give it the menu information i(self) screen.blit(self.image, self.pos) pygame.display.flip()
python
{ "resource": "" }
q40314
Textscreens.Screens
train
def Screens(self, text, prog, screen, clock): """Prog = 0 for first page, 1 for middle pages, 2 for last page""" # Initialize the screen class BaseScreen.__init__(self, self.size, self.background) # Determine the mid position of the given screen size and the # y button height xmid = self.size[0]//2 # Create the header text Linesoftext(text, (xmid, 40), xmid=True, surface=self.image, fontsize=30) # Create the buttons self.buttonlist = [] if prog == 0: self.buttonlist += [self.nextbutton] elif prog == 1: self.buttonlist += [self.nextbutton] self.buttonlist += [self.backbutton] elif prog == 2: self.buttonlist += [self.lastbutton] self.buttonlist += [self.backbutton] # Draw the buttons to the screen for i in self.buttonlist: self.image.blit(*i.blitinfo) # Use the menu update method to run the screen and process button clicks return Menu.update(self, screen, clock)
python
{ "resource": "" }
q40315
Commerce.create_commerce
train
def create_commerce():
    """Create a Commerce from environment variables.

    Reads ``TBK_COMMERCE_ID`` and ``TBK_COMMERCE_KEY``, or for testing
    purposes ``TBK_COMMERCE_TESTING``.

    Raises:
        ValueError: When not testing and the id or key variable is missing.
    """
    commerce_id = os.getenv('TBK_COMMERCE_ID')
    commerce_key = os.getenv('TBK_COMMERCE_KEY')
    commerce_testing = os.getenv('TBK_COMMERCE_TESTING') == 'True'
    if not commerce_testing:
        # Both credentials are mandatory outside of testing mode.
        for value, var in ((commerce_id, 'TBK_COMMERCE_ID'),
                           (commerce_key, 'TBK_COMMERCE_KEY')):
            if value is None:
                raise ValueError(
                    "create_commerce needs %s environment variable" % var)
    return Commerce(
        id=commerce_id or Commerce.TEST_COMMERCE_ID,
        key=commerce_key,
        testing=commerce_testing,
    )
python
{ "resource": "" }
q40316
Commerce.get_config_tbk
train
def get_config_tbk(self, confirmation_url):
    '''Return the contents of ``TBK_CONFIG.dat`` as a string.

    :param confirmation_url: URL where callback is made.
    '''
    uri = six.moves.urllib.parse.urlparse(confirmation_url)
    # Certification vs production Webpay endpoints.
    if self.testing:
        webpay_server = "https://certificacion.webpay.cl"
        webpay_port = 6443
    else:
        webpay_server = "https://webpay.transbank.cl"
        webpay_port = 443
    template_lines = [
        "IDCOMERCIO = {commerce_id}",
        "MEDCOM = 1",
        "TBK_KEY_ID = 101",
        "PARAMVERIFCOM = 1",
        "URLCGICOM = {confirmation_path}",
        "SERVERCOM = {confirmation_host}",
        "PORTCOM = {confirmation_port}",
        "WHITELISTCOM = ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz 0123456789./:=&?_",
        "HOST = {confirmation_host}",
        "WPORT = {confirmation_port}",
        "URLCGITRA = /filtroUnificado/bp_revision.cgi",
        "URLCGIMEDTRA = /filtroUnificado/bp_validacion.cgi",
        "SERVERTRA = {webpay_server}",
        "PORTTRA = {webpay_port}",
        "PREFIJO_CONF_TR = HTML_",
        "HTML_TR_NORMAL = http://127.0.0.1/notify",
        "",  # keep the trailing newline of the original template
    ]
    return "\n".join(template_lines).format(
        commerce_id=self.id,
        confirmation_path=uri.path,
        confirmation_host=uri.hostname,
        confirmation_port=uri.port,
        webpay_server=webpay_server,
        webpay_port=webpay_port)
python
{ "resource": "" }
q40317
indent
train
def indent(indent_str=None):
    """An example indentation ruleset."""
    def indentation_rule():
        # One Indentator instance tracks depth for this unparser run.
        indentator = Indentator(indent_str)
        handlers = {
            Indent: indentator.layout_handler_indent,
            Dedent: indentator.layout_handler_dedent,
            Newline: indentator.layout_handler_newline,
            OptionalNewline: indentator.layout_handler_newline_optional,
            OpenBlock: layout_handler_openbrace,
            CloseBlock: layout_handler_closebrace,
            EndStatement: layout_handler_semicolon,
        }
        return {'layout_handlers': handlers}
    return indentation_rule
python
{ "resource": "" }
q40318
LocaleURLMiddleware._is_lang_change
train
def _is_lang_change(self, request):
    """Return True if the lang param is present and URL isn't exempt."""
    if 'lang' not in request.GET:
        return False
    # Exempt URLs never trigger a language change.
    for url in self.exempt_urls:
        if request.path.endswith(url):
            return False
    return True
python
{ "resource": "" }
q40319
BooleanParser.add
train
def add(self, *matches, **kw):  # kw=default=None, boolean=False
    '''Add an argument; optional, mostly useful for setting up aliases
    or ``boolean=True``.

    The options are pulled out of ``kw`` by hand because Python 2
    cannot express keyword-only parameters after ``*matches``:

        default: value used when the argument is absent (default None).
        boolean: if True, the flag consumes no subsequent value.

    Of multiple non-dash-prefixed `matches`, only the first is used as
    a positional argument; combining positional matches with
    ``boolean=True`` gets no special handling for the positional.

    Chainable: returns self.
    '''
    default = kw.get('default', None)
    boolean = kw.get('boolean', False)
    del kw  # keyword hack namespace; never touch it past this line

    positional = None
    names = []
    for match in matches:
        if match.startswith('--'):
            names.append(match[2:])
        elif match.startswith('-'):
            names.append(match[1:])
        else:
            if not positional:
                # First bare match becomes the canonical positional.
                positional = match
            names.append(match)
    self.arguments.append(
        BooleanArgument(names, default, boolean, positional))
    # chainable
    return self
python
{ "resource": "" }
q40320
GitExtension.parse
train
def parse(self, parser):
    """Parse the template tag and emit the commit-hash output node."""
    lineno = next(parser.stream).lineno
    # Optional `name:short` token requests the abbreviated hash.
    if parser.stream.skip_if('name:short'):
        parser.stream.skip(1)
        short = parser.parse_expression()
    else:
        short = nodes.Const(False)
    call = self.call_method('_commit_hash', [short], [], lineno=lineno)
    return nodes.Output([call], lineno=lineno)
python
{ "resource": "" }
q40321
flatten
train
def flatten(nested, containers=(list, tuple)):
    """Flatten a nested iterable by yielding its scalar items.

    Args:
        nested: The (possibly nested) iterable to flatten.
        containers: Types treated as nestable containers.  Iterators
            (objects exposing ``next``/``__next__``) are always
            recursed into as well.

    Yields:
        The scalar items of ``nested`` in depth-first order.
    """
    for item in nested:
        # Bug fixes: detect Python 3 iterators via __next__ (the old
        # "next" check only matched Python 2 iterators), and propagate
        # `containers` into the recursion — it was previously dropped,
        # silently reverting to the default (list, tuple).
        is_iterator = hasattr(item, "next") or hasattr(item, "__next__")
        if is_iterator or isinstance(item, containers):
            for subitem in flatten(item, containers):
                yield subitem
        else:
            yield item
python
{ "resource": "" }
q40322
get_template_context_processors
train
def get_template_context_processors(
        exclude=(), append=(),
        current={'processors': TEMPLATE_CONTEXT_PROCESSORS}):
    """
    Returns TEMPLATE_CONTEXT_PROCESSORS without the processors listed
    in exclude and with the processors listed in append.

    The use of a mutable dict is intentional, in order to preserve the
    state of the TEMPLATE_CONTEXT_PROCESSORS tuple across multiple
    settings files.
    """
    kept = tuple(p for p in current['processors'] if p not in exclude)
    current['processors'] = kept + tuple(append)
    return current['processors']
python
{ "resource": "" }
q40323
get_middleware
train
def get_middleware(exclude=(), append=(),
                   current={'middleware': MIDDLEWARE_CLASSES}):
    """
    Returns MIDDLEWARE_CLASSES without the middlewares listed in
    exclude and with the middlewares listed in append.

    The use of a mutable dict is intentional, in order to preserve the
    state of the MIDDLEWARE_CLASSES tuple across multiple settings
    files.
    """
    kept = tuple(m for m in current['middleware'] if m not in exclude)
    current['middleware'] = kept + tuple(append)
    return current['middleware']
python
{ "resource": "" }
q40324
get_apps
train
def get_apps(exclude=(), append=(), current={'apps': INSTALLED_APPS}):
    """
    Returns INSTALLED_APPS without the apps listed in exclude and with
    the apps listed in append.

    The use of a mutable dict is intentional, in order to preserve the
    state of the INSTALLED_APPS tuple across multiple settings files.
    """
    kept = tuple(a for a in current['apps'] if a not in exclude)
    current['apps'] = kept + tuple(append)
    return current['apps']
python
{ "resource": "" }
q40325
invariants
train
def invariants(mol):
    """Generate initial atom identifiers using atomic invariants."""
    atom_ids = {}
    for atom in mol.atoms:
        components = [
            atom.number,
            len(atom.oatoms),
            atom.hcount,
            atom.charge,
            atom.mass,
        ]
        if len(atom.rings) > 0:
            # Ring membership contributes a trailing flag component.
            components.append(1)
        atom_ids[atom.index] = gen_hash(components)
    return atom_ids
python
{ "resource": "" }
q40326
TransformMixin.connected_subgraphs
train
def connected_subgraphs(self, directed=True, ordered=False):
    '''Generate connected components as subgraphs.

    Args:
        directed: Treat the graph as directed when labeling components.
        ordered: When True, yield subgraphs ordered by descending
            number of vertices; otherwise in label order.

    Yields:
        Graph objects of the same class, one per connected component.
    '''
    num_ccs, labels = self.connected_components(directed=directed)
    # Trivial case: one component means the whole graph.
    if num_ccs == 1:
        yield self
        # Bug fix: `raise StopIteration` inside a generator has been a
        # RuntimeError since PEP 479 (Python 3.7); a plain return is
        # the correct way to finish the generator.
        return
    if ordered:
        # sort by descending size (num vertices)
        order = np.argsort(np.bincount(labels))[::-1]
    else:
        order = range(num_ccs)
    # don't use self.subgraph() here, because we can reuse adj
    adj = self.matrix('dense', 'csr', 'csc')
    for c in order:
        mask = labels == c
        sub_adj = adj[mask][:, mask]
        yield self.__class__.from_adj_matrix(sub_adj)
python
{ "resource": "" }
q40327
TransformMixin.neighborhood_subgraph
train
def neighborhood_subgraph(self, start_idx, radius=1, weighted=True,
                          directed=True, return_mask=False):
    '''Return a subgraph of the vertices within geodesic `radius` of
    `start_idx`, optionally together with the boolean vertex mask.'''
    adj = self.matrix('dense', 'csr', 'csc')
    dist = ssc.dijkstra(adj, directed=directed, indices=start_idx,
                        unweighted=(not weighted), limit=radius)
    # Vertices beyond the radius come back with infinite distance.
    mask = np.isfinite(dist)
    sub = self.__class__.from_adj_matrix(adj[mask][:, mask])
    if return_mask:
        return sub, mask
    return sub
python
{ "resource": "" }
q40328
TransformMixin.isograph
train
def isograph(self, min_weight=None):
    '''Remove short-circuit edges using the Isograph algorithm.

    min_weight : float, optional
        Minimum weight of edges to consider removing. Defaults to max(MST).

    From "Isograph: Neighbourhood Graph Construction Based On Geodesic
    Distance For Semi-Supervised Learning" by Ghazvininejad et al., 2011.

    Note: This uses the non-iterative algorithm which removes edges
    rather than reweighting them.
    '''
    W = self.matrix('dense')
    # get candidate edges: all edges - MST edges
    tree = self.minimum_spanning_subtree()
    candidates = np.argwhere((W - tree.matrix('dense')) > 0)
    cand_weights = W[candidates[:,0], candidates[:,1]]
    # order by increasing edge weight
    order = np.argsort(cand_weights)
    cand_weights = cand_weights[order]
    # disregard edges shorter than a threshold
    if min_weight is None:
        min_weight = tree.edge_weights().max()
    idx = np.searchsorted(cand_weights, min_weight)
    cand_weights = cand_weights[idx:]
    candidates = candidates[order[idx:]]
    # check each candidate edge
    to_remove = np.zeros_like(cand_weights, dtype=bool)
    for i, (u,v) in enumerate(candidates):
        # Hop count of the shortest u->v path using only edges strictly
        # shorter than the candidate; more than 2 hops marks the
        # candidate as a short-circuit edge to remove.
        W_uv = np.where(W < cand_weights[i], W, 0)
        len_uv = ssc.dijkstra(W_uv, indices=u, unweighted=True, limit=2)[v]
        if len_uv > 2:
            to_remove[i] = True
    ii, jj = candidates[to_remove].T
    return self.remove_edges(ii, jj, copy=True)
python
{ "resource": "" }
q40329
TransformMixin.circle_tear
train
def circle_tear(self, spanning_tree='mst', cycle_len_thresh=5, spt_idx=None,
                copy=True):
    '''Circular graph tearing.

    spanning_tree: one of {'mst', 'spt'}
    cycle_len_thresh: int, length of longest allowable cycle
    spt_idx: int, start vertex for shortest_path_subtree, random if None

    From "How to project 'circular' manifolds using geodesic distances?"
    by Lee & Verleysen, ESANN 2004.

    See also: shortest_path_subtree, minimum_spanning_subtree
    '''
    # Build the initial spanning tree matrix.
    if spanning_tree == 'mst':
        tree = self.minimum_spanning_subtree().matrix()
    elif spanning_tree == 'spt':
        if spt_idx is None:
            spt_idx = np.random.choice(self.num_vertices())
        tree = self.shortest_path_subtree(spt_idx, directed=False).matrix()
    # Edges present in the graph but not in the tree are candidates.
    potential_edges = np.argwhere(ss.triu(self.matrix() - tree))
    # Discard candidates that would induce cycles longer than the
    # threshold; the survivors are torn out of the graph.
    ii, jj = _find_cycle_inducers(tree, potential_edges, cycle_len_thresh)
    return self.remove_edges(ii, jj, symmetric=True, copy=copy)
python
{ "resource": "" }
q40330
fuzz_string
train
def fuzz_string(seed_str, runs=100, fuzz_factor=50):
    """A random fuzzer for a simulated text viewer application.

    :param seed_str: the string to use as seed for fuzzing.
    :param runs: number of fuzzed variants to supply.
    :param fuzz_factor: degree of fuzzing = 1 / fuzz_factor.
    :return: list of fuzzed variants of seed_str.
    :rtype: [str]
    """
    seed = bytearray(seed_str, encoding="utf8")
    variants = []
    for _ in range(runs):
        mutated = fuzzer(seed, fuzz_factor)
        # Re-interpret each byte as a character for the text viewer.
        variants.append(''.join(chr(b) for b in mutated))
    logger().info('Fuzzed strings: {}'.format(variants))
    return variants
python
{ "resource": "" }
q40331
fuzzer
train
def fuzzer(buffer, fuzz_factor=101):
    """Fuzz given buffer.

    Copies `buffer` and overwrites a few randomly chosen bytes with
    random values; how many depends on `fuzz_factor`.  This follows
    Charlie Miller's fuzzer recipe.

    :param buffer: the data to fuzz.
    :type buffer: byte array
    :param fuzz_factor: degree of fuzzing.
    :type fuzz_factor: int
    :return: fuzzed buffer.
    :rtype: byte array
    """
    fuzzed = deepcopy(buffer)
    count = number_of_bytes_to_modify(len(fuzzed), fuzz_factor)
    for _ in range(count):
        # Draw the value first, then the position (matches the
        # original random call order).
        value = random.randrange(256)
        position = random.randrange(len(fuzzed))
        fuzzed[position] = value
    return fuzzed
python
{ "resource": "" }
q40332
number_of_bytes_to_modify
train
def number_of_bytes_to_modify(buf_len, fuzz_factor):
    """Calculate number of bytes to modify.

    :param buf_len: len of data buffer to fuzz.
    :param fuzz_factor: degree of fuzzing.
    :return: number of bytes to change (at least 1).
    """
    # Upper bound scales inversely with fuzz_factor, never below 1.
    upper = math.ceil(float(buf_len) / fuzz_factor)
    return 1 + random.randrange(upper)
python
{ "resource": "" }
q40333
FuzzExecutor._fuzz_data_file
train
def _fuzz_data_file(self, data_file):
    """Generate a fuzzed variant of the given file.

    :param data_file: path to file to fuzz.
    :type data_file: str
    :return: path to fuzzed file.
    :rtype: str
    """
    # Read the source with a context manager — the original leaked
    # this handle (bare open().read()).
    with open(os.path.abspath(data_file), 'rb') as source:
        buf = bytearray(source.read())
    fuzzed = fuzzer(buf, self.fuzz_factor)
    fd, fuzz_output = mkstemp(prefix='fuzzed_')
    # Reuse the descriptor mkstemp returns instead of discarding it
    # (the original leaked the fd AND the second open()'s handle; its
    # try/finally: pass cleaned up nothing).
    with os.fdopen(fd, 'wb') as sink:
        sink.write(fuzzed)
    return fuzz_output
python
{ "resource": "" }
q40334
FuzzExecutor._execute
train
def _execute(self, app_, file_):
    """Run app with file as input and record the outcome.

    :param app_: application to run.
    :param file_: file to run app with.
    :return: None; the result is recorded in ``self.stats_``.
    """
    app_name = os.path.basename(app_)
    cmd = [app_] + self.args[app_] + [file_]
    process = subprocess.Popen(cmd)
    # Give the target a moment to crash before checking on it.
    time.sleep(1)
    # poll() is None while the process is still alive, i.e. no crash.
    result = Status.SUCCESS if process.poll() is None else Status.FAILED
    self.stats_.add(app_name, result)
    if result is Status.SUCCESS:
        # process did not crash, so just terminate it
        process.terminate()
python
{ "resource": "" }
q40335
FuzzExecutor.__parse_app_list
train
def __parse_app_list(app_list):
    """Parse list of apps for arguments.

    :param app_list: list of apps with optional arguments.
    :return: list of apps and assigned argument dict.
    :rtype: [String], {String: [String]}
    """
    apps = []
    args = {}
    for entry in app_list:
        # App path and its arguments are separated by '&'.
        parts = entry.split("&")
        app_path = parts[0].strip()
        apps.append(app_path)
        if len(parts) > 1:
            args[app_path] = [token.strip() for token in parts[1].split()]
        else:
            args[app_path] = []
    return apps, args
python
{ "resource": "" }
q40336
_make_tonnetz_matrix
train
def _make_tonnetz_matrix():
    """Return the 6x12 tonnetz projection matrix."""
    chroma = np.arange(12)
    # (radius, angular step) per interval circle — fifths, minor
    # thirds, major thirds; each contributes a sin (x) and cos (y) row.
    circles = (
        (r_fifth, 7 * np.pi / 6),
        (r_minor_thirds, 3 * np.pi / 2),
        (r_major_thirds, 2 * np.pi / 3),
    )
    rows = []
    for radius, step in circles:
        rows.append(radius * np.sin(step * chroma))
        rows.append(radius * np.cos(step * chroma))
    return np.vstack(rows)
python
{ "resource": "" }
q40337
_to_tonnetz
train
def _to_tonnetz(chromagram):
    """Project a chromagram on the tonnetz.

    The returned vector is L1-normalized to prevent numerical
    instabilities; an all-zero chromagram maps to the zero vector.
    """
    if np.sum(np.abs(chromagram)) == 0.:
        # The input is an empty chord, return zero.
        return np.zeros(6)
    projected = np.dot(__TONNETZ_MATRIX, chromagram)
    # The one-norm is non-zero here, so the division is safe.
    return projected / float(np.sum(np.abs(projected)))
python
{ "resource": "" }
q40338
spawn_managed_host
train
def spawn_managed_host(config_file, manager, connect_on_start=True):
    """Spawn a managed host, if it is not already running."""
    data = manager.request_host_status(config_file)
    already_running = data['started']
    # Managed hosts run as persistent processes, so one may already be
    # up; reuse its reported state rather than starting a duplicate.
    if already_running:
        host_info = data['host']
    else:
        host_info = manager.start_host(config_file)
    host = JSHost(
        status=json.loads(host_info['output']),
        logfile=host_info['logfile'],
        config_file=config_file,
        manager=manager,
    )
    if not already_running and settings.VERBOSITY >= verbosity.PROCESS_START:
        print('Started {}'.format(host.get_name()))
    if connect_on_start:
        host.connect()
    return host
python
{ "resource": "" }
q40339
parse_input
train
def parse_input(s):
    """Parse the given input and intelligently transform it into an
    absolute, non-naive, timezone-aware datetime object for the UTC
    timezone.

    The input can be specified as a millisecond-precision UTC timestamp
    (or delta against Epoch), with or without a terminating 'L'.
    Alternatively, the input can be specified as a human-readable delta
    string with unit-separated segments, like '24d6h4m500' (24 days, 6
    hours, 4 minutes and 500ms), as long as the segments are in
    descending unit span order."""
    if isinstance(s, six.integer_types):
        s = str(s)
    elif not isinstance(s, six.string_types):
        raise ValueError(s)
    # Keep the untouched input for error messages.
    original = s
    # Tolerate a trailing 'L' (Python 2 long-literal suffix).
    if s[-1:] == 'L':
        s = s[:-1]
    # Optional leading sign: '-' past delta, '+' future delta; '=' maps
    # to 0, which falls through to the absolute-timestamp branch below.
    sign = {'-': -1, '=': 0, '+': 1}.get(s[0], None)
    if sign is not None:
        s = s[1:]
    ts = 0
    for unit in _SORTED_UNITS:
        pos = s.find(unit[0])
        if pos == 0:
            # A unit letter with no leading digits is malformed.
            raise ValueError(original)
        elif pos > 0:
            # If we find a unit letter, we're dealing with an offset. Default
            # to positive offset if a sign wasn't specified.
            if sign is None:
                sign = 1
            ts += int(s[:pos]) * __timedelta_millis(unit[1])
            s = s[min(len(s), pos + 1):]
    if s:
        # Any remaining bare digits are milliseconds.
        ts += int(s)
    # sign None or 0 -> absolute timestamp; otherwise delta from now.
    return date_from_utc_ts(ts) if not sign else \
        utc() + sign * delta(milliseconds=ts)
python
{ "resource": "" }
q40340
render_date
train
def render_date(date, tz=pytz.utc, fmt=_FULL_OUTPUT_FORMAT):
    """Format the given date for output.

    The local-time rendering of `date` uses the given timezone."""
    local = date.astimezone(tz)
    ts = __date_to_millisecond_ts(date)
    fields = {
        'ts': ts,
        'utc': date.strftime(_DATE_FORMAT),
        'millis': ts % 1000,
        'utc_tz': date.strftime(_TZ_FORMAT),
        'local': local.strftime(_DATE_FORMAT),
        'local_tz': local.strftime(_TZ_FORMAT),
        'delta': render_delta_from_now(date),
    }
    return fmt.format(**fields)
python
{ "resource": "" }
q40341
DescriptiveParser.parse
train
def parse(self, complete=True, args=None):
    '''Parse a list of arguments, returning a dict

    See BooleanParser.parse for `args`-related documentation.

    If `complete` is True and there are values in `args` that don't have
    corresponding arguments, or there are required arguments that don't
    have args, then raise an error.
    '''
    opts = dict()
    # Positional slots in declaration order, consumed left-to-right.
    positions = [argument.positional for argument in self.arguments
                 if argument.positional]
    if args is None:
        import sys
        # skip over the program name with the [1:] slice
        args = sys.argv[1:]
    # arglist is a tuple of (is_flag, name) pairs
    arglist = peekable(parse_tokens(args))
    for is_flag, name in arglist:
        if is_flag is True:
            argument = self.find_argument(name)
            # .peek will return the default argument iff there are no more entries
            next_is_flag, next_name = arglist.peek(default=(None, None))
            # next_is_flag will be None if there are no more items,
            # but True/False if there is a next item.
            # If this argument looks for a subsequent value (boolean is
            # False) and the subsequent token is not a flag, consume it.
            if argument.boolean is False and next_is_flag is False:
                opts[name] = next_name
                # finally, advance our iterator, but since we already
                # have the next values, just discard it
                arglist.next()
            else:
                # if there is no next, or the next thing is a flag,
                # all the boolean=False's in the world can't save you:
                # the flag is simply marked as present.
                opts[name] = True
        else:
            # add positional argument
            if len(positions) > 0:
                # we pop the positions off from the left
                position = positions.pop(0)
                opts[position] = name
            else:
                # the rest of the args now end up as a list in '_'
                opts.setdefault('_', []).append(name)
    # propagate aliases and defaults:
    for argument in self.arguments:
        # merge provided value from aliases
        for name in argument.names:
            if name in opts:
                value = opts[name]
                # we simply break on the first match.
                break
        else:
            # if we iterate through all names and find none in opts,
            # use the default
            value = argument.default
        # every alias of the argument gets the resolved value
        for name in argument.names:
            opts[name] = value
    return opts
python
{ "resource": "" }
q40342
Covariance.perturbParams
train
def perturbParams(self, pertSize=1e-3):
    """Add small Gaussian noise (scale `pertSize`) to the parameters."""
    current = self.getParams()
    noise = pertSize * sp.randn(current.shape[0])
    self.setParams(current + noise)
python
{ "resource": "" }
q40343
Covariance.Kgrad_param_num
train
def Kgrad_param_num(self, i, h=1e-4):
    """Central finite-difference gradient of K w.r.t. parameter `i`.

    Useful for checking discrepancies between numerical and analytical
    gradients.
    """
    params = self.getParams()
    step = sp.zeros_like(params)
    step[i] = 1
    # Evaluate K at params -/+ h along coordinate i.
    self.setParams(params - h * step)
    K_minus = self.K()
    self.setParams(params + h * step)
    K_plus = self.K()
    # Restore the original parameters before returning.
    self.setParams(params)
    return (K_plus - K_minus) / (2 * h)
python
{ "resource": "" }
q40344
parallel_snr_func
train
def parallel_snr_func(num, binary_args, phenomdwave, signal_type,
                      noise_interpolants, prefactor, verbose):
    """SNR calculation with PhenomDWaveforms.

    Generates a PhenomD waveform and computes its SNR against every
    supplied sensitivity curve.

    Args:
        num (int): Process number (0 for a single process).
        binary_args (tuple): Binary arguments for the waveform call.
        phenomdwave (obj): Initialized PhenomDWaveforms instance.
        signal_type (list of str): SNR phases to compute: `all`, `ins`,
            `mrg`, and/or `rd`.
        noise_interpolants (dict): Noise interpolants keyed by
            sensitivity-curve name.
        prefactor (float): Prefactor multiplying SNR (not SNR^2).
        verbose (int): Notify each time ``verbose`` processes finish;
            -1 disables notifications.

    Returns:
        (dict): SNR output keyed by ``<curve>_<phase>``.
    """
    wave = phenomdwave(*binary_args)
    out_vals = {}
    for curve_name, interp in noise_interpolants.items():
        hn_vals = interp(wave.freqs)
        snr_out = csnr(wave.freqs, wave.hc, hn_vals, wave.fmrg, wave.fpeak,
                       prefactor=prefactor)
        # One requested phase keeps a flat key; several fan out.
        if len(signal_type) == 1:
            out_vals[curve_name + '_' + signal_type[0]] = snr_out[signal_type[0]]
        else:
            for phase in signal_type:
                out_vals[curve_name + '_' + phase] = snr_out[phase]
    if verbose > 0 and (num + 1) % verbose == 0:
        print('Process ', (num+1), 'is finished.')
    return out_vals
python
{ "resource": "" }
q40345
parallel_ecc_snr_func
train
def parallel_ecc_snr_func(num, binary_args, eccwave, signal_type, noise_interpolants,
                          prefactor, verbose):
    """SNR calculation with eccentric waveforms.

    Generates eccentric waveforms and integrates them against sensitivity curves.

    Args:
        num (int): Process number (0 if single process).
        binary_args (tuple): Arguments forwarded to ``eccwave``.
        eccwave (obj): Initialized EccentricBinaries generator.
        signal_type (list of str): `all` for quadrature sum of modes and/or
            `modes` for per-mode SNR.
        noise_interpolants (dict): Noise-curve interpolants keyed by name.
        prefactor (float): Multiplier applied to the SNR (not SNR^2).
        verbose (int): Print a note every ``verbose`` finished processes (-1 = quiet).

    Returns:
        (dict): SNR values keyed by ``<noise>_modes`` / ``<noise>_all``.
    """
    # np.trapz was renamed to np.trapezoid in NumPy 2.0 (trapz removed);
    # resolve whichever is available so this works on both versions.
    _trapz = getattr(np, 'trapezoid', None) or np.trapz
    wave = eccwave(*binary_args)
    out_vals = {}
    for key in noise_interpolants:
        hn_vals = noise_interpolants[key](wave.freqs)
        # SNR^2 = integral of h_c^2 / (f * h_n^2) df
        integrand = 1. / wave.freqs * (wave.hc**2 / hn_vals**2)
        snr_squared_out = _trapz(integrand, x=wave.freqs)
        if 'modes' in signal_type:
            out_vals[key + '_' + 'modes'] = prefactor * np.sqrt(snr_squared_out)
        if 'all' in signal_type:
            # quadrature sum over the mode axis
            out_vals[key + '_' + 'all'] = prefactor * np.sqrt(
                np.sum(snr_squared_out, axis=-1))
    if verbose > 0 and (num + 1) % verbose == 0:
        print('Process ', (num + 1), 'is finished.')
    return out_vals
python
{ "resource": "" }
q40346
snr
train
def snr(*args, **kwargs):
    """Compute the SNR of binaries.

    Takes binary parameters and sensitivity-curve settings and returns the SNR
    for the chosen phases.

    Warning:
        All binary parameters must be scalars, len-1 arrays, or arrays of one
        common length; arrays of different lengths cannot be mixed.

    Arguments:
        *args: Arguments for the waveform call.
        **kwargs: Keyword arguments for parallel generation, waveforms,
            or sensitivity information.

    Returns:
        (dict): Signal-to-Noise Ratio dictionary for requested phases.
    """
    lengths = []
    for arg in args:
        try:
            lengths.append(len(arg))
        except TypeError:
            pass
    max_length = max(lengths) if lengths else 0
    # all-scalar input: squeeze the singleton dimension out of the results
    squeeze = (max_length == 0)
    kwargs['length'] = max_length
    snr_main = SNR(**kwargs)
    out = snr_main(*args)
    if squeeze:
        return {key: np.squeeze(out[key]) for key in out}
    return out
python
{ "resource": "" }
q40347
InferentialParser.parse
train
def parse(self, args=None):
    '''Parse a list of arguments, returning a dict.

    Flags are only boolean if they are not followed by a non-flag argument.
    All positional arguments not associable with a flag are collected under
    the returned dictionary's `['_']` key.
    '''
    if args is None:
        import sys
        # skip the program name
        args = sys.argv[1:]
    opts = dict()
    # arglist yields (is_flag, name) pairs
    arglist = peekable(parse_tokens(args))
    for is_flag, name in arglist:
        if is_flag is not True:
            # plain positional argument
            opts.setdefault('_', []).append(name)
            continue
        # peek at the next token; (None, None) means the stream is exhausted
        next_is_flag, next_name = arglist.peek(default=(None, None))
        if next_is_flag is False:
            # flag followed by a value: attach it and consume the token
            opts[name] = next_name
            arglist.next()
        else:
            # trailing flag, or flag followed by another flag: boolean True
            opts[name] = True
    return opts
python
{ "resource": "" }
q40348
sq_dist
train
def sq_dist(X1, X2=None):
    """Return the matrix of all pairwise squared Euclidean distances.

    Args:
        X1: (n, d) array of row vectors.
        X2: (m, d) array of row vectors; defaults to ``X1``.

    Returns:
        (n, m) array with entry [i, j] = ||X1[i] - X2[j]||^2.
    """
    # bug fix: `X2 == None` compares elementwise on arrays; use `is None`
    if X2 is None:
        X2 = X1
    assert X1.shape[1] == X2.shape[1], 'dimensions do not match'
    # (x - y)^2 = x^2 + y^2 - 2xy, broadcast over all pairs
    # (replaces broken, unused sp.reshape calls and sp.tile-based expansion)
    K = (X1 ** 2).sum(1)[:, None] + (X2 ** 2).sum(1)[None, :] - 2 * X1.dot(X2.T)
    return K
python
{ "resource": "" }
q40349
Bond.setdbo
train
def setdbo(self, bond1, bond2, dboval): """Set the double bond orientation for bond1 and bond2 based on this bond""" # this bond must be a double bond if self.bondtype != 2: raise FrownsError("To set double bond order, center bond must be double!") assert dboval in [DX_CHI_CIS, DX_CHI_TRANS, DX_CHI_NO_DBO], "bad dboval value" self.dbo.append(bond1, bond2, dboval)
python
{ "resource": "" }
q40350
save_segments
train
def save_segments(outfile, boundaries, beat_intervals, labels=None):
    """Write detected segments to a .lab file as start/end/label rows.

    :parameters:
        - outfile : str
            Path to the output file.
        - boundaries : list of int
            Beat indices of detected segment boundaries.
        - beat_intervals : np.ndarray [shape=(n, 2)]
            Start/end times of each beat.
        - labels : None or list of str
            Segment labels; auto-generated ``Seg#NNN`` names when None.
    """
    if labels is None:
        labels = ['Seg#%03d' % i for i in range(1, len(boundaries))]
    # segment start times come from the boundary beats; the final end time
    # is the end of the last beat interval
    times = [beat_intervals[b, 0] for b in boundaries[:-1]]
    times.append(beat_intervals[-1, -1])
    rows = zip(times[:-1], times[1:], labels)
    with open(outfile, 'w') as fh:
        for start, end, lab in rows:
            fh.write('%.3f\t%.3f\t%s\n' % (start, end, lab))
python
{ "resource": "" }
q40351
reverse
train
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None):
    """Locale-aware wrapper around Django's ``reverse``.

    When a URL prefixer is active, the resolved URL gets the correct locale
    prefix prepended; the result is always escaped to an ASCII-safe URI.
    """
    active_prefixer = get_url_prefix()
    if active_prefixer:
        prefix = prefix or '/'
    resolved = django_reverse(viewname, urlconf, args, kwargs, prefix)
    if active_prefixer:
        resolved = active_prefixer.fix(resolved)
    # escape any unicode characters in the URL
    return iri_to_uri(resolved)
python
{ "resource": "" }
q40352
Prefixer.get_language
train
def get_language(self):
    """Return a supported locale code for this request.

    Precedence: an explicit ``?lang=`` query parameter, then the user's
    Accept-Language header, then ``settings.LANGUAGE_CODE``. This mostly
    follows the RFCs, but read bug 439568 for details.
    """
    if 'lang' in self.request.GET:
        candidate = self.request.GET['lang'].lower()
        if candidate in settings.LANGUAGE_URL_MAP:
            return settings.LANGUAGE_URL_MAP[candidate]
    accept = self.request.META.get('HTTP_ACCEPT_LANGUAGE')
    if accept:
        best = self.get_best_language(accept)
        if best:
            return best
    return settings.LANGUAGE_CODE
python
{ "resource": "" }
q40353
Prefixer.get_best_language
train
def get_best_language(self, accept_lang):
    """Given an Accept-Language header, return the best-matching language.

    Returns None when nothing matches or the header cannot be parsed.
    """
    LUM = settings.LANGUAGE_URL_MAP
    # bug fix: `dict(a.items() + b.items())` is Python-2 only (dict views
    # cannot be concatenated on Python 3); build the merged dict explicitly.
    # CANONICAL_LOCALES entries override LUM entries, as before.
    langs = dict(LUM)
    langs.update(settings.CANONICAL_LOCALES)
    # Add missing short locales to the list. This will automatically map
    # en to en-GB (not en-US), es to es-AR (not es-ES), etc. in alphabetical
    # order. To override this behavior, explicitly define a preferred locale
    # map with the CANONICAL_LOCALES setting.
    langs.update((k.split('-')[0], v) for k, v in LUM.items()
                 if k.split('-')[0] not in langs)
    try:
        ranked = parse_accept_lang_header(accept_lang)
    except ValueError:
        # see https://code.djangoproject.com/ticket/21078
        return
    for lang, _ in ranked:
        lang = lang.lower()
        if lang in langs:
            return langs[lang]
        # fall back to the short (language-only) form, e.g. "en" for "en-us"
        pre = lang.split('-')[0]
        if pre in langs:
            return langs[pre]
python
{ "resource": "" }
q40354
extensions
train
def extensions():
    """Returns list of `cython` extensions for `lazy_cythonize`.

    Imports numpy and Cython lazily so setup.py can be parsed without them.
    """
    import numpy
    from Cython.Build import cythonize
    numutils = Extension(
        'phydmslib.numutils',
        ['phydmslib/numutils.pyx'],
        include_dirs=[numpy.get_include()],
        extra_compile_args=['-Wno-unused-function'])
    return cythonize([numutils])
python
{ "resource": "" }
q40355
plot_main
train
def plot_main(pid, return_fig_ax=False):
    """Main function for creating these plots.

    Reads in plot info dict from json file or dictionary in script.

    Args:
        pid (dict or PlotInput): Plot input information; a PlotInput is
            converted to its dict form before use.
        return_fig_ax (bool, optional): Return figure and axes objects.

    Returns:
        2-element tuple containing

        - **fig** (*obj*): Figure object for customization outside of those
          in this program.
        - **ax** (*obj*): Axes object for customization outside of those
          in this program.
    """
    # module-level defaults are (re)set on every call
    global WORKING_DIRECTORY, SNR_CUT
    if isinstance(pid, PlotInput):
        pid = pid.return_dict()
    WORKING_DIRECTORY = '.'
    if 'WORKING_DIRECTORY' not in pid['general'].keys():
        # default to the current directory when the input omits it
        pid['general']['WORKING_DIRECTORY'] = '.'
    SNR_CUT = 5.0
    if 'SNR_CUT' not in pid['general'].keys():
        pid['general']['SNR_CUT'] = SNR_CUT
    if "switch_backend" in pid['general'].keys():
        plt.switch_backend(pid['general']['switch_backend'])
    # flatten the nested input dict into keyword arguments
    running_process = MakePlotProcess(
        **{**pid, **pid['general'], **pid['plot_info'], **pid['figure']})
    running_process.input_data()
    running_process.setup_figure()
    running_process.create_plots()
    # save or show figure
    if 'save_figure' in pid['figure'].keys():
        if pid['figure']['save_figure'] is True:
            running_process.fig.savefig(
                pid['general']['WORKING_DIRECTORY'] + '/'
                + pid['figure']['output_path'],
                **pid['figure']['savefig_kwargs'])
    if 'show_figure' in pid['figure'].keys():
        if pid['figure']['show_figure'] is True:
            plt.show()
    if return_fig_ax is True:
        return running_process.fig, running_process.ax
    return
python
{ "resource": "" }
q40356
execute
train
def execute(connection: connection, statement: str) -> Optional[List[Tuple[str, ...]]]:
    """Execute PGSQL statement and fetches the statement response.

    Parameters
    ----------
    connection: psycopg2.extensions.connection
        Active connection to a PostGreSQL database.
    statement: str
        PGSQL statement to run against the database.

    Returns
    -------
    response: list or None
        List of tuples, where each tuple represents a formatted line of
        response from the database engine, where each tuple item roughly
        corresponds to a column. For instance, while a raw SELECT response
        might include the table headers, psycopg2 returns only the rows
        that matched. If no response was given, None is returned.
    """
    response = list()  # type: List
    # See the following link for reasoning behind both with statements:
    # http://initd.org/psycopg/docs/usage.html#with-statement
    #
    # Additionally, the with statement makes this library safer to use with
    # higher-level libraries (e.g. SQLAlchemy) that don't inherently respect
    # PostGreSQL's autocommit isolation-level, since the transaction is
    # properly completed for each statement.
    with connection:
        with connection.cursor(cursor_factory=Psycopg2Cursor) as cursor:
            cursor.execute(statement)
            connection.commit()
            # Get response
            try:
                response = cursor.fetchall()
                if not response:
                    # Empty response list
                    log('<No Response>', logger_name=_LOGGER_NAME)
                    return None
            except ProgrammingError as e:
                # psycopg2 raises ProgrammingError on fetchall() when the
                # statement produced no result set (e.g. DDL/INSERT)
                if e.args and e.args[0] == 'no results to fetch':
                    # No response available (i.e. no response given)
                    log('<No Response>', logger_name=_LOGGER_NAME)
                    return None
                # Some other programming error; re-raise
                raise e
    log('Response', logger_name=_LOGGER_NAME)
    log('--------', logger_name=_LOGGER_NAME)
    for line in response:
        log(str(line), logger_name=_LOGGER_NAME)
    return response
python
{ "resource": "" }
q40357
human_size
train
def human_size(size):
    """Return a human-readable representation of a byte size.

    @param size: Number of bytes as an integer or string.
    @return: String of length 10 with the formatted result.
    """
    if isinstance(size, string_types):
        size = int(size, 10)
    if size < 0:
        return "-??? bytes"
    if size < 1024:
        return "%4d bytes" % size
    value = size
    for unit in ("KiB", "MiB", "GiB"):
        value /= 1024.0
        if value < 1024:
            return "%6.1f %s" % (value, unit)
    # anything >= 1024 GiB is still reported in GiB
    return "%6.1f GiB" % value
python
{ "resource": "" }
q40358
iso_datetime
train
def iso_datetime(timestamp=None):
    """Convert UNIX timestamp to ISO datetime string.

    @param timestamp: UNIX epoch value (default: the current time).
    @return: Timestamp formatted as "YYYY-mm-dd HH:MM:SS".
    """
    when = time.time() if timestamp is None else timestamp
    # isoformat(' ') gives "YYYY-mm-dd HH:MM:SS.ffffff"; keep only 19 chars
    return datetime.datetime.fromtimestamp(when).isoformat(' ')[:19]
python
{ "resource": "" }
q40359
human_duration
train
def human_duration(time1, time2=None, precision=0, short=False):
    """
    Return a human-readable representation of a time delta.

    @param time1: Relative time value.
    @param time2: Time base (C{None} for now; 0 for a duration in C{time1}).
    @param precision: How many time units to return (0 = all).
    @param short: Use abbreviations, and right-justify the result to always
        the same length.
    @return: Formatted duration.
    """
    if time2 is None:
        time2 = time.time()
    duration = (time1 or 0) - time2
    # direction suffix: past, future, or nothing for a plain duration
    direction = (
        " ago" if duration < 0
        else ("+now" if short else " from now") if time2
        else ""
    )
    duration = abs(duration)
    # split into (unit-name, value) pairs from weeks down to seconds
    parts = [
        ("weeks", duration // (7*86400)),
        ("days", duration // 86400 % 7),
        ("hours", duration // 3600 % 24),
        ("mins", duration // 60 % 60),
        ("secs", duration % 60),
    ]
    # Kill leading zero parts
    while len(parts) > 1 and parts[0][1] == 0:
        parts = parts[1:]
    # Limit to # of parts given by precision
    if precision:
        parts = parts[:precision]
    # number formats indexed by [short][is-not-first-part]
    numfmt = ("%d", "%d"), ("%4d", "%2d")
    # unit format: first letter only when short, full name otherwise
    fmt = "%1.1s" if short else " %s"
    sep = " " if short else ", "
    result = sep.join((numfmt[bool(short)][bool(idx)] + fmt) % (
            val, key[:-1] if val == 1 else key)  # singularize when value is 1
        for idx, (key, val) in enumerate(parts)
        if val #or (short and precision)
    ) + direction
    if not time1:
        # missing base value: report "never" (or "N/A" for pure durations)
        result = "never" if time2 else "N/A"
    if precision and short:
        # fixed-width output: 4 chars per unit plus direction suffix
        return result.rjust(1 + precision*4 + (4 if time2 else 0))
    else:
        return result
python
{ "resource": "" }
q40360
to_unicode
train
def to_unicode(text):
    """Return ``text`` decoded to unicode; falsy values are returned untouched."""
    if not text or isinstance(text, unicode if PY2 else str):
        return text
    # try UTF-8 first, then Windows Latin-1
    for encoding in ("UTF-8", "CP1252"):
        try:
            return text.decode(encoding)
        except UnicodeError:
            continue
    # give up, return the byte string in the hope things work out
    return text
python
{ "resource": "" }
q40361
to_utf8
train
def to_utf8(text): """ Enforce UTF8 encoding. """ # return empty/false stuff unaltered if not text: if isinstance(text, string_types): text = "" return text try: # Is it a unicode string, or pure ascii? return text.encode("utf8") except UnicodeDecodeError: try: # Is it a utf8 byte string? if text.startswith(codecs.BOM_UTF8): text = text[len(codecs.BOM_UTF8):] return text.decode("utf8").encode("utf8") except UnicodeDecodeError: # Check BOM if text.startswith(codecs.BOM_UTF16_LE): encoding = "utf-16le" text = text[len(codecs.BOM_UTF16_LE):] elif text.startswith(codecs.BOM_UTF16_BE): encoding = "utf-16be" text = text[len(codecs.BOM_UTF16_BE):] else: # Assume CP-1252 encoding = "cp1252" try: return text.decode(encoding).encode("utf8") except UnicodeDecodeError as exc: for line in text.splitlines(): try: line.decode(encoding).encode("utf8") except UnicodeDecodeError: log.warn("Cannot transcode the following into UTF8 cause of %s: %r" % (exc, line)) break return text
python
{ "resource": "" }
q40362
MeanKronSum.setDesigns
train
def setDesigns(self, F, A):
    """Set the fixed-effect designs (row designs F, column designs A)."""
    F = to_list(F)
    A = to_list(A)
    assert len(A) == len(F), 'MeanKronSum: A and F must have same length!'
    n_terms = len(F)
    # validate shapes term by term before accepting the designs
    for Fi, Ai in zip(F, A):
        assert Fi.shape[0] == self._N, 'MeanKronSum: Dimension mismatch'
        assert Ai.shape[1] == self._P, 'MeanKronSum: Dimension mismatch'
    # total number of covariates and the per-side column/row counts
    n_covs = sum(Fi.shape[1] * Ai.shape[0] for Fi, Ai in zip(F, A))
    k = sum(Fi.shape[1] for Fi in F)
    l = sum(Ai.shape[0] for Ai in A)
    self._n_terms = n_terms
    self._n_covs = n_covs
    self._k = k
    self._l = l
    self._F = F
    self._A = A
    self._b = sp.zeros((n_covs, 1))
    # invalidate cached quantities that depend on the designs
    self.clear_cache('predict_in_sample', 'Yres', 'designs')
    self._notify('designs')
    self._notify()
python
{ "resource": "" }
q40363
toRanks
train
def toRanks(A):
    """Convert each column of A to 0-based integer ranks.

    Args:
        A: (n, m) array; each column is ranked independently.

    Returns:
        (n, m) integer array of ranks in 0 .. n-1.
    """
    # SciPy's top-level NumPy aliases (sp.zeros_like, sp.array, sp.around)
    # were removed in modern SciPy; use numpy directly.
    import numpy as np
    AA = np.zeros_like(A)
    for i in range(A.shape[1]):
        AA[:, i] = st.rankdata(A[:, i])
    AA = np.array(np.around(AA), dtype="int") - 1
    return AA
python
{ "resource": "" }
q40364
regressOut
train
def regressOut(Y, X, return_b=False):
    """Regress the fixed effects X out of Y.

    Returns the residuals; with ``return_b=True`` also returns the fitted
    regression weights.
    """
    # least-squares weights via the pseudo-inverse
    weights = la.pinv(X).dot(Y)
    residual = Y - X.dot(weights)
    if return_b:
        return residual, weights
    return residual
python
{ "resource": "" }
q40365
remove_dependent_cols
train
def remove_dependent_cols(M, tol=1e-6, display=False):
    """Return a copy of M with linearly dependent columns removed.

    Dependence is detected from near-zero diagonal entries of the R factor
    of a QR decomposition.

    Args:
        M: (n, m) matrix.
        tol: threshold on |diag(R)| below which a column is considered
            dependent.
        display: when True, print which columns were dropped.
    """
    R = la.qr(M, mode='r')[0][:M.shape[1], :]
    keep = (abs(R.diagonal()) > tol)
    drop = ~keep
    # bug fix: column removal was previously nested under `display`, so
    # dependent columns were only removed when display=True; removal and
    # printing are now independent. Also replaces removed SciPy aliases
    # sp.any/sp.where with ndarray methods.
    if drop.any():
        if display:
            print(('cols ' + str(drop.nonzero()[0]) +
                   ' have been removed because linearly dependent on the others'))
        out = M[:, keep]
    else:
        out = M.copy()
    return out
python
{ "resource": "" }
q40366
bias
train
def bias(mass, z, h=h, Om_M=Om_M, Om_L=Om_L):
    """Calculate halo bias, from Seljak & Warren 2004.

    Parameters
    ----------
    mass : ndarray or float
        Halo mass to calculate bias for.
    z : ndarray or float
        Halo z, same type and size as mass.
    h : float, optional
        Hubble parameter, defaults to astropy.cosmology.Planck13.h
    Om_M : float, optional
        Fractional matter density, defaults to astropy.cosmology.Planck13.Om0
    Om_L : float, optional
        Fractional dark energy density, defaults to 1-Om_M.

    Returns
    ----------
    ndarray or float
        Halo bias, same type and size as the input mass and z, calculated
        from the Seljak & Warren 2004 z = 0 fit with the non-linear mass
        rescaled to the halo redshift.

    References
    ----------
    U. Seljak and M.S. Warren, "Large-scale bias and stochasticity of
    haloes and dark matter," MNRAS 355, 129-136 (2004).
    """
    # non-linear mass today [M_sun], rescaled to the lens redshift
    M_nl = ((8.73 / h) * 1.e12) * (Om_M + Om_L / ((1. + z) ** 3.))
    x = mass / M_nl
    return 0.53 + 0.39 * x ** 0.45 + 0.13 / (40. * x + 1.) + 5.e-4 * x ** 1.5
python
{ "resource": "" }
q40367
Confirmation.is_timeout
train
def is_timeout(self):
    '''Return True when more than ``self.timeout`` seconds have elapsed
    since ``self.init_time``.
    '''
    elapsed = datetime.datetime.now() - self.init_time
    return elapsed > datetime.timedelta(seconds=self.timeout)
python
{ "resource": "" }
q40368
improvise
train
def improvise(oracle, seq_len, k=1, LRS=0, weight=None, continuity=1):
    """
    Given an oracle and length, generate an improvised sequence of the given length.

    :param oracle: an indexed vmo object
    :param seq_len: the length of the returned improvisation sequence
    :param k: the starting improvisation time step in oracle
    :param LRS: the length of minimum longest repeated suffixes allowed to jump
    :param weight:
        if None, jump to possible candidate time step uniformly, if "lrs",
        the probability is proportional to the LRS of each candidate time step
    :param continuity: the number of time steps guaranteed to continue before
        next jump is executed
    :return: the improvised sequence
    """
    s = []
    # play forward for `continuity` steps before the first jump, if room allows
    if k + continuity < oracle.n_states - 1:
        s.extend(range(k, k + continuity))
        k = s[-1]
        seq_len -= continuity
    while seq_len > 0:
        # one (possibly jumping) improvisation step from state k
        s.append(improvise_step(oracle, k, LRS, weight))
        k = s[-1]
        # advance, wrapping back to the start when the oracle end is reached
        if k + 1 < oracle.n_states - 1:
            k += 1
        else:
            k = 1
        # after each step, continue linearly for `continuity` states if possible
        if k + continuity < oracle.n_states - 1:
            s.extend(range(k, k + continuity))
            seq_len -= continuity
            k = s[-1]
        seq_len -= 1
    return s
python
{ "resource": "" }
q40369
_make_win
train
def _make_win(n, mono=False): """ Generate a window for a given length. :param n: an integer for the length of the window. :param mono: True for a mono window, False for a stereo window. :return: an numpy array containing the window value. """ if mono: win = np.hanning(n) + 0.00001 else: win = np.array([np.hanning(n) + 0.00001, np.hanning(n) + 0.00001]) win = np.transpose(win) return win
python
{ "resource": "" }
q40370
Walker.filter
train
def filter(self, node, condition):
    """Yield every descendant of ``node`` (depth-first, pre-order) for which
    ``condition`` returns a truthy value.
    """
    if not isinstance(node, Node):
        raise TypeError('not a node')
    for child in node:
        if condition(child):
            yield child
        # recurse into the child's subtree
        yield from self.filter(child, condition)
python
{ "resource": "" }
q40371
Walker.extract
train
def extract(self, node, condition, skip=0):
    """Return the first node matching ``condition`` after skipping ``skip``
    matches; raise TypeError when there is no such match.
    """
    for idx, match in enumerate(self.filter(node, condition)):
        if idx == skip:
            return match
    raise TypeError('no match found')
python
{ "resource": "" }
q40372
ReprWalker.walk
train
def walk(
        self, node, omit=(
            'lexpos', 'lineno', 'colno', 'rowno'),
        indent=0, depth=-1, pos=False, _level=0):
    """
    Accepts the standard node argument, along with an optional omit
    flag - it should be an iterable that lists out all attributes
    that should be omitted from the repr output.

    indent > 0 pretty-prints nested nodes; depth limits recursion
    (-1 = unlimited); pos prepends an @line:col marker; _level is the
    internal recursion depth used for indentation.
    """
    if not depth:
        # depth budget exhausted: elide the subtree
        return '<%s ...>' % node.__class__.__name__
    attrs = []
    children = node.children()
    # ids of children not yet accounted for by a named attribute
    ids = {id(child) for child in children}
    indentation = ' ' * (indent * (_level + 1))
    header = '\n' + indentation if indent else ''
    joiner = ',\n' + indentation if indent else ', '
    tailer = '\n' + ' ' * (indent * _level) if indent else ''
    for k, v in vars(node).items():
        if k.startswith('_'):
            continue
        if id(v) in ids:
            ids.remove(id(v))
        if isinstance(v, Node):
            attrs.append((k, self.walk(
                v, omit, indent, depth - 1, pos, _level)))
        elif isinstance(v, list):
            items = []
            for i in v:
                if id(i) in ids:
                    ids.remove(id(i))
                items.append(self.walk(
                    i, omit, indent, depth - 1, pos, _level + 1))
            attrs.append(
                (k, '[' + header + joiner.join(items) + tailer + ']'))
        else:
            attrs.append((k, repr_compat(v)))
    if ids:
        # for unnamed child nodes.
        attrs.append(('?children', '[' + header + joiner.join(
            self.walk(child, omit, indent, depth - 1, pos, _level + 1)
            for child in children if id(child) in ids) + tailer + ']'))
    position = ('@%s:%s ' % (
        '?' if node.lineno is None else node.lineno,
        '?' if node.colno is None else node.colno,
    ) if pos else '')
    omit_keys = () if not omit else set(omit)
    return '<%s %s%s>' % (node.__class__.__name__, position, ', '.join(
        '%s=%s' % (k, v) for k, v in sorted(attrs)
        if k not in omit_keys
    ))
python
{ "resource": "" }
q40373
_prune_edges
train
def _prune_edges(G, X, traj_lengths, pruning_thresh=0.1, verbose=False):
    '''Prune edges in graph G via cosine distance with trajectory edges.

    X holds the vertex coordinates, concatenated trajectory by trajectory;
    traj_lengths gives the number of points in each trajectory. An edge is
    removed when its direction deviates (cosine distance > pruning_thresh)
    from the local trajectory direction at its source vertex.
    '''
    W = G.matrix('dense', copy=True)
    degree = G.degree(kind='out', weighted=False)
    i = 0
    num_bad = 0
    for n in traj_lengths:
        # graph edges leaving the first n-1 points of this trajectory
        s, t = np.nonzero(W[i:i+n-1])
        graph_edges = X[t] - X[s+i]
        # consecutive-point directions, repeated once per outgoing edge
        traj_edges = np.diff(X[i:i+n], axis=0)
        traj_edges = np.repeat(traj_edges, degree[i:i+n-1], axis=0)
        theta = paired_distances(graph_edges, traj_edges, 'cosine')
        bad_edges = theta > pruning_thresh
        s, t = s[bad_edges], t[bad_edges]
        if verbose:  # pragma: no cover
            num_bad += np.count_nonzero(W[s,t])
        # zero out the offending edge weights
        W[s,t] = 0
        i += n
    if verbose:  # pragma: no cover
        print('removed %d bad edges' % num_bad)
    return Graph.from_adj_matrix(W)
python
{ "resource": "" }
q40374
Waterfall.make_plot
train
def make_plot(self): """This method creates the waterfall plot. """ # sets levels of main contour plot colors1 = ['None', 'darkblue', 'blue', 'deepskyblue', 'aqua', 'greenyellow', 'orange', 'red', 'darkred'] if len(self.contour_vals) > len(colors1) + 1: raise AttributeError("Reduce number of contours.") # produce filled contour of SNR sc = self.axis.contourf(self.xvals[0], self.yvals[0], self.zvals[0], levels=np.asarray(self.contour_vals), colors=colors1) self.colorbar.setup_colorbars(sc) # check for user desire to show separate contour line if self.snr_contour_value is not None: self.axis.contour(self.xvals[0], self.yvals[0], self.zvals[0], np.array([self.snr_contour_value]), colors='white', linewidths=1.5, linestyles='dashed') return
python
{ "resource": "" }
q40375
Ratio.make_plot
train
def make_plot(self): """Creates the ratio plot. """ # sets colormap for ratio comparison plot cmap = getattr(cm, self.colormap) # set values of ratio comparison contour normval = 2.0 num_contours = 40 # must be even levels = np.linspace(-normval, normval, num_contours) norm = colors.Normalize(-normval, normval) # find Loss/Gain contour and Ratio contour self.set_comparison() diff_out, loss_gain_contour = self.find_difference_contour() cmap.set_bad(color='white', alpha=0.001) # plot ratio contours sc = self.axis.contourf(self.xvals[0], self.yvals[0], diff_out, levels=levels, norm=norm, extend='both', cmap=cmap) self.colorbar.setup_colorbars(sc) # toggle line contours of orders of magnitude of ratio comparisons if self.order_contour_lines: self.axis.contour(self.xvals[0], self.yvals[0], diff_out, np.array( [-2.0, -1.0, 1.0, 2.0]), colors='black', linewidths=1.0) # plot loss gain contour if self.loss_gain_status is True: # if there is no loss/gain contours, this will produce an error, # so we catch the exception. try: # make hatching cs = self.axis.contourf(self.xvals[0], self.yvals[0], loss_gain_contour, levels=[-2, -0.5, 0.5, 2], colors='none', hatches=['x', None, '+']) # make loss/gain contour outline self.axis.contour(self.xvals[0], self.yvals[0], loss_gain_contour, 3, colors='black', linewidths=2) except ValueError: pass if self.add_legend: loss_patch = Patch(fill=None, label='Loss', hatch='x', linestyle='--', linewidth=2) gain_patch = Patch(fill=None, label='Gain', hatch='+', linestyle='-', linewidth=2) legend = self.axis.legend(handles=[loss_patch, gain_patch], **self.legend_kwargs) return
python
{ "resource": "" }
q40376
Ratio.set_comparison
train
def set_comparison(self):
    """Select the two SNR grids to compare for the ratio plot.

    Split out as its own method so subclasses can easily override which
    grids are compared.
    """
    self.comp1, self.comp2 = self.zvals[0], self.zvals[1]
    return
python
{ "resource": "" }
q40377
Horizon.make_plot
train
def make_plot(self):
    """Make the horizon plot.

    Draws one solid contour line per SNR grid at the chosen SNR threshold
    (default ``self.SNR_CUT``), each in its own color, with an optional
    legend.
    """
    self.get_contour_values()
    # sets levels of main contour plot
    colors1 = ['blue', 'green', 'red', 'purple', 'orange',
               'gold', 'magenta']
    # set contour value. Default is SNR_CUT.
    self.snr_contour_value = (self.SNR_CUT if self.snr_contour_value is None
                              else self.snr_contour_value)
    # plot contours
    for j in range(len(self.zvals)):
        hz = self.axis.contour(self.xvals[j], self.yvals[j],
                               self.zvals[j],
                               np.array([self.snr_contour_value]),
                               colors=colors1[j], linewidths=1.,
                               linestyles='solid')
        # plot invisible lines for purpose of creating a legend
        if self.legend_labels != []:
            # plot a curve off of the grid with same color for legend label.
            self.axis.plot([0.1, 0.2], [0.1, 0.2],
                           color=colors1[j],
                           label=self.legend_labels[j])
    if self.add_legend:
        self.axis.legend(**self.legend_kwargs)
    return
python
{ "resource": "" }
q40378
AnalysisMixin.bandwidth
train
def bandwidth(self):
    """Computes the 'bandwidth' of a graph: the largest |i - j| over all
    edges (i, j) in index order."""
    pairs = self.pairs()
    return np.abs(pairs[:, 0] - pairs[:, 1]).max()
python
{ "resource": "" }
q40379
AnalysisMixin.profile
train
def profile(self):
    """Measure of bandedness, also known as 'envelope size': for each
    column, the distance from its first nonzero row to the diagonal,
    summed over all columns."""
    adj = self.matrix('dense').astype(bool)
    first_row = np.argmax(adj, axis=0)
    return (np.arange(self.num_vertices()) - first_row).sum()
python
{ "resource": "" }
q40380
AnalysisMixin.eccentricity
train
def eccentricity(self, directed=None, weighted=None):
    '''Maximum shortest-path distance from each vertex to any other vertex.'''
    dist = self.shortest_path(directed=directed, weighted=weighted)
    return dist.max(axis=0)
python
{ "resource": "" }
q40381
LabelMixin.color_greedy
train
def color_greedy(self):
    '''Returns a greedy vertex coloring as an array of ints.

    Vertices are visited in index order; each takes the smallest positive
    color not used by its (already processed) neighbors.
    '''
    num = self.num_vertices()
    coloring = np.zeros(num, dtype=int)
    for vertex, nbrs in enumerate(self.adj_list()):
        taken = set(coloring[nbrs])
        color = 1
        while color in taken:
            color += 1
        coloring[vertex] = color
    return coloring
python
{ "resource": "" }
q40382
LabelMixin.bicolor_spectral
train
def bicolor_spectral(self):
    '''Returns an approximate 2-coloring as an array of booleans, taken from
    the sign pattern of the largest-magnitude Laplacian eigenvector.

    From "A Multiscale Pyramid Transform for Graph Signals" by Shuman et al.

    Note: Assumes a single connected component, and may fail otherwise.
    '''
    lap = self.laplacian().astype(float)
    _, vecs = eigs(lap, k=1, which='LM')
    lead = vecs[:, 0].real
    # the eigenvector's overall sign is arbitrary; pick the orientation
    # that makes the result deterministic
    if lead[0] > 0:
        return lead > 0
    return lead < 0
python
{ "resource": "" }
q40383
LabelMixin.classify_nearest
train
def classify_nearest(self, partial_labels):
    '''Simple semi-supervised classification: each unlabeled vertex takes
    the label of its nearest labeled vertex (weighted geodesic distance).

    partial_labels: (n,) array of integer labels, -1 for unlabeled.
    '''
    labels = np.array(partial_labels, copy=True)
    mask = labels == -1
    # geodesic distances from each unlabeled vertex to every vertex
    dists = self.shortest_path(weighted=True)[mask]
    # never match against another unlabeled vertex
    dists[:, mask] = np.inf
    nearest = dists.argmin(axis=1)
    labels[mask] = labels[nearest]
    return labels
python
{ "resource": "" }
q40384
LabelMixin.classify_lgc
train
def classify_lgc(self, partial_labels, kernel='rbf', alpha=0.2, tol=1e-3, max_iter=30): '''Iterative label spreading for semi-supervised classification. partial_labels: (n,) array of integer labels, -1 for unlabeled. kernel: one of {'none', 'rbf', 'binary'}, for reweighting edges. alpha: scalar, clamping factor. tol: scalar, convergence tolerance. max_iter: integer, cap on the number of iterations performed. From "Learning with local and global consistency" by Zhou et al. in 2004. Based on the LabelSpreading implementation in scikit-learn. ''' # compute the gram matrix gram = -self.kernelize(kernel).laplacian(normed=True) if ss.issparse(gram): gram.data[gram.row == gram.col] = 0 else: np.fill_diagonal(gram, 0) # initialize label distributions partial_labels = np.asarray(partial_labels) unlabeled = partial_labels == -1 label_dists, classes = _onehot(partial_labels, mask=~unlabeled) # initialize clamping terms clamp_weights = np.where(unlabeled, alpha, 1)[:,None] y_static = label_dists * min(1 - alpha, 1) # iterate for it in range(max_iter): old_label_dists = label_dists label_dists = gram.dot(label_dists) label_dists *= clamp_weights label_dists += y_static # check convergence if np.abs(label_dists - old_label_dists).sum() <= tol: break else: warnings.warn("classify_lgc didn't converge in %d iterations" % max_iter) return classes[label_dists.argmax(axis=1)]
python
{ "resource": "" }
q40385
LabelMixin.classify_harmonic
train
def classify_harmonic(self, partial_labels, use_CMN=True): '''Harmonic function method for semi-supervised classification, also known as the Gaussian Mean Fields algorithm. partial_labels: (n,) array of integer labels, -1 for unlabeled. use_CMN : when True, apply Class Mass Normalization From "Semi-Supervised Learning Using Gaussian Fields and Harmonic Functions" by Zhu, Ghahramani, and Lafferty in 2003. Based on the matlab code at: http://pages.cs.wisc.edu/~jerryzhu/pub/harmonic_function.m ''' # prepare labels labels = np.array(partial_labels, copy=True) unlabeled = labels == -1 # convert known labels to one-hot encoding fl, classes = _onehot(labels[~unlabeled]) L = self.laplacian(normed=False) if ss.issparse(L): L = L.tocsr()[unlabeled].toarray() else: L = L[unlabeled] Lul = L[:,~unlabeled] Luu = L[:,unlabeled] fu = -np.linalg.solve(Luu, Lul.dot(fl)) if use_CMN: scale = (1 + fl.sum(axis=0)) / fu.sum(axis=0) fu *= scale # assign new labels labels[unlabeled] = classes[fu.argmax(axis=1)] return labels
python
{ "resource": "" }
q40386
_checkParam
train
def _checkParam(param, value, paramlimits, paramtypes): """Checks if `value` is allowable value for `param`. Raises except if `value` is not acceptable, otherwise returns `None` if value is acceptable. `paramlimits` and `paramtypes` are the `PARAMLIMITS` and `PARAMTYPES` attributes of a `Model`. """ assert param in paramlimits, "Invalid param: {0}".format(param) (lowlim, highlim) = paramlimits[param] paramtype = paramtypes[param] if isinstance(paramtype, tuple): (paramtype, paramshape) = paramtype if not (isinstance(value, paramtype)): raise ValueError("{0} must be {1}, not {2}".format( param, paramtype, type(param))) if value.shape != paramshape: raise ValueError("{0} must have shape {1}, not {2}".format( param, paramshape, value.shape)) if value.dtype != 'float': raise ValueError("{0} must have dtype float, not {1}".format( param, value.dtype)) if not ((lowlim <= value).all() and (value <= highlim).all()): raise ValueError("{0} must be >= {1} and <= {2}, not {3}".format( param, lowlim, highlim, value)) else: if not isinstance(value, paramtype): raise ValueError("{0} must be a {1}, not a {2}".format( param, paramtype, type(value))) if not (lowlim <= value <= highlim): raise ValueError("{0} must be >= {1} and <= {2}, not {3}".format( param, lowlim, highlim, value))
python
{ "resource": "" }
q40387
DiscreteGamma
train
def DiscreteGamma(alpha, beta, ncats):
    """Returns category means for discretized gamma distribution.

    The gamma distribution with shape `alpha` and inverse scale `beta`
    is evenly divided into `ncats` categories of equal probability
    mass, and the mean of each category is returned.

    Args:
        `alpha` (`float` > 0)
            Shape parameter of gamma distribution.
        `beta` (`float` > 0)
            Inverse scale parameter of gamma distribution.
        `ncats` (`int` > 1)
            Number of categories in discretized gamma distribution.

    Returns:
        `catmeans` (array of `float`, shape `(ncats,)`)
            `catmeans[k]` is the mean of category `k` where `0 <= k < ncats`.

    Check that we get values in Figure 1 of Yang, J Mol Evol, 39:306-314

    >>> catmeans = DiscreteGamma(0.5, 0.5, 4)
    >>> expected = [0.0334, 0.2519, 0.8203, 2.8944]
    >>> all(abs(m - e) < 1e-4 for (m, e) in zip(catmeans, expected))
    True

    Make sure we get expected mean of alpha / beta

    >>> catmeans = DiscreteGamma(0.6, 0.8, 6)
    >>> bool(abs(catmeans.sum() / 6 - 0.6 / 0.8) < 1e-8)
    True
    """
    # local import: the deprecated numpy aliases in the main scipy
    # namespace (scipy.ndarray, scipy.allclose) were removed upstream,
    # so use numpy directly for array creation and comparison
    import numpy
    alpha = float(alpha)
    beta = float(beta)
    assert alpha > 0
    assert beta > 0
    assert ncats > 1
    scale = 1.0 / beta
    catmeans = numpy.empty(ncats, dtype='float')
    # The mean of category k follows from the regularized incomplete
    # gamma function I(a, x) evaluated at the category boundaries:
    # mean_k = ncats * (alpha / beta) * (I(alpha+1, b_k*beta) - I(alpha+1, b_{k-1}*beta))
    # where b_k is the upper boundary of category k.
    gammainc_lower = 0.0
    for k in range(ncats):
        if k == ncats - 1:
            # last category extends to infinity
            gammainc_upper = 1.0
        else:
            upper = scipy.stats.gamma.ppf((k + 1) / float(ncats), alpha,
                    scale=scale)
            gammainc_upper = scipy.special.gammainc(alpha + 1, upper * beta)
        catmeans[k] = alpha * ncats * (gammainc_upper - gammainc_lower) / beta
        gammainc_lower = gammainc_upper
    assert numpy.allclose(catmeans.sum() / ncats, alpha / beta), (
            "catmeans is {0}, mean of catmeans is {1}, expected mean "
            "alpha / beta = {2} / {3} = {4}").format(catmeans,
            catmeans.sum() / ncats, alpha, beta, alpha / beta)
    return catmeans
python
{ "resource": "" }
q40388
ExpCM.PARAMLIMITS
train
def PARAMLIMITS(self, value):
    """Set a new `PARAMLIMITS` dictionary.

    The new dictionary must cover exactly the same parameters as the
    current one, and every (min, max) pair must satisfy min < max.
    """
    assert set(value.keys()) == set(self.PARAMLIMITS.keys()), ("The "
            "new parameter limits are not defined for the same set "
            "of parameters as before.")
    for (param, (newmin, newmax)) in value.items():
        assert newmin < newmax, ("The new minimum value for {0}, {1}, "
                "is equal to or larger than the new maximum value, {2}"
                .format(param, newmin, newmax))
    # store a copy so later mutation of the caller's dict has no effect
    self._PARAMLIMITS = value.copy()
python
{ "resource": "" }
q40389
ExpCM._eta_from_phi
train
def _eta_from_phi(self):
    """Set `eta` to the variable transformation of the current `phi`.

    Computes `eta[w] = 1 - phi[w] / (eta[0] * ... * eta[w - 1])` and
    then validates the result against `PARAMLIMITS` / `PARAMTYPES`.
    """
    self.eta = scipy.ndarray(N_NT - 1, dtype='float')
    running = 1.0  # product of the eta values assigned so far
    for w in range(N_NT - 1):
        self.eta[w] = 1.0 - self.phi[w] / running
        running *= self.eta[w]
    _checkParam('eta', self.eta, self.PARAMLIMITS, self.PARAMTYPES)
python
{ "resource": "" }
q40390
ExpCM._update_phi
train
def _update_phi(self):
    """Set `phi` from the current `eta` (inverse of `_eta_from_phi`)."""
    running = 1.0  # product eta[0] * ... * eta[w - 1]
    for (w, eta_w) in enumerate(self.eta):
        self.phi[w] = running * (1 - eta_w)
        running *= eta_w
    # the final entry absorbs the remaining probability product
    self.phi[N_NT - 1] = running
python
{ "resource": "" }
q40391
ExpCM._update_Qxy
train
def _update_Qxy(self):
    """Refresh the mutation terms `Qxy` from current `kappa` and `phi`.

    Writes `phi[w]` into the entries of `Qxy` selected by the
    `CODON_NT_MUT[w]` mask, then scales the entries selected by
    `CODON_TRANSITION` by `kappa`.
    """
    for (w, phi_w) in enumerate(self.phi):
        scipy.copyto(self.Qxy, phi_w, where=CODON_NT_MUT[w])
    self.Qxy[CODON_TRANSITION] *= self.kappa
python
{ "resource": "" }
q40392
ExpCM._update_pi_vars
train
def _update_pi_vars(self):
    """Refresh all quantities derived from `pi` and `beta`.

    Updates `pi_codon`, `ln_pi_codon`, `piAx_piAy`, `piAx_piAy_beta`,
    and `ln_piAx_piAy_beta` in place.
    """
    # raise on any floating-point problem: these ratios must stay finite
    with scipy.errstate(divide='raise', under='raise', over='raise',
            invalid='raise'):
        for r in range(self.nsites):
            # map amino-acid preferences onto codons for this site
            self.pi_codon[r] = self.pi[r][CODON_TO_AA]
            tiled = scipy.tile(self.pi_codon[r], (N_CODON, 1))  # entry [x][y] is piAy
            self.piAx_piAy[r] = tiled.transpose() / tiled
        self.ln_pi_codon = scipy.log(self.pi_codon)
        self.piAx_piAy_beta = self.piAx_piAy**self.beta
        self.ln_piAx_piAy_beta = scipy.log(self.piAx_piAy_beta)
python
{ "resource": "" }
q40393
ExpCM._update_Frxy
train
def _update_Frxy(self):
    """Update `Frxy` from `piAx_piAy_beta`, `ln_piAx_piAy_beta`, `omega`, `beta`.

    `Frxy_no_omega` is the same quantity without the `omega` factor.
    """
    # start from 1: synonymous changes keep F = 1, as do nonsynonymous
    # changes whose preference ratio is essentially 1 (the masked-out
    # entries below, where -ln(r) / (1 - r) -> 1 as r -> 1)
    self.Frxy.fill(1.0)
    self.Frxy_no_omega.fill(1.0)
    # invalid='ignore' because entries excluded by `where` still evaluate
    # 0 / 0 before `copyto` discards them; those NaNs must not raise
    with scipy.errstate(divide='raise', under='raise', over='raise',
            invalid='ignore'):
        # nonsynonymous changes with a preference ratio away from 1 get
        # the factor -ln(r) / (1 - r) with r = piAx_piAy_beta
        scipy.copyto(self.Frxy_no_omega, -self.ln_piAx_piAy_beta /
                (1 - self.piAx_piAy_beta), where=scipy.logical_and(
                CODON_NONSYN, scipy.fabs(1 - self.piAx_piAy_beta)
                > ALMOST_ZERO))
        # multiply in omega only for nonsynonymous changes
        scipy.copyto(self.Frxy, self.Frxy_no_omega * self.omega,
                where=CODON_NONSYN)
python
{ "resource": "" }
q40394
ExpCM._update_Prxy
train
def _update_Prxy(self):
    """Rebuild the substitution terms `Prxy` from `Frxy` and `Qxy`.

    Off-diagonal entries are the elementwise product of the mutation
    term `Qxy` and the fixation term `Frxy`; the diagonal entries are
    then filled in by `_fill_diagonals`.
    """
    self.Prxy = self.Qxy * self.Frxy
    _fill_diagonals(self.Prxy, self._diag_indices)
python
{ "resource": "" }
q40395
ExpCM._update_Prxy_diag
train
def _update_Prxy_diag(self):
    """Update `D`, `A`, `Ainv` from `Prxy`, `prx`.

    For each site `r`, diagonalizes `Prxy[r]` as `A[r] diag(D[r]) Ainv[r]`.
    Instead of a general eigendecomposition, this symmetrizes the matrix
    first: `diag(prx**0.5) . Prxy . diag(prx**-0.5)` is symmetric here
    (see the commented-out asserts), so the cheaper and more stable
    `eigh` can be used, and the eigenvectors of `Prxy` are recovered by
    undoing the similarity transform.
    """
    for r in range(self.nsites):
        pr_half = self.prx[r]**0.5
        pr_neghalf = self.prx[r]**-0.5
        # symmetrized matrix via broadcasting; equivalent to the
        # explicit diagonal-matrix products on the next line
        #symm_pr = scipy.dot(scipy.diag(pr_half), scipy.dot(self.Prxy[r], scipy.diag(pr_neghalf)))
        symm_pr = (pr_half * (self.Prxy[r] * pr_neghalf).transpose()).transpose()
        # assert scipy.allclose(symm_pr, symm_pr.transpose())
        (evals, evecs) = scipy.linalg.eigh(symm_pr)
        # assert scipy.allclose(scipy.linalg.inv(evecs), evecs.transpose())
        # assert scipy.allclose(symm_pr, scipy.dot(evecs, scipy.dot(scipy.diag(evals), evecs.transpose())))
        self.D[r] = evals
        # map the eigenvectors of the symmetrized matrix back to those
        # of Prxy: A = diag(prx**-0.5) . evecs, Ainv = evecs.T . diag(prx**0.5)
        self.Ainv[r] = evecs.transpose() * pr_half
        self.A[r] = (pr_neghalf * evecs.transpose()).transpose()
python
{ "resource": "" }
q40396
ExpCM._update_prx
train
def _update_prx(self):
    """Recompute the stationary state `prx` from `phi`, `pi_codon`, and `beta`.

    Each site's distribution is the product of a mutational term
    (product of `phi` over the three codon positions) and a selection
    term (`pi_codon**beta`), normalized to sum to one.
    """
    # per-codon mutational frequency: multiply phi over the 3 positions
    mutfreq = scipy.ones(N_CODON, dtype='float')
    for pos in range(3):
        for w in range(N_NT):
            mutfreq[CODON_NT[pos][w]] *= self.phi[w]
    self.prx = (self.pi_codon**self.beta) * mutfreq
    # normalize each site; raise on any floating-point problem
    with scipy.errstate(divide='raise', under='raise', over='raise',
            invalid='raise'):
        for r in range(self.nsites):
            self.prx[r] /= self.prx[r].sum()
python
{ "resource": "" }
q40397
ExpCM.spielman_wr
train
def spielman_wr(self, norm=True):
    """Returns a list of site-specific omega values calculated from the `ExpCM`.

    Args:
        `norm` (bool)
            If `True`, normalize the `omega_r` values by the ExpCM
            gene-wide `omega`.

    Returns:
        `wr` (list)
            list of `omega_r` values of length `nsites`

    Following `Spielman and Wilke, MBE, 32:1097-1108
    <https://doi.org/10.1093/molbev/msv003>`_, we can predict the
    `dN/dS` value for each site `r`, :math:`\\rm{spielman}\\omega_r`,
    from the `ExpCM`. When `norm` is `False`, the `omega_r` values are
    defined as :math:`\\rm{spielman}\\omega_r =
    \\frac{\\sum_x \\sum_{y \\in N_x}p_{r,x} P_{r,xy}}
    {\\sum_x \\sum_{y \\in Nx}p_{r,x}Q_{xy}}`,
    where `r,x,y`, :math:`p_{r,x}`, :math:`P_{r,xy}`, and
    :math:`Q_{x,y}` have the same definitions as in the main `ExpCM`
    doc string and :math:`N_{x}` is the set of codons which are
    non-synonymous to codon `x` and differ from `x` by one nucleotide.
    When `norm` is `True`, the `omega_r` values above are divided by
    the ExpCM `omega` value."""
    # Indices of the nonsynonymous single-nucleotide neighbors N_x of
    # each codon x. These depend only on the genetic code, so compute
    # them once up front rather than recomputing the intersection of
    # `where` calls for every (site, codon) pair as before. A direct
    # logical AND of the two masks replaces the `== True` comparisons
    # and the `intersect1d` of index arrays.
    neighbors = [scipy.nonzero(scipy.logical_and(CODON_SINGLEMUT[i],
            CODON_NONSYN[i]))[0] for i in range(N_CODON)]
    wr = []
    for r in range(self.nsites):
        num = 0.0  # numerator: sum_x p_{r,x} * sum_{y in N_x} P_{r,xy}
        den = 0.0  # denominator: sum_x p_{r,x} * sum_{y in N_x} Q_{xy}
        for i in range(N_CODON):
            j = neighbors[i]
            p_i = self.stationarystate[r][i]
            P_xy = self.Prxy[r][i][j].sum()
            if norm:
                P_xy = P_xy / self.omega
            Q_xy = self.Qxy[i][j].sum()
            num += p_i * P_xy
            den += p_i * Q_xy
        wr.append(num / den)
    return wr
python
{ "resource": "" }
q40398
ExpCM_fitprefs.dlogprior
train
def dlogprior(self, param):
    """Derivative of the log prior with respect to free parameter `param`.

    Looks up the stored derivative value; what is stored depends on the
    value of `prior`.
    """
    assert param in self.freeparams, "Invalid param: {0}".format(param)
    deriv = self._dlogprior[param]
    return deriv
python
{ "resource": "" }
q40399
ExpCM_empirical_phi._update_phi
train
def _update_phi(self):
    """Compute `phi`, `dphi_dbeta`, and `eta` from `g` and `frxy`.

    `dphi_dbeta` is a numerical first derivative of the empirically
    computed `phi` with respect to `beta`; its stability is checked by
    recomputing it with the step size halved.
    """
    def _central_deriv(func, x0, dx):
        # 5-point central finite difference for the first derivative;
        # this is exactly the stencil used by the removed
        # `scipy.misc.derivative(func, x0, dx=dx, n=1, order=5)`:
        # f'(x) ~ (f(x-2h) - 8 f(x-h) + 8 f(x+h) - f(x+2h)) / (12 h)
        return (func(x0 - 2 * dx) - 8 * func(x0 - dx)
                + 8 * func(x0 + dx) - func(x0 + 2 * dx)) / (12 * dx)

    self.phi = self._compute_empirical_phi(self.beta)
    _checkParam('phi', self.phi, self.PARAMLIMITS, self.PARAMTYPES)
    self._eta_from_phi()
    dbeta = 1.0e-3
    self.dphi_dbeta = _central_deriv(self._compute_empirical_phi,
            self.beta, dbeta)
    dphi_dbeta_halfdx = _central_deriv(self._compute_empirical_phi,
            self.beta, dbeta / 2)
    assert scipy.allclose(self.dphi_dbeta, dphi_dbeta_halfdx,
            atol=1e-5, rtol=1e-4), ("The numerical derivative dphi_dbeta "
            "differs considerably in value for step dbeta = {0} and a step "
            "half that size, giving values of {1} and {2}.").format(
            dbeta, self.dphi_dbeta, dphi_dbeta_halfdx)
python
{ "resource": "" }