_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q40100
check_units_and_type
train
def check_units_and_type(input, expected_units, num=None, is_scalar=False):
    """Validate the units and type of a variable, returning it with units.

    If ``input`` carries no units and ``expected_units`` is not None, the
    expected units are attached to the output.  If ``input`` carries units
    that conflict with ``expected_units``, a ValueError is raised.

    Parameters
    ----------
    input : array_like or float
        Variable to check; should be 1D or scalar.
    expected_units : astropy.units or None
        Unit expected for ``input``.
    num : int, optional
        Expected length of ``input`` when it is an array or list.
    is_scalar : bool, optional
        True when ``input`` is a scalar quantity.  Default is False for
        array_like inputs; set is_scalar=True to check scalar units only.

    Returns
    -------
    ndarray or float, with astropy.units
        The input with expected units attached, unless a unit conflict or
        array-length mismatch occurs, in which case an error is raised.
    """
    # Strip units off the input (validating them on the way) so the raw
    # values can be normalised below.
    if hasattr(input, 'unit'):
        if expected_units is None:
            raise ValueError('Expecting dimensionless input')
        if input.unit != expected_units:
            raise ValueError('Expecting input units of ' + str(expected_units))
        bare_values = input.value
    else:
        bare_values = input

    # Normalise lists to 1D ndarrays unless the caller promised a scalar.
    result = bare_values if is_scalar else check_array_or_list(bare_values)

    # Re-attach units when the caller expects some.
    if expected_units is not None:
        result = result * expected_units

    # Optionally enforce the expected array length.
    if num is not None:
        check_input_size(result, num)

    return result
python
{ "resource": "" }
q40101
check_array_or_list
train
def check_array_or_list(input):
    """Return ``input`` as a 1D ndarray of non-negative values.

    Parameters
    ----------
    input : ndarray or list
        Values to validate; a list is converted to an ndarray.

    Returns
    -------
    ndarray
        The validated 1D array (the original object when already an ndarray).

    Raises
    ------
    TypeError
        If ``input`` is neither an ndarray nor a list.
    ValueError
        If the array is not 1D or contains negative values.
    """
    # isinstance() is preferred over type() comparison and also accepts
    # subclasses of ndarray/list (the original rejected ndarray subclasses).
    if isinstance(input, np.ndarray):
        output = input
    elif isinstance(input, list):
        output = np.array(input)
    else:
        raise TypeError('Expecting input type as ndarray or list.')
    if output.ndim != 1:
        raise ValueError('Input array must have 1 dimension.')
    # np.any is the idiomatic (and short-circuiting) form of
    # "np.sum(mask) > 0".
    if np.any(output < 0.):
        raise ValueError("Input array values cannot be negative.")
    return output
python
{ "resource": "" }
q40102
print_object_attributes
train
def print_object_attributes(thing, heading=None, file=None):
    '''Print the attribute names found on *thing*, one per line,
    optionally preceded by a "== heading ==" banner.'''
    if heading:
        print('==', heading, '==', file=file)
    names = object_attributes(thing)
    print('\n'.join(names), file=file)
python
{ "resource": "" }
q40103
_get_a_code_object_from
train
def _get_a_code_object_from( thing ) : ''' Given a thing that might be a property, a class method, a function or a code object, reduce it to code object. If we cannot, return the thing itself. ''' # If we were passed a Method wrapper, get its function if isinstance( thing, types.MethodType ) : thing = thing.__func__ # If we were passed a property object, get its getter function # (no direct support for the fdel or fset functions) if hasattr( thing, 'fget' ) : thing = thing.fget # If we were passed, or now have, a function, get its code object. if isinstance( thing, types.FunctionType ) : thing = thing.__code__ # We should now have a code object, or will never have it. return thing
python
{ "resource": "" }
q40104
printcodelist
train
def printcodelist(thing, to=sys.stdout, heading=None):
    '''Write the disassembly of *thing* to the file-like object *to*.

    *thing* may be a list of code tuples (e.g. a CodeList), a Code
    instance, or anything _get_a_code_object_from() can reduce to a
    code object.

    Python 3 wrinkle: if the to-file is in binary mode, the strings must
    be encoded, else a TypeError is raised.  Real file objects have a
    .mode attribute but StringIO-like objects do not; every file-like
    object that supports *string* output does expose .encoding, so the
    presence of that attribute decides whether we encode.

    Raises:
        ValueError: if *thing* cannot be converted to a disassembly.
    '''
    # If we were passed a list, assume that it is a CodeList or
    # a manually-assembled list of code tuples.
    if not isinstance(thing, list):
        # Passed something else.  Reduce it to a CodeList.
        if isinstance(thing, Code):
            thing = thing.code
        else:
            # Convert various sources to a code object.
            thing = _get_a_code_object_from(thing)
            try:
                thing = Code.from_code(thing).code
            except Exception as e:
                # Chain the cause so the real failure is not hidden.
                raise ValueError('Invalid input to printcodelist') from e

    # We have a CodeList or equivalent; get the whole disassembly as a
    # string, and build the optional heading line alongside it.
    whole_thang = str(thing)
    header = ('===' + heading + '===\n') if heading else None

    # Binary destination: encode everything we are about to write.
    # (BUG FIX: the original encoded the heading to bytes and then
    # concatenated it with the str literals '===', raising TypeError.)
    if not hasattr(to, 'encoding'):
        whole_thang = whole_thang.encode('UTF-8')
        if header is not None:
            header = header.encode('UTF-8')

    # Send it on its way.
    if header is not None:
        to.write(header)
    to.write(whole_thang)
python
{ "resource": "" }
q40105
Code._findlinestarts
train
def _findlinestarts(code_object): """ Find the offsets in a byte code which are the start of source lines. Generate pairs (offset, lineno) as described in Python/compile.c. This is a modified version of dis.findlinestarts. This version allows multiple "line starts" with the same line number. (The dis version conditions its yield on a test "if lineno != lastlineno".) FYI: code.co_lnotab is a byte array with one pair of bytes for each effective source line number in the bytecode. An effective line is one that generates code: not blank or comment lines. The first actual line number, typically the number of the "def" statement, is in code.co_firstlineno. An even byte of co_lnotab is the offset to the bytecode generated from the next effective line number. The following odd byte is an increment on the previous line's number to the next line's number. Thus co_firstlineno+co_lnotab[1] is the first effective line's number, and co_lnotab[0] is the number of bytes it generated. Note that an effective line number generates code by definition, hence the even byte cannot be zero; and as line numbers are monotonically increasing, the odd byte cannot be zero either. But what, the curious reader might ask, does Python do if a source line generates more than 255 bytes of code? In that *highly* unlikely case compile.c generates multiple pairs of (255,0) until it has accounted for all the generated code, then a final pair of (offset%256, lineincr). Oh, but what, the curious reader asks, do they do if there is a gap of more than 255 between effective line numbers? It is not unheard of to find blocks of comments larger than 255 lines (like this one?). Then compile.c generates pairs of (0, 255) until it has accounted for the line number difference and a final pair of (offset,lineincr%256). Uh, but...? Yes, what now, annoying reader? Well, does the following code handle these special cases of (255,0) and (0,255) properly? 
It handles the (0,255) case correctly, because of the "if byte_incr" test which skips the yield() but increments lineno. It does not handle the case of (255,0) correctly; it will yield false pairs (255,0). Fortunately that will only arise e.g. when disassembling some "obfuscated" code where most newlines are replaced with semicolons. Oh, and yes, the to_code() method does properly handle generation of the (255,0) and (0,255) entries correctly. """ # grab the even bytes as integer byte_increments: byte_increments = [c for c in code_object.co_lnotab[0::2]] # grab the odd bytes as integer line_increments: line_increments = [c for c in code_object.co_lnotab[1::2]] lineno = code_object.co_firstlineno addr = 0 for byte_incr, line_incr in zip(byte_increments, line_increments): if byte_incr: yield (addr, lineno) addr += byte_incr lineno += line_incr yield (addr, lineno)
python
{ "resource": "" }
q40106
EmbedMixin.isomap
train
def isomap(self, num_dims=None, directed=None):
    '''Isomap embedding.

    num_dims : dimension of the embedded coordinates; defaults to the
               input dimension
    directed : forwarded to the .shortest_path() calculation
    '''
    # -0.5 * squared geodesic distances forms the classical-MDS-style
    # kernel consumed by KernelPCA's precomputed mode.
    geodesic = self.shortest_path(directed=directed)
    kernel = -0.5 * geodesic ** 2
    embedder = KernelPCA(n_components=num_dims, kernel='precomputed')
    return embedder.fit_transform(kernel)
python
{ "resource": "" }
q40107
EmbedMixin.laplacian_eigenmaps
train
def laplacian_eigenmaps(self, num_dims=None, normed=True, val_thresh=1e-8):
    '''Laplacian Eigenmaps embedding.

    num_dims : dimension of embedded coordinates, defaults to input dimension
    normed : forwarded to the .laplacian() calculation
    val_thresh : threshold for omitting vectors with near-zero eigenvalues
    '''
    lap = self.laplacian(normed=normed)
    # Embedding coordinates come from the (near-)null space of the Laplacian.
    return _null_space(lap, num_dims, val_thresh, overwrite=True)
python
{ "resource": "" }
q40108
EmbedMixin.layout_circle
train
def layout_circle(self):
    '''Position vertices evenly around the unit circle; returns an
    (n, 2) array of (x, y) coordinates.'''
    count = self.num_vertices()
    # n+1 points spanning [0, 2*pi], then drop the duplicate endpoint.
    angles = np.linspace(0, 2 * np.pi, count + 1)[:count]
    return np.column_stack((np.cos(angles), np.sin(angles)))
python
{ "resource": "" }
q40109
walk
train
def walk(dispatcher, node, definition=None):
    """
    The default, standalone walk function following the standard
    argument ordering for the unparsing walkers.

    Arguments:

    dispatcher
        a Dispatcher instance, defined earlier in this module.  This
        instance will dispatch out the correct callable for the various
        object types encountered throughout this recursive function.

    node
        the starting Node from asttypes.

    definition
        a standalone definition tuple to start working on the node with;
        if none is provided, an initial definition will be looked up
        using the dispatcher with the node for the generation of output.

        While the dispatcher object is able to provide the lookup
        directly, this extra definition argument allow more flexibility
        in having Token subtypes being able to provide specific
        definitions also that may be required, such as the generation of
        optional rendering output.
    """

    # The inner walk function - this is actually exposed to the token
    # rule objects so they can also make use of it to process the node
    # with the dispatcher.

    # Shared traversal state: the stack of nodes currently being walked,
    # and the stack of source paths pushed by nodes that carry one.
    nodes = []
    sourcepath_stack = [NotImplemented]

    def _walk(dispatcher, node, definition=None, token=None):
        if not isinstance(node, Node):
            # Not a Node: hand the raw value to the token handler along
            # with the nearest enclosing node.
            for fragment in dispatcher.token(
                    token, nodes[-1], node, sourcepath_stack):
                yield fragment
            return

        # Track the source path for this subtree, if the node has one.
        push = bool(node.sourcepath)
        if push:
            sourcepath_stack.append(node.sourcepath)

        nodes.append(node)
        if definition is None:
            definition = dispatcher.get_optimized_definition(node)
        for rule in definition:
            for chunk in rule(_walk, dispatcher, node):
                yield chunk
        nodes.pop(-1)

        if push:
            sourcepath_stack.pop(-1)

    # Format layout markers are not handled immediately in the walk -
    # they will simply be buffered so that a collection of them can be
    # handled at once.

    def process_layouts(layout_rule_chunks, last_chunk, chunk):
        # Text on either side of the buffered layout chunks, used by the
        # layout handlers to decide what (if anything) to emit.
        before_text = last_chunk.text if last_chunk else None
        after_text = chunk.text if chunk else None
        # the text that was yielded by the previous layout handler
        prev_text = None

        # While Layout rules in a typical definition are typically
        # interspersed with Tokens, certain assumptions with how the
        # Layouts are specified within there will fail when Tokens fail
        # to generate anything for any reason.  However, the dispatcher
        # instance will be able to accept and resolve a tuple of Layouts
        # to some handler function, so that a form of normalization can
        # be done.  For instance, an (Indent, Newline, Dedent) can
        # simply be resolved to no operations.  To achieve this, iterate
        # through the layout_rule_chunks and generate a normalized form
        # for the final handling to happen.

        # the preliminary stack that will be cleared whenever a
        # normalized layout rule chunk is generated.
        lrcs_stack = []

        # first pass: generate both the normalized/finalized lrcs.
        for lrc in layout_rule_chunks:
            lrcs_stack.append(lrc)
            # check every single chunk from left to right...
            for idx in range(len(lrcs_stack)):
                rule = tuple(lrc.rule for lrc in lrcs_stack[idx:])
                handler = dispatcher.layout(rule)
                if handler is not NotImplemented:
                    # not manipulating lrsc_stack from within the same
                    # for loop that it is being iterated upon
                    break
            else:
                # which continues back to the top of the outer for loop
                continue
            # So a handler is found from inside the rules; extend the
            # chunks from the stack that didn't get normalized, and
            # generate a new layout rule chunk.
            lrcs_stack[:] = lrcs_stack[:idx]
            lrcs_stack.append(LayoutChunk(
                rule, handler, layout_rule_chunks[idx].node,
            ))

        # second pass: now the processing can be done.
        for lr_chunk in lrcs_stack:
            gen = lr_chunk.handler(
                dispatcher, lr_chunk.node, before_text, after_text,
                prev_text)
            if not gen:
                continue
            for chunk_from_layout in gen:
                yield chunk_from_layout
                prev_text = chunk_from_layout.text

    # The top level walker implementation
    def walk():
        last_chunk = None
        layout_rule_chunks = []

        for chunk in _walk(dispatcher, node, definition):
            if isinstance(chunk, LayoutChunk):
                # Buffer layout chunks until the next concrete chunk.
                layout_rule_chunks.append(chunk)
            else:
                # process layout rule chunks that had been cached.
                for chunk_from_layout in process_layouts(
                        layout_rule_chunks, last_chunk, chunk):
                    yield chunk_from_layout
                layout_rule_chunks[:] = []
                yield chunk
                last_chunk = chunk

        # process the remaining layout rule chunks.
        for chunk_from_layout in process_layouts(
                layout_rule_chunks, last_chunk, None):
            yield chunk_from_layout

    for chunk in walk():
        yield chunk
python
{ "resource": "" }
q40110
constructFiniteStateMachine
train
def constructFiniteStateMachine(inputs, outputs, states, table, initial,
                                richInputs, inputContext, world,
                                logger=LOGGER):
    """
    Construct a new finite state machine from a definition of its states.

    @param inputs: Definitions of all input symbols the resulting machine
        will need to handle, as a L{twisted.python.constants.Names} subclass.

    @param outputs: Definitions of all output symbols the resulting machine
        is allowed to emit, as a L{twisted.python.constants.Names} subclass.

    @param states: Definitions of all possible states the resulting machine
        will be capable of inhabiting, as a L{twisted.python.constants.Names}
        subclass.

    @param table: The state transition table, defining which output and next
        state results from the receipt of any and all inputs in any and all
        states.
    @type table: L{TransitionTable}

    @param initial: The state the machine will start in (one of the symbols
        from C{states}).

    @param richInputs: A L{list} of types which correspond to each of the
        input symbols from C{inputs}.
    @type richInputs: L{list} of L{IRichInput} I{providers}

    @param inputContext: A L{dict} mapping output symbols to L{Interface}
        subclasses describing the requirements of the inputs which lead to
        them.

    @param world: An object responsible for turning FSM outputs into
        observable side-effects.
    @type world: L{IOutputExecutor} provider

    @param logger: The logger to which to write messages.
    @type logger: L{eliot.ILogger} or L{NoneType} if there is no logger.

    @return: An L{IFiniteStateMachine} provider
    """
    table = table.table

    # States appearing in the table must exactly match the declared states.
    _missingExtraCheck(
        set(table.keys()), set(states.iterconstants()),
        ExtraTransitionState, MissingTransitionState)

    # Inputs handled by the table must exactly match the declared inputs.
    _missingExtraCheck(
        set(i for s in table.values() for i in s),
        set(inputs.iterconstants()),
        ExtraTransitionInput, MissingTransitionInput)

    # Outputs emitted by the table must exactly match the declared outputs.
    _missingExtraCheck(
        set(output
            for s in table.values()
            for transition in s.values()
            for output in transition.output),
        set(outputs.iterconstants()),
        ExtraTransitionOutput, MissingTransitionOutput)

    try:
        # Next-states must also match the declared states...
        _missingExtraCheck(
            set(transition.nextState
                for s in table.values()
                for transition in s.values()),
            set(states.iterconstants()),
            ExtraTransitionNextState, MissingTransitionNextState)
    except MissingTransitionNextState as e:
        # ...except that a state never used as a next-state is tolerated
        # when that state is exactly the initial state.
        if e.args != ({initial},):
            raise

    if initial not in states.iterconstants():
        raise InvalidInitialState(initial)

    # Input-context keys must be a subset of the declared outputs.
    extraInputContext = set(inputContext) - set(outputs.iterconstants())
    if extraInputContext:
        raise ExtraInputContext(extraInputContext)

    # Rich inputs must implement the interfaces required by the outputs
    # they can trigger.
    _checkConsistency(richInputs, table, inputContext)

    fsm = _FiniteStateMachine(inputs, outputs, states, table, initial)
    executor = IOutputExecutor(world)
    interpreter = _FiniteStateInterpreter(
        tuple(richInputs), inputContext, fsm, executor)
    if logger is not None:
        # Wrap the interpreter so every transition is logged.
        interpreter = FiniteStateLogger(
            interpreter, logger, executor.identifier())
    return interpreter
python
{ "resource": "" }
q40111
_checkConsistency
train
def _checkConsistency(richInputs, fsm, inputContext): """ Verify that the outputs that can be generated by fsm have their requirements satisfied by the given rich inputs. @param richInputs: A L{list} of all of the types which will serve as rich inputs to an L{IFiniteStateMachine}. @type richInputs: L{list} of L{IRichInput} providers @param fsm: The L{IFiniteStateMachine} to which these rich inputs are to be delivered. @param inputContext: A L{dict} mapping output symbols to L{Interface} subclasses. Rich inputs which result in these outputs being produced by C{fsm} must provide the corresponding interface. @raise DoesNotImplement: If any of the rich input types fails to implement the interfaces required by the outputs C{fsm} can produce when they are received. """ for richInput in richInputs: for state in fsm: for input in fsm[state]: if richInput.symbol() == input: # This rich input will be supplied to represent this input # symbol in this state. Check to see if it satisfies the # output requirements. outputs = fsm[state][input].output for output in outputs: try: required = inputContext[output] except KeyError: continue # Consider supporting non-interface based checking in # the future: extend this to also allow # issubclass(richInput, required) if required.implementedBy(richInput): continue raise DoesNotImplement( "%r not implemented by %r, " "required by %r in state %r" % ( required, richInput, input, state))
python
{ "resource": "" }
q40112
minify
train
def minify(drop_semi=True):
    """
    Rules for minifying output.

    Arguments:

    drop_semi
        Drop semicolons whenever possible.  Note that if Dedent and
        OptionalNewline have a handler defined, it will stop final break
        statements from being resolved due to reliance on normalized
        resolution.
    """
    layout_handlers = {
        OpenBlock: layout_handler_openbrace,
        CloseBlock: layout_handler_closebrace,
        EndStatement: layout_handler_semicolon,
        Space: layout_handler_space_minimum,
        OptionalSpace: layout_handler_space_minimum,
        RequiredSpace: layout_handler_space_imply,
        (Space, OpenBlock): layout_handler_openbrace,
        (Space, EndStatement): layout_handler_semicolon,
        (OptionalSpace, EndStatement): layout_handler_semicolon,
    }

    if drop_semi:
        # if these are defined, they should be dropped; should really
        # provide these as a flag.
        # layout_handlers.update({
        #     OptionalNewline: None,
        #     Dedent: None,
        # })
        overrides = {
            EndStatement: layout_handler_semicolon_optional,
            # these two rules rely on the normalized resolution
            (OptionalSpace, EndStatement): layout_handler_semicolon_optional,
            (EndStatement, CloseBlock): layout_handler_closebrace,
            # this is a fallback rule for when Dedent is defined by
            # some other rule, which won't neuter all optional
            # semicolons.
            (EndStatement, Dedent): rule_handler_noop,
            ((OptionalSpace, EndStatement), CloseBlock):
                layout_handler_closebrace,
        }
        layout_handlers.update(overrides)

    def minify_rule():
        return {
            'layout_handlers': layout_handlers,
            'deferrable_handlers': {
                Literal: deferrable_handler_literal_continuation,
            },
        }
    return minify_rule
python
{ "resource": "" }
q40113
indent
train
def indent(indent_str=None):
    """
    A complete, standalone indent ruleset.

    Arguments:

    indent_str
        The string used for indentation.  Defaults to None, which will
        defer the value used to the one provided by the Dispatcher.
    """
    def indentation_rule():
        # One Indentator per rule instantiation, so indent level state
        # is not shared between rule sets.
        indentator = Indentator(indent_str)
        handlers = {
            OpenBlock: layout_handler_openbrace,
            CloseBlock: layout_handler_closebrace,
            EndStatement: layout_handler_semicolon,
            Space: layout_handler_space_imply,
            OptionalSpace: layout_handler_space_optional_pretty,
            RequiredSpace: layout_handler_space_imply,
            Indent: indentator.layout_handler_indent,
            Dedent: indentator.layout_handler_dedent,
            Newline: indentator.layout_handler_newline,
            OptionalNewline: indentator.layout_handler_newline_optional,
            (Space, OpenBlock): NotImplemented,
            (Space, EndStatement): layout_handler_semicolon,
            (OptionalSpace, EndStatement): layout_handler_semicolon,
            (Indent, Newline, Dedent): rule_handler_noop,
        }
        return {'layout_handlers': handlers}
    return indentation_rule
python
{ "resource": "" }
q40114
getPyroleLikeAtoms
train
def getPyroleLikeAtoms(cycle):
    """cycle -> return a dictionary of pyrole nitrogen-like atoms in a
    cycle or a molecule, keyed on the atom.handle"""
    # NOTE(review): the outgoing bonds might need to be single or
    # aromatic (kept from the original comment) -- confirm.
    return {
        atom.handle: atom
        for atom in cycle.atoms
        if PyroleTable.get(
            (atom.symbol, atom.charge, atom.hcount, len(atom.bonds)), 0)
    }
python
{ "resource": "" }
q40115
convert
train
def convert(cycle, pyroleLike, usedPyroles):
    """Attempt to aromatize *cycle*.

    :param cycle: ring (or molecule) whose atoms and bonds are rewritten
        to aromatic form when possible.
    :param pyroleLike: lookup (keyed by atom.handle) of the pyrole-like
        atoms in the cycle.
    :param usedPyroles: dict updated in place with the handles of pyrole
        nitrogens consumed by this aromatization (sent to the valence
        checker later).
    :return: 1 if the cycle was aromatized, 0 if it could not be
        (the original docstring claimed 2, but the code returns 0).
    """
    bonds = cycle.bonds
    atoms = cycle.atoms
    initialBondStates = []
    initialAtomStates = []
    _usedPyroles = {}
    for bond in bonds:
        # store the initial states but assume the bond is aromatic
        initialBondStates.append((bond, bond.symbol, bond.bondorder,
                                  bond.bondtype, bond.aromatic, bond.stereo))
        # XXX FIX ME
        # until we get proper conjugation, aromatic bond orders are 1.5
        bond.reset(':', bond.bondorder, 4, bond.fixed, bond.stereo)

    aromatized = 1
    for atom in atoms:
        initialAtomStates.append((atom, atom.aromatic))
        atom.aromatic = 1
        nonhydrogens = atom.sumBondOrders() + atom.charge
        # look for the lowest valence where we don't have to change the
        # charge of the atom to fill the valences
        for valence in atom.valences:
            neededHydrogens = int(valence - nonhydrogens)
            if neededHydrogens >= 0:
                break
        else:
            # we can't change the aromaticity and have correct valence.
            #
            # there is one special case of a five membered ring and a
            # pyrole nitrogen like atom we need to look for.
            # (BUG FIX: was pyroleLike.has_key(...), a Python 2 dict
            # method removed in Python 3.)
            if len(cycle) == 5 and atom.handle in pyroleLike:
                _usedPyroles[atom.handle] = 1
            else:
                # nope, the valences don't work out so we can't aromatize
                aromatized = 0
                break

    # sanity check, this should be true because of the canBeAromatic
    # routine above
    assert len(_usedPyroles) <= 1, "Too many used pyroles!"

    cycle.aromatic = aromatized
    if not aromatized:
        # roll bonds and atoms back to the recorded initial states
        for bond, symbol, order, bondtype, aromatic, stereo in initialBondStates:
            bond.reset(symbol, order, bondtype, bond.fixed, stereo)
        for atom, aromatic in initialAtomStates:
            atom.aromatic = aromatic
    else:
        # we used some pyroles, we'll have to send these to the
        # valence checker later
        usedPyroles.update(_usedPyroles)
    return aromatized
python
{ "resource": "" }
q40116
SensitivityContainer._prep_noise_interpolants
train
def _prep_noise_interpolants(self): """Construct interpolated sensitivity curves This will construct the interpolated sensitivity curves using scipy.interpolate.interp1d. It will add wd noise if that is requested. Raises: ValueError: ``len(noise_type_in) != len(sensitivity_curves)`` ValueError: Issue with sensitivity curve type provided. """ noise_lists = {} self.noise_interpolants = {} if isinstance(self.sensitivity_curves, str): self.sensitivity_curves = [self.sensitivity_curves] if isinstance(self.noise_type_in, list): if len(self.noise_type_in) != len(self.sensitivity_curves): raise ValueError('noise_type_in must have same shape as sensitivity_curves if it is' + 'provided as a list.' + 'If all curves are of the same type, provide a string.') else: assert isinstance(self.noise_type_in, str) self.noise_type_in = [self.noise_type_in for _ in self.sensitivity_curves] if isinstance(self.signal_type, str): self.signal_type = [self.signal_type] # read in all the noise curves for num, sc in enumerate(self.sensitivity_curves): if isinstance(sc, str): f, h_n = read_noise_curve(sc, noise_type_in=self.noise_type_in[num], noise_type_out='char_strain') if sc[-4:] == '.txt': key = sc.split('.')[0].split('/')[-1] else: key = sc elif isinstance(sc, list): # TODO: add to docs if inputing special noise curve, make sure its char_strain f, h_n = sc key = str(num) else: raise ValueError('Sensitivity curves must either be string' + 'or list containing f_n and asd_n.') noise_lists[key] = [f, h_n] # add wd noise if str(self.add_wd_noise).lower() in ['true', 'both', 'yes']: if isinstance(self.wd_noise, str): f_n_wd, h_n_wd = read_noise_curve(self.wd_noise, noise_type_in=self.wd_noise_type_in, noise_type_out='char_strain') elif isinstance(self, wd_noise, list): f_n_wd, h_n_wd = self.wd_noise trans_dict = {} for sc in noise_lists.keys(): f_n, h_n = noise_lists[sc] if self.add_wd_noise.lower() == 'both': trans_dict[sc] = [f_n, h_n] f_n, h_n = combine_with_wd_noise(f_n, h_n, f_n_wd, h_n_wd) 
trans_dict[sc + '_wd'] = [f_n, h_n] noise_lists = trans_dict # interpolate for sc in noise_lists: f_n, h_n = noise_lists[sc] self.noise_interpolants[sc] = (interpolate.interp1d(f_n, h_n, bounds_error=False, fill_value=1e30)) return
python
{ "resource": "" }
q40117
create_selfsim
train
def create_selfsim(oracle, method='rsfx'):
    """
    Create self similarity matrix from attributes of a vmo object.

    :param oracle: an encoded vmo object
    :param method:
        "com" (alias "comp") - use the compression codes
        "sfx" - use suffix links
        "rsfx" - use reverse suffix links
        "lrs" - use LRS values
        "seg" - use patterns found
    :return: the created self-similarity matrix, shape
        (n_states - 1, n_states - 1)
    """
    len_oracle = oracle.n_states - 1
    mat = np.zeros((len_oracle, len_oracle))
    # FIX: docstring and code disagreed ('comp' vs 'com'); accept both.
    if method in ('com', 'comp'):
        if not oracle.code:
            print("Codes not generated. Generating codes with encode().")
            oracle.encode()
        ind = 0  # index
        for l, p in oracle.code:  # l for length, p for position
            inc = 1 if l == 0 else l
            mat[range(ind, ind + inc), range(p - 1, p - 1 + inc)] = 1
            mat[range(p - 1, p - 1 + inc), range(ind, ind + inc)] = 1
            # NOTE(review): advances by l (not inc), so a zero-length
            # code word does not move the index -- confirm intended.
            ind = ind + l
    elif method == 'sfx':
        # Link each state to its suffix-link target (skipping links to 0).
        for i, s in enumerate(oracle.sfx[1:]):
            if s != 0:
                mat[i][s - 1] = 1
                mat[s - 1][i] = 1
    elif method == 'rsfx':
        # All pairs within each latent cluster are similar.
        for cluster in oracle.latent:
            for u, v in itertools.product(cluster, repeat=2):
                mat[u - 1][v - 1] = 1
    elif method == 'lrs':
        # Mark the diagonal band covered by each LRS.
        for i, l in enumerate(oracle.lrs[1:]):
            if l != 0:
                s = oracle.sfx[i + 1]
                mat[range((s - l) + 1, s + 1), range(i - l + 1, i + 1)] = 1
                mat[range(i - l + 1, i + 1), range((s - l) + 1, s + 1)] = 1
    elif method == 'seg':
        ind = 0
        for l, p in oracle.segment:  # l for length, p for position
            inc = 1 if l == 0 else l
            mat[range(ind, ind + inc), range(p - 1, p - 1 + inc)] = 1
            mat[range(p - 1, p - 1 + inc), range(ind, ind + inc)] = 1
            ind = ind + l
    return mat
python
{ "resource": "" }
q40118
create_transition
train
def create_transition(oracle, method='trn'):
    """Create a transition matrix based on oracle links; returns the
    (matrix, histogram, n) triple from the symbolic helper."""
    transition_matrix, histogram, total = _create_trn_mat_symbolic(
        oracle, method)
    return transition_matrix, histogram, total
python
{ "resource": "" }
q40119
predict
train
def predict(oracle, context, ab=None, verbose=False):
    """Single symbolic prediction given a context, an oracle and an alphabet.

    :param oracle: a learned vmo object from a symbolic sequence.
    :param context: the context precedes the predicted symbol
    :param ab: alphabet (mapping symbol -> histogram index)
    :param verbose: to show if the context if pruned or not
    :return: a probability distribution over the alphabet for the
        prediction, and the (possibly pruned) context actually used.
    """
    if verbose:
        print("original context: ", context)
    if ab is None:
        ab = oracle.get_alphabet()

    # Follow the context through the oracle to a state _s.
    _b, _s, context = _test_context(oracle, context)
    # LRS values of the reverse-suffix-link children of that state.
    _lrs = [oracle.lrs[k] for k in oracle.rsfx[_s]]
    context_state = []
    # Prune the context from the front until some rsfx child has an LRS
    # long enough to cover the remaining context.
    while not context_state:
        for _i, _l in enumerate(_lrs):
            if _l >= len(context):
                context_state.append(oracle.rsfx[_s][_i])
        if context_state:
            break
        else:
            context = context[1:]
            _b, _s = oracle.accept(context)
            _lrs = [oracle.lrs[k] for k in oracle.rsfx[_s]]
    if verbose:
        print("final context: ", context)
        print("context_state: ", context_state)

    # Laplace-smoothed histogram over the alphabet.
    d_count = len(ab)
    hist = [1.0] * len(ab)  # initialize all histograms with 1s.

    # Count symbols on the forward transitions of the matched state.
    trn_data = [oracle.data[n] for n in oracle.trn[_s]]
    for k in trn_data:
        hist[ab[k]] += 1.0
        d_count += 1.0

    # Accumulate counts from the matching reverse-suffix states too.
    for i in context_state:
        d_count, hist = _rsfx_count(oracle, i, d_count, hist, ab)

    return [hist[idx] / d_count for idx in range(len(hist))], context
python
{ "resource": "" }
q40120
log_loss
train
def log_loss(oracle, test_seq, ab=[], m_order=None, verbose=False):
    """Evaluate the average log-loss of a sequence given an oracle.

    :param oracle: a learned vmo object.
    :param test_seq: the symbol sequence to evaluate.
    :param ab: alphabet mapping symbol -> index; defaults to the oracle's
        own alphabet.  (Mutable default kept for interface compatibility;
        it is never mutated here.)
    :param m_order: maximum context (model) order; None means unbounded.
    :param verbose: print a progress bar while evaluating.
    :return: (average log2 loss per symbol, average context length).
    """
    if not ab:
        ab = oracle.get_alphabet()
    if verbose:
        print(' ')
    logP = 0.0
    context = []
    # Progress-bar step.  BUG FIX: was floor((len-1)/100), which is 0
    # for sequences shorter than ~100 symbols and makes np.mod(i, 0)
    # misbehave in verbose mode; clamp to at least 1.
    increment = max(np.floor((len(test_seq) - 1) / 100), 1)
    bar_count = -1
    maxContextLength = 0
    avgContext = 0
    for i, t in enumerate(test_seq):
        p, c = predict(oracle, context, ab, verbose=False)
        if len(c) < len(context):
            # predict() pruned the context; keep only the suffix it used.
            # NOTE(review): when c is empty this slice keeps the whole
            # context rather than emptying it -- confirm intended.
            context = context[-len(c):]
        logP -= np.log2(p[ab[t]])
        context.append(t)
        if m_order is not None:
            if len(context) > m_order:
                context = context[-m_order:]
        avgContext += float(len(context)) / len(test_seq)

        if verbose:
            percentage = np.mod(i, increment)
            if percentage == 0:
                bar_count += 1
            if len(context) > maxContextLength:
                maxContextLength = len(context)
            sys.stdout.write('\r')
            sys.stdout.write("\r[" + "=" * bar_count +
                             " " * (100 - bar_count) + "] " +
                             str(bar_count) + "% " +
                             str(i) + "/" + str(len(test_seq) - 1) +
                             " Current max length: " +
                             str(maxContextLength))
            sys.stdout.flush()
    return logP / len(test_seq), avgContext
python
{ "resource": "" }
q40121
_rsfx_count
train
def _rsfx_count(oracle, s, count, hist, ab): """ Accumulate counts for context """ trn_data = [oracle.data[n] for n in oracle.trn[s]] for k in trn_data: hist[ab[k]] += 1.0 count += 1.0 rsfx_candidate = oracle.rsfx[s][:] while rsfx_candidate: s = rsfx_candidate.pop(0) trn_data = [oracle.data[n] for n in oracle.trn[s]] for k in trn_data: hist[ab[k]] += 1.0 count += 1.0 rsfx_candidate.extend(oracle.rsfx[s]) return count, hist
python
{ "resource": "" }
q40122
tracking
train
def tracking(oracle, obs, trn_type=1, reverse_init=False, method='else',
             decay=1.0):
    """ Off-line tracking function using sub-optimal query-matching
    algorithm"""
    # obs: sequence of observation feature frames to align to the oracle.
    N = len(obs)
    if reverse_init:
        # Initialize candidate states from the reverse oracle, mapped
        # back to forward-oracle states by chasing suffix links to 0.
        r_oracle = create_reverse_oracle(oracle)
        _ind = [r_oracle.n_states - rsfx for rsfx in r_oracle.rsfx[0][:]]
        init_ind = []
        for i in _ind:
            s = i
            while oracle.sfx[s] != 0:
                s = oracle.sfx[s]
            init_ind.append(s)
        K = r_oracle.num_clusters()
    else:
        init_ind = oracle.rsfx[0][:]
        K = oracle.num_clusters()

    # P[i, k]: best state for cluster k at step i; T[i]: chosen path state.
    P = np.zeros((N, K), dtype='int')
    T = np.zeros((N,), dtype='int')

    map_k_outer = partial(_query_k, oracle=oracle, query=obs)
    map_query = partial(_query_init, oracle=oracle, query=obs[0],
                        method=method)
    # map_query = partial(_query_init, oracle=oracle, query=obs[0], method)
    argmin = np.argmin

    # Initialize per-cluster states and costs from the first observation.
    P[0], C = zip(*map(map_query, init_ind))
    C = np.array(C)
    T[0] = P[0][argmin(C)]

    # Select the transition-candidate generator.
    if trn_type == 1:
        trn = _create_trn_self
    elif trn_type == 2:
        trn = _create_trn_sfx_rsfx
    else:
        trn = _create_trn

    distance_cache = np.zeros(oracle.n_states)
    for i in range(1, N):  # iterate over the rest of query
        state_cache = []
        # NOTE(review): dist_cache aliases (does not copy) the shared
        # distance_cache array -- confirm intended.
        dist_cache = distance_cache
        map_k_inner = partial(map_k_outer, i=i, P=P, trn=trn,
                              state_cache=state_cache,
                              dist_cache=dist_cache)
        # Advance every cluster hypothesis and accumulate decayed cost.
        P[i], _c = zip(*map(map_k_inner, range(K)))
        C = decay * C + np.array(_c)
        T[i] = P[i][argmin(C)]
    return T
python
{ "resource": "" }
q40123
_query_init
train
def _query_init(k, oracle, query, method='all'): """A helper function for query-matching function initialization.""" if method == 'all': a = np.subtract(query, [oracle.f_array[t] for t in oracle.latent[oracle.data[k]]]) dvec = (a * a).sum(axis=1) # Could skip the sqrt _d = dvec.argmin() return oracle.latent[oracle.data[k]][_d], dvec[_d] else: a = np.subtract(query, oracle.f_array[k]) dvec = (a * a).sum() # Could skip the sqrt return k, dvec
python
{ "resource": "" }
q40124
_dist_obs_oracle
train
def _dist_obs_oracle(oracle, query, trn_list): """A helper function calculating distances between a feature and frames in oracle.""" a = np.subtract(query, [oracle.f_array[t] for t in trn_list]) return (a * a).sum(axis=1)
python
{ "resource": "" }
q40125
safe_shell_out
train
def safe_shell_out(cmd, verbose=False, **kwargs):
    """Run cmd and return True if it went ok, False if something went wrong.

    Suppresses all output (stdout is captured, stderr goes to devnull).

    Args:
        cmd (list of str): command and arguments for `check_output`.
        verbose (bool): if True, print the failing command and its output.
        **kwargs: passed through to `subprocess.check_output` (e.g. cwd).

    Returns:
        bool: True on success, False on any failure.
    """
    try:
        with open(os.devnull, "w") as fnull:
            with captured_output():
                check_output(cmd, stderr=fnull, **kwargs)
        return True
    except (CalledProcessError, OSError) as e:
        if verbose:
            cprint(" Error running command %s" % ' '.join(cmd), 'err')
            # Bug fix: OSError has no `.output` attribute, so the old
            # `print(e.output)` raised AttributeError when verbose=True.
            print(getattr(e, 'output', e))
        return False
    except Exception:
        # Deliberate best-effort catch-all, e.g. attempting to uninstall
        # pip can fail in unexpected ways; treat as "command failed".
        return False
python
{ "resource": "" }
q40126
prev_deps
train
def prev_deps(env):
    """Naively gets the dependancies from the last time ctox was run.

    Reads the whitespace-separated names from `env.envctoxfile`;
    returns an empty list when the file does not exist.
    """
    path = env.envctoxfile
    if os.path.isfile(path):
        with open(path) as fh:
            contents = fh.read()
        return contents.split()
    return []
python
{ "resource": "" }
q40127
make_dist
train
def make_dist(toxinidir, toxdir, package):
    """zip up the package into the toxdir.

    Runs `setup.py sdist` quietly; returns the path of the built zip on
    success, or None (implicitly) if the build failed.
    """
    dist_dir = os.path.join(toxdir, "dist")
    # Warnings from setup.py are suppressed by safe_shell_out.
    built = safe_shell_out(["python", "setup.py", "sdist", "--quiet",
                            "--formats=zip", "--dist-dir", dist_dir],
                           cwd=toxinidir)
    if built:
        return os.path.join(dist_dir, package + ".zip")
python
{ "resource": "" }
q40128
print_pretty_command
train
def print_pretty_command(env, command):
    """This is a hack for prettier printing.

    Rather than "{envpython} foo.py" we print "python foo.py": the env's
    bin directory is stripped from the executable for display only, and
    the command list is restored before returning.
    """
    cmd = command[0]
    abbr_cmd = cmd
    if cmd.startswith(env.envbindir):
        abbr_cmd = os.path.relpath(cmd, env.envbindir)
        if abbr_cmd == ".":  # TODO are there more edge cases?
            abbr_cmd = cmd
    command[0] = abbr_cmd
    quoted = ['"%s"' % part if " " in part else part for part in command]
    print('(%s)$ %s' % (env.name, ' '.join(quoted)))
    command[0] = cmd
    return abbr_cmd, cmd, command
python
{ "resource": "" }
q40129
TreeLikelihood.tree
train
def tree(self):
    """Tree with branch lengths in codon substitutions per site.

    The tree is a `Bio.Phylo.BaseTree.Tree` object.

    This is the current tree after whatever optimizations have been
    performed so far.  Branch lengths are copied from the internal `t`
    array, rescaled by the model's `branchScale`.
    """
    bs = self.model.branchScale
    for node in self._tree.find_clades():
        # The root has no branch above it, so it keeps no length.
        if node != self._tree.root:
            node.branch_length = self.t[self.name_to_nodeindex[node]] * bs
    return self._tree
python
{ "resource": "" }
q40130
TreeLikelihood.paramsarraybounds
train
def paramsarraybounds(self):
    """Bounds for parameters in `paramsarray`.

    Returns a tuple of (low, high) pairs, one per entry in `paramsarray`,
    taken from `model.PARAMLIMITS` and shrunk inward by `ALMOST_ZERO` so
    optimizers never sit exactly on a limit.
    """
    bounds = []
    for (i, param) in self._index_to_param.items():
        if isinstance(param, str):
            # scalar parameter: limits keyed directly by name
            bounds.append(self.model.PARAMLIMITS[param])
        elif isinstance(param, tuple):
            # vector parameter: param is (name, index); limits keyed by name
            bounds.append(self.model.PARAMLIMITS[param[0]])
        else:
            raise ValueError("Invalid param type")
    bounds = [(tup[0] + ALMOST_ZERO, tup[1] - ALMOST_ZERO) for tup in bounds]
    assert len(bounds) == len(self._index_to_param)
    return tuple(bounds)
python
{ "resource": "" }
q40131
TreeLikelihood.paramsarray
train
def paramsarray(self): """All free model parameters as 1-dimensional `numpy.ndarray`. You are allowed to update model parameters by direct assignment of this property.""" # Return copy of `_paramsarray` because setter checks if changed if self._paramsarray is not None: return self._paramsarray.copy() nparams = len(self._index_to_param) self._paramsarray = scipy.ndarray(shape=(nparams,), dtype='float') for (i, param) in self._index_to_param.items(): if isinstance(param, str): self._paramsarray[i] = getattr(self.model, param) elif isinstance(param, tuple): self._paramsarray[i] = getattr(self.model, param[0])[param[1]] else: raise ValueError("Invalid param type") return self._paramsarray.copy()
python
{ "resource": "" }
q40132
TreeLikelihood.paramsarray
train
def paramsarray(self, value):
    """Set new `paramsarray` and update via `updateParams`."""
    nparams = len(self._index_to_param)
    assert (isinstance(value, scipy.ndarray) and value.ndim == 1), (
            "paramsarray must be 1-dim ndarray")
    assert len(value) == nparams, ("Assigning paramsarray to ndarray "
            "of the wrong length.")
    if (self._paramsarray is not None) and all(value == self._paramsarray):
        return  # do not need to do anything if value has not changed
    # build `newvalues` to pass to `updateParams`
    newvalues = {}
    # vector-valued params are collected index-by-index, then assembled
    # into full arrays below
    vectorized_params = {}
    for (i, param) in self._index_to_param.items():
        if isinstance(param, str):
            newvalues[param] = float(value[i])
        elif isinstance(param, tuple):
            (iparam, iparamindex) = param
            if iparam in vectorized_params:
                assert iparamindex not in vectorized_params[iparam]
                vectorized_params[iparam][iparamindex] = float(value[i])
            else:
                vectorized_params[iparam] = {iparamindex:float(value[i])}
        else:
            raise ValueError("Invalid param type")
    for (param, paramd) in vectorized_params.items():
        # indices must form a contiguous 0..len-1 range
        assert set(paramd.keys()) == set(range(len(paramd)))
        newvalues[param] = scipy.array([paramd[i] for i in
                range(len(paramd))], dtype='float')
    self.updateParams(newvalues)
    self._paramsarray = self.paramsarray
python
{ "resource": "" }
q40133
TreeLikelihood.dtcurrent
train
def dtcurrent(self, value):
    """Set value of `dtcurrent`, update derivatives if needed.

    `dtcurrent` and `dparamscurrent` are mutually exclusive: only one
    family of derivatives is kept up to date at a time.
    """
    assert isinstance(value, bool)
    if value and self.dparamscurrent:
        raise RuntimeError("Can't set both dparamscurrent and dtcurrent True")
    if value != self.dtcurrent:
        self._dtcurrent = value
        self._updateInternals()
python
{ "resource": "" }
q40134
TreeLikelihood.t
train
def t(self, value):
    """Set new branch lengths, update likelihood and derivatives."""
    # Must be a float array with the same shape as the current lengths.
    assert (isinstance(value, scipy.ndarray) and (value.dtype == 'float')
            and (value.shape == self.t.shape))
    # Only recompute when at least one length actually changed.
    if (self._t != value).any():
        self._t = value.copy()
        self._updateInternals()
python
{ "resource": "" }
q40135
TreeLikelihood.dloglikarray
train
def dloglikarray(self):
    """Derivative of `loglik` with respect to `paramsarray`.

    Assembles the per-parameter derivatives stored in `self.dloglik`
    into a flat array aligned with `paramsarray`.
    """
    assert self.dparamscurrent, "dloglikarray requires paramscurrent == True"
    nparams = len(self._index_to_param)
    dloglikarray = scipy.ndarray(shape=(nparams,), dtype='float')
    for (i, param) in self._index_to_param.items():
        if isinstance(param, str):
            dloglikarray[i] = self.dloglik[param]
        elif isinstance(param, tuple):
            # vector parameter: param is (name, index)
            dloglikarray[i] = self.dloglik[param[0]][param[1]]
    return dloglikarray
python
{ "resource": "" }
q40136
TreeLikelihood.updateParams
train
def updateParams(self, newvalues):
    """Update model parameters and re-compute likelihoods.

    This method is the **only** acceptable way to update model
    parameters; the likelihood is re-computed as needed.

    Args:
        `newvalues` (dict)
            Maps parameter name (must be in `model.freeparams`) to its
            new value.
    """
    for param in newvalues:
        if param not in self.model.freeparams:
            raise RuntimeError("Can't handle param: {0}".format(
                    param))
    if newvalues:
        self.model.updateParams(newvalues)
        self._updateInternals()
        self._paramsarray = None
python
{ "resource": "" }
q40137
TreeLikelihood._M
train
def _M(self, k, t, tips=None, gaps=None): """Returns matrix exponential `M`.""" if self._distributionmodel: return self.model.M(k, t, tips, gaps) else: return self.model.M(t, tips, gaps)
python
{ "resource": "" }
q40138
TreeLikelihood._dM
train
def _dM(self, k, t, param, M, tips=None, gaps=None): """Returns derivative of matrix exponential.""" if self._distributionmodel: return self.model.dM(k, t, param, M, tips, gaps) else: return self.model.dM(t, param, M, tips, gaps)
python
{ "resource": "" }
q40139
TreeLikelihood._stationarystate
train
def _stationarystate(self, k): """Returns the stationarystate .""" if self._distributionmodel: return self.model.stationarystate(k) else: return self.model.stationarystate
python
{ "resource": "" }
q40140
TreeLikelihood._dstationarystate
train
def _dstationarystate(self, k, param): """Returns the dstationarystate .""" if self._distributionmodel: return self.model.dstationarystate(k, param) else: return self.model.dstationarystate(param)
python
{ "resource": "" }
q40141
TreeLikelihood._paramlist_PartialLikelihoods
train
def _paramlist_PartialLikelihoods(self): """List of parameters looped over in `_computePartialLikelihoods`.""" if self._distributionmodel: return [param for param in self.model.freeparams + [self.model.distributedparam] if param not in self.model.distributionparams] else: return self.model.freeparams
python
{ "resource": "" }
q40142
TreeLikelihood._sub_index_param
train
def _sub_index_param(self, param):
    """Returns list of sub-indexes for `param`.

    Used in computing partial likelihoods; loop over these indices.
    Scalar parameters (and the distributed parameter) get the single
    empty index `()`; 1-d vector parameters of length > 1 get one
    `(j,)` index per entry.
    """
    if self._distributionmodel and (param == self.model.distributedparam):
        indices = [()]
    else:
        paramvalue = getattr(self.model, param)
        if isinstance(paramvalue, float):
            indices = [()]
        elif (isinstance(paramvalue, scipy.ndarray) and
                paramvalue.ndim == 1 and paramvalue.shape[0] > 1):
            indices = [(j,) for j in range(len(paramvalue))]
        else:
            raise RuntimeError("Invalid param: {0}, {1}".format(
                    param, paramvalue))
    return indices
python
{ "resource": "" }
q40143
JSHost.send_request
train
def send_request(self, *args, **kwargs):
    """
    Intercept connection errors which suggest that a managed host
    has crashed and raise an exception indicating the location of the log
    """
    try:
        return super(JSHost, self).send_request(*args, **kwargs)
    except RequestsConnectionError as e:
        # Only treat the failure as a crash for managed hosts that have
        # previously connected and have a logfile to point the user at;
        # 'unsafe' requests opt out of this interception.
        if (
            self.manager and
            self.has_connected and
            self.logfile and
            'unsafe' not in kwargs
        ):
            raise ProcessError(
                '{} appears to have crashed, you can inspect the log file at {}'.format(
                    self.get_name(),
                    self.logfile,
                )
            )
        # Re-raise the connection error with the original traceback.
        # NOTE(review): `six.reraise` itself raises, so the outer `raise`
        # never actually runs — confirm before relying on it.
        raise six.reraise(RequestsConnectionError, RequestsConnectionError(*e.args), sys.exc_info()[2])
python
{ "resource": "" }
q40144
_updateB
train
def _updateB(oldB, B, W, degrees, damping, inds, backinds):  # pragma: no cover
    '''belief update function.

    Updates the belief matrix `B` in place from the previous beliefs
    `oldB`, edge weights `W`, and per-node degree constraints, applying
    damped updates (damping in [0, 1]).
    '''
    for j, d in enumerate(degrees):
        kk = inds[j]
        bk = backinds[j]
        if d == 0:
            # zero-degree node: belief forced to -inf
            B[kk, bk] = -np.inf
            continue
        belief = W[kk, bk] + W[j]
        oldBj = oldB[j]
        # Find the d-th largest old belief (and the next one when it
        # exists) via quickselect on the negated values.
        if d == oldBj.shape[0]:
            bth = quickselect(-oldBj, d - 1)
            bplus = -1
        else:
            bth, bplus = quickselect(-oldBj, d - 1, d)
        belief -= np.where(oldBj >= oldBj[bth], oldBj[bplus], oldBj[bth])
        # Damped update mixes the new belief with the previous one.
        B[kk, bk] = damping * belief + (1 - damping) * oldB[kk, bk]
python
{ "resource": "" }
q40145
make_constants
train
def make_constants(builtin_only=False, stoplist=[], verbose=False):
    """ Return a decorator for optimizing global references.

    Guards against being used as a bare decorator: if the first
    positional argument is itself a function, the caller forgot the
    parentheses, so raise.
    """
    if type(builtin_only) == type(make_constants):
        raise ValueError("The make_constants decorator must have arguments.")
    def decorator(f):
        return _make_constants(f, builtin_only, stoplist, verbose)
    return decorator
python
{ "resource": "" }
q40146
pause
train
def pause():
    '''Pause playback. Calls PlaybackController.pause()

    Also prints the position (formatted via `formatTimeposition`) at
    which playback was paused.
    '''
    server = getServer()
    server.core.playback.pause()
    pos = server.core.playback.get_time_position()
    print('Paused at {}'.format(formatTimeposition(pos)))
python
{ "resource": "" }
q40147
play_backend_uri
train
def play_backend_uri(argv=None):
    '''Get album or track from backend uri and play all tracks found.

    uri is a string which represents some directory belonging to a backend.
    Calls LibraryController.browse(uri) to get an album and
    LibraryController.lookup(uri) to get track
    '''
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description='Browse directories and tracks at the given uri and play them/it.')
    parser.add_argument('uri', help='The key that represents some directory belonging to a backend. E.g. plex:album:2323 or spotify:album:xxxx')
    parser.add_argument('-l', '--loglevel', help='Logging level. Default: %(default)s.', choices=('debug', 'info', 'warning', 'error'), default='warning')
    args = parse_args_and_apply_logging_level(parser, argv)
    server = getServer()
    # browse(): Returns a list of mopidy.models.Ref objects for the
    # directories and tracks at the given uri.
    hits = server.core.library.browse(args.uri)
    logging.info('Got hits from browse(): %r', hits)
    if len(hits) == 0:
        # try track lookup
        hits = server.core.library.lookup(args.uri)
        logging.info('Got hits from lookup() : %r', hits)
    if len(hits) == 0:
        print('No hits for "{}"'.format(args.uri))
    else:
        # Replace the current tracklist with the hits and start playback.
        server.core.tracklist.clear()
        logging.debug('got special uris: %r', [t['uri'] for t in hits])
        server.core.tracklist.add(uris=[t['uri'] for t in hits])
        server.core.playback.play()
python
{ "resource": "" }
q40148
peekable.peek
train
def peek(self, default=None):
    '''Return the next item without consuming it.

    Returns `default` if there is no subsequent item.
    '''
    try:
        # Bug fix: `self.pointer.next()` is Python-2-only; the builtin
        # `next()` works on both Python 2.6+ and Python 3.
        result = next(self.pointer)
    except StopIteration:
        # nothing to put back; iterating doesn't change anything past the end
        return default
    # immediately push it back onto the front of the iterable
    self.pointer = itertools.chain([result], self.pointer)
    return result
python
{ "resource": "" }
q40149
Payment.token
train
def token(self):
    """
    Token given by Transbank for payment initialization url.

    Lazily fetched on first access via `fetch_token` and cached on the
    instance; a successful fetch is logged via `logger.payment`.

    Will raise PaymentError when an error ocurred.
    """
    if not self._token:
        self._token = self.fetch_token()
        logger.payment(self)
    return self._token
python
{ "resource": "" }
q40150
Payment.transaction_id
train
def transaction_id(self):
    """
    Transaction ID for Transbank.

    Lazily generated and cached: a random int in [0, 9999999999]
    (the original docstring's "0 and 999999999" understated the range).

    NOTE(review): generated with `random.randint`, which is NOT
    cryptographically secure; use the `secrets` module if
    unpredictability matters.
    """
    if not self._transaction_id:
        self._transaction_id = random.randint(0, 10000000000 - 1)
    return self._transaction_id
python
{ "resource": "" }
q40151
PhenomDWaveforms._broadcast_and_set_attrs
train
def _broadcast_and_set_attrs(self, local_dict):
    """Cast all inputs to correct dimensions.

    This method fixes inputs who have different lengths. Namely one
    input as an array and others that are scalara or of len-1.

    Raises:
        Value Error: Multiple length arrays of len>1
    """
    del local_dict['self']
    self.remove_axis = False
    # Find the longest array-like input; scalars raise TypeError on len().
    max_length = 0
    for key in local_dict:
        try:
            length = len(local_dict[key])
            if length > max_length:
                max_length = length
        except TypeError:
            pass
    if max_length == 0:
        # All inputs are scalars: promote to len-1 arrays and remember
        # to squeeze the extra axis out of the outputs later.
        self.remove_axis = True
        for key in local_dict:
            setattr(self, key, np.array([local_dict[key]]))
    # check for bad length arrays
    else:
        for key in local_dict:
            try:
                if len(local_dict[key]) < max_length and len(local_dict[key]) > 1:
                    raise ValueError("Casting parameters not correct."
                                     + " Need all at a maximum shape and the rest being"
                                     + "len-1 arrays or scalars")
            except TypeError:
                pass
        # broadcast arrays
        for key in local_dict:
            try:
                if len(local_dict[key]) == max_length:
                    setattr(self, key, local_dict[key])
                elif len(local_dict[key]) == 1:
                    setattr(self, key, np.full((max_length,), local_dict[key][0]))
            except TypeError:
                # scalar: tile to the maximum length
                setattr(self, key, np.full((max_length,), local_dict[key]))
    return
python
{ "resource": "" }
q40152
PhenomDWaveforms._create_waveforms
train
def _create_waveforms(self):
    """Create frequency domain waveforms.

    Method to create waveforms for PhenomDWaveforms class.
    It adds waveform information in the form of attributes
    (`freqs`, `hc`, `fmrg`, `fpeak`) by calling the compiled
    `Amplitude` routine in the shared library at `self.exec_call`.
    """
    c_obj = ctypes.CDLL(self.exec_call)
    # prepare ctypes output arrays (length x num_points for the spectra,
    # length for the merger/peak frequencies)
    freq_amp_cast = ctypes.c_double*self.num_points*self.length
    freqs = freq_amp_cast()
    hc = freq_amp_cast()
    fmrg_fpeak_cast = ctypes.c_double*self.length
    fmrg = fmrg_fpeak_cast()
    fpeak = fmrg_fpeak_cast()
    # Find hc: inputs are passed as raw double pointers into the
    # (already broadcast) numpy attribute arrays.
    c_obj.Amplitude(ctypes.byref(freqs), ctypes.byref(hc),
                    ctypes.byref(fmrg), ctypes.byref(fpeak),
                    self.m1.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                    self.m2.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                    self.chi_1.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                    self.chi_2.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                    self.dist.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                    self.z.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                    self.st.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                    self.et.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                    ctypes.c_int(self.length), ctypes.c_int(self.num_points))
    # turn output into numpy arrays
    self.freqs, self.hc = np.ctypeslib.as_array(freqs), np.ctypeslib.as_array(hc)
    self.fmrg, self.fpeak = np.ctypeslib.as_array(fmrg), np.ctypeslib.as_array(fpeak)
    # remove an axis if inputs were scalar.
    if self.remove_axis:
        self.freqs, self.hc, = np.squeeze(self.freqs), np.squeeze(self.hc)
        self.fmrg, self.fpeak = self.fmrg[0], self.fpeak[0]
    return
python
{ "resource": "" }
q40153
EccentricBinaries._convert_units
train
def _convert_units(self):
    """Convert units to geometrized units.

    Change to G=c=1 (geometrized) units for ease in calculations.
    Masses (solar masses) and times (years) become lengths.
    """
    self.m1 = self.m1*M_sun*ct.G/ct.c**2
    self.m2 = self.m2*M_sun*ct.G/ct.c**2
    # The initial condition is converted according to how it was given:
    # a time (years), a frequency (Hz), or a separation (parsec).
    initial_cond_type_conversion = {
        'time': ct.c*ct.Julian_year,
        'frequency': 1./ct.c,
        'separation': ct.parsec,
    }
    self.initial_point = self.initial_point*initial_cond_type_conversion[self.initial_cond_type]
    self.t_obs = self.t_obs*ct.c*ct.Julian_year
    return
python
{ "resource": "" }
q40154
EccentricBinaries._t_of_e
train
def _t_of_e(self, a0=None, t_start=None, f0=None, ef=None, t_obs=5.0):
    """Rearranged versions of Peters equations

    This function calculates the semi-major axis and eccentricity
    over time, integrating the Peters (1964) e-evolution from a final
    eccentricity `ef` back up to the initial `e0`.

    Returns:
        (e_vals, a_vals, delta_t): eccentricity grid, semi-major axis
        along the grid, and elapsed time along the grid.
    """
    if ef is None:
        # cannot integrate all the way to e=0; use a tiny endpoint
        ef = np.ones_like(self.e0)*0.0000001
    beta = 64.0/5.0*self.m1*self.m2*(self.m1+self.m2)
    e_vals = np.asarray([np.linspace(ef[i], self.e0[i], self.num_points)
                         for i in range(len(self.e0))])
    integrand = self._find_integrand(e_vals)
    # Cumulative integral from each grid point up to e0.
    integral = np.asarray([np.trapz(integrand[:, i:], x=e_vals[:, i:])
                           for i in range(e_vals.shape[1])]).T
    if a0 is None and f0 is None:
        # infer the initial semi-major axis from the evolution time
        a0 = (19./12.*t_start*beta*1/integral[:, 0])**(1./4.) * self._f_e(e_vals[:, -1])
    elif a0 is None:
        # infer it from the initial orbital frequency (Kepler)
        a0 = ((self.m1 + self.m2)/self.f0**2)**(1./3.)
    c0 = self._c0_func(a0, self.e0)
    a_vals = c0[:, np.newaxis]*self._f_e(e_vals)
    delta_t = 12./19*c0[:, np.newaxis]**4/beta[:, np.newaxis]*integral
    return e_vals, a_vals, delta_t
python
{ "resource": "" }
q40155
EccentricBinaries._chirp_mass
train
def _chirp_mass(self): """Chirp mass calculation """ return (self.m1*self.m2)**(3./5.)/(self.m1+self.m2)**(1./5.)
python
{ "resource": "" }
q40156
EccentricBinaries._g_func
train
def _g_func(self):
    """Eq. 20 in Peters and Mathews 1963.

    Relative GW power radiated into the n-th harmonic for an
    eccentric binary, built from Bessel functions of the first kind
    `jv` evaluated at n*e.
    """
    return (self.n**4./32. *
            ((jv(self.n-2., self.n*self.e_vals)
              - 2. * self.e_vals*jv(self.n-1., self.n*self.e_vals)
              + 2./self.n * jv(self.n, self.n*self.e_vals)
              + 2.*self.e_vals*jv(self.n+1., self.n*self.e_vals)
              - jv(self.n+2., self.n*self.e_vals))**2.
             + (1.-self.e_vals**2.) *
             (jv(self.n-2., self.n*self.e_vals)
              - 2.*jv(self.n, self.n*self.e_vals)
              + jv(self.n+2., self.n*self.e_vals))**2.
             + 4./(3.*self.n**2.)*(jv(self.n, self.n*self.e_vals))**2.))
python
{ "resource": "" }
q40157
EccentricBinaries._hcn_func
train
def _hcn_func(self):
    """Eq. 56 from Barack and Cutler 2004

    Characteristic strain per harmonic; the result is stored on
    `self.hc` rather than returned.
    """
    self.hc = 1./(np.pi*self.dist)*np.sqrt(2.*self._dEndfr())
    return
python
{ "resource": "" }
q40158
_create_oracle
train
def _create_oracle(oracle_type, **kwargs):
    """A routine for creating a factor oracle.

    'f' builds a plain factor oracle (FO); every other value —
    including the explicit 'a' — builds an MO.
    """
    return FO(**kwargs) if oracle_type == 'f' else MO(**kwargs)
python
{ "resource": "" }
q40159
FactorOracle.segment
train
def segment(self):
    """A non-overlap version of Compror (compression via factor oracle).

    Greedily partitions the symbol sequence into (copy-length, position)
    pairs using the oracle's suffix links `sfx` and longest-repeated
    suffix lengths `lrs`; appends to and returns `self.seg`.
    # NOTE(review): reconstructed from a whitespace-mangled source;
    # the nesting of the inner branches should be verified against the
    # upstream vmo implementation.
    """
    if not self.seg:
        j = 0
    else:
        j = self.seg[-1][1]
        last_len = self.seg[-1][0]
        # already segmented past the end: nothing to do
        if last_len + j > self.n_states:
            return
    i = j
    while j < self.n_states - 1:
        # extend i while the repeated suffix keeps covering [j..i]
        while not (not (i < self.n_states - 1) or not (self.lrs[i + 1] >= i - j + 1)):
            i += 1
        if i == j:
            # no repetition: emit a literal symbol
            i += 1
            self.seg.append((0, i))
        else:
            if (self.sfx[i] + self.lrs[i]) <= i:
                self.seg.append((i - j, self.sfx[i] - i + j + 1))
            else:
                # overlapping copy: split at the non-overlapping prefix
                _i = j + i - self.sfx[i]
                self.seg.append((_i - j, self.sfx[i] - i + j + 1))
                _j = _i
                while not (not (_i < i) or not (self.lrs[_i + 1] - self.lrs[_j] >= _i - _j + 1)):
                    _i += 1
                if _i == _j:
                    _i += 1
                    self.seg.append((0, _i))
                else:
                    self.seg.append((_i - _j, self.sfx[_i] - _i + _j + 1))
        j = i
    return self.seg
python
{ "resource": "" }
q40160
Cycle.set_aromatic
train
def set_aromatic(self):
    """set the cycle to be an aromatic ring"""
    # XXX FIX ME - this probably shouldn't be here
    for member in self.atoms:
        member.aromatic = 1
    for ring_bond in self.bonds:
        ring_bond.aromatic = 1
        ring_bond.bondorder = 1.5
        ring_bond.bondtype = 4
        ring_bond.symbol = ":"
        ring_bond.fixed = 1
    self.aromatic = 1
python
{ "resource": "" }
q40161
set_debug
train
def set_debug(enabled: bool):
    """Enable or disable debug logs for the entire package.

    Parameters
    ----------
    enabled: bool
        Whether debug should be enabled or not.
    """
    global _DEBUG_ENABLED
    if enabled:
        # Flip the flag first so the confirmation message is emitted.
        _DEBUG_ENABLED = True
        log('Enabling debug output...', logger_name=_LOGGER_NAME)
    else:
        # Log while debugging is still on, then silence it.
        log('Disabling debug output...', logger_name=_LOGGER_NAME)
        _DEBUG_ENABLED = False
python
{ "resource": "" }
q40162
log
train
def log(message: str, *args: str, category: str='info', logger_name: str='pgevents'):
    """Log a message to the given logger.

    If debug has not been enabled, this method will not log a message
    (the effective level is raised above CRITICAL).

    Parameters
    ----------
    message: str
        Message, with or without formatters, to print.
    args: Any
        Arguments to use with the message; must match the message's
        anonymous ("%s") or named ("%(key)s") formatters.
    category: str
        Logger method to use ('info', 'warning', ...).
    logger_name: str
        Name of logger to which the message should be logged.
    """
    global _DEBUG_ENABLED
    if _DEBUG_ENABLED:
        level = logging.INFO
    else:
        # effectively silences all output
        level = logging.CRITICAL + 1
    with _create_logger(logger_name, level) as logger:
        log_fn = getattr(logger, category, None)
        if log_fn is None:
            raise ValueError('Invalid log category "{}"'.format(category))
        log_fn(message, *args)
python
{ "resource": "" }
q40163
toposort
train
def toposort(initialAtoms, initialBonds):
    """initialAtoms, initialBonds -> atoms, bonds

    Given the list of atoms and bonds in a ring return the topologically
    sorted atoms and bonds.  That is each atom is connected to the
    following atom and each bond is connected to the following bond in
    the following manner  a1 - b1 - a2 - b2 - ...

    # NOTE(review): uses Python-2-only `dict.has_key`.
    """
    atoms = []
    a_append = atoms.append
    bonds = []
    b_append = bonds.append
    # for the atom and bond hashes
    # we ignore the first atom since we
    # would have deleted it from the hash anyway
    ahash = {}
    bhash = {}
    for atom in initialAtoms[1:]:
        ahash[atom.handle] = 1
    for bond in initialBonds:
        bhash[bond.handle] = bond
    next = initialAtoms[0]
    a_append(next)
    # do until all the atoms are gone
    while ahash:
        # traverse to all the connected atoms
        for atom in next.oatoms:
            # both the bond and the atom have to be
            # in our list of atoms and bonds to use
            if ahash.has_key(atom.handle):
                bond = next.findbond(atom)
                assert bond
                # but wait! the bond has to be in our
                # list of bonds we can use!
                if bhash.has_key(bond.handle):
                    a_append(atom)
                    b_append(bond)
                    del ahash[atom.handle]
                    next = atom
                    break
        else:
            # no usable neighbour found: the input was not a ring
            raise RingException("Atoms are not in ring")
    assert len(initialAtoms) == len(atoms)
    assert len(bonds) == len(atoms) - 1
    # close the cycle with the bond between the last and first atoms
    lastBond = atoms[0].findbond(atoms[-1])
    assert lastBond
    b_append(lastBond)
    return atoms, bonds
python
{ "resource": "" }
q40164
checkEdges
train
def checkEdges(ringSet, lookup, oatoms):
    """atoms, lookup -> ring

    atoms must be in the order of traversal around a ring!
    break an optimal non N2 node and return the largest ring found

    # NOTE(review): Python-2-only constructs (`map(None, ...)` zip idiom).
    # The function mutates `oatoms` by permanently removing one bond at
    # the end; it does not return the ring it found — confirm whether a
    # return was lost upstream.
    """
    # consecutive atom pairs around the ring, plus the closing pair
    bondedAtoms = map( None, ringSet[:-1], ringSet[1:] )
    bondedAtoms += [ (ringSet[-1], ringSet[0]) ]
    # form a lookup for the ringSet list
    atomSet = {}
    for atomID in ringSet:
        atomSet[atomID] = 1
    results = []
    # for each bond in the ring, break it and find the smallest
    # rings starting on either side of the bond
    # keep the largest but rememeber to add the bond back at the
    # end
    for atom1, atom2 in bondedAtoms:
        # break a single edge in the ring
        handle1 = atom1.handle
        handle2 = atom2.handle
        oatoms1 = oatoms[handle1]
        oatoms2 = oatoms[handle2]
        index1 = oatoms1.index(atom2)
        index2 = oatoms2.index(atom1)
        # break the bond
        del oatoms1[index1]
        del oatoms2[index2]
        ring1 = getRing(atom1, atomSet, lookup, oatoms)
        ring2 = getRing(atom2, atomSet, lookup, oatoms)
        # keep the larger of the two rings
        if len(ring1) > len(ring2):
            results.append((len(ring1), handle1, handle2, ring1))
        else:
            results.append((len(ring2), handle2, handle1, ring2))
        # retie the bond
        oatoms1.insert(index1, atom2)
        oatoms2.insert(index2, atom1)
    if not results:
        return None
    # find the smallest ring
    size, incidentHandle, adjacentHandle, smallestRing = min(results)
    # dereference the handles
    incident, adjacent = lookup[incidentHandle], lookup[adjacentHandle]
    # break the bond between the incident and adjacent atoms
    oatomsI = oatoms[incidentHandle]
    oatomsA = oatoms[adjacentHandle]
    assert incident in oatomsA
    assert adjacent in oatomsI
    oatomsI.remove(adjacent)
    oatomsA.remove(incident)
python
{ "resource": "" }
q40165
diffPrefsPrior
train
def diffPrefsPrior(priorstring):
    """Parses `priorstring` and returns `prior` tuple.

    Args:
        `priorstring` (str): e.g. "invquadratic,150,0.5".

    Returns:
        The tuple ('invquadratic', C1, C2) with C1, C2 as floats.

    Raises:
        ValueError: for any string not of that form, or with
            non-positive C1/C2.
    """
    # Bug fix: input validation previously used `assert`, which is
    # stripped under `python -O` and raises AssertionError instead of
    # the ValueError that argparse `type=` callables are expected to
    # raise; the old message also said "> 1" while the check was "> 0".
    if not isinstance(priorstring, str):
        raise ValueError("diffprefsprior must be a string")
    prior = priorstring.split(',')
    if len(prior) == 3 and prior[0] == 'invquadratic':
        (c1, c2) = (float(prior[1]), float(prior[2]))
        if not (c1 > 0 and c2 > 0):
            raise ValueError("C1 and C2 must be > 0 for invquadratic prior")
        return ('invquadratic', c1, c2)
    raise ValueError("Invalid diffprefsprior: {0}".format(priorstring))
python
{ "resource": "" }
q40166
ExistingFileOrNone
train
def ExistingFileOrNone(fname):
    """Like `Existingfile`, but returns `None` when `fname` is the string "None".

    A real file named "None" still wins: the existence check runs first.
    """
    if os.path.isfile(fname):
        return fname
    if fname.lower() == 'none':
        return None
    raise ValueError("%s must specify a valid file name or 'None'" % fname)
python
{ "resource": "" }
q40167
PhyDMSLogoPlotParser
train
def PhyDMSLogoPlotParser():
    """Returns `argparse.ArgumentParser` for ``phydms_logoplot``.

    Requires exactly one of `--prefs` / `--diffprefs`; the remaining
    options control logo layout, coloring, and the optional
    omega-by-site overlay.
    """
    parser = ArgumentParserNoArgHelp(description=
            "Make logo plot of preferences or differential preferences. "
            "Uses weblogo (http://weblogo.threeplusone.com/). "
            "{0} Version {1}. Full documentation at {2}".format(
            phydmslib.__acknowledgments__, phydmslib.__version__,
            phydmslib.__url__),
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # the data source is mandatory and mutually exclusive
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--prefs', type=ExistingFile, help="File with "
            "amino-acid preferences; same format as input to 'phydms'.")
    group.add_argument('--diffprefs', type=ExistingFile, help="File with "
            "differential preferences; in format output by 'phydms'.")
    parser.add_argument('outfile', help='Name of created PDF logo plot.')
    parser.add_argument('--stringency', type=FloatGreaterThanEqualToZero,
            default=1, help="Stringency parameter to re-scale prefs.")
    parser.add_argument('--nperline', type=IntGreaterThanZero, default=70,
            help="Number of sites per line.")
    parser.add_argument('--numberevery', type=IntGreaterThanZero,
            default=10, help="Number sites at this interval.")
    parser.add_argument('--mapmetric', default='functionalgroup',
            choices=['kd', 'mw', 'charge', 'functionalgroup'],
            help='Metric used to color '
            'amino-acid letters. kd = Kyte-Doolittle hydrophobicity; '
            'mw = molecular weight; functionalgroup = divide in 7 '
            'groups; charge = charge at neutral pH.')
    parser.add_argument('--colormap', type=str, default='jet',
            help="Name of `matplotlib` color map for amino acids "
            "when `--mapmetric` is 'kd' or 'mw'.")
    parser.add_argument('--diffprefheight', type=FloatGreaterThanZero,
            default=1.0, help="Height of diffpref logo in each direction.")
    parser.add_argument('--omegabysite', help="Overlay omega on "
            "logo plot. "
            "Specify '*_omegabysite.txt' file from 'phydms'.",
            type=ExistingFileOrNone)
    parser.add_argument('--minP', type=FloatGreaterThanZero, default=1e-4,
            help="Min plotted P-value for '--omegabysite' overlay.")
    parser.add_argument('-v', '--version', action='version',
            version='%(prog)s {version}'.format(version=phydmslib.__version__))
    return parser
python
{ "resource": "" }
q40168
ArgumentParserNoArgHelp.error
train
def error(self, message):
    """Prints error message, then help.

    Overrides `argparse.ArgumentParser.error` so that a usage mistake
    shows the full help text before exiting with status 2.
    """
    sys.stderr.write('error: %s\n\n' % message)
    self.print_help()
    sys.exit(2)
python
{ "resource": "" }
q40169
Graph.add_edges
train
def add_edges(self, from_idx, to_idx, weight=1, symmetric=False, copy=False):
    '''Adds all from->to edges. weight may be a scalar or 1d array.
    If symmetric=True, also adds to->from edges with the same weights.'''
    # Abstract method: concrete Graph subclasses must implement this.
    raise NotImplementedError()
python
{ "resource": "" }
q40170
Graph.add_self_edges
train
def add_self_edges(self, weight=None, copy=False):
    '''Adds all i->i edges. weight may be a scalar or 1d array.'''
    diag = np.arange(self.num_vertices())
    return self.add_edges(diag, diag, weight=weight, symmetric=False, copy=copy)
python
{ "resource": "" }
q40171
Graph.reweight
train
def reweight(self, weight, edges=None, copy=False):
    '''Replaces existing edge weights. weight may be a scalar or 1d array.
    edges is a mask or index array that specifies a subset of edges to
    modify'''
    # No-op (with a warning) on unweighted graphs.
    if not self.is_weighted():
        warnings.warn('Cannot supply weights for unweighted graph; '
                      'ignoring call to reweight')
        return self
    if edges is None:
        # fast path: replace all weights at once
        return self._update_edges(weight, copy=copy)
    # subset path: re-add the selected edges with the new weights
    ii, jj = self.pairs()[edges].T
    return self.add_edges(ii, jj, weight=weight, symmetric=False, copy=copy)
python
{ "resource": "" }
q40172
Graph.to_igraph
train
def to_igraph(self, weighted=None): '''Converts this Graph object to an igraph-compatible object. Requires the python-igraph library.''' # Import here to avoid ImportErrors when igraph isn't available. import igraph ig = igraph.Graph(n=self.num_vertices(), edges=self.pairs().tolist(), directed=self.is_directed()) if weighted is not False and self.is_weighted(): ig.es['weight'] = self.edge_weights() return ig
python
{ "resource": "" }
q40173
Graph.to_graph_tool
train
def to_graph_tool(self): '''Converts this Graph object to a graph_tool-compatible object. Requires the graph_tool library. Note that the internal ordering of graph_tool seems to be column-major.''' # Import here to avoid ImportErrors when graph_tool isn't available. import graph_tool gt = graph_tool.Graph(directed=self.is_directed()) gt.add_edge_list(self.pairs()) if self.is_weighted(): weights = gt.new_edge_property('double') for e,w in zip(gt.edges(), self.edge_weights()): weights[e] = w gt.edge_properties['weight'] = weights return gt
python
{ "resource": "" }
q40174
Graph.to_networkx
train
def to_networkx(self, directed=None):
    '''Converts this Graph object to a networkx-compatible object.
    Requires the networkx library.'''
    # Import here to avoid ImportErrors when networkx isn't available.
    import networkx as nx
    # default to this graph's own directedness
    directed = directed if directed is not None else self.is_directed()
    cls = nx.DiGraph if directed else nx.Graph
    adj = self.matrix()
    if ss.issparse(adj):
        return nx.from_scipy_sparse_matrix(adj, create_using=cls())
    return nx.from_numpy_matrix(adj, create_using=cls())
python
{ "resource": "" }
q40175
_check_inputs
train
def _check_inputs(z, m): """Check inputs are arrays of same length or array and a scalar.""" try: nz = len(z) z = np.array(z) except TypeError: z = np.array([z]) nz = len(z) try: nm = len(m) m = np.array(m) except TypeError: m = np.array([m]) nm = len(m) if (z < 0).any() or (m < 0).any(): raise ValueError('z and m must be positive') if nz != nm and nz > 1 and nm > 1: raise ValueError('z and m arrays must be either equal in length, \ OR of different length with one of length 1.') else: if type(z) != np.ndarray: z = np.array(z) if type(m) != np.ndarray: m = np.array(m) return z, m
python
{ "resource": "" }
q40176
Label._set_label
train
def _set_label(self, which, label, **kwargs): """Private method for setting labels. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `xlabel`/`ylabel`, and `title`. label (str): The label to be added. fontsize (int, optional): Fontsize for associated label. Default is None. """ prop_default = { 'fontsize': 18, } for prop, default in prop_default.items(): kwargs[prop] = kwargs.get(prop, default) setattr(self.label, which, label) setattr(self.label, which + '_kwargs', kwargs) return
python
{ "resource": "" }
q40177
Limits._set_axis_limits
train
def _set_axis_limits(self, which, lims, d, scale, reverse=False): """Private method for setting axis limits. Sets the axis limits on each axis for an individual plot. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `x` and `y`. lims (len-2 list of floats): The limits for the axis. d (float): Amount to increment by between the limits. scale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False. """ setattr(self.limits, which + 'lims', lims) setattr(self.limits, 'd' + which, d) setattr(self.limits, which + 'scale', scale) if reverse: setattr(self.limits, 'reverse_' + which + '_axis', True) return
python
{ "resource": "" }
q40178
Limits.set_xlim
train
def set_xlim(self, xlims, dx, xscale, reverse=False):
    """Set x limits for plot.

    This will set the limits for the x axis for the specific plot.

    Args:
        xlims (len-2 list of floats): The limits for the axis.
        dx (float): Amount to increment by between the limits.
        xscale (str): Scale of the axis. Either `log` or `lin`.
        reverse (bool, optional): If True, reverse the axis tick marks.
            Default is False.
    """
    # Thin wrapper around the shared per-axis setter.
    self._set_axis_limits('x', xlims, dx, xscale, reverse)
    return
python
{ "resource": "" }
q40179
Limits.set_ylim
train
def set_ylim(self, xlims, dx, xscale, reverse=False):
    """Set y limits for plot.

    This will set the limits for the y axis for the specific plot.

    NOTE(review): the parameters are named ``xlims``/``dx``/``xscale`` but
    are applied to the *y* axis (likely copy-pasted from ``set_xlim``).
    Renaming them would break keyword callers, so they are documented as-is.

    Args:
        xlims (len-2 list of floats): The limits for the y axis.
        dx (float): Amount to increment by between the limits.
        xscale (str): Scale of the y axis. Either `log` or `lin`.
        reverse (bool, optional): If True, reverse the axis tick marks.
            Default is False.
    """
    self._set_axis_limits('y', xlims, dx, xscale, reverse)
    return
python
{ "resource": "" }
q40180
Legend.add_legend
train
def add_legend(self, labels=None, **kwargs):
    """Specify legend for a plot.

    Adds labels and basic legend specifications for a specific plot.
    For the optional kwargs, refer to
    https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html

    Args:
        labels (list of str): One string per plotted item to show in
            the legend.

    Keyword Arguments:
        size (float, optional): Shorthand folded into the ``prop`` dict
            (``prop['size']``) of the eventual legend call.
        All other kwargs are stored and forwarded to
        ``matplotlib.pyplot.legend``.
    """
    # 'size' is a convenience alias for prop={'size': ...}; fold it in
    # and remove the alias so only real legend kwargs remain.
    _missing = object()
    size = kwargs.pop('size', _missing)
    if size is not _missing:
        kwargs.setdefault('prop', {})['size'] = size
    leg = self.legend
    leg.add_legend = True
    leg.legend_labels = labels
    leg.legend_kwargs = kwargs
    return
python
{ "resource": "" }
q40181
DataImport.add_dataset
train
def add_dataset(self, name=None, label=None, x_column_label=None, y_column_label=None, index=None, control=False):
    """Add a dataset to a specific plot.

    Handles both file-backed datasets (via ``name``/``label``) and
    references to data preloaded on another plot (via ``index``).

    Args:
        name (str, optional): Name (path) for file. Required if reading
            from a file. Must be ".txt" or ".hdf5". Can include path from
            the working directory.
        label (str, optional): Column label in the dataset corresponding
            to the desired SNR value.
        x_column_label/y_column_label (str, optional): Column labels for
            the x/y values; override the "general" settings.
        index (int, optional): Index of a plot with preloaded data.
            Required if not loading a file.
        control (bool, optional): If True, this dataset is set as the
            control (baseline for Ratio plots). Default is False.

    Raises:
        ValueError: If neither file information nor an index is supplied.
    """
    if name is None and label is None and index is None:
        # Fixed message: original concatenation rendered as
        # "...withoutsupplying..." (missing space between the two literals).
        raise ValueError("Attempting to add a dataset without "
                         "supplying index or file information.")
    if index is None:
        # Build a fresh container describing the file-backed dataset.
        trans_dict = DataImportContainer()
        if name is not None:
            trans_dict.file_name = name
        if label is not None:
            trans_dict.label = label
        if x_column_label is not None:
            trans_dict.x_column_label = x_column_label
        if y_column_label is not None:
            trans_dict.y_column_label = y_column_label
        if control:
            self.control = trans_dict
        else:
            # Lazily create the file list on first use.
            if 'file' not in self.__dict__:
                self.file = []
            self.file.append(trans_dict)
    else:
        if control:
            self.control = DataImportContainer()
            self.control.index = index
        else:
            # Lazily create the index list on first use.
            if 'indices' not in self.__dict__:
                self.indices = []
            self.indices.append(index)
    return
python
{ "resource": "" }
q40182
Figure.savefig
train
def savefig(self, output_path, **kwargs):
    """Flag the figure for saving during generation.

    Records the output path and kwargs for a later call to
    ``matplotlib.pyplot.fig.savefig`` in the main plotting routine.

    Args:
        output_path (str): Path, relative to the working directory,
            where the figure will be written.

    Keyword Arguments:
        Forwarded verbatim to ``fig.savefig``; see
        https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html
    """
    fig_opts = self.figure
    fig_opts.save_figure = True
    fig_opts.output_path = output_path
    fig_opts.savefig_kwargs = kwargs
    return
python
{ "resource": "" }
q40183
Figure.set_fig_size
train
def set_fig_size(self, width, height=None):
    """Record the figure size in inches.

    Stored values are later passed to ``fig.set_size_inches``.

    Args:
        width (float): Figure width in inches.
        height (float, optional): Figure height in inches.
            Default is None.
    """
    target = self.figure
    target.figure_width = width
    target.figure_height = height
    return
python
{ "resource": "" }
q40184
Figure.set_spacing
train
def set_spacing(self, space):
    """Record subplot spacing style for the figure.

    `wide` leaves gaps between subplots (hspace/wspace 0.3); `tight`
    removes them (0.0), which clips edge tick labels for clarity.

    Args:
        space (str): Either `wide` or `tight`.
    """
    fig_opts = self.figure
    fig_opts.spacing = space
    # Create the adjust-kwargs dict on first use, preserving any
    # previously stored entries.
    if 'subplots_adjust_kwargs' not in fig_opts.__dict__:
        fig_opts.subplots_adjust_kwargs = {}
    gap = 0.3 if space == 'wide' else 0.0
    fig_opts.subplots_adjust_kwargs['hspace'] = gap
    fig_opts.subplots_adjust_kwargs['wspace'] = gap
    return
python
{ "resource": "" }
q40185
Figure.subplots_adjust
train
def subplots_adjust(self, **kwargs):
    """Record subplot spacing/dimension adjustments.

    Values are later forwarded to ``plt.subplots_adjust``. See
    https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots_adjust.html

    Keyword Arguments:
        bottom/top/left/right (float, optional): Subplot edge positions
            in figure coordinates. Defaults: 0.1 / 0.85 / 0.12 / 0.9.
        wspace/hspace (float, optional): Width/height reserved between
            subplots as a fraction of the average axis size. Default 0.3.
    """
    merged = {
        'bottom': 0.1,
        'top': 0.85,
        'right': 0.9,
        'left': 0.12,
        'hspace': 0.3,
        'wspace': 0.3,
    }
    # Values stored by earlier calls (e.g. set_spacing) override the
    # hard-coded defaults; explicit kwargs override everything.
    merged.update(self.figure.__dict__.get('subplots_adjust_kwargs', {}))
    merged.update(kwargs)
    self.figure.subplots_adjust_kwargs = merged
    return
python
{ "resource": "" }
q40186
Figure.set_fig_x_label
train
def set_fig_x_label(self, xlabel, **kwargs):
    """Set the overall figure x label.

    Places a label for the whole figure (not a single plot) at the left
    edge via ``fig.text``.

    Args:
        xlabel (str): Label text for the entire figure.

    Keyword Arguments:
        x/y (float, optional): Text location in figure coordinates.
            Defaults are 0.01 and 0.51.
        fontsize/size (int, optional): Font size. Default is 20.
        rotation (float or str, optional): Default is `vertical`.
        va (str, optional): Vertical alignment. Default is `center`.
        Other kwargs are forwarded; see
        https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext
    """
    defaults = {
        'x': 0.01,
        'y': 0.51,
        'fontsize': 20,
        'rotation': 'vertical',
        'va': 'center',
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    self._set_fig_label('x', xlabel, **kwargs)
    return
python
{ "resource": "" }
q40187
Figure.set_fig_y_label
train
def set_fig_y_label(self, ylabel, **kwargs):
    """Set the overall figure y label.

    Places a label for the whole figure (not a single plot) near the
    bottom via ``fig.text``.

    Args:
        ylabel (str): Label text for the entire figure.

    Keyword Arguments:
        x/y (float, optional): Text location in figure coordinates.
            Defaults are 0.45 and 0.02.
        fontsize/size (int, optional): Font size. Default is 20.
        rotation (float or str, optional): Default is `horizontal`.
        ha (str, optional): Horizontal alignment. Default is `center`.
        Other kwargs are forwarded; see
        https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext
    """
    defaults = {
        'x': 0.45,
        'y': 0.02,
        'fontsize': 20,
        'rotation': 'horizontal',
        'ha': 'center',
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    self._set_fig_label('y', ylabel, **kwargs)
    return
python
{ "resource": "" }
q40188
Figure.set_fig_title
train
def set_fig_title(self, title, **kwargs):
    """Set the overall figure title.

    Records a title for the whole figure (placed later with
    ``fig.suptitle``).

    Args:
        title (str): Figure title.

    Keyword Arguments:
        fontsize/size (int, optional): Font size. Default is 20.
        Other kwargs are forwarded to ``fig.suptitle``.
    """
    kwargs.setdefault('fontsize', 20)
    fig_opts = self.figure
    fig_opts.fig_title = title
    fig_opts.fig_title_kwargs = kwargs
    return
python
{ "resource": "" }
q40189
Figure.set_colorbar
train
def set_colorbar(self, plot_type, **kwargs):
    """Configure the colorbar for a specific plot type.

    Users pass short option names (``label``, ``pos``, ``axes``, ...);
    each is stored under its ``cbar_``-prefixed name, falling back to a
    default when absent.

    Args:
        plot_type (str): Type of plot to adjust, e.g. `Ratio`.

    Keyword Arguments:
        label (str, optional): Colorbar label.
        ticks_fontsize (int, optional): Tick-label font size. Default 15.
        label_fontsize (int, optional): Label font size. Default 20.
        axes (len-4 list of floats, optional): Custom colorbar axes
            placement (see ``fig.add_axes``).
        pos (int, optional): Preset colorbar position.
    """
    defaults = {
        'cbar_label': None,
        'cbar_ticks_fontsize': 15,
        'cbar_label_fontsize': 20,
        'cbar_axes': [],
        'cbar_ticks': [],
        'cbar_tick_labels': [],
        'cbar_pos': 'use_default',
        'cbar_label_pad': None,
    }
    for full_key, default in defaults.items():
        # Strip the 'cbar_' prefix to get the user-facing short name,
        # then move the value under the prefixed key.
        short_key = full_key[5:]
        kwargs[full_key] = kwargs.pop(short_key, default)
    if 'colorbars' not in self.figure.__dict__:
        self.figure.colorbars = {}
    self.figure.colorbars[plot_type] = kwargs
    return
python
{ "resource": "" }
q40190
General.set_all_file_column_labels
train
def set_all_file_column_labels(self, xlabel=None, ylabel=None):
    """Set general x,y column labels for all plots.

    These are the column labels read from data files for every plot;
    individual plots may override them.

    Args:
        xlabel/ylabel (str, optional): Column label for x/y values in
            the data files. Default is None.

    Warns:
        UserWarning: If neither xlabel nor ylabel is specified. The code
            continues after warning.
    """
    if xlabel is not None:
        self.general.x_column_label = xlabel
    if ylabel is not None:
        self.general.y_column_label = ylabel
    if xlabel is None and ylabel is None:
        # Fixed message: original concatenation produced broken English
        # with a typo ("...x or y lables eventhough column labels...").
        warnings.warn('Neither x nor y labels were specified even though '
                      'the column labels function was called.', UserWarning)
    return
python
{ "resource": "" }
q40191
General._set_all_lims
train
def _set_all_lims(self, which, lim, d, scale, fontsize=None): """Set limits and ticks for an axis for whole figure. This will set axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `x` and `y`. lim (len-2 list of floats): The limits for the axis. d (float): Amount to increment by between the limits. scale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for associated axis tick marks. Default is None. """ setattr(self.general, which + 'lims', lim) setattr(self.general, 'd' + which, d) setattr(self.general, which + 'scale', scale) if fontsize is not None: setattr(self.general, which + '_tick_label_fontsize', fontsize) return
python
{ "resource": "" }
q40192
General.set_all_xlims
train
def set_all_xlims(self, xlim, dx, xscale, fontsize=None):
    """Set limits and ticks for the x axis for the whole figure.

    Can be overridden per-plot in the SinglePlot class.

    Args:
        xlim (len-2 list of floats): The limits for the axis.
        dx (float): Amount to increment by between the limits.
        xscale (str): Scale of the axis. Either `log` or `lin`.
        fontsize (int, optional): Fontsize for x axis tick marks.
            Default is None.
    """
    # Thin wrapper around the shared figure-wide axis setter.
    self._set_all_lims('x', xlim, dx, xscale, fontsize)
    return
python
{ "resource": "" }
q40193
General.set_all_ylims
train
def set_all_ylims(self, ylim, dy, yscale, fontsize=None):
    """Set limits and ticks for the y axis for the whole figure.

    Can be overridden per-plot in the SinglePlot class.

    Args:
        ylim (len-2 list of floats): The limits for the axis.
        dy (float): Amount to increment by between the limits.
        yscale (str): Scale of the axis. Either `log` or `lin`.
        fontsize (int, optional): Fontsize for y axis tick marks.
            Default is None.
    """
    # Thin wrapper around the shared figure-wide axis setter.
    self._set_all_lims('y', ylim, dy, yscale, fontsize)
    return
python
{ "resource": "" }
q40194
General.reverse_axis
train
def reverse_axis(self, axis_to_reverse):
    """Reverse an axis in all figure plots.

    Reverses the tick marks on an axis for each plot in the figure.
    Can be overridden in the SinglePlot class.

    Args:
        axis_to_reverse (str): Axis to reverse, `x` or `y`
            (case-insensitive).

    Raises:
        ValueError: If ``axis_to_reverse`` is not `x` or `y`.
    """
    axis = axis_to_reverse.lower()
    # Original bug: the guard was `!= 'x' or != 'y'`, which is true for
    # every input, so the method ALWAYS raised (after setting the flag).
    # Validate first, then set.
    if axis not in ('x', 'y'):
        raise ValueError('Axis for reversing needs to be either x or y.')
    setattr(self.general, 'reverse_' + axis + '_axis', True)
    return
python
{ "resource": "" }
q40195
MainContainer.return_dict
train
def return_dict(self):
    """Build the input dictionary for ``make_plot.py``.

    Converts the container contents to plain-dict form. When
    ``print_input`` is True, the dictionary is printed before returning.

    Returns:
        dict: Dictionary with `general`, `figure`, and `plot_info` keys
        for input into ``make_plot.py``.
    """
    convert = self._iterate_through_class
    # Multi-plot figures index each axis by its position as a string key;
    # single-plot figures always use key '0'.
    if self.total_plots > 1:
        plot_info = {str(i): convert(axis.__dict__)
                     for i, axis in enumerate(self.ax)}
    else:
        plot_info = {'0': convert(self.ax.__dict__)}
    output_dict = {
        'general': convert(self.general.__dict__),
        'figure': convert(self.figure.__dict__),
        'plot_info': plot_info,
    }
    if self.print_input:
        print(output_dict)
    return output_dict
python
{ "resource": "" }
q40196
MainContainer._iterate_through_class
train
def _iterate_through_class(self, class_dict): """Recursive function for output dictionary creation. Function will check each value in a dictionary to see if it is a class, list, or dictionary object. The idea is to turn all class objects into dictionaries. If it is a class object it will pass its ``class.__dict__`` recursively through this function again. If it is a dictionary, it will pass the dictionary recursively through this functin again. If the object is a list, it will iterate through entries checking for class or dictionary objects and pass them recursively through this function. This uses the knowledge of the list structures in the code. Args: class_dict (obj): Dictionary to iteratively check. Returns: Dictionary with all class objects turned into dictionaries. """ output_dict = {} for key in class_dict: val = class_dict[key] try: val = val.__dict__ except AttributeError: pass if type(val) is dict: val = self._iterate_through_class(val) if type(val) is list: temp_val = [] for val_i in val: try: val_i = val_i.__dict__ except AttributeError: pass if type(val_i) is dict: val_i = self._iterate_through_class(val_i) temp_val.append(val_i) val = temp_val output_dict[key] = val return output_dict
python
{ "resource": "" }
q40197
ReadInData.txt_read_in
train
def txt_read_in(self): """Read in txt files. Method for reading in text or csv files. This uses ascii class from astropy.io for flexible input. It is slower than numpy, but has greater flexibility with less input. """ # read in data = ascii.read(self.WORKING_DIRECTORY + '/' + self.file_name) # find number of distinct x and y points. num_x_pts = len(np.unique(data[self.x_column_label])) num_y_pts = len(np.unique(data[self.y_column_label])) # create 2D arrays of x,y,z self.xvals = np.reshape(np.asarray(data[self.x_column_label]), (num_y_pts, num_x_pts)) self.yvals = np.reshape(np.asarray(data[self.y_column_label]), (num_y_pts, num_x_pts)) self.zvals = np.reshape(np.asarray(data[self.z_column_label]), (num_y_pts, num_x_pts)) return
python
{ "resource": "" }
q40198
ReadInData.hdf5_read_in
train
def hdf5_read_in(self):
    """Read in hdf5 files.

    Opens the file, reads the 'data' group, and reshapes the x, y, and z
    columns into 2D grids stored on the instance (xvals/yvals/zvals).
    """
    # Open read-only explicitly: modern h5py requires a mode argument
    # (the old implicit default is deprecated/removed).
    with h5py.File(self.WORKING_DIRECTORY + '/' + self.file_name, 'r') as f:
        data = f['data']
        # find number of distinct x and y points.
        num_x_pts = len(np.unique(data[self.x_column_label][:]))
        num_y_pts = len(np.unique(data[self.y_column_label][:]))
        # create 2D arrays of x,y,z
        self.xvals = np.reshape(data[self.x_column_label][:], (num_y_pts, num_x_pts))
        self.yvals = np.reshape(data[self.y_column_label][:], (num_y_pts, num_x_pts))
        self.zvals = np.reshape(data[self.z_column_label][:], (num_y_pts, num_x_pts))
    return
python
{ "resource": "" }
q40199
GenProcess.set_parameters
train
def set_parameters(self): """Setup all the parameters for the binaries to be evaluated. Grid values and store necessary parameters for input into the SNR function. """ # declare 1D arrays of both paramters if self.xscale != 'lin': self.xvals = np.logspace(np.log10(float(self.x_low)), np.log10(float(self.x_high)), self.num_x) else: self.xvals = np.linspace(float(self.x_low), float(self.x_high), self.num_x) if self.yscale != 'lin': self.yvals = np.logspace(np.log10(float(self.y_low)), np.log10(float(self.y_high)), self.num_y) else: self.yvals = np.linspace(float(self.y_low), float(self.y_high), self.num_y) self.xvals, self.yvals = np.meshgrid(self.xvals, self.yvals) self.xvals, self.yvals = self.xvals.ravel(), self.yvals.ravel() for which in ['x', 'y']: setattr(self, getattr(self, which + 'val_name'), getattr(self, which + 'vals')) self.ecc = 'eccentricity' in self.__dict__ if self.ecc: if 'observation_time' not in self.__dict__: if 'start_time' not in self.__dict__: raise ValueError('If no observation time is provided, the time before' + 'merger must be the inital starting condition.') self.observation_time = self.start_time # small number so it is not zero else: if 'spin' in self.__dict__: self.spin_1 = self.spin self.spin_2 = self.spin for key in ['redshift', 'luminosity_distance', 'comoving_distance']: if key in self.__dict__: self.dist_type = key self.z_or_dist = getattr(self, key) if self.ecc: for key in ['start_frequency', 'start_time', 'start_separation']: if key in self.__dict__: self.initial_cond_type = key.split('_')[-1] self.initial_point = getattr(self, key) # add m1 and m2 self.m1 = (self.total_mass / (1. + self.mass_ratio)) self.m2 = (self.total_mass * self.mass_ratio / (1. + self.mass_ratio)) return
python
{ "resource": "" }