def unicode2encode(text, charmap):
    '''Convert unicode text to its encoded form.

    charmap : dictionary that maps encoded strings (keys) to
    unicode strings (values)
    '''
    if isinstance(text, (list, tuple)):
        unitxt = ''
        for line in text:
            for val, key in charmap.items():
                if key in line:
                    line = line.replace(key, val)
            unitxt += line
        return unitxt
    elif isinstance(text, str):
        for val, key in charmap.items():
            if key in text:
                text = text.replace(key, val)
        return text
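A quick usage sketch of `unicode2encode` above; the two-entry charmap here is purely illustrative, not a real encoding table:

# Hypothetical charmap: encoded string as dict key, unicode string as dict value.
toy_map = {'\x82': u'அ', '\x83': u'ஆ'}
print(unicode2encode(u'அஆ', toy_map))           # -> '\x82\x83'
print(unicode2encode([u'அ', u'ஆ'], toy_map))    # list input is converted and joined -> '\x82\x83'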
def set(self, name: str, value: Union[str, List[str]]) -> None:
    """Set a header."""
    self._headers[name] = value
def radiance_to_bt(arr, wc_, a__, b__):
    """Convert radiance to brightness temperature (BT)."""
    return a__ + b__ * (C2 * wc_ / (da.log(1 + (C1 * (wc_ ** 3) / arr))))
def detach(self):
    """Detach this socket from the server.

    This should be done in conjunction with kill(); once all the jobs are
    dead, detach the socket for garbage collection.
    """
    log.debug("Removing %s from server sockets" % self)
    if self.sessid in self.server.sockets:
        self.server.sockets.pop(self.sessid)
def function(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0):
    """
    :param x: set of x-coordinates
    :type x: array of size (n)
    :param y: set of y-coordinates
    :type y: array of size (n)
    :param theta_E: Einstein radius of lens
    :type theta_E: float
    :param gamma: power-law slope of mass profile
    :type gamma: float < 2
    :param q: axis ratio
    :type q: 0 < q < 1
    :param phi_G: position angle of SES
    :type phi_G: 0 < phi_G < pi/2
    :returns: function
    :raises: AttributeError, KeyError
    """
    phi_G, q = param_util.ellipticity2phi_q(e1, e2)
    gamma, q = self._param_bounds(gamma, q)
    theta_E *= q
    x_shift = x - center_x
    y_shift = y - center_y
    E = theta_E / (((3 - gamma) / 2.) ** (1. / (1 - gamma)) * np.sqrt(q))
    # E = phi_E
    eta = -gamma + 3
    xt1 = np.cos(phi_G) * x_shift + np.sin(phi_G) * y_shift
    xt2 = -np.sin(phi_G) * x_shift + np.cos(phi_G) * y_shift
    p2 = xt1 ** 2 + xt2 ** 2 / q ** 2
    s2 = 0.  # softening
    return 2 * E ** 2 / eta ** 2 * ((p2 + s2) / E ** 2) ** (eta / 2)
def match(self, pattern, context=None):
    """
    This method returns a (possibly empty) list of strings that match the
    regular expression ``pattern`` provided. You can also provide a
    ``context`` as described above.

    This method calls ``choices`` to get a list of all possible choices and
    then filters the list by performing a regular expression search on each
    choice using the supplied ``pattern``.
    """
    matches = []
    regex = pattern
    if regex == '*':
        regex = '.*'
    regex = re.compile(regex)
    for choice in self.choices(context):
        if regex.search(choice):
            matches.append(choice)
    return matches
def strip_prefix(s, prefix, strict=False):
    """Removes the prefix if it's there; otherwise returns the input string
    unchanged. If strict is True, also ensures the prefix was present."""
    if s.startswith(prefix):
        return s[len(prefix):]
    elif strict:
        raise WimpyError("string doesn't start with prefix")
    return s
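A couple of spot checks for `strip_prefix`; the strict failure case assumes `WimpyError` is defined as the function expects:

assert strip_prefix('www.example.com', 'www.') == 'example.com'
assert strip_prefix('example.com', 'www.') == 'example.com'   # prefix absent, unchanged
# strip_prefix('example.com', 'www.', strict=True)  # would raise WimpyError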
def _encode_bool(name, value, dummy0, dummy1):
    """Encode a python boolean (True/False)."""
    return b"\x08" + name + (value and b"\x01" or b"\x00")
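A worked byte-level example may help: a BSON boolean element is the 0x08 type tag, the NUL-terminated key name, then a one-byte value. The dummy arguments mirror the call shape above and are unused:

name = b"flag\x00"  # BSON keys are NUL-terminated C-strings
assert _encode_bool(name, True, None, None) == b"\x08flag\x00\x01"
assert _encode_bool(name, False, None, None) == b"\x08flag\x00\x00"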
def print_param_defaults(self_):
    """Print the default values of all cls's Parameters."""
    cls = self_.cls
    for key, val in cls.__dict__.items():
        if isinstance(val, Parameter):
            print(cls.__name__ + '.' + key + '=' + repr(val.default))
def ephem(self, *args, **kwargs):
    """Create an Ephem object which is a subset of this one.

    Takes the same keyword arguments as :py:meth:`ephemeris`.

    Return:
        Ephem:
    """
    return self.__class__(self.ephemeris(*args, **kwargs))
def _attach_original_exception(self, exc):
    """
    Often, a retry will be raised inside an "except" block. This keeps
    track of the first exception for debugging purposes.
    """
    original_exception = sys.exc_info()
    if original_exception[0] is not None:
        exc.original_exception = original_exception
def register_target(self, target: Target):
    """Register a `target` instance in this build context.

    A registered target is saved in the `targets` map and in the
    `targets_by_module` map, but is not added to the target graph until
    target extraction is completed (thread safety considerations).
    """
    if target.name in self.targets:
        first = self.targets[target.name]
        raise NameError(
            'Target with name "{0.name}" ({0.builder_name} from module '
            '"{1}") already exists - defined first as '
            '{2.builder_name} in module "{3}"'.format(
                target, split_build_module(target.name), first,
                split_build_module(first.name)))
    self.targets[target.name] = target
    self.targets_by_module[split_build_module(target.name)].add(target.name)
def score_samples(self, X, lengths=None):
    """Compute the log probability under the model and compute posteriors.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Feature matrix of individual samples.

    lengths : array-like of integers, shape (n_sequences, ), optional
        Lengths of the individual sequences in ``X``. The sum of
        these should be ``n_samples``.

    Returns
    -------
    logprob : float
        Log likelihood of ``X``.

    posteriors : array, shape (n_samples, n_components)
        State-membership probabilities for each sample in ``X``.

    See Also
    --------
    score : Compute the log probability under the model.
    decode : Find most likely state sequence corresponding to ``X``.
    """
    check_is_fitted(self, "startprob_")
    self._check()

    X = check_array(X)
    n_samples = X.shape[0]
    logprob = 0
    posteriors = np.zeros((n_samples, self.n_components))
    for i, j in iter_from_X_lengths(X, lengths):
        framelogprob = self._compute_log_likelihood(X[i:j])
        logprobij, fwdlattice = self._do_forward_pass(framelogprob)
        logprob += logprobij

        bwdlattice = self._do_backward_pass(framelogprob)
        posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice)
    return logprob, posteriors
def getPrevUrl(self, url, data):
    """Find previous URL."""
    prevUrl = None
    if self.prevSearch:
        try:
            prevUrl = self.fetchUrl(url, data, self.prevSearch)
        except ValueError as msg:
            # assume there is no previous URL, but print a warning
            out.warn(u"%s Assuming no previous comic strips exist." % msg)
        else:
            prevUrl = self.prevUrlModifier(prevUrl)
            out.debug(u"Found previous URL %s" % prevUrl)
            getHandler().comicPageLink(self.getName(), url, prevUrl)
    return prevUrl
def get_diskinfo(opts, show_all=False, local_only=False):
    ''' Returns a list holding the current disk info.

        Stats are divided by the outputunit.
    '''
    disks = []
    outunit = opts.outunit
    label_map = get_label_map(opts)

    # get mount info
    try:
        with open(mntfname) as infile:
            lines = infile.readlines()
            lines.sort()
    except IOError:
        return None

    # build list of disks
    for i, line in enumerate(lines):
        device, mntp, fmt, mntops, *_ = line.split()
        if device in ('cgroup',):  # never want these
            continue

        disk = DiskInfo()
        dev = basename(device)              # short name
        disk.isnet = ':' in device          # cheesy but works
        if local_only and disk.isnet:
            continue
        disk.isimg = is_img = dev.startswith('loop')  # could be better
        is_tmpfs = (device == 'tmpfs')

        # lots of junk here, so we throw away most entries
        for selector in selectors:
            if selector in device:
                if show_all:
                    if is_tmpfs:
                        disk.isram = True
                else:  # skip these:
                    if (is_img or is_tmpfs or mntp == '/boot/efi'):
                        continue
                break  # found a useful entry, stop here
        else:  # no-break, nothing was found
            continue  # skip this one

        disk.dev = dev
        disk.fmt = fmt
        disk.mntp = mntp = decode_mntp(mntp) if '\\' in mntp else mntp
        disk.ismntd = bool(mntp)
        disk.isopt = check_optical(disk)
        if device[0] == '/':  # .startswith('/dev'):
            disk.isrem = check_removable(dev, opts)
        disk.label = label_map.get(device)

        # get disk usage information
        # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/statvfs.h.html
        # convert to bytes, then output units
        stat = os.statvfs(mntp)
        disk.ocap = stat.f_frsize * stat.f_blocks  # keep for later
        disk.cap = disk.ocap / outunit
        disk.free = stat.f_frsize * stat.f_bavail / outunit
        disk.oused = stat.f_frsize * (stat.f_blocks - stat.f_bfree)  # for later
        disk.used = disk.oused / outunit
        disk.pcnt = disk.oused / disk.ocap * 100

        if mntops.startswith('rw'):
            disk.rw = True
        elif mntops.startswith('ro'):  # read only
            disk.rw = False
        else:
            disk.rw = not bool(stat.f_flag & os.ST_RDONLY)
        disks.append(disk)

    if show_all:  # look at /dev/disks again for the unmounted
        for devname in label_map:
            dev = basename(devname)
            exists = [disk for disk in disks if disk.dev == dev]
            if not exists:
                disk = DiskInfo(
                    cap=0, free=0, ocap=0, pcnt=0, used=0,
                    dev=dev,
                    ismntd=False, mntp='',
                    isnet=False,
                    isopt=check_optical(DiskInfo(dev=dev, fmt=None)),
                    isram=False,  # no such thing?
                    isrem=check_removable(dev, opts),
                    label=label_map[devname],
                    rw=None,
                )
                disks.append(disk)

    disks.sort(key=lambda disk: disk.dev)  # sort again :-/
    if opts.debug:
        print()
        for disk in disks:
            print(disk.dev, disk)
        print()
    return disks
def unpack_unordered_pairs(self, pairs):
    """Unpack an unordered list of value pairs, taking DelayPacking
    exceptions into account to resolve circular references.

    Used to unpack dictionary items when the order is not guaranteed by
    the serializer. When item order changes between packing and unpacking,
    references are not guaranteed to appear before dereferences anymore.
    So if unpacking an item fails because of an unknown dereference, we
    must keep it aside, continue unpacking the other items, and retry it
    later."""
    items = [(False, k, v) for k, v in pairs]
    result = []
    # Try to unpack items more than one time to resolve cross references
    max_loop = 2
    while items and max_loop:
        next_items = []
        for key_unpacked, key_data, value_data in items:
            if key_unpacked:
                key = key_data
            else:
                blob = self._begin()
                try:
                    # Try unpacking the key
                    key = self.unpack_data(key_data)
                    self._commit(blob)
                except DelayPacking:
                    self._rollback(blob)
                    # If it is delayed keep it for later
                    next_items.append((False, key_data, value_data))
                    continue
            blob = self._begin()
            try:
                # Try unpacking the value
                value = self.unpack_data(value_data)
                self._commit(blob)
            except DelayPacking:
                self._rollback(blob)
                # If it is delayed keep it for later
                next_items.append((True, key, value_data))
                continue
            # Update the container with the unpacked value and key
            result.append((key, value))
        items = next_items
        max_loop -= 1

    if items:
        # Not all items were resolved
        raise DelayPacking()

    return result
def relcurveto(self, h1x, h1y, h2x, h2y, x, y):
    '''Draws a curve relative to the last point.
    '''
    if self._path is None:
        raise ShoebotError(_("No current path. Use beginpath() first."))
    self._path.relcurveto(h1x, h1y, h2x, h2y, x, y)
def next_doc_with_tag(self, doc_tag):
    """
    Returns the next document with the specified tag.
    Raises StopIteration when no further document with the tag is found.
    """
    while True:
        try:
            doc = next(self)
            if doc.tag == doc_tag:
                return doc
        except StopIteration:
            raise
def surrounding_nodes(self, position):
    """
    Returns nearest node indices and direction of opposite node.

    :param position: Position inside the mesh to search nearest node for,
        as (x, y, z)
    :return: Nearest node indices and direction of opposite node.
    """
    n_node_index, n_node_position, n_node_error = self.nearest_node(position)
    if n_node_error == 0.0:
        index_mod = []
        for i in range(len(n_node_index)):
            new_point = np.asarray(n_node_position)
            new_point[i] += 1.e-5 * np.abs(new_point[i])
            try:
                self.nearest_node(tuple(new_point))
                index_mod.append(-1)
            except ValueError:
                index_mod.append(1)
    else:
        # Check if node_position is larger or smaller in resp. axes
        # than position
        index_mod = []
        for i in range(len(n_node_index)):
            if n_node_position[i] > position[i]:
                index_mod.append(-1)
            else:
                index_mod.append(1)
    return tuple(n_node_index), tuple(index_mod)
def get_maxloss_rupture(dstore, loss_type):
    """
    :param dstore: a DataStore instance
    :param loss_type: a loss type string
    :returns:
        EBRupture instance corresponding to the maximum loss for the
        given loss type
    """
    lti = dstore['oqparam'].lti[loss_type]
    ridx = dstore.get_attr('rup_loss_table', 'ridx')[lti]
    [rgetter] = gen_rupture_getters(dstore, slice(ridx, ridx + 1))
    [ebr] = rgetter.get_ruptures()
    return ebr
def bit_reversal(qubits: List[int]) -> Program:
    """
    Generate a circuit to do bit reversal.

    :param qubits: Qubits to do bit reversal with.
    :return: A program to do bit reversal.
    """
    p = Program()
    n = len(qubits)
    for i in range(int(n / 2)):
        p.inst(SWAP(qubits[i], qubits[-i - 1]))
    return p
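For four qubits the reversal is just two SWAPs; a quick check, assuming the same pyquil imports the function above relies on:

from pyquil import Program
from pyquil.gates import SWAP

p = bit_reversal([0, 1, 2, 3])
print(p)   # SWAP 0 3
           # SWAP 1 2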
def tangent_bundle(self):
    """The tangent bundle associated with `domain` using `partition`.

    The tangent bundle of a space ``X`` of functions ``R^d --> F`` can be
    interpreted as the space of vector-valued functions ``R^d --> F^d``.
    This space can be identified with the power space ``X^d`` as used in
    this implementation.
    """
    if self.ndim == 0:
        return ProductSpace(field=self.field)
    else:
        return ProductSpace(self, self.ndim)
def temporary(self, path):
    """Establishes a temporary build root, restoring the prior build root
    on exit."""
    if path is None:
        raise ValueError('Can only temporarily establish a build root '
                         'given a path.')
    prior = self._root_dir
    self._root_dir = path
    try:
        yield
    finally:
        self._root_dir = prior
def copy_non_ecf(props, target):
    # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
    """
    Copies non-ECF properties from ``props`` to ``target``

    :param props: An input dictionary
    :param target: The dictionary to copy non-ECF properties to
    :return: The ``target`` dictionary
    """
    target.update(
        {key: value for key, value in props.items()
         if key not in ECFPROPNAMES}
    )
    return target
def derive_fields(self):
    """Derives our fields."""
    if self.fields:
        return self.fields
    else:
        fields = []
        for field in self.object_list.model._meta.fields:
            if field.name != 'id':
                fields.append(field.name)
        return fields
def get_contigs(self):
    '''Returns a dictionary of contig_name -> pyfastaq.Sequences.Fasta object'''
    contigs = {}
    pyfastaq.tasks.file_to_dict(self.contigs_fasta, contigs)
    return contigs
def cleanup(self):
    """
    Cleans up the key value store. In detail:
    1. Deletes all key store references for image_files that do not exist
       and all key references for its thumbnails *and* their image_files.
    2. Deletes or updates all invalid thumbnail keys
    """
    for key in self._find_keys(identity='image'):
        image_file = self._get(key)
        if image_file and not image_file.exists():
            self.delete(image_file)

    for key in self._find_keys(identity='thumbnails'):
        # We do not need to check for file existence in here since we
        # already did that above for all image references
        image_file = self._get(key)
        if image_file:
            # if there is an image_file then we check all of its thumbnails
            # for existence
            thumbnail_keys = self._get(key, identity='thumbnails') or []
            thumbnail_keys_set = set(thumbnail_keys)
            for thumbnail_key in thumbnail_keys:
                if not self._get(thumbnail_key):
                    thumbnail_keys_set.remove(thumbnail_key)
            thumbnail_keys = list(thumbnail_keys_set)
            if thumbnail_keys:
                self._set(key, thumbnail_keys, identity='thumbnails')
                continue
        # if there is no image_file then this thumbnails key is just
        # hangin' loose. If the thumbnail_keys ended up empty there is no
        # reason for keeping it either
        self._delete(key, identity='thumbnails')
def check_index(i):
    """
    Checks and parses an index spec, which must be a one-dimensional array
    [i0, i1, ...]
    """
    i = asarray(i)
    if (i.ndim > 1) or (size(i) < 1):
        raise Exception("Index must be one-dimensional and non-singleton")
    return i
def _get(self, operation, field):
    """Get tracked position for a given operation and field."""
    self._check_exists()
    query = {Mark.FLD_OP: operation.name,
             Mark.FLD_MARK + "." + field: {"$exists": True}}
    return self._track.find_one(query)
def read_data(self, scaling_factor=1E-9, strain_headers=None):
    '''
    Reads the data from the csv file

    :param float scaling_factor:
        Scaling factor used for all strain values (default 1E-9 for
        nanostrain)
    :param list strain_headers:
        List of the variables in the file that correspond to strain
        parameters
    :returns:
        strain - Strain model as an instance of the :class:
        openquake.hmtk.strain.geodetic_strain.GeodeticStrain
    '''
    if strain_headers:
        self.strain.data_variables = strain_headers
    else:
        self.strain.data_variables = STRAIN_VARIABLES

    datafile = open(self.filename, 'r')
    reader = csv.DictReader(datafile)

    self.strain.data = dict([(name, []) for name in reader.fieldnames])
    for row in reader:
        for name in row.keys():
            if 'region' in name.lower():
                self.strain.data[name].append(row[name])
            elif name in self.strain.data_variables:
                self.strain.data[name].append(
                    scaling_factor * float(row[name]))
            else:
                self.strain.data[name].append(float(row[name]))

    for key in self.strain.data.keys():
        if 'region' in key:
            self.strain.data[key] = np.array(self.strain.data[key],
                                             dtype='S13')
        else:
            self.strain.data[key] = np.array(self.strain.data[key])

    self._check_invalid_longitudes()

    if 'region' not in self.strain.data:
        print('No tectonic regionalisation found in input file!')
    self.strain.data_variables = self.strain.data.keys()

    # Update data with secondary data (i.e. 2nd invariant, e1h, e2h, etc.)
    self.strain.get_secondary_strain_data()
    return self.strain
def from_stream(cls, stream, marker_code, offset):
    """
    Return a generic |_Marker| instance for the marker at *offset* in
    *stream* having *marker_code*.
    """
    if JPEG_MARKER_CODE.is_standalone(marker_code):
        segment_length = 0
    else:
        segment_length = stream.read_short(offset)
    return cls(marker_code, offset, segment_length)
def get_call_repr(func, *args, **kwargs):
    """Return the string representation of the function call.

    :param func: A callable (e.g. function, method).
    :type func: callable
    :param args: Positional arguments for the callable.
    :param kwargs: Keyword arguments for the callable.
    :return: String representation of the function call.
    :rtype: str
    """
    # Functions, builtins and methods
    if ismethod(func) or isfunction(func) or isbuiltin(func):
        func_repr = '{}.{}'.format(func.__module__, func.__qualname__)
    # A callable class instance
    elif not isclass(func) and hasattr(func, '__call__'):
        func_repr = '{}.{}'.format(func.__module__, func.__class__.__name__)
    else:
        func_repr = repr(func)

    args_reprs = [repr(arg) for arg in args]
    kwargs_reprs = [k + '=' + repr(v) for k, v in sorted(kwargs.items())]
    return '{}({})'.format(func_repr, ', '.join(args_reprs + kwargs_reprs))
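A short usage sketch; the outputs in the comments follow from the format string above:

import math

print(get_call_repr(math.hypot, 3, 4))
# -> 'math.hypot(3, 4)'
print(get_call_repr(sorted, [3, 1], reverse=True))
# -> 'builtins.sorted([3, 1], reverse=True)'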
def transition(self):
    """
    Implements a QAM-like transition.

    This function assumes ``program`` and ``program_counter`` instance
    variables are set appropriately, and that the wavefunction simulator and
    classical memory ``ram`` instance variables are in the desired QAM input
    state.

    :return: whether the QAM should halt after this transition.
    """
    instruction = self.program[self.program_counter]

    if isinstance(instruction, Gate):
        if instruction.name in self.defined_gates:
            self.wf_simulator.do_gate_matrix(
                matrix=self.defined_gates[instruction.name],
                qubits=[q.index for q in instruction.qubits])
        else:
            self.wf_simulator.do_gate(gate=instruction)

        for noise_type, noise_prob in self.post_gate_noise_probabilities.items():
            self.wf_simulator.do_post_gate_noise(
                noise_type, noise_prob,
                qubits=[q.index for q in instruction.qubits])

        self.program_counter += 1

    elif isinstance(instruction, Measurement):
        measured_val = self.wf_simulator.do_measurement(qubit=instruction.qubit.index)
        x = instruction.classical_reg  # type: MemoryReference
        self.ram[x.name][x.offset] = measured_val
        self.program_counter += 1

    elif isinstance(instruction, Declare):
        if instruction.shared_region is not None:
            raise NotImplementedError("SHARING is not (yet) implemented.")

        self.ram[instruction.name] = np.zeros(
            instruction.memory_size,
            dtype=QUIL_TO_NUMPY_DTYPE[instruction.memory_type])
        self.program_counter += 1

    elif isinstance(instruction, Pragma):
        # TODO: more stringent checks for what's being pragma'd and warnings
        self.program_counter += 1

    elif isinstance(instruction, Jump):
        # unconditional Jump; go directly to Label
        self.program_counter = self.find_label(instruction.target)

    elif isinstance(instruction, JumpTarget):
        # Label; pass straight over
        self.program_counter += 1

    elif isinstance(instruction, JumpConditional):
        # JumpConditional; check classical reg
        x = instruction.condition  # type: MemoryReference
        cond = self.ram[x.name][x.offset]
        if not isinstance(cond, (bool, np.bool, np.int8)):
            raise ValueError("{} requires a data type of BIT; not {}"
                             .format(instruction.op, type(cond)))
        dest_index = self.find_label(instruction.target)
        if isinstance(instruction, JumpWhen):
            jump_if_cond = True
        elif isinstance(instruction, JumpUnless):
            jump_if_cond = False
        else:
            raise TypeError("Invalid JumpConditional")

        if not (cond ^ jump_if_cond):
            # jumping: set prog counter to JumpTarget
            self.program_counter = dest_index
        else:
            # not jumping: hop over this JumpConditional
            self.program_counter += 1

    elif isinstance(instruction, UnaryClassicalInstruction):
        # UnaryClassicalInstruction; set classical reg
        target = instruction.target  # type: MemoryReference
        old = self.ram[target.name][target.offset]
        if isinstance(instruction, ClassicalNeg):
            if not isinstance(old, (int, float, np.int, np.float)):
                raise ValueError("NEG requires a data type of REAL or INTEGER; not {}"
                                 .format(type(old)))
            self.ram[target.name][target.offset] *= -1
        elif isinstance(instruction, ClassicalNot):
            if not isinstance(old, (bool, np.bool)):
                raise ValueError("NOT requires a data type of BIT; not {}"
                                 .format(type(old)))
            self.ram[target.name][target.offset] = not old
        else:
            raise TypeError("Invalid UnaryClassicalInstruction")

        self.program_counter += 1

    elif isinstance(instruction, (LogicalBinaryOp, ArithmeticBinaryOp, ClassicalMove)):
        left_ind = instruction.left  # type: MemoryReference
        left_val = self.ram[left_ind.name][left_ind.offset]
        if isinstance(instruction.right, MemoryReference):
            right_ind = instruction.right  # type: MemoryReference
            right_val = self.ram[right_ind.name][right_ind.offset]
        else:
            right_val = instruction.right

        if isinstance(instruction, ClassicalAnd):
            new_val = left_val & right_val
        elif isinstance(instruction, ClassicalInclusiveOr):
            new_val = left_val | right_val
        elif isinstance(instruction, ClassicalExclusiveOr):
            new_val = left_val ^ right_val
        elif isinstance(instruction, ClassicalAdd):
            new_val = left_val + right_val
        elif isinstance(instruction, ClassicalSub):
            new_val = left_val - right_val
        elif isinstance(instruction, ClassicalMul):
            new_val = left_val * right_val
        elif isinstance(instruction, ClassicalDiv):
            new_val = left_val / right_val
        elif isinstance(instruction, ClassicalMove):
            new_val = right_val
        else:
            raise ValueError("Unknown BinaryOp {}".format(type(instruction)))
        self.ram[left_ind.name][left_ind.offset] = new_val
        self.program_counter += 1

    elif isinstance(instruction, ClassicalExchange):
        left_ind = instruction.left  # type: MemoryReference
        right_ind = instruction.right  # type: MemoryReference

        tmp = self.ram[left_ind.name][left_ind.offset]
        self.ram[left_ind.name][left_ind.offset] = self.ram[right_ind.name][right_ind.offset]
        self.ram[right_ind.name][right_ind.offset] = tmp
        self.program_counter += 1

    elif isinstance(instruction, Reset):
        self.wf_simulator.reset()
        self.program_counter += 1

    elif isinstance(instruction, ResetQubit):
        # TODO
        raise NotImplementedError("Need to implement in wf simulator")
        self.program_counter += 1

    elif isinstance(instruction, Wait):
        warnings.warn("WAIT does nothing for a noiseless simulator")
        self.program_counter += 1

    elif isinstance(instruction, Nop):
        # well that was easy
        self.program_counter += 1

    elif isinstance(instruction, DefGate):
        if instruction.parameters is not None and len(instruction.parameters) > 0:
            raise NotImplementedError("PyQVM does not support parameterized DEFGATEs")
        # store the gate's matrix so do_gate_matrix above can apply it
        self.defined_gates[instruction.name] = instruction.matrix
        self.program_counter += 1

    elif isinstance(instruction, RawInstr):
        raise NotImplementedError("PyQVM does not support raw instructions. "
                                  "Parse your program")

    elif isinstance(instruction, Halt):
        return True
    else:
        raise ValueError("Unsupported instruction type: {}".format(instruction))

    # return HALTED (i.e. program_counter is end of program)
    return self.program_counter == len(self.program)
def _merge_args(qCmd, parsed_args, _extra_values, value_specs):
    """Merge arguments from _extra_values into parsed_args.

    If an argument value is provided in both and it is a list, the values
    in _extra_values will be merged into parsed_args.

    @param parsed_args: the parsed args from known options
    @param _extra_values: the other parsed arguments in unknown parts
    @param values_specs: the unparsed unknown parts
    """
    temp_values = _extra_values.copy()
    for key, value in six.iteritems(temp_values):
        if hasattr(parsed_args, key):
            arg_value = getattr(parsed_args, key)
            if arg_value is not None and value is not None:
                if isinstance(arg_value, list):
                    if value and isinstance(value, list):
                        if (not arg_value or
                                isinstance(arg_value[0], type(value[0]))):
                            arg_value.extend(value)
                            _extra_values.pop(key)
def data(ctx, path):
    """List EDC data for [STUDY] [ENV] [SUBJECT]"""
    _rws = partial(rws_call, ctx)
    if len(path) == 0:
        _rws(ClinicalStudiesRequest(), default_attr='oid')
    elif len(path) == 1:
        _rws(StudySubjectsRequest(path[0], 'Prod'), default_attr='subjectkey')
    elif len(path) == 2:
        _rws(StudySubjectsRequest(path[0], path[1]), default_attr='subjectkey')
    elif len(path) == 3:
        try:
            click.echo(get_data(ctx, path[0], path[1], path[2]))
        except RWSException as e:
            click.echo(str(e))
        except requests.exceptions.HTTPError as e:
            click.echo(str(e))
    else:
        click.echo('Too many arguments')
def find_window_id(pattern, method='mru', error='raise'):
    """
    xprop -id 0x00a00007 | grep "WM_CLASS(STRING)"
    """
    import utool as ut
    winid_candidates = XCtrl.findall_window_ids(pattern)
    if len(winid_candidates) == 0:
        if error == 'raise':
            available_windows = ut.cmd2('wmctrl -l')['out']
            msg = 'No window matches pattern=%r' % (pattern,)
            msg += '\navailable windows are:\n%s' % (available_windows,)
            print(msg)
            raise Exception(msg)
        win_id = None
    elif len(winid_candidates) == 1:
        win_id = winid_candidates[0]
    else:
        # print('Multiple (%d) windows matches pattern=%r' % (
        #     len(winid_list), pattern,))
        # Find most recently used window with the focus name.
        win_id = XCtrl.sort_window_ids(winid_candidates, method)[0]
    return win_id
def classify(label_dict, image_fname=None, image_label=None):
    '''tries to classify a DICOM image based on known string patterns (with
    fuzzy matching)

    Takes the label from the DICOM header and compares to the entries in
    ``label_dict``. If it finds something close it will return the image
    type, otherwise it will return ``None``. Alternatively, you can supply
    your own string, ``image_label``, and it will try to match that.

    ``label_dict`` is a dictionary where the keys are dataset types and the
    values are lists of strings that match that type. For example::

        {
            'anatomy': ['SPGR', 'MPRAGE', 'anat', 'anatomy'],
            'dti': ['DTI'],
            'field_map': ['fieldmap', 'TE7', 'B0']
        }
    '''
    min_acceptable_match = 80
    if image_fname:
        label_info = info_for_tags(image_fname, [(0x8, 0x103e)])
        image_label = label_info[(0x8, 0x103e)]
    # creates a list of tuples: (type, keyword)
    flat_dict = [i for j in [[(b, x) for x in label_dict[b]]
                             for b in label_dict] for i in j]
    best_match = process.extractOne(image_label, [x[1] for x in flat_dict])
    if best_match[1] < min_acceptable_match:
        return None
    else:
        return [x[0] for x in flat_dict if x[1] == best_match[0]][0]
def diff_encode(line, transform):
    """ Differentially encode a shapely linestring or ring.
    """
    coords = [transform(x, y) for (x, y) in line.coords]

    pairs = zip(coords[:], coords[1:])
    diffs = [(x2 - x1, y2 - y1) for ((x1, y1), (x2, y2)) in pairs]

    return coords[:1] + [(x, y) for (x, y) in diffs if (x, y) != (0, 0)]
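A quick check of the encoding, assuming shapely is installed; the identity transform keeps coordinates as-is, and the repeated final point collapses to nothing because zero deltas are filtered out:

from shapely.geometry import LineString

line = LineString([(0, 0), (1, 0), (1, 2), (1, 2)])
print(diff_encode(line, lambda x, y: (x, y)))
# -> [(0.0, 0.0), (1.0, 0.0), (0.0, 2.0)]  (first point, then non-zero deltas)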
def set_board_options(self, options, team_context, id):
    """SetBoardOptions.
    Update board options
    :param {str} options: options to be updated
    :param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
    :param str id: identifier for board, either category plural name (Eg:"Stories") or guid
    :rtype: {str}
    """
    project = None
    team = None
    if team_context is not None:
        if team_context.project_id:
            project = team_context.project_id
        else:
            project = team_context.project
        if team_context.team_id:
            team = team_context.team_id
        else:
            team = team_context.team

    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'string')
    if team is not None:
        route_values['team'] = self._serialize.url('team', team, 'string')
    if id is not None:
        route_values['id'] = self._serialize.url('id', id, 'str')
    content = self._serialize.body(options, '{str}')
    response = self._send(http_method='PUT',
                          location_id='23ad19fc-3b8e-4877-8462-b3f92bc06b40',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('{str}', self._unwrap_collection(response))
async def remove_all_pumps_async(self, reason):
    """
    Stops all partition pumps. (Note: this might be wrong and need to
    await all tasks before returning done.)

    :param reason: A reason for closing.
    :type reason: str
    :rtype: bool
    """
    pump_tasks = [self.remove_pump_async(p_id, reason)
                  for p_id in self.partition_pumps]
    await asyncio.gather(*pump_tasks)
    return True
def parse_info(raw_info, apply_tag=None):
    """
    Parse raw rdoinfo metadata inplace.

    :param raw_info: raw info to parse
    :param apply_tag: tag to apply
    :returns: dictionary containing all packages in rdoinfo
    """
    parse_releases(raw_info)
    parse_packages(raw_info, apply_tag=apply_tag)
    return raw_info
def action(self, *args, **kwargs):
    """the default action for Support Classifiers invokes any derived
    _action function, trapping any exceptions raised in the process. We are
    obligated to catch these exceptions to give subsequent rules the
    opportunity to act and perhaps mitigate the error. An error during the
    action application is a failure of the rule, not a failure of the
    classification system itself."""
    try:
        return self._action(*args, **kwargs)
    except KeyError as x:
        self.config.logger.debug(
            'Rule %s action failed because of missing key "%s"',
            to_str(self.__class__),
            x,
        )
    except Exception as x:
        self.config.logger.debug(
            'Rule %s action failed because of "%s"',
            to_str(self.__class__),
            x,
            exc_info=True
        )
    return False
def _aggregate_on_chunks(x, f_agg, chunk_len):
    """
    Takes the time series x and constructs a lower sampled version of it by
    applying the aggregation function f_agg on consecutive chunks of length
    chunk_len

    :param x: the time series to calculate the aggregation of
    :type x: numpy.ndarray
    :param f_agg: The name of the aggregation function that should be an
        attribute of the pandas.Series
    :type f_agg: str
    :param chunk_len: The size of the chunks where to aggregate the time
        series
    :type chunk_len: int
    :return: A list of the aggregation function over the chunks
    :return type: list
    """
    return [getattr(x[i * chunk_len: (i + 1) * chunk_len], f_agg)()
            for i in range(int(np.ceil(len(x) / chunk_len)))]
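For example, with a pandas Series (whose slices expose ``min``, ``max``, ``mean`` etc. as attributes, as the docstring requires); the last chunk may be shorter:

import numpy as np
import pandas as pd

x = pd.Series([0, 1, 2, 3, 4])
print(_aggregate_on_chunks(x, 'max', chunk_len=2))   # -> [1, 3, 4]
print(_aggregate_on_chunks(x, 'mean', chunk_len=2))  # -> [0.5, 2.5, 4.0]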
def get_parent_data(tree, node, current):
    """
    Recurse up the tree getting parent data

    :param tree: The tree
    :param node: The current node
    :param current: The current list
    :return: The hierarchical list of (tag, data) pairs
    """
    if not current:
        current = []
    parent = tree.parent(node.identifier)
    if parent.is_root():
        return current
    current.insert(0, (parent.tag, parent.data))
    return PlateManager.get_parent_data(tree, parent, current)
def gradient_rgb(
        self, text=None, fore=None, back=None, style=None,
        start=None, stop=None, step=1, linemode=True, movefactor=0):
    """ Return a black and white gradient.

    Arguments:
        text       : String to colorize.
        fore       : Foreground color, background will be gradient.
        back       : Background color, foreground will be gradient.
        style      : Name of style to use for the gradient.
        start      : Starting rgb value.
        stop       : Stopping rgb value.
        step       : Number of characters to colorize per color.
                     This allows a "wider" gradient. This will always
                     be greater than 0.
        linemode   : Colorize each line in the input.
                     Default: True
        movefactor : Amount to shift gradient for each line when
                     `linemode` is set.
    """
    gradargs = {
        'step': step,
        'fore': fore,
        'back': back,
        'style': style,
    }
    start = start or (0, 0, 0)
    stop = stop or (255, 255, 255)
    if linemode:
        method = self._gradient_rgb_lines
        gradargs['movefactor'] = movefactor
    else:
        method = self._gradient_rgb_line

    if text:
        return self.__class__(
            ''.join((
                self.data or '',
                method(text, start, stop, **gradargs),
            ))
        )

    # Operating on self.data.
    return self.__class__(
        method(self.stripped(), start, stop, **gradargs)
    )
def read_backend(self):
    '''The read :class:`stdnet.BackendDataServer` for this instance.
    It can be ``None``.
    '''
    session = self.session
    if session:
        return session.model(self).read_backend
def average_over_area(q, x, y):
    """Averages a quantity `q` over a rectangular area given a 2D array and
    the x and y vectors for sample locations, using the trapezoidal rule."""
    area = (np.max(x) - np.min(x)) * (np.max(y) - np.min(y))
    integral = np.trapz(np.trapz(q, y, axis=0), x)
    return integral / area
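A sanity check: a constant field should average to that constant, independent of the grid spacing (numpy assumed, as in the function above):

import numpy as np

x = np.linspace(0.0, 2.0, 21)
y = np.linspace(0.0, 1.0, 11)
q = np.full((y.size, x.size), 7.0)   # constant field, shape (ny, nx)
print(average_over_area(q, x, y))    # -> 7.0 (up to floating point)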
def create_api_vlan(self):
    """Get an instance of Api Vlan services facade."""
    return ApiVlan(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap)
def get_valid_filename(s, max_length=FILENAME_MAX_LENGTH):
    """
    Returns the given string converted to a string that can be used for a
    clean filename. Removes leading and trailing spaces; converts anything
    that is not an alphanumeric, dash or underscore to underscore; converts
    behave examples separator ` -- @` to underscore. It also cuts the
    resulting name to `max_length`.

    @see https://github.com/django/django/blob/master/django/utils/text.py
    """
    s = str(s).strip().replace(' -- @', '_')
    s = re.sub(r'(?u)[^-\w]', '_', s).strip('_')
    return s[:max_length]
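For instance (assuming FILENAME_MAX_LENGTH is large enough not to truncate here); note the dot is also replaced, since `.` is neither a word character nor a dash:

print(get_valid_filename('Login feature -- @1.2 Valid login'))
# -> 'Login_feature_1_2_Valid_login'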
def get_absolute_url_link(self, text=None, cls=None, icon_class=None,
                          **attrs):
    """Gets the html link for the object."""
    if text is None:
        text = self.get_link_text()
    return build_link(href=self.get_absolute_url(),
                      text=text,
                      cls=cls,
                      icon_class=icon_class,
                      **attrs)
def FanOut(self, obj, parent=None):
    """Expand values from various attribute types.

    Strings are returned as is.
    Dictionaries are returned with a key string, and an expanded set of
    values.
    Other iterables are expanded until they flatten out.
    Other items are returned in string format.

    Args:
      obj: The object to expand out.
      parent: The parent object: Used to short-circuit infinite recursion.

    Returns:
      a list of expanded values as strings.
    """
    # Catch cases where RDFs are iterable but return themselves.
    if parent and obj == parent:
        results = [utils.SmartUnicode(obj).strip()]
    elif isinstance(obj, (string_types, rdf_structs.EnumNamedValue)):
        results = [utils.SmartUnicode(obj).strip()]
    elif isinstance(obj, rdf_protodict.DataBlob):
        results = self.FanOut(obj.GetValue())
    elif isinstance(obj, (collections.Mapping, rdf_protodict.Dict)):
        results = []
        # rdf_protodict.Dict only has items, not iteritems.
        for k, v in iteritems(obj):
            expanded_v = [utils.SmartUnicode(r) for r in self.FanOut(v)]
            results.append("%s:%s" % (utils.SmartUnicode(k), ",".join(expanded_v)))
    elif isinstance(obj, (collections.Iterable,
                          rdf_structs.RepeatedFieldHelper)):
        results = []
        for rslt in [self.FanOut(o, obj) for o in obj]:
            results.extend(rslt)
    else:
        results = [utils.SmartUnicode(obj).strip()]
    return results
def get_old_filename(diff_part):
    """
    Returns the filename for the original file that was changed in a diff
    part.
    """
    regexps = (
        # e.g. "--- a/foo/bar"
        r'^--- a/(.*)',
        # e.g. "--- /dev/null"
        r'^\-\-\- (.*)',
    )
    for regexp in regexps:
        r = re.compile(regexp, re.MULTILINE)
        match = r.search(diff_part)
        if match is not None:
            return match.groups()[0]
    raise MalformedGitDiff("No old filename in diff part found. "
                           "Examined diff part: {}".format(diff_part))
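A small illustration with a fabricated diff fragment (assuming the function and `MalformedGitDiff` are in scope); the first pattern matches the `--- a/` line:

diff_part = """\
--- a/foo/bar.py
+++ b/foo/bar.py
@@ -1 +1 @@
-old
+new
"""
print(get_old_filename(diff_part))  # -> 'foo/bar.py'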
def multi_zset(self, name, **kvs):
    """
    Set the scores of multiple keys in zset ``name``, given as keyword
    arguments.

    :param string name: the zset name
    :param dict kvs: key/score pairs
    :return: the number of successful creations
    :rtype: int

    >>> ssdb.multi_zset('zset_4', a=100, b=80, c=90, d=70)
    4
    >>> ssdb.multi_zset('zset_4', a=100, b=80, c=90, d=70)
    0
    >>> ssdb.multi_zset('zset_4', a=100, b=80, c=90, d=70, e=60)
    1
    """
    for k, v in kvs.items():
        kvs[k] = get_integer(k, int(v))
    return self.execute_command('multi_zset', name, *dict_to_list(kvs))
def load_learner(path: PathOrStr, file: PathLikeOrBinaryStream = 'export.pkl',
                 test: ItemList = None, **db_kwargs):
    "Load a `Learner` object saved with `export_state` in `path/file` with empty data, optionally add `test` and load on `cpu`. `file` can be file-like (file or buffer)"
    source = Path(path)/file if is_pathlike(file) else file
    state = (torch.load(source, map_location='cpu')
             if defaults.device == torch.device('cpu') else torch.load(source))
    model = state.pop('model')
    src = LabelLists.load_state(path, state.pop('data'))
    if test is not None:
        src.add_test(test)
    data = src.databunch(**db_kwargs)
    cb_state = state.pop('cb_state')
    clas_func = state.pop('cls')
    res = clas_func(data, model, **state)
    res.callback_fns = state['callback_fns']  # to avoid duplicates
    res.callbacks = [load_callback(c, s, res) for c, s in cb_state.items()]
    return res
def verify_signature(args):
    """ Verify a previously signed binary image, using the ECDSA public key """
    key_data = args.keyfile.read()
    if b"-BEGIN EC PRIVATE KEY" in key_data:
        sk = ecdsa.SigningKey.from_pem(key_data)
        vk = sk.get_verifying_key()
    elif b"-BEGIN PUBLIC KEY" in key_data:
        vk = ecdsa.VerifyingKey.from_pem(key_data)
    elif len(key_data) == 64:
        vk = ecdsa.VerifyingKey.from_string(key_data, curve=ecdsa.NIST256p)
    else:
        raise esptool.FatalError(
            "Verification key does not appear to be an EC key in PEM format "
            "or binary EC public key data. Unsupported")

    if vk.curve != ecdsa.NIST256p:
        raise esptool.FatalError(
            "Public key uses incorrect curve. ESP32 Secure Boot only "
            "supports NIST256p (openssl calls this curve 'prime256v1')")

    binary_content = args.datafile.read()
    data = binary_content[0:-68]
    sig_version, signature = struct.unpack("I64s", binary_content[-68:])
    if sig_version != 0:
        raise esptool.FatalError(
            "Signature block has version %d. This version of espsecure "
            "only supports version 0." % sig_version)
    print("Verifying %d bytes of data" % len(data))
    try:
        if vk.verify(signature, data, hashlib.sha256):
            print("Signature is valid")
        else:
            raise esptool.FatalError("Signature is not valid")
    except ecdsa.keys.BadSignatureError:
        raise esptool.FatalError("Signature is not valid")
def parse_model_file(path):
    """Parse a file as a list of model reactions

    The file format is detected and the file is parsed accordingly. The file
    is specified as a file path that will be opened for reading. Path can be
    given as a string or a context.
    """
    context = FilePathContext(path)
    format = resolve_format(None, context.filepath)

    if format == 'tsv':
        logger.debug('Parsing model file {} as TSV'.format(context.filepath))
        with context.open('r') as f:
            for reaction_id in parse_model_table_file(context, f):
                yield reaction_id
    elif format == 'yaml':
        logger.debug('Parsing model file {} as YAML'.format(context.filepath))
        with context.open('r') as f:
            for reaction_id in parse_model_yaml_file(context, f):
                yield reaction_id
def _busy_wait_ms(self, ms):
    """Busy wait for the specified number of milliseconds."""
    start = time.time()
    delta = ms / 1000.0
    while (time.time() - start) <= delta:
        pass
def open_recruitment(self, n=1):
    """Return initial experiment URL list, plus instructions for finding
    subsequent recruitment events in experiment logs.
    """
    logger.info("Opening HotAir recruitment for {} participants".format(n))
    recruitments = self.recruit(n)
    message = "Recruitment requests will open browser windows automatically."
    return {"items": recruitments, "message": message}
def AddOperands(self, lhs, rhs):
    """Add the left and right operands."""
    if isinstance(lhs, Expression) and isinstance(rhs, Expression):
        self.args = [lhs, rhs]
    else:
        raise errors.ParseError(
            'Expected expression, got {0:s} {1:s} {2:s}'.format(
                lhs, self.operator, rhs))
def get_batch_unlock_gain(
        channel_state: NettingChannelState,
) -> UnlockGain:
    """Collect amounts for unlocked/unclaimed locks and onchain unlocked locks.

    Note: this function does not check expiry, so the values only make
    sense during settlement.

    Returns:
        gain_from_partner_locks: locks amount received and unlocked on-chain
        gain_from_our_locks: locks amount which are unlocked or unclaimed
    """
    gain_from_partner_locks = TokenAmount(sum(
        unlock.lock.amount
        for unlock in channel_state.partner_state.secrethashes_to_onchain_unlockedlocks.values()
    ))

    """
    The current participant will gain from unlocking its own locks when:

    - The partner never managed to provide the secret to unlock the locked
      amount.
    - The partner provided the secret to claim the locked amount but the
      current participant node never sent out the unlocked balance proof
      and the partner did not unlock the lock on-chain.
    """
    our_locked_locks_amount = sum(
        lock.amount
        for lock in channel_state.our_state.secrethashes_to_lockedlocks.values()
    )
    our_unclaimed_locks_amount = sum(
        lock.amount
        for lock in channel_state.our_state.secrethashes_to_unlockedlocks.values()
    )
    gain_from_our_locks = TokenAmount(
        our_locked_locks_amount + our_unclaimed_locks_amount)

    return UnlockGain(
        from_partner_locks=gain_from_partner_locks,
        from_our_locks=gain_from_our_locks,
    )
def copy_contents(self, fileinstance, progress_callback=None,
                  chunk_size=None, **kwargs):
    """Copy this file instance into another file instance."""
    if not fileinstance.readable:
        raise ValueError('Source file instance is not readable.')
    if not self.size == 0:
        raise ValueError('File instance has data.')

    self.set_uri(
        *self.storage(**kwargs).copy(
            fileinstance.storage(**kwargs),
            chunk_size=chunk_size,
            progress_callback=progress_callback))
def _item_sources(self):
    """List of places to look-up items for key-completion"""
    return [self.data_vars, self.coords, {d: self[d] for d in self.dims},
            LevelCoordinatesSource(self)]
def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
    """
    Evaluate the model on the given dataset.

    Parameters
    ----------
    dataset : SFrame
        Dataset in the same format used for training. The columns names and
        types of the dataset must be the same as that used in training.

    metric : str, optional
        Name of the evaluation metric. Possible values are:

        'auto'      : Compute all metrics.
        'rmse'      : Rooted mean squared error.
        'max_error' : Maximum error.

    missing_value_action : str, optional
        Action to perform when missing values are encountered. Can be one
        of:

        - 'auto': By default the model will treat missing value as is.
        - 'impute': Proceed with evaluation by filling in the missing
          values with the mean of the training data. Missing values are
          also imputed if an entire column of data is missing during
          evaluation.
        - 'error': Do not proceed with evaluation and terminate with an
          error message.

    Returns
    -------
    out : dict
        A dictionary containing the evaluation result.

    See Also
    --------
    create, predict

    Examples
    --------
    .. sourcecode:: python

      >>> results = model.evaluate(test_data, 'rmse')
    """
    _raise_error_evaluation_metric_is_valid(
        metric, ['auto', 'rmse', 'max_error'])
    return super(RandomForestRegression, self).evaluate(
        dataset, missing_value_action=missing_value_action, metric=metric)
def _index2word(self):
    """Mapping from indices to words.

    WARNING: this may go out-of-date, because it is a copy, not a view into
    the Vocab.

    :return: a list of strings
    """
    # TODO(kelvinguu): it would be nice to just use `dict.viewkeys`,
    # but unfortunately those are not indexable
    compute_index2word = lambda: self.keys()  # this works because self is an OrderedDict

    # create if it doesn't exist
    try:
        self._index2word_cache
    except AttributeError:
        self._index2word_cache = compute_index2word()

    # update if it is out of date
    if len(self._index2word_cache) != len(self):
        self._index2word_cache = compute_index2word()

    return self._index2word_cache
def is_likely_link(text):
    '''Return whether the text is likely to be a link.

    This function assumes that leading/trailing whitespace has already been
    removed.

    Returns:
        bool
    '''
    text = text.lower()

    # Check for absolute or relative URLs
    if (
        text.startswith('http://')
        or text.startswith('https://')
        or text.startswith('ftp://')
        or text.startswith('/')
        or text.startswith('//')
        or text.endswith('/')
        or text.startswith('../')
    ):
        return True

    # Check if it has an alphanumeric file extension and is not a decimal
    # number
    dummy, dot, file_extension = text.rpartition('.')
    if dot and file_extension and len(file_extension) <= 4:
        file_extension_set = frozenset(file_extension)
        if file_extension_set \
                and file_extension_set <= ALPHANUMERIC_CHARS \
                and not file_extension_set <= NUMERIC_CHARS:
            if file_extension in COMMON_TLD:
                return False
            file_type = mimetypes.guess_type(text, strict=False)[0]
            if file_type:
                return True
            else:
                return False
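A few spot checks (assuming ALPHANUMERIC_CHARS, NUMERIC_CHARS, and COMMON_TLD are defined as the function expects); note the function returns None, which is falsy, when no rule matches:

assert is_likely_link('https://example.com/page')
assert is_likely_link('images/photo.jpeg')        # known MIME type for .jpeg
assert not is_likely_link('3.14')                 # decimal number, not a file extension
assert not is_likely_link('example.com')          # common TLD, not a filename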
def forward(self, agent_qs, states): """Forward pass for the mixer. Arguments: agent_qs: Tensor of shape [B, T, n_agents, n_actions] states: Tensor of shape [B, T, state_dim] """ bs = agent_qs.size(0) states = states.reshape(-1, self.state_dim) agent_qs = agent_qs.view(-1, 1, self.n_agents) # First layer w1 = th.abs(self.hyper_w_1(states)) b1 = self.hyper_b_1(states) w1 = w1.view(-1, self.n_agents, self.embed_dim) b1 = b1.view(-1, 1, self.embed_dim) hidden = F.elu(th.bmm(agent_qs, w1) + b1) # Second layer w_final = th.abs(self.hyper_w_final(states)) w_final = w_final.view(-1, self.embed_dim, 1) # State-dependent bias v = self.V(states).view(-1, 1, 1) # Compute final output y = th.bmm(hidden, w_final) + v # Reshape and return q_tot = y.view(bs, -1, 1) return q_tot
Forward pass for the mixer.

Arguments:
    agent_qs: Tensor of shape [B, T, n_agents] holding the
        chosen-action Q-value of each agent
    states: Tensor of shape [B, T, state_dim]
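A shape sanity check for the mixer's bmm pipeline. This is a sketch: the stand-in hypernetworks are single linear layers (the real hyper_w_1 etc. may be deeper), but the reshape/bmm sequence mirrors the forward pass above:

import torch as th
import torch.nn.functional as F

B, T, n_agents, state_dim, embed_dim = 4, 5, 3, 10, 32
agent_qs = th.randn(B, T, n_agents)            # chosen-action Q per agent
states = th.randn(B, T, state_dim)

# Stand-in hypernetworks (assumed shapes, not the original module).
hyper_w_1 = th.nn.Linear(state_dim, n_agents * embed_dim)
hyper_b_1 = th.nn.Linear(state_dim, embed_dim)
hyper_w_final = th.nn.Linear(state_dim, embed_dim)
V = th.nn.Linear(state_dim, 1)

s = states.reshape(-1, state_dim)              # [B*T, state_dim]
q = agent_qs.view(-1, 1, n_agents)             # [B*T, 1, n_agents]
w1 = th.abs(hyper_w_1(s)).view(-1, n_agents, embed_dim)
b1 = hyper_b_1(s).view(-1, 1, embed_dim)
hidden = F.elu(th.bmm(q, w1) + b1)             # [B*T, 1, embed_dim]
w_final = th.abs(hyper_w_final(s)).view(-1, embed_dim, 1)
v = V(s).view(-1, 1, 1)
q_tot = (th.bmm(hidden, w_final) + v).view(B, -1, 1)
assert q_tot.shape == (B, T, 1)

The th.abs on the hypernetwork outputs keeps the mixing weights non-negative, so q_tot is monotonic in each agent's Q-value.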
def _get_table_info(self):
    """Inspect the database to get field names"""
    self.fields = []
    self.field_info = {}
    self.cursor.execute('PRAGMA table_info (%s)' %self.name)
    for field_info in self.cursor.fetchall():
        fname = field_info[1].encode('utf-8')
        self.fields.append(fname)
        ftype = field_info[2].encode('utf-8')
        info = {'type':ftype}
        # can the field be NULL?
        info['NOT NULL'] = field_info[3] != 0
        # default value
        default = field_info[4]
        if isinstance(default,unicode):
            default = guess_default_fmt(default)
        info['DEFAULT'] = default
        self.field_info[fname] = info
Inspect the database to get field names
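For reference, this is what PRAGMA table_info yields per column, shown with the stdlib sqlite3 module (a self-contained sketch, independent of the class above):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE demo (id INTEGER PRIMARY KEY,'
             ' name TEXT NOT NULL, age INTEGER DEFAULT 0)')
# Each row is (cid, name, type, notnull, dflt_value, pk)
for cid, name, ftype, notnull, default, pk in conn.execute('PRAGMA table_info (demo)'):
    print(cid, name, ftype, notnull, default, pk)
# 0 id INTEGER 0 None 1
# 1 name TEXT 1 None 0
# 2 age INTEGER 0 0 0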
def requirements(collector): """Just print out the requirements""" out = sys.stdout artifact = collector.configuration['dashmat'].artifact if artifact not in (None, "", NotSpecified): if isinstance(artifact, six.string_types): out = open(artifact, 'w') else: out = artifact for active in collector.configuration['__imported__'].values(): for requirement in active.requirements(): out.write("{0}\n".format(requirement))
Just print out the requirements
async def create(
        cls,
        node: Union[Node, str],
        interface_type: InterfaceType = InterfaceType.PHYSICAL, *,
        name: str = None,
        mac_address: str = None,
        tags: Iterable[str] = None,
        vlan: Union[Vlan, int] = None,
        parent: Union[Interface, int] = None,
        parents: Iterable[Union[Interface, int]] = None,
        mtu: int = None,
        accept_ra: bool = None,
        autoconf: bool = None,
        bond_mode: str = None,
        bond_miimon: int = None,
        bond_downdelay: int = None,
        bond_updelay: int = None,
        bond_lacp_rate: str = None,
        bond_xmit_hash_policy: str = None,
        bridge_stp: bool = None,
        bridge_fd: int = None):
    """
    Create an `Interface` in MAAS.

    :param node: Node to create the interface on.
    :type node: `Node` or `str`
    :param interface_type: Type of interface to create (optional).
    :type interface_type: `InterfaceType`
    :param name: The name for the interface (optional).
    :type name: `str`
    :param tags: List of tags to add to the interface.
    :type tags: sequence of `str`
    :param mtu: The MTU for the interface (optional).
    :type mtu: `int`
    :param vlan: VLAN the interface is connected to (optional).
    :type vlan: `Vlan` or `int`
    :param accept_ra: True if the interface should accept router
        advertisements (optional).
    :type accept_ra: `bool`
    :param autoconf: True if the interface should auto-configure.
    :type autoconf: `bool`

    The following parameters are specific to a physical interface.

    :param mac_address: The MAC address for the interface.
    :type mac_address: `str`

    The following parameters are specific to a bond interface.

    :param parents: Parent interfaces that make up the bond.
    :type parents: sequence of `Interface` or `int`
    :param mac_address: MAC address to use for the bond (optional).
    :type mac_address: `str`
    :param bond_mode: The operating mode of the bond (optional).
    :type bond_mode: `str`
    :param bond_miimon: The link monitoring frequency in milliseconds
        (optional).
    :type bond_miimon: `int`
    :param bond_downdelay: Specifies the time, in milliseconds, to wait
        before disabling a slave after a link failure has been detected
        (optional).
    :type bond_downdelay: `int`
    :param bond_updelay: Specifies the time, in milliseconds, to wait
        before enabling a slave after a link recovery has been detected.
    :type bond_updelay: `int`
    :param bond_lacp_rate: Option specifying the rate at which to ask the
        link partner to transmit LACPDU packets in 802.3ad mode
        (optional).
    :type bond_lacp_rate: `str`
    :param bond_xmit_hash_policy: The transmit hash policy to use for
        slave selection in balance-xor, 802.3ad, and tlb modes (optional).
    :type bond_xmit_hash_policy: `str`

    The following parameters are specific to a VLAN interface.

    :param parent: Parent interface for this VLAN interface.
    :type parent: `Interface` or `int`

    The following parameters are specific to a bridge interface.

    :param parent: Parent interface for this bridge interface.
    :type parent: `Interface` or `int`
    :param mac_address: The MAC address for the interface (optional).
    :type mac_address: `str`
    :param bridge_stp: Turn spanning tree protocol on or off (optional).
    :type bridge_stp: `bool`
    :param bridge_fd: Set bridge forward delay to time seconds (optional).
    :type bridge_fd: `int`

    :returns: The created Interface.
    :rtype: `Interface`
    """
    params = {}
    if isinstance(node, str):
        params['system_id'] = node
    elif isinstance(node, Node):
        params['system_id'] = node.system_id
    else:
        raise TypeError(
            'node must be a Node or str, not %s' % (
                type(node).__name__))

    if name is not None:
        params['name'] = name
    if tags is not None:
        params['tags'] = tags
    if mtu is not None:
        params['mtu'] = mtu
    if vlan is not None:
        if isinstance(vlan, Vlan):
            params['vlan'] = vlan.id
        elif isinstance(vlan, int):
            params['vlan'] = vlan
        else:
            raise TypeError(
                'vlan must be a Vlan or int, not %s' % (
                    type(vlan).__name__))
    if accept_ra is not None:
        params['accept_ra'] = accept_ra
    if autoconf is not None:
        params['autoconf'] = autoconf

    handler = None
    if not isinstance(interface_type, InterfaceType):
        raise TypeError(
            'interface_type must be an InterfaceType, not %s' % (
                type(interface_type).__name__))
    if interface_type == InterfaceType.PHYSICAL:
        handler = cls._handler.create_physical
        if mac_address:
            params['mac_address'] = mac_address
        else:
            raise ValueError(
                'mac_address required for physical interface')
    elif interface_type == InterfaceType.BOND:
        handler = cls._handler.create_bond
        if parent is not None:
            raise ValueError("use parents not parent for bond interface")
        if not isinstance(parents, Iterable):
            raise TypeError(
                'parents must be an iterable, not %s' % (
                    type(parents).__name__))
        if len(parents) == 0:
            raise ValueError(
                'at least one parent required for bond interface')
        params['parents'] = list(gen_parents(parents))
        if not name:
            raise ValueError('name is required for bond interface')
        if mac_address is not None:
            params['mac_address'] = mac_address
        if bond_mode is not None:
            params['bond_mode'] = bond_mode
        if bond_miimon is not None:
            params['bond_miimon'] = bond_miimon
        if bond_downdelay is not None:
            params['bond_downdelay'] = bond_downdelay
        if bond_updelay is not None:
            params['bond_updelay'] = bond_updelay
        if bond_lacp_rate is not None:
            params['bond_lacp_rate'] = bond_lacp_rate
        if bond_xmit_hash_policy is not None:
            params['bond_xmit_hash_policy'] = bond_xmit_hash_policy
    elif interface_type == InterfaceType.VLAN:
        handler = cls._handler.create_vlan
        if parents is not None:
            raise ValueError("use parent not parents for VLAN interface")
        if parent is None:
            raise ValueError("parent is required for VLAN interface")
        params['parent'] = get_parent(parent)
        if vlan is None:
            raise ValueError("vlan is required for VLAN interface")
    elif interface_type == InterfaceType.BRIDGE:
        handler = cls._handler.create_bridge
        if parents is not None:
            raise ValueError("use parent not parents for bridge interface")
        if parent is None:
            raise ValueError("parent is required for bridge interface")
        params['parent'] = get_parent(parent)
        if not name:
            raise ValueError('name is required for bridge interface')
        if mac_address is not None:
            params['mac_address'] = mac_address
        if bridge_stp is not None:
            params['bridge_stp'] = bridge_stp
        if bridge_fd is not None:
            params['bridge_fd'] = bridge_fd
    else:
        raise ValueError(
            "cannot create an interface of type: %s" % interface_type)
    return cls._object(await handler(**params))
Create an `Interface` in MAAS.

:param node: Node to create the interface on.
:type node: `Node` or `str`
:param interface_type: Type of interface to create (optional).
:type interface_type: `InterfaceType`
:param name: The name for the interface (optional).
:type name: `str`
:param tags: List of tags to add to the interface.
:type tags: sequence of `str`
:param mtu: The MTU for the interface (optional).
:type mtu: `int`
:param vlan: VLAN the interface is connected to (optional).
:type vlan: `Vlan` or `int`
:param accept_ra: True if the interface should accept router
    advertisements (optional).
:type accept_ra: `bool`
:param autoconf: True if the interface should auto-configure.
:type autoconf: `bool`

The following parameters are specific to a physical interface.

:param mac_address: The MAC address for the interface.
:type mac_address: `str`

The following parameters are specific to a bond interface.

:param parents: Parent interfaces that make up the bond.
:type parents: sequence of `Interface` or `int`
:param mac_address: MAC address to use for the bond (optional).
:type mac_address: `str`
:param bond_mode: The operating mode of the bond (optional).
:type bond_mode: `str`
:param bond_miimon: The link monitoring frequency in milliseconds
    (optional).
:type bond_miimon: `int`
:param bond_downdelay: Specifies the time, in milliseconds, to wait
    before disabling a slave after a link failure has been detected
    (optional).
:type bond_downdelay: `int`
:param bond_updelay: Specifies the time, in milliseconds, to wait
    before enabling a slave after a link recovery has been detected.
:type bond_updelay: `int`
:param bond_lacp_rate: Option specifying the rate at which to ask the
    link partner to transmit LACPDU packets in 802.3ad mode (optional).
:type bond_lacp_rate: `str`
:param bond_xmit_hash_policy: The transmit hash policy to use for
    slave selection in balance-xor, 802.3ad, and tlb modes (optional).
:type bond_xmit_hash_policy: `str`

The following parameters are specific to a VLAN interface.

:param parent: Parent interface for this VLAN interface.
:type parent: `Interface` or `int`

The following parameters are specific to a bridge interface.

:param parent: Parent interface for this bridge interface.
:type parent: `Interface` or `int`
:param mac_address: The MAC address for the interface (optional).
:type mac_address: `str`
:param bridge_stp: Turn spanning tree protocol on or off (optional).
:type bridge_stp: `bool`
:param bridge_fd: Set bridge forward delay to time seconds (optional).
:type bridge_fd: `int`

:returns: The created Interface.
:rtype: `Interface`
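A hypothetical usage sketch for the bond branch. It assumes `Interface` is already bound to an authenticated MAAS client (how that binding happens is outside this snippet), and the system_id and parent interface ids are placeholders:

import asyncio

async def make_bond():
    bond = await Interface.create(
        node='abc123',                      # system_id of the node
        interface_type=InterfaceType.BOND,
        name='bond0',
        parents=[4, 5],                     # physical interfaces to enslave
        bond_mode='802.3ad',
        bond_lacp_rate='fast')
    return bond

# asyncio.get_event_loop().run_until_complete(make_bond())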
def format_help(help): """Formats the help string.""" help = help.replace("Options:", str(crayons.normal("Options:", bold=True))) help = help.replace( "Usage: pipenv", str("Usage: {0}".format(crayons.normal("pipenv", bold=True))) ) help = help.replace(" check", str(crayons.red(" check", bold=True))) help = help.replace(" clean", str(crayons.red(" clean", bold=True))) help = help.replace(" graph", str(crayons.red(" graph", bold=True))) help = help.replace(" install", str(crayons.magenta(" install", bold=True))) help = help.replace(" lock", str(crayons.green(" lock", bold=True))) help = help.replace(" open", str(crayons.red(" open", bold=True))) help = help.replace(" run", str(crayons.yellow(" run", bold=True))) help = help.replace(" shell", str(crayons.yellow(" shell", bold=True))) help = help.replace(" sync", str(crayons.green(" sync", bold=True))) help = help.replace(" uninstall", str(crayons.magenta(" uninstall", bold=True))) help = help.replace(" update", str(crayons.green(" update", bold=True))) additional_help = """ Usage Examples: Create a new project using Python 3.7, specifically: $ {1} Remove project virtualenv (inferred from current directory): $ {9} Install all dependencies for a project (including dev): $ {2} Create a lockfile containing pre-releases: $ {6} Show a graph of your installed dependencies: $ {4} Check your installed dependencies for security vulnerabilities: $ {7} Install a local setup.py into your virtual environment/Pipfile: $ {5} Use a lower-level pip command: $ {8} Commands:""".format( crayons.red("pipenv --three"), crayons.red("pipenv --python 3.7"), crayons.red("pipenv install --dev"), crayons.red("pipenv lock"), crayons.red("pipenv graph"), crayons.red("pipenv install -e ."), crayons.red("pipenv lock --pre"), crayons.red("pipenv check"), crayons.red("pipenv run pip freeze"), crayons.red("pipenv --rm"), ) help = help.replace("Commands:", additional_help) return help
Formats the help string.
def __if_not_basestring(text_object): """Convert to str""" converted_str = text_object if not isinstance(text_object, str): converted_str = str(text_object) return converted_str
Convert to str
def _ReadMemberFooter(self, file_object): """Reads a member footer. Args: file_object (FileIO): file-like object to read from. Raises: FileFormatError: if the member footer cannot be read. """ file_offset = file_object.get_offset() member_footer = self._ReadStructure( file_object, file_offset, self._MEMBER_FOOTER_SIZE, self._MEMBER_FOOTER, 'member footer') self.uncompressed_data_size = member_footer.uncompressed_data_size
Reads a member footer. Args: file_object (FileIO): file-like object to read from. Raises: FileFormatError: if the member footer cannot be read.
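Assuming this reader targets the gzip member footer (RFC 1952), the footer is 8 bytes: the CRC-32 of the uncompressed data followed by the uncompressed size modulo 2**32, both little-endian. A stdlib sketch of that layout:

import struct
import zlib

data = b'hello world'
footer = struct.pack('<II', zlib.crc32(data), len(data) % 2**32)
crc32, uncompressed_data_size = struct.unpack('<II', footer)
assert uncompressed_data_size == len(data)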
def characters (self, data): """ Print characters. @param data: the character data @type data: string @return: None """ data = data.encode(self.encoding, "ignore") self.fd.write(data)
Print characters. @param data: the character data @type data: string @return: None
def load_modes(self, input_modes=None):
    """
    Loads modes (GameMode objects) to be supported by the game object. Four
    default modes are provided (normal, easy, hard, and hex) but others could
    be provided either by calling load_modes directly or passing a list of
    GameMode objects to the instantiation call.

    :param input_modes: A list of GameMode objects; nb: even if only one new
    GameMode object is provided, it MUST be passed as a list - for example,
    passing GameMode gm1 would require passing [gm1] NOT gm1.

    :return: None. The game modes (defaults plus any added) are stored on
    the game object.
    """

    # Set default game modes
    _modes = [
        GameMode(
            mode="normal", priority=2, digits=4, digit_type=DigitWord.DIGIT, guesses_allowed=10
        ),
        GameMode(
            mode="easy", priority=1, digits=3, digit_type=DigitWord.DIGIT, guesses_allowed=6
        ),
        GameMode(
            mode="hard", priority=3, digits=6, digit_type=DigitWord.DIGIT, guesses_allowed=6
        ),
        GameMode(
            mode="hex", priority=4, digits=4, digit_type=DigitWord.HEXDIGIT, guesses_allowed=10
        )
    ]

    if input_modes is not None:
        if not isinstance(input_modes, list):
            raise TypeError("Expected list of input_modes")

        for mode in input_modes:
            if not isinstance(mode, GameMode):
                raise TypeError("Expected list to contain only GameMode objects")
            _modes.append(mode)

    self._game_modes = copy.deepcopy(_modes)
Loads modes (GameMode objects) to be supported by the game object. Four
default modes are provided (normal, easy, hard, and hex) but others could
be provided either by calling load_modes directly or passing a list of
GameMode objects to the instantiation call.

:param input_modes: A list of GameMode objects; nb: even if only one new
GameMode object is provided, it MUST be passed as a list - for example,
passing GameMode gm1 would require passing [gm1] NOT gm1.

:return: None. The game modes (defaults plus any added) are stored on
the game object.
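A usage sketch for adding a custom mode. The `game` instance and the "octal" mode are illustrative; the GameMode constructor arguments mirror the defaults above:

octal_mode = GameMode(
    mode="octal",              # illustrative custom mode name
    priority=5,
    digits=4,
    digit_type=DigitWord.DIGIT,
    guesses_allowed=8
)
game.load_modes(input_modes=[octal_mode])   # note the list wrapper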
def multi_subplots_time(DataArray, SubSampleN=1, units='s', xlim=None, ylim=None, LabelArray=[], show_fig=True):
    """
    plot the time trace on multiple axes

    Parameters
    ----------
    DataArray : array-like
        array of DataObject instances for which to plot the time traces
    SubSampleN : int, optional
        Number of intervals between points to remove (to sub-sample data
        so that you effectively have lower sample rate to make plotting
        easier and quicker).
    units : str, optional
        units of time to plot on the x axis - defaults to s
    xlim : array-like, optional
        2 element array specifying the lower and upper x limit for which to
        plot the time signal
    ylim : array-like, optional
        2 element array specifying the lower and upper y limit for which to
        plot the time signal
    LabelArray : array-like, optional
        array of labels for each data-set to be plotted
    show_fig : bool, optional
        If True runs plt.show() before returning figure
        if False it just returns the figure object.
        (the default is True, it shows the figure)

    Returns
    -------
    fig : matplotlib.figure.Figure object
        The figure object created
    axs : list of matplotlib.axes.Axes objects
        The list of axes object created
    """
    unit_prefix = units[:-1] # removed the last char
    NumDataSets = len(DataArray)

    if LabelArray == []:
        LabelArray = ["DataSet {}".format(i)
                      for i in _np.arange(0, len(DataArray), 1)]
    fig, axs = _plt.subplots(NumDataSets, 1)

    for i, data in enumerate(DataArray):
        axs[i].plot(unit_conversion(data.time.get_array()[::SubSampleN], unit_prefix),
                    data.voltage[::SubSampleN],
                    alpha=0.8, label=LabelArray[i])
        axs[i].set_xlabel("time ({})".format(units))
        axs[i].grid(which="major")
        axs[i].legend(loc="best")
        axs[i].set_ylabel("voltage (V)")
        if xlim is not None:
            axs[i].set_xlim(xlim)
        if ylim is not None:
            axs[i].set_ylim(ylim)
    if show_fig:
        _plt.show()
    return fig, axs
plot the time trace on multiple axes

Parameters
----------
DataArray : array-like
    array of DataObject instances for which to plot the time traces
SubSampleN : int, optional
    Number of intervals between points to remove (to sub-sample data
    so that you effectively have lower sample rate to make plotting
    easier and quicker).
units : str, optional
    units of time to plot on the x axis - defaults to s
xlim : array-like, optional
    2 element array specifying the lower and upper x limit for which to
    plot the time signal
ylim : array-like, optional
    2 element array specifying the lower and upper y limit for which to
    plot the time signal
LabelArray : array-like, optional
    array of labels for each data-set to be plotted
show_fig : bool, optional
    If True runs plt.show() before returning figure
    if False it just returns the figure object.
    (the default is True, it shows the figure)

Returns
-------
fig : matplotlib.figure.Figure object
    The figure object created
axs : list of matplotlib.axes.Axes objects
    The list of axes object created
def logout(self):
    """logout func (quit browser)"""
    try:
        self.browser.quit()
    except Exception:
        raise exceptions.BrowserException(self.brow_name, "not started")
    self.vbro.stop()
    logger.info("logged out")
    return True
logout func (quit browser)
def auth_user(self, username, password):
    """
    Authenticate the user in database
    :param username: Username/Login
    :param password: User password
    :return: Returns a dict representing the user
    """
    password_hash = hashlib.sha512(password.encode("utf-8")).hexdigest()
    user = self._database.users.find_one(
        {"username": username, "password": password_hash, "activate": {"$exists": False}})

    return user if user is not None and self.connect_user(username, user["realname"], user["email"],
                                                          user["language"]) else None
Authenticate the user in database
:param username: Username/Login
:param password: User password
:return: Returns a dict representing the user
def make_source_mask(data, snr, npixels, mask=None, mask_value=None, filter_fwhm=None, filter_size=3, filter_kernel=None, sigclip_sigma=3.0, sigclip_iters=5, dilate_size=11): """ Make a source mask using source segmentation and binary dilation. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. filter_fwhm : float, optional The full-width at half-maximum (FWHM) of the Gaussian kernel to filter the image before thresholding. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_size : float, optional The size of the square Gaussian kernel image. Used only if ``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. ``filter_kernel`` overrides ``filter_fwhm`` and ``filter_size``. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. dilate_size : int, optional The size of the square array used to dilate the segmentation image. Returns ------- mask : 2D `~numpy.ndarray`, bool A 2D boolean image containing the source mask. """ from scipy import ndimage threshold = detect_threshold(data, snr, background=None, error=None, mask=mask, mask_value=None, sigclip_sigma=sigclip_sigma, sigclip_iters=sigclip_iters) kernel = None if filter_kernel is not None: kernel = filter_kernel if filter_fwhm is not None: sigma = filter_fwhm * gaussian_fwhm_to_sigma kernel = Gaussian2DKernel(sigma, x_size=filter_size, y_size=filter_size) if kernel is not None: kernel.normalize() segm = detect_sources(data, threshold, npixels, filter_kernel=kernel) selem = np.ones((dilate_size, dilate_size)) return ndimage.binary_dilation(segm.data.astype(np.bool), selem)
Make a source mask using source segmentation and binary dilation. Parameters ---------- data : array_like The 2D array of the image. snr : float The signal-to-noise ratio per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. filter_fwhm : float, optional The full-width at half-maximum (FWHM) of the Gaussian kernel to filter the image before thresholding. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_size : float, optional The size of the square Gaussian kernel image. Used only if ``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size`` are ignored if ``filter_kernel`` is defined. filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. ``filter_kernel`` overrides ``filter_fwhm`` and ``filter_size``. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. dilate_size : int, optional The size of the square array used to dilate the segmentation image. Returns ------- mask : 2D `~numpy.ndarray`, bool A 2D boolean image containing the source mask.
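A usage sketch, assuming the photutils-style helpers this function calls (detect_threshold, detect_sources) are importable in the same environment:

import numpy as np

rng = np.random.RandomState(42)
data = rng.normal(0., 1., (101, 101))
data[45:55, 45:55] += 10.          # inject a bright square "source"
mask = make_source_mask(data, snr=3, npixels=5, dilate_size=11)
print(mask.sum(), 'pixels masked around the injected source')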
def start_drag(self, sprite, cursor_x = None, cursor_y = None): """start dragging given sprite""" cursor_x, cursor_y = cursor_x or sprite.x, cursor_y or sprite.y self._mouse_down_sprite = self._drag_sprite = sprite sprite.drag_x, sprite.drag_y = self._drag_sprite.x, self._drag_sprite.y self.__drag_start_x, self.__drag_start_y = cursor_x, cursor_y self.__drag_started = True
start dragging given sprite
def stellar_luminosity2(self, steps=10000):
    """
    DEPRECATED: ADW 2017-09-20

    Compute the stellar luminosity (L_Sol; average per star).
    Uses "sample" to generate mass sample and pdf.  The range of
    integration only covers the input isochrone data (no
    extrapolation used), but this seems like a sub-percent effect if
    the isochrone goes to 0.15 Msun for the old and metal-poor
    stellar populations of interest.

    Note that the stellar luminosity is very sensitive to the
    post-AGB population.
    """
    import warnings
    msg = "'%s.stellar_luminosity2': ADW 2017-09-20"%self.__class__.__name__
    # actually emit the deprecation warning (constructing the exception
    # alone does nothing)
    warnings.warn(msg, DeprecationWarning)
    mass_init, mass_pdf, mass_act, mag_1, mag_2 = self.sample(mass_steps=steps)
    luminosity_interpolation = scipy.interpolate.interp1d(self.mass_init, self.luminosity,fill_value=0,bounds_error=False)
    luminosity = luminosity_interpolation(mass_init)
    return np.sum(luminosity * mass_pdf)
DEPRECATED: ADW 2017-09-20 Compute the stellar luminosity (L_Sol; average per star). Uses "sample" to generate mass sample and pdf. The range of integration only covers the input isochrone data (no extrapolation used), but this seems like a sub-percent effect if the isochrone goes to 0.15 Msun for the old and metal-poor stellar populations of interest. Note that the stellar luminosity is very sensitive to the post-AGB population.
def design(self, max_stimuli=-1, max_inhibitors=-1, max_experiments=10, relax=False, configure=None): """ Finds all optimal experimental designs using up to :attr:`max_experiments` experiments, such that each experiment has up to :attr:`max_stimuli` stimuli and :attr:`max_inhibitors` inhibitors. Each optimal experimental design is appended in the attribute :attr:`designs` as an instance of :class:`caspo.core.clamping.ClampingList`. Example:: >>> from caspo import core, design >>> networks = core.LogicalNetworkList.from_csv('behaviors.csv') >>> setup = core.Setup.from_json('setup.json') >>> designer = design.Designer(networks, setup) >>> designer.design(3, 2) >>> for i,d in enumerate(designer.designs): ... f = 'design-%s' % i ... d.to_csv(f, stimuli=self.setup.stimuli, inhibitors=self.setup.inhibitors) Parameters ---------- max_stimuli : int Maximum number of stimuli per experiment max_inhibitors : int Maximum number of inhibitors per experiment max_experiments : int Maximum number of experiments per design relax : boolean Whether to relax the full-pairwise networks discrimination (True) or not (False). If relax equals True, the number of experiments per design is fixed to :attr:`max_experiments` configure : callable Callable object responsible of setting clingo configuration """ self.designs = [] args = ['-c maxstimuli=%s' % max_stimuli, '-c maxinhibitors=%s' % max_inhibitors, '-Wno-atom-undefined'] clingo = gringo.Control(args) clingo.conf.solve.opt_mode = 'optN' if configure is not None: configure(clingo.conf) clingo.add("base", [], self.instance) clingo.load(self.encodings['design']) clingo.ground([("base", [])]) if relax: parts = [("step", [step]) for step in xrange(1, max_experiments+1)] parts.append(("diff", [max_experiments + 1])) clingo.ground(parts) ret = clingo.solve(on_model=self.__save__) else: step, ret = 0, gringo.SolveResult.UNKNOWN while step <= max_experiments and ret != gringo.SolveResult.SAT: parts = [] parts.append(("check", [step])) if step > 0: clingo.release_external(gringo.Fun("query", [step-1])) parts.append(("step", [step])) clingo.cleanup_domains() clingo.ground(parts) clingo.assign_external(gringo.Fun("query", [step]), True) ret, step = clingo.solve(on_model=self.__save__), step + 1 self.stats['time_optimum'] = clingo.stats['time_solve'] self.stats['time_enumeration'] = clingo.stats['time_total'] self._logger.info("%s optimal experimental designs found in %.4fs", len(self.designs), self.stats['time_enumeration'])
Finds all optimal experimental designs using up to :attr:`max_experiments` experiments, such that each experiment has up to :attr:`max_stimuli` stimuli and :attr:`max_inhibitors` inhibitors. Each optimal experimental design is appended in the attribute :attr:`designs` as an instance of :class:`caspo.core.clamping.ClampingList`. Example:: >>> from caspo import core, design >>> networks = core.LogicalNetworkList.from_csv('behaviors.csv') >>> setup = core.Setup.from_json('setup.json') >>> designer = design.Designer(networks, setup) >>> designer.design(3, 2) >>> for i,d in enumerate(designer.designs): ... f = 'design-%s' % i ... d.to_csv(f, stimuli=self.setup.stimuli, inhibitors=self.setup.inhibitors) Parameters ---------- max_stimuli : int Maximum number of stimuli per experiment max_inhibitors : int Maximum number of inhibitors per experiment max_experiments : int Maximum number of experiments per design relax : boolean Whether to relax the full-pairwise networks discrimination (True) or not (False). If relax equals True, the number of experiments per design is fixed to :attr:`max_experiments` configure : callable Callable object responsible of setting clingo configuration
def getAsKmlGridAnimation(self, tableName, timeStampedRasters=[], rasterIdFieldName='id', rasterFieldName='raster', documentName='default', alpha=1.0, noDataValue=0, discreet=False): """ Return a sequence of rasters with timestamps as a kml with time markers for animation. :param tableName: Name of the table to extract rasters from :param timeStampedRasters: List of dictionaries with keys: rasterId, dateTime rasterId = a unique integer identifier used to locate the raster (usually value of primary key column) dateTime = a datetime object representing the time the raster occurs e.g: timeStampedRasters = [{ 'rasterId': 1, 'dateTime': datetime(1970, 1, 1)}, { 'rasterId': 2, 'dateTime': datetime(1970, 1, 2)}, { 'rasterId': 3, 'dateTime': datetime(1970, 1, 3)}] :param rasterIdFieldName: Name of the id field for rasters (usually the primary key field) :param rasterFieldName: Name of the field where rasters are stored (of type raster) :param documentName: The name to give to the KML document (will be listed in legend under this name) :param alpha: The transparency to apply to each raster cell :param noDataValue: The value to be used as the no data value (default is 0) :rtype : string """ # Validate alpha if not (alpha >= 0 and alpha <= 1.0): raise ValueError("RASTER CONVERSION ERROR: alpha must be between 0.0 and 1.0.") rasterIds = [] for timeStampedRaster in timeStampedRasters: # Validate dictionary if 'rasterId' not in timeStampedRaster: raise ValueError('RASTER CONVERSION ERROR: rasterId must be provided for each raster.') elif 'dateTime' not in timeStampedRaster: raise ValueError('RASTER CONVERSION ERROR: dateTime must be provided for each raster.') rasterIds.append(str(timeStampedRaster['rasterId'])) # One color ramp to rule them all # Get a single color ramp that is based on the range of values in all the rasters minValue, maxValue = self.getMinMaxOfRasters(session=self._session, table=tableName, rasterIds=rasterIds, rasterIdField=rasterIdFieldName, rasterField=rasterFieldName, noDataValue=noDataValue) mappedColorRamp = ColorRampGenerator.mapColorRampToValues(colorRamp=self._colorRamp, minValue=minValue, maxValue=maxValue, alpha=alpha) # Default to time delta to None deltaTime = None # Calculate delta time between images if more than one time1 = timeStampedRasters[0]['dateTime'] if len(timeStampedRasters) >= 2: time2 = timeStampedRasters[1]['dateTime'] deltaTime = time2 - time1 # Initialize KML Document kml = ET.Element('kml', xmlns='http://www.opengis.net/kml/2.2') document = ET.SubElement(kml, 'Document') docName = ET.SubElement(document, 'name') docName.text = documentName if not discreet: # Embed the color ramp in SLD format document.append(ET.fromstring(mappedColorRamp.getColorMapAsContinuousSLD())) else: values = [] document.append(ET.fromstring(mappedColorRamp.getColorMapAsDiscreetSLD(values))) # Apply special style to hide legend items style = ET.SubElement(document, 'Style', id='check-hide-children') listStyle = ET.SubElement(style, 'ListStyle') listItemType = ET.SubElement(listStyle, 'listItemType') listItemType.text = 'checkHideChildren' styleUrl = ET.SubElement(document, 'styleUrl') styleUrl.text = '#check-hide-children' # Collect unique values uniqueValues = [] # Retrieve the rasters and styles for timeStampedRaster in timeStampedRasters: # Extract variables rasterId = timeStampedRaster['rasterId'] if deltaTime: dateTime = timeStampedRaster['dateTime'] prevDateTime = dateTime - deltaTime # Get polygons for each cell in kml format statement = ''' SELECT x, y, val, ST_AsKML(geom) 
AS polygon FROM ( SELECT (ST_PixelAsPolygons({0})).* FROM {1} WHERE {2}={3} ) AS foo ORDER BY val; '''.format(rasterFieldName, tableName, rasterIdFieldName, rasterId) result = self._session.execute(statement) # Set initial group value groupValue = -9999999.0 # Add polygons to the kml file with styling for row in result: # Value will be None if it is a no data value if row.val: value = float(row.val) else: value = None polygonString = row.polygon i = int(row.x) j = int(row.y) # Only create placemarks for values that are not no data values if value: if value not in uniqueValues: uniqueValues.append(value) # Create a new placemark for each group of values if value != groupValue: placemark = ET.SubElement(document, 'Placemark') placemarkName = ET.SubElement(placemark, 'name') placemarkName.text = str(value) # Create style tag and setup styles style = ET.SubElement(placemark, 'Style') # Set polygon line style lineStyle = ET.SubElement(style, 'LineStyle') # Set polygon line color and width lineColor = ET.SubElement(lineStyle, 'color') lineColor.text = self.LINE_COLOR lineWidth = ET.SubElement(lineStyle, 'width') lineWidth.text = str(self.LINE_WIDTH) # Set polygon fill color polyStyle = ET.SubElement(style, 'PolyStyle') polyColor = ET.SubElement(polyStyle, 'color') # Convert alpha from 0.0-1.0 decimal to 00-FF string integerAlpha = mappedColorRamp.getAlphaAsInteger() # Get RGB color from color ramp and convert to KML hex ABGR string with alpha integerRGB = mappedColorRamp.getColorForValue(value) hexABGR = '%02X%02X%02X%02X' % (integerAlpha, integerRGB[mappedColorRamp.B], integerRGB[mappedColorRamp.G], integerRGB[mappedColorRamp.R]) # Set the polygon fill alpha and color polyColor.text = hexABGR if deltaTime: # Create TimeSpan tag timeSpan = ET.SubElement(placemark, 'TimeSpan') # Create begin and end tags begin = ET.SubElement(timeSpan, 'begin') begin.text = prevDateTime.strftime('%Y-%m-%dT%H:%M:%S') end = ET.SubElement(timeSpan, 'end') end.text = dateTime.strftime('%Y-%m-%dT%H:%M:%S') # Create multigeometry tag multigeometry = ET.SubElement(placemark, 'MultiGeometry') # Create the data tag extendedData = ET.SubElement(placemark, 'ExtendedData') # Add value to data valueData = ET.SubElement(extendedData, 'Data', name='value') valueValue = ET.SubElement(valueData, 'value') valueValue.text = str(value) iData = ET.SubElement(extendedData, 'Data', name='i') valueI = ET.SubElement(iData, 'value') valueI.text = str(i) jData = ET.SubElement(extendedData, 'Data', name='j') valueJ = ET.SubElement(jData, 'value') valueJ.text = str(j) if deltaTime: tData = ET.SubElement(extendedData, 'Data', name='t') valueT = ET.SubElement(tData, 'value') valueT.text = dateTime.strftime('%Y-%m-%dT%H:%M:%S') groupValue = value # Get polygon object from kml string and append to the current multigeometry group polygon = ET.fromstring(polygonString) multigeometry.append(polygon) if not discreet: # Embed the color ramp in SLD format document.append(ET.fromstring(mappedColorRamp.getColorMapAsContinuousSLD())) else: # Sort the unique values uniqueValues.sort() document.append(ET.fromstring(mappedColorRamp.getColorMapAsDiscreetSLD(uniqueValues))) return ET.tostring(kml)
Return a sequence of rasters with timestamps as a kml with time markers for animation. :param tableName: Name of the table to extract rasters from :param timeStampedRasters: List of dictionaries with keys: rasterId, dateTime rasterId = a unique integer identifier used to locate the raster (usually value of primary key column) dateTime = a datetime object representing the time the raster occurs e.g: timeStampedRasters = [{ 'rasterId': 1, 'dateTime': datetime(1970, 1, 1)}, { 'rasterId': 2, 'dateTime': datetime(1970, 1, 2)}, { 'rasterId': 3, 'dateTime': datetime(1970, 1, 3)}] :param rasterIdFieldName: Name of the id field for rasters (usually the primary key field) :param rasterFieldName: Name of the field where rasters are stored (of type raster) :param documentName: The name to give to the KML document (will be listed in legend under this name) :param alpha: The transparency to apply to each raster cell :param noDataValue: The value to be used as the no data value (default is 0) :rtype : string
def _evolve(self, state, qargs=None): """Evolve a quantum state by the QuantumChannel. Args: state (QuantumState): The input statevector or density matrix. qargs (list): a list of QuantumState subsystem positions to apply the operator on. Returns: QuantumState: the output quantum state. Raises: QiskitError: if the operator dimension does not match the specified QuantumState subsystem dimensions. """ # If subsystem evolution we use the SuperOp representation if qargs is not None: return SuperOp(self)._evolve(state, qargs) # Otherwise we compute full evolution directly state = self._format_state(state) if state.shape[0] != self._input_dim: raise QiskitError( "QuantumChannel input dimension is not equal to state dimension." ) if state.ndim == 1 and self._data[1] is None and len( self._data[0]) == 1: # If we only have a single Kraus operator we can implement unitary-type # evolution of a state vector psi -> K[0].psi return np.dot(self._data[0][0], state) # Otherwise we always return a density matrix state = self._format_state(state, density_matrix=True) kraus_l, kraus_r = self._data if kraus_r is None: kraus_r = kraus_l return np.einsum('AiB,BC,AjC->ij', kraus_l, state, np.conjugate(kraus_r))
Evolve a quantum state by the QuantumChannel. Args: state (QuantumState): The input statevector or density matrix. qargs (list): a list of QuantumState subsystem positions to apply the operator on. Returns: QuantumState: the output quantum state. Raises: QiskitError: if the operator dimension does not match the specified QuantumState subsystem dimensions.
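A numpy check of the Kraus-evolution einsum used in the density-matrix branch. For a single-qubit amplitude-damping channel the Kraus operators satisfy sum_A K_A^dag K_A = I, so the output density matrix stays trace-one (a self-contained sketch, independent of Qiskit):

import numpy as np

gamma = 0.3
kraus = np.array([[[1, 0], [0, np.sqrt(1 - gamma)]],
                  [[0, np.sqrt(gamma)], [0, 0]]])   # shape (A, i, B)
rho = np.array([[0.25, 0.1], [0.1, 0.75]])

# sum_A K_A rho K_A^dagger, written as the same einsum as above
rho_out = np.einsum('AiB,BC,AjC->ij', kraus, rho, np.conjugate(kraus))
assert np.isclose(np.trace(rho_out).real, 1.0)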
def Run(self): """Event loop.""" if data_store.RelationalDBEnabled(): data_store.REL_DB.RegisterMessageHandler( self._ProcessMessageHandlerRequests, self.well_known_flow_lease_time, limit=100) data_store.REL_DB.RegisterFlowProcessingHandler(self.ProcessFlow) try: while 1: processed = self.RunOnce() if processed == 0: if time.time() - self.last_active > self.SHORT_POLL_TIME: interval = self.POLLING_INTERVAL else: interval = self.SHORT_POLLING_INTERVAL time.sleep(interval) else: self.last_active = time.time() except KeyboardInterrupt: logging.info("Caught interrupt, exiting.") self.thread_pool.Join()
Event loop.
def update_headers(self, headers: Optional[LooseHeaders]) -> None: """Update request headers.""" self.headers = CIMultiDict() # type: CIMultiDict[str] # add host netloc = cast(str, self.url.raw_host) if helpers.is_ipv6_address(netloc): netloc = '[{}]'.format(netloc) if not self.url.is_default_port(): netloc += ':' + str(self.url.port) self.headers[hdrs.HOST] = netloc if headers: if isinstance(headers, (dict, MultiDictProxy, MultiDict)): headers = headers.items() # type: ignore for key, value in headers: # A special case for Host header if key.lower() == 'host': self.headers[key] = value else: self.headers.add(key, value)
Update request headers.
def get_bucket_website_config(self, bucket): """ Get the website configuration of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will fire with the bucket's website configuration. """ details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name='?website'), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_website_config) return d
Get the website configuration of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will fire with the bucket's website configuration.
def modify(self, modification, parameters):
    """
    Apply a single modification to the MFD's parameters.

    Reflects the modification method and calls it passing ``parameters``
    as keyword arguments. See also :attr:`MODIFICATIONS`.

    Modifications can be applied one on top of another. The logic
    of stacking modifications is up to a specific MFD implementation.

    :param modification:
        String name representing the type of modification.
    :param parameters:
        Dictionary of parameters needed for modification.
    :raises ValueError:
        If ``modification`` is missing from :attr:`MODIFICATIONS`.
    """
    if modification not in self.MODIFICATIONS:
        raise ValueError('Modification %s is not supported by %s' %
                         (modification, type(self).__name__))
    meth = getattr(self, 'modify_%s' % modification)
    meth(**parameters)
    self.check_constraints()
Apply a single modification to the MFD's parameters.

Reflects the modification method and calls it passing ``parameters``
as keyword arguments. See also :attr:`MODIFICATIONS`.

Modifications can be applied one on top of another. The logic
of stacking modifications is up to a specific MFD implementation.

:param modification:
    String name representing the type of modification.
:param parameters:
    Dictionary of parameters needed for modification.
:raises ValueError:
    If ``modification`` is missing from :attr:`MODIFICATIONS`.
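A hypothetical call for illustration: the modification name and its parameters depend on the concrete MFD subclass, which must define a matching modify_<name> method and list the name in MODIFICATIONS.

# Hypothetical: a subclass exposing modify_set_ab (with 'set_ab' in its
# MODIFICATIONS set) would be driven like this.
mfd.modify('set_ab', {'a_val': 4.0, 'b_val': 1.0})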
def show_lbaas_pool(self, lbaas_pool, **_params): """Fetches information for a lbaas_pool.""" return self.get(self.lbaas_pool_path % (lbaas_pool), params=_params)
Fetches information for a lbaas_pool.
def tagsInString_process(self, d_DICOM, astr, *args, **kwargs):
    """
    This method substitutes DICOM tags that are '%'-tagged
    in a string template with the actual tag lookup.

    For example, an output filename that is specified as the
    following string:

        %PatientAge-%PatientID-output.txt

    will be parsed to

        006Y-4412364-output.txt

    It is also possible to apply certain permutations/functions
    to a tag. For example, a function is identified by an underscore
    prefixed and suffixed string as part of the DICOM tag. If
    found, this function is applied to the tag value. For example,

        %PatientAge-%_md5|4_PatientID-output.txt

    will apply an md5 hash to the PatientID and use the first 4
    characters:

        006Y-7f38-output.txt

    """
    b_tagsFound = False
    str_replace = ''        # The lookup/processed tag value
    l_tags = []             # The input string split by '%'
    l_tagsToSub = []        # Remove any noise etc from each tag
    l_funcTag = []          # a function/tag list
    l_args = []             # the 'args' of the function
    func = ''               # the function to apply
    tag = ''                # the tag in the funcTag combo
    chars = ''              # the number of resultant chars from func
                            # result to use
    if '%' in astr:
        l_tags = astr.split('%')[1:]
        # Find which tags (mangled) in string match actual tags
        l_tagsToSub = [i for i in d_DICOM['l_tagRaw'] if any(i in b for b in l_tags)]
        # Need to arrange l_tagsToSub in same order as l_tags
        l_tagsToSubSort = sorted(
            l_tagsToSub,
            key = lambda x: [i for i, s in enumerate(l_tags) if x in s][0]
        )
        for tag, func in zip(l_tagsToSubSort, l_tags):
            b_tagsFound = True
            str_replace = d_DICOM['d_dicomSimple'][tag]
            if 'md5' in func:
                str_replace = hashlib.md5(str_replace.encode('utf-8')).hexdigest()
                l_funcTag = func.split('_')[1:]
                func = l_funcTag[0]
                l_args = func.split('|')
                if len(l_args) > 1:
                    chars = l_args[1]
                    str_replace = str_replace[0:int(chars)]
                astr = astr.replace('_%s_' % func, '')
            if 'strmsk' in func:
                l_funcTag = func.split('_')[1:]
                func = l_funcTag[0]
                str_msk = func.split('|')[1]
                l_n = []
                for i, j in zip(list(str_replace), list(str_msk)):
                    if j == '*':
                        l_n.append(i)
                    else:
                        l_n.append(j)
                str_replace = ''.join(l_n)
                astr = astr.replace('_%s_' % func, '')
            if 'nospc' in func:
                l_funcTag = func.split('_')[1:]
                func = l_funcTag[0]
                l_args = func.split('|')
                str_char = ''
                if len(l_args) > 1:
                    str_char = l_args[1]
                # strip out all non-alphanumeric chars and
                # replace with space
                str_replace = re.sub(r'\W+', ' ', str_replace)
                # replace all spaces with str_char
                str_replace = str_char.join(str_replace.split())
                astr = astr.replace('_%s_' % func, '')
            astr = astr.replace('%' + tag, str_replace)
    return {
        'status': True,
        'b_tagsFound': b_tagsFound,
        'str_result': astr
    }
This method substitutes DICOM tags that are '%'-tagged
in a string template with the actual tag lookup.

For example, an output filename that is specified as the
following string:

    %PatientAge-%PatientID-output.txt

will be parsed to

    006Y-4412364-output.txt

It is also possible to apply certain permutations/functions
to a tag. For example, a function is identified by an underscore
prefixed and suffixed string as part of the DICOM tag. If
found, this function is applied to the tag value. For example,

    %PatientAge-%_md5|4_PatientID-output.txt

will apply an md5 hash to the PatientID and use the first 4
characters:

    006Y-7f38-output.txt
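For reference, the '%_md5|4_PatientID' function tag reduces to this stdlib one-liner (the PatientID value is the docstring's example):

import hashlib

patient_id = '4412364'
token = hashlib.md5(patient_id.encode('utf-8')).hexdigest()[0:4]
print(token)   # first 4 hex chars of the md5 digest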
def add_clause(self, clause): """Add a new clause to the existing query. :param clause: The clause to add :type clause: MongoClause :return: None """ if clause.query_loc == MongoClause.LOC_MAIN: self._main.append(clause) elif clause.query_loc == MongoClause.LOC_MAIN2: self._main2.append(clause) elif clause.query_loc == MongoClause.LOC_WHERE: self._where.append(clause) else: raise RuntimeError('bad clause location: {}'.format(clause.query_loc))
Add a new clause to the existing query. :param clause: The clause to add :type clause: MongoClause :return: None
def inspect_file(self, commit, path): """ Returns info about a specific file. Params: * commit: A tuple, string, or Commit object representing the commit. * path: Path to file. """ req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path)) res = self.stub.InspectFile(req, metadata=self.metadata) return res
Returns info about a specific file. Params: * commit: A tuple, string, or Commit object representing the commit. * path: Path to file.
def create_marginalized_hist(ax, values, label, percentiles=None,
                             color='k', fillcolor='gray', linecolor='navy',
                             linestyle='-', title=True, expected_value=None,
                             expected_color='red', rotated=False,
                             plot_min=None, plot_max=None):
    """Plots a 1D marginalized histogram of the given param from the given
    samples.

    Parameters
    ----------
    ax : pyplot.Axes
        The axes on which to draw the plot.
    values : array
        The parameter values to plot.
    label : str
        A label to use for the title.
    percentiles : {None, float or array}
        What percentiles to draw lines at. If None, will draw lines at
        `[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and
        the median).
    color : {'k', string}
        What color to make the histogram; default is black.
    fillcolor : {'gray', string, or None}
        What color to fill the histogram with. Set to None to not fill the
        histogram. Default is 'gray'.
    linestyle : str, optional
        What line style to use for the histogram. Default is '-'.
    linecolor : {'navy', string}
        What color to use for the percentile lines. Default is 'navy'.
    title : bool, optional
        Add a title with an estimated value +/- uncertainty. The estimated
        value is the percentile halfway between the max/min of
        ``percentiles``, while the uncertainty is given by the max/min of
        the ``percentiles``. If no percentiles are specified, defaults to
        quoting the median +/- 95/5 percentiles.
    rotated : {False, bool}
        Plot the histogram on the y-axis instead of the x. Default
        is False.
    plot_min : {None, float}
        The minimum value to plot. If None, will default to whatever
        `pyplot` creates.
    plot_max : {None, float}
        The maximum value to plot. If None, will default to whatever
        `pyplot` creates.
    """
    if fillcolor is None:
        htype = 'step'
    else:
        htype = 'stepfilled'
    if rotated:
        orientation = 'horizontal'
    else:
        orientation = 'vertical'
    ax.hist(values, bins=50, histtype=htype, orientation=orientation,
            facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2,
            density=True)
    if percentiles is None:
        percentiles = [5., 50., 95.]
    if len(percentiles) > 0:
        plotp = numpy.percentile(values, percentiles)
    else:
        plotp = []
    for val in plotp:
        if rotated:
            ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3)
        else:
            ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3)
    # plot expected
    if expected_value is not None:
        if rotated:
            ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2)
        else:
            ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2)
    if title:
        if len(percentiles) > 0:
            minp = min(percentiles)
            maxp = max(percentiles)
            medp = (maxp + minp) / 2.
else: minp = 5 medp = 50 maxp = 95 values_min = numpy.percentile(values, minp) values_med = numpy.percentile(values, medp) values_max = numpy.percentile(values, maxp) negerror = values_med - values_min poserror = values_max - values_med fmt = '${0}$'.format(str_utils.format_value( values_med, negerror, plus_error=poserror)) if rotated: ax.yaxis.set_label_position("right") # sets colored title for marginal histogram set_marginal_histogram_title(ax, fmt, color, label=label, rotated=rotated) # Remove x-ticks ax.set_xticks([]) # turn off x-labels ax.set_xlabel('') # set limits ymin, ymax = ax.get_ylim() if plot_min is not None: ymin = plot_min if plot_max is not None: ymax = plot_max ax.set_ylim(ymin, ymax) else: # sets colored title for marginal histogram set_marginal_histogram_title(ax, fmt, color, label=label) # Remove y-ticks ax.set_yticks([]) # turn off y-label ax.set_ylabel('') # set limits xmin, xmax = ax.get_xlim() if plot_min is not None: xmin = plot_min if plot_max is not None: xmax = plot_max ax.set_xlim(xmin, xmax)
Plots a 1D marginalized histogram of the given param from the given
samples.

Parameters
----------
ax : pyplot.Axes
    The axes on which to draw the plot.
values : array
    The parameter values to plot.
label : str
    A label to use for the title.
percentiles : {None, float or array}
    What percentiles to draw lines at. If None, will draw lines at
    `[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and
    the median).
color : {'k', string}
    What color to make the histogram; default is black.
fillcolor : {'gray', string, or None}
    What color to fill the histogram with. Set to None to not fill the
    histogram. Default is 'gray'.
linestyle : str, optional
    What line style to use for the histogram. Default is '-'.
linecolor : {'navy', string}
    What color to use for the percentile lines. Default is 'navy'.
title : bool, optional
    Add a title with an estimated value +/- uncertainty. The estimated
    value is the percentile halfway between the max/min of
    ``percentiles``, while the uncertainty is given by the max/min of
    the ``percentiles``. If no percentiles are specified, defaults to
    quoting the median +/- 95/5 percentiles.
rotated : {False, bool}
    Plot the histogram on the y-axis instead of the x. Default is False.
plot_min : {None, float}
    The minimum value to plot. If None, will default to whatever
    `pyplot` creates.
plot_max : {None, float}
    The maximum value to plot. If None, will default to whatever
    `pyplot` creates.
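A usage sketch; passing title=False sidesteps the package-specific title helpers (str_utils, set_marginal_histogram_title), so only numpy and matplotlib are needed:

import numpy as np
import matplotlib.pyplot as plt

samples = np.random.normal(loc=1.0, scale=0.5, size=5000)
fig, ax = plt.subplots()
create_marginalized_hist(ax, samples, label='x',
                         percentiles=[5, 50, 95], title=False)
plt.show()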
def _calculate_num_queries(self): """ Calculate the total number of request and response queries. Used for count header and count table. """ request_totals = self._totals("request") response_totals = self._totals("response") return request_totals[2] + response_totals[2]
Calculate the total number of request and response queries. Used for count header and count table.
def admin_url_params(request, params=None): """ given a request, looks at GET and POST values to determine which params should be added. Is used to keep the context of popup and picker mode. """ params = params or {} if popup_status(request): params[IS_POPUP_VAR] = '1' pick_type = popup_pick_type(request) if pick_type: params['_pick'] = pick_type return params
given a request, looks at GET and POST values to determine which params should be added. Is used to keep the context of popup and picker mode.
async def on_raw_762(self, message): """ End of metadata. """ # No way to figure out whose query this belongs to, so make a best guess # it was the first one. if not self._metadata_queue: return nickname = self._metadata_queue.pop() future = self._pending['metadata'].pop(nickname) future.set_result(self._metadata_info.pop(nickname))
End of metadata.
def config_generator(search_space, max_search, rng, shuffle=True):
    """Generates config dicts from the given search space

    Args:
        search_space: (dict) A dictionary of parameters to search over.
            See note below for more details.
        max_search: (int) The maximum number of configurations to search.
            If max_search is None, do a full grid search of all discrete
                parameters, filling in range parameters as needed.
            Otherwise, do a full grid search of all discrete parameters
                and then cycle through again filling in new range
                parameters values; if there are no range parameters, stop
                after yielding the full cross product of parameters once.
        shuffle: (bool) If True, shuffle the order of generated configs

    Yields:
        configs: each config is a dict of parameter values based on the
            provided search space

    The search_space dictionary may consist of two types of parameters:

    --discrete: a discrete parameter is either a single value or a list
        of values. Use single values, for example, to override a default
        model parameter or set a flag such as 'verbose'=True.

    --range: a range parameter is a dict of the form:
        {'range': [<min>, <max>], 'scale': <scale>}
        where <min> and <max> are the min/max values to search between
        and scale is one of ['linear', 'log'] (defaulting to 'linear')
        representing the scale to use when searching the given range

    Example:
        search_space = {
            'verbose': True,                              # discrete
            'n_epochs': 100,                              # discrete
            'momentum': [0.0, 0.9, 0.99],                 # discrete
            'l2': {'range': [0.0001, 10]}                 # linear range
            'lr': {'range': [0.001, 1], 'scale': 'log'},  # log range
        }
        If max_search is None, this will return 3 configurations (enough
            to just cover the full cross-product of discrete values,
            filled in with sampled range values)
        Otherwise, this will return max_search configurations (cycling
            through the discrete value combinations multiple times if
            necessary)
    """

    def dict_product(d):
        keys = d.keys()
        for element in product(*d.values()):
            yield dict(zip(keys, element))

    def range_param_func(v):
        scale = v.get("scale", "linear")
        mini = min(v["range"])
        maxi = max(v["range"])
        if scale == "linear":
            func = lambda rand: mini + (maxi - mini) * rand
        elif scale == "log":
            mini = np.log(mini)
            maxi = np.log(maxi)
            func = lambda rand: np.exp(mini + (maxi - mini) * rand)
        else:
            raise ValueError(f"Unrecognized scale '{scale}' for parameter '{k}'")
        return func

    discretes = {}
    ranges = {}
    for k, v in search_space.items():
        if isinstance(v, dict):
            ranges[k] = range_param_func(v)
        elif isinstance(v, list):
            discretes[k] = v
        else:
            discretes[k] = [v]

    discrete_configs = list(dict_product(discretes))

    if shuffle:
        rng.shuffle(discrete_configs)

    # If there are range parameters and a non-None max_search, cycle
    # through the discrete_configs (with new range values) until
    # max_search is met
    if ranges and max_search:
        discrete_configs = cycle(discrete_configs)

    for i, config in enumerate(discrete_configs):
        # We may see the same config twice due to cycle
        config = config.copy()
        if max_search and i == max_search:
            break
        for k, v in ranges.items():
            config[k] = float(v(rng.random()))
        yield config
Generates config dicts from the given search space

Args:
    search_space: (dict) A dictionary of parameters to search over.
        See note below for more details.
    max_search: (int) The maximum number of configurations to search.
        If max_search is None, do a full grid search of all discrete
            parameters, filling in range parameters as needed.
        Otherwise, do a full grid search of all discrete parameters
            and then cycle through again filling in new range
            parameters values; if there are no range parameters, stop
            after yielding the full cross product of parameters once.
    shuffle: (bool) If True, shuffle the order of generated configs

Yields:
    configs: each config is a dict of parameter values based on the
        provided search space

The search_space dictionary may consist of two types of parameters:

--discrete: a discrete parameter is either a single value or a list
    of values. Use single values, for example, to override a default
    model parameter or set a flag such as 'verbose'=True.

--range: a range parameter is a dict of the form:
    {'range': [<min>, <max>], 'scale': <scale>}
    where <min> and <max> are the min/max values to search between
    and scale is one of ['linear', 'log'] (defaulting to 'linear')
    representing the scale to use when searching the given range

Example:
    search_space = {
        'verbose': True,                              # discrete
        'n_epochs': 100,                              # discrete
        'momentum': [0.0, 0.9, 0.99],                 # discrete
        'l2': {'range': [0.0001, 10]}                 # linear range
        'lr': {'range': [0.001, 1], 'scale': 'log'},  # log range
    }
    If max_search is None, this will return 3 configurations (enough to
        just cover the full cross-product of discrete values, filled in
        with sampled range values)
    Otherwise, this will return max_search configurations (cycling
        through the discrete value combinations multiple times if
        necessary)
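A usage sketch. The generator only calls rng.shuffle and rng.random, so the stdlib random.Random is a sufficient rng here:

import random

search_space = {
    'n_epochs': 100,                               # fixed discrete
    'momentum': [0.0, 0.9],                        # discrete choices
    'lr': {'range': [0.001, 1], 'scale': 'log'},   # log-scale range
}
rng = random.Random(0)
# Cycles through the two discrete combinations, re-sampling 'lr' each time.
for config in config_generator(search_space, max_search=4, rng=rng, shuffle=True):
    print(config)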
def message_loop(self, t_q, r_q): """Loop through messages and execute tasks""" t_msg = {} while t_msg.get("state", "") != "__DIE__": try: t_msg = t_q.get(True, self.cycle_sleep) # Poll blocking self.task = t_msg.get("task", "") # __DIE__ has no task if self.task != "": self.task.task_start = time.time() # Start the timer # Send ACK to the controller who requested work on this task self.r_q_send( {"w_id": self.w_id, "task": self.task, "state": "__ACK__"} ) # Update the sleep time with latest recommendations self.cycle_sleep = self.task.worker_loop_delay # Assign the result of task.run() to task.result self.task.result = self.task.run() self.task.task_stop = time.time() # Seconds since epoch self.r_q_send( {"w_id": self.w_id, "task": self.task, "state": "__FINISHED__"} ) # Ack work finished self.task = None except Empty: pass except Full: time.sleep(0.1) ## Disable extraneous error handling... except: if self.task is not None: self.task.task_stop = time.time() # Seconds since epoch # Handle all other errors here... tb_str = "".join(tb.format_exception(*(sys.exc_info()))) self.r_q_send( { "w_id": self.w_id, "task": self.task, "error": tb_str, "state": "__ERROR__", } ) return
Loop through messages and execute tasks
def _interpolate_doy_calendar(source, doy_max): """Interpolate from one set of dayofyear range to another Interpolate an array defined over a `dayofyear` range (say 1 to 360) to another `dayofyear` range (say 1 to 365). Parameters ---------- source : xarray.DataArray Array with `dayofyear` coordinates. doy_max : int Largest day of the year allowed by calendar. Returns ------- xarray.DataArray Interpolated source array over coordinates spanning the target `dayofyear` range. """ if 'dayofyear' not in source.coords.keys(): raise AttributeError("source should have dayofyear coordinates.") # Interpolation of source to target dayofyear range doy_max_source = source.dayofyear.max() # Interpolate to fill na values tmp = source.interpolate_na(dim='dayofyear') # Interpolate to target dayofyear range tmp.coords['dayofyear'] = np.linspace(start=1, stop=doy_max, num=doy_max_source) return tmp.interp(dayofyear=range(1, doy_max + 1))
Interpolate from one set of dayofyear range to another Interpolate an array defined over a `dayofyear` range (say 1 to 360) to another `dayofyear` range (say 1 to 365). Parameters ---------- source : xarray.DataArray Array with `dayofyear` coordinates. doy_max : int Largest day of the year allowed by calendar. Returns ------- xarray.DataArray Interpolated source array over coordinates spanning the target `dayofyear` range.
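A usage sketch: interpolating a 360-day climatology onto a 365-day calendar (assumes xarray with its scipy interpolation dependency installed):

import numpy as np
import xarray as xr

clim = xr.DataArray(np.sin(np.linspace(0, 2 * np.pi, 360)),
                    dims='dayofyear',
                    coords={'dayofyear': np.arange(1, 361)})
out = _interpolate_doy_calendar(clim, doy_max=365)
print(out.dayofyear.size)   # 365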
def get_discovery_doc(self, services, hostname=None): """JSON dict description of a protorpc.remote.Service in discovery format. Args: services: Either a single protorpc.remote.Service or a list of them that implements an api/version. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: dict, The discovery document as a JSON dict. """ if not isinstance(services, (tuple, list)): services = [services] # The type of a class that inherits from remote.Service is actually # remote._ServiceClass, thanks to metaclass strangeness. # pylint: disable=protected-access util.check_list_type(services, remote._ServiceClass, 'services', allow_none=False) return self.__discovery_doc_descriptor(services, hostname=hostname)
JSON dict description of a protorpc.remote.Service in discovery format. Args: services: Either a single protorpc.remote.Service or a list of them that implements an api/version. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: dict, The discovery document as a JSON dict.
def deepest_common_ancestor(goterms, godag):
    '''
        This function gets the nearest common ancestor
        using the above function.
        Only returns the single most specific term - assumes a unique
        one exists.
    '''
    # Take the element at maximum depth.
    return max(common_parent_go_ids(goterms, godag), key=lambda t: godag[t].depth)
This function gets the nearest common ancestor
using the above function.
Only returns the single most specific term - assumes a unique one exists.