code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def replace(text, replacements):
    """Replace multiple slices of ``text`` with new values.

    This is a convenience method for making code modifications of ranges,
    e.g. as identified by ``ASTTokens.get_text_range(node)``.

    :param text: The original string.
    :param replacements: Iterable of ``(start, end, new_text)`` tuples. For
        example, ``replace("this is a test", [(0, 4, "X"), (8, 9, "THE")])``
        produces ``"X is THE test"``.  (The original docstring's example used
        ``(8, 1, "THE")``, which does not produce the claimed output.)
    :return: The string with all replacements applied.
    """
    p = 0
    parts = []
    # Process replacements in ascending start order so the untouched gaps
    # between them can be copied through in a single pass.
    for (start, end, new_text) in sorted(replacements):
        parts.append(text[p:start])
        parts.append(new_text)
        p = end
    parts.append(text[p:])
    return ''.join(parts)
Replaces multiple slices of text with new values. This is a convenience method for making code modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. Replacements is an iterable of ``(start, end, new_text)`` tuples. For example, ``replace("this is a test", [(0, 4, "X"), (8, 9, "THE")])`` produces ``"X is THE test"``.
def TNE_metric(bpmn_graph):
    """
    Returns the value of the TNE metric (Total Number of Events of the
    Model) for the BPMNDiagramGraph instance.

    :param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
    :return: total count of all events in the model.
    """
    events_counts = get_events_counts(bpmn_graph)
    # Sum the dict's values directly instead of building a throwaway
    # list of (key, value) pairs just to discard the keys.
    return sum(events_counts.values())
Returns the value of the TNE metric (Total Number of Events of the Model) for the BPMNDiagramGraph instance. :param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
async def _senddms(self):
    """Toggles sending DMs to owner."""
    meta = self.bot.config.get("meta", {})
    # Flip the persisted flag (defaults to enabled when unset).
    currently_enabled = meta.get('send_dms', True)
    meta['send_dms'] = not currently_enabled
    await self.bot.config.put('meta', meta)
    await self.bot.responses.toggle(
        message="Forwarding of DMs to owner has been {status}.",
        success=meta['send_dms'])
Toggles sending DMs to owner.
def update_activity(self, activity_id, activity_name=None, desc=None,
                    started_on=None, ended_on=None):
    """
    Send PUT request to /activities/{activity_id} to update the activity metadata.
    Raises ValueError if at least one field is not updated.

    :param activity_id: str uuid of activity
    :param activity_name: str new name of the activity (optional)
    :param desc: str description of the activity (optional)
    :param started_on: str date the updated activity began on (optional)
    :param ended_on: str date the updated activity ended on (optional)
    :return: requests.Response containing the successful result
    """
    put_data = {
        "name": activity_name,
        "description": desc,
        "started_on": started_on,
        "ended_on": ended_on
    }
    # Enforce the documented contract: the docstring promises a ValueError
    # when no field is being updated, but the original code never raised.
    if all(value is None for value in put_data.values()):
        raise ValueError("update_activity requires at least one field to update")
    return self._put("/activities/" + activity_id, put_data)
Send PUT request to /activities/{activity_id} to update the activity metadata. Raises ValueError if at least one field is not updated. :param activity_id: str uuid of activity :param activity_name: str new name of the activity (optional) :param desc: str description of the activity (optional) :param started_on: str date the updated activity began on (optional) :param ended_on: str date the updated activity ended on (optional) :return: requests.Response containing the successful result
def exitDialog(self):
    """
    Helper method that exits the dialog.

    This method will cause the previously active submenu to activate.
    """
    if self.prev_submenu is None:
        return
    # Return control to the submenu that was active before this dialog;
    # in theory this could form a stack if one dialog opens another.
    self.menu.changeSubMenu(self.prev_submenu)
    self.prev_submenu = None
Helper method that exits the dialog. This method will cause the previously active submenu to activate.
def configure_logging(self):
    """Create logging handlers for any log output."""
    root_logger = logging.getLogger('')
    # Root logger accepts everything; per-handler levels do the filtering.
    root_logger.setLevel(logging.DEBUG)

    # Send higher-level messages to the console via stderr.
    console = logging.StreamHandler(self.stderr)
    level_map = {
        self.WARNING_LEVEL: logging.WARNING,
        self.INFO_LEVEL: logging.INFO,
        self.DEBUG_LEVEL: logging.DEBUG,
    }
    console_level = level_map.get(self.options.verbose_level, logging.DEBUG)
    # The default log level is INFO; in that situation set the console level
    # to WARNING to avoid displaying useless messages. This equals "--quiet".
    if console_level == logging.INFO:
        console.setLevel(logging.WARNING)
    else:
        console.setLevel(console_level)

    if console_level == logging.DEBUG:
        formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)
    else:
        formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)

    # Silence chatty third-party loggers regardless of verbosity.
    logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)
    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)

    console.setFormatter(formatter)
    root_logger.addHandler(console)
Create logging handlers for any log output.
def parse_ssh_destination(destination):
    """Parses the SSH destination argument."""
    match = _re_ssh.match(destination)
    if not match:
        raise InvalidDestination("Invalid destination: %s" % destination)
    user, password, host, port = match.groups()
    # Fall back to the local username when none is given; password and
    # port stay absent from the result unless explicitly provided.
    info = {'username': user if user else getpass.getuser()}
    if password:
        info['password'] = password
    if port:
        info['port'] = int(port)
    info['hostname'] = host
    return info
Parses the SSH destination argument.
def make_block_creator(yaml_path, filename=None):
    # type: (str, str) -> Callable[..., List[Controller]]
    """Make a collection function that will create a list of blocks

    Args:
        yaml_path (str): File path to YAML file, or a file in the same dir
        filename (str): If given, use this filename as the last element in
            the yaml_path (so yaml_path can be __file__)

    Returns:
        function: A collection function decorated with @takes. This can be
            used in other blocks or instantiated by the process. If the YAML
            text specified controllers or parts then a block instance with
            the given name will be instantiated. If there are any blocks
            listed then they will be called. All created blocks by this or
            any sub collection will be returned
    """
    sections, yamlname, docstring = Section.from_yaml(yaml_path, filename)
    yamldir = os.path.dirname(yaml_path)

    # Exactly one "controllers" section is expected per YAML file.
    controllers_found = [s for s in sections if s.section == "controllers"]
    assert len(controllers_found) == 1, \
        "Expected exactly 1 controller, got %s" % (controllers_found,)
    controller_section = controllers_found[0]

    def block_creator(kwargs):
        # Create the param dict of the statically defined arguments.
        defines = _create_defines(sections, yamlname, yamldir, kwargs)
        controllers, parts = _create_blocks_and_parts(sections, defines)
        # Make the controller and attach every created part to it.
        controller = controller_section.instantiate(defines)
        for part in parts:
            controller.add_part(part)
        controllers.append(controller)
        return controllers

    return creator_with_nice_signature(
        block_creator, sections, yamlname, yaml_path, docstring)
Make a collection function that will create a list of blocks Args: yaml_path (str): File path to YAML file, or a file in the same dir filename (str): If given, use this filename as the last element in the yaml_path (so yaml_path can be __file__) Returns: function: A collection function decorated with @takes. This can be used in other blocks or instantiated by the process. If the YAML text specified controllers or parts then a block instance with the given name will be instantiated. If there are any blocks listed then they will be called. All created blocks by this or any sub collection will be returned
def get_name(self):
    """
    @rtype:  str
    @return: Module name, as used in labels.

    @warning: Names are B{NOT} guaranteed to be unique. If you need unique
        identification for a loaded module, use the base address instead.

    @see: L{get_label}
    """
    pathname = self.get_filename()
    if not pathname:
        # No filename known for this module: use the base address instead.
        return "0x%x" % self.get_base()
    modName = self.__filename_to_modname(pathname)
    if isinstance(modName, compat.unicode):
        try:
            modName = modName.encode('cp1252')
        except UnicodeEncodeError:
            # Keep the unicode name but warn about the failed conversion.
            e = sys.exc_info()[1]
            warnings.warn(str(e))
    return modName
@rtype: str @return: Module name, as used in labels. @warning: Names are B{NOT} guaranteed to be unique. If you need unique identification for a loaded module, use the base address instead. @see: L{get_label}
def circumcircleForTriangle(cls, triangle):
    '''
    :param: triangle - Triangle class
    :return: Circle class

    Returns the circle where every vertex in the input triangle is on the
    radius of that circle.
    '''
    if triangle.isRight:
        # For a right triangle the circumcircle's origin is simply the
        # midpoint of the hypotenuse.
        origin = triangle.hypotenuse.midpoint
        return cls(origin, origin.distance(triangle.A))

    # General case:
    #   1. find the normals to two sides
    #   2. translate them to the midpoints of those two sides
    #   3. intersect those lines for the center of the circumcircle
    #   4. radius is the distance from the center to any vertex
    ab_bisector = triangle.AB.normal
    ab_bisector += triangle.AB.midpoint

    ac_bisector = triangle.AC.normal
    ac_bisector += triangle.AC.midpoint

    origin = ab_bisector.intersection(ac_bisector)
    return cls(origin, origin.distance(triangle.A))
:param: triangle - Triangle class :return: Circle class Returns the circle where every vertex in the input triangle is on the radius of that circle.
def _m2m_rev_field_name(model1, model2): """Gets the name of the reverse m2m accessor from `model1` to `model2` For example, if User has a ManyToManyField connected to Group, `_m2m_rev_field_name(Group, User)` retrieves the name of the field on Group that lists a group's Users. (By default, this field is called `user_set`, but the name can be overridden). """ m2m_field_names = [ rel.get_accessor_name() for rel in model1._meta.get_fields() if rel.many_to_many and rel.auto_created and rel.related_model == model2 ] return m2m_field_names[0]
Gets the name of the reverse m2m accessor from `model1` to `model2` For example, if User has a ManyToManyField connected to Group, `_m2m_rev_field_name(Group, User)` retrieves the name of the field on Group that lists a group's Users. (By default, this field is called `user_set`, but the name can be overridden).
def visit_FunctionDef(self, node):
    """
    Update import context using overwriting name information.

    Examples
    --------
    >> import foo
    >> import bar
    >> def foo(bar):
    >>     print(bar)

    In this case, neither bar nor foo can be used in the foo function and
    in future function, foo will not be usable.
    """
    # The function's own name shadows any imported symbol of that name,
    # permanently for the rest of the module.
    self.symbols.pop(node.name, None)
    gsymbols = self.symbols.copy()
    # Arguments shadow imported symbols within the function body only, so
    # drop them before visiting and restore the saved copy afterwards.
    # (Was a list comprehension used purely for its side effects.)
    for arg in node.args.args:
        self.symbols.pop(arg.id, None)
    self.generic_visit(node)
    self.symbols = gsymbols
    return node
Update import context using overwriting name information. Examples -------- >> import foo >> import bar >> def foo(bar): >> print(bar) In this case, neither bar nor foo can be used in the foo function and in future function, foo will not be usable.
def reverse(self, point, language=None, sensor=False):
    '''Reverse geocode a point.
    Pls refer to the Google Maps Web API for the details of the parameters
    '''
    params = {
        'latlng': point,
        'sensor': str(sensor).lower(),
    }
    if language:
        params['language'] = language
    # Premier accounts must sign their request URLs.
    url = self.get_signed_url(params) if self.premier else self.get_url(params)
    return self.GetService_url(url)
Reverse geocode a point. Pls refer to the Google Maps Web API for the details of the parameters
def floor(self):
    """Round `x` and `y` down to integers."""
    # Coerce explicitly: math.floor returns a float under Python 2.
    floored_x = int(math.floor(self.x))
    floored_y = int(math.floor(self.y))
    return Point(floored_x, floored_y)
Round `x` and `y` down to integers.
def buffer(self, *args, **kwargs):
    """Buffer documents, in the current session"""
    # Validate the session before delegating straight to it.
    self.check_session()
    return self.session.buffer(*args, **kwargs)
Buffer documents, in the current session
def convert_ram_sdp_ar(ADDR_WIDTH=8, DATA_WIDTH=8):
    '''Convert RAM: Simple-Dual-Port, Asynchronous Read'''
    # Build placeholder signals of the requested widths, then emit Verilog
    # for the simple-dual-port, asynchronous-read RAM implementation.
    clk = Signal(bool(0))
    we = Signal(bool(0))
    addrw = Signal(intbv(0)[ADDR_WIDTH:])
    addrr = Signal(intbv(0)[ADDR_WIDTH:])
    di = Signal(intbv(0)[DATA_WIDTH:])
    do = Signal(intbv(0)[DATA_WIDTH:])
    toVerilog(ram_sdp_ar, clk, we, addrw, addrr, di, do)
Convert RAM: Simple-Dual-Port, Asynchronous Read
def symlink(src, link):
    '''
    Create a symbolic link to a file

    This is only supported with Windows Vista or later and must be executed
    by a user with the SeCreateSymbolicLink privilege.

    The behavior of this function matches the Unix equivalent, with one
    exception - invalid symlinks cannot be created. The source path must
    exist. If it doesn't, an error will be raised.

    Args:
        src (str): The path to a file or directory
        link (str): The path to the link

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' file.symlink /path/to/file /path/to/link
    '''
    # When Python 3.2 or later becomes the minimum version, this function
    # can be replaced with the built-in os.symlink, which supports Windows.
    if sys.getwindowsversion().major < 6:
        raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')
    if not os.path.exists(src):
        raise SaltInvocationError('The given source path does not exist.')
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')

    # Normalize slashes on both endpoints before handing them to the API.
    src = os.path.normpath(src)
    link = os.path.normpath(link)

    # The third argument tells the API whether the target is a directory.
    is_dir = os.path.isdir(src)
    try:
        win32file.CreateSymbolicLink(link, src, int(is_dir))
        return True
    except pywinerror as exc:
        raise CommandExecutionError(
            'Could not create \'{0}\' - [{1}] {2}'.format(
                link, exc.winerror, exc.strerror
            )
        )
Create a symbolic link to a file This is only supported with Windows Vista or later and must be executed by a user with the SeCreateSymbolicLink privilege. The behavior of this function matches the Unix equivalent, with one exception - invalid symlinks cannot be created. The source path must exist. If it doesn't, an error will be raised. Args: src (str): The path to a file or directory link (str): The path to the link Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' file.symlink /path/to/file /path/to/link
def qa(ctx):
    '''Run a quality report'''
    header('Performing static analysis')
    info('Python static analysis')
    flake8_results = lrun('flake8 udata --jobs 1', pty=True, warn=True)
    info('JavaScript static analysis')
    eslint_results = lrun('npm -s run lint', pty=True, warn=True)
    # Abort with the first non-zero return code if either linter failed.
    if flake8_results.failed or eslint_results.failed:
        exit(flake8_results.return_code or eslint_results.return_code)
    print(green('OK'))
Run a quality report
def graph_from_incidence_matrix(matrix, node_prefix='', directed=False):
    """Creates a basic graph out of an incidence matrix.

    The matrix has to be a list of rows of values representing an incidence
    matrix. The values can be anything: bool, int, float, as long as they
    can evaluate to True or False.
    """
    graph = Dot(graph_type='digraph' if directed else 'graph')

    for row in matrix:
        # Signed column indices (1-based) of the non-zero entries in this row.
        nodes = []
        c = 1
        for node in row:
            if node:
                nodes.append(c * node)
            c += 1
        nodes.sort()
        if len(nodes) == 2:
            # Format the numeric node ids to strings before prefixing; the
            # original concatenated str + int and raised TypeError whenever
            # an edge was actually added.
            graph.add_edge(Edge(
                '%s%s' % (node_prefix, abs(nodes[0])),
                '%s%s' % (node_prefix, nodes[1])))

    if not directed:
        graph.set_simplify(True)

    return graph
Creates a basic graph out of an incidence matrix. The matrix has to be a list of rows of values representing an incidence matrix. The values can be anything: bool, int, float, as long as they can evaluate to True or False.
def lock():
    '''
    Attempts an exclusive lock on the candidate configuration. This
    is a non-blocking call.

    .. note::
        When locking, it is important to remember to call
        :py:func:`junos.unlock <salt.modules.junos.unlock>` once finished. If
        locking during orchestration, remember to include a step in the
        orchestration job to unlock.

    CLI Example:

    .. code-block:: bash

        salt 'device_name' junos.lock
    '''
    conn = __proxy__['junos.conn']()
    ret = {'out': True}
    try:
        conn.cu.lock()
        ret['message'] = "Successfully locked the configuration."
    except jnpr.junos.exception.LockError as exception:
        # Report the failure in the result dict instead of raising, so the
        # CLI always gets a structured response back.
        ret['message'] = 'Could not gain lock due to : "{0}"'.format(exception)
        ret['out'] = False
    return ret
Attempts an exclusive lock on the candidate configuration. This is a non-blocking call. .. note:: When locking, it is important to remember to call :py:func:`junos.unlock <salt.modules.junos.unlock>` once finished. If locking during orchestration, remember to include a step in the orchestration job to unlock. CLI Example: .. code-block:: bash salt 'device_name' junos.lock
def discard(self, element, multiplicity=None):
    """Removes the `element` from the multiset.

    If multiplicity is ``None``, all occurrences of the element are removed:

    >>> ms = Multiset('aab')
    >>> ms.discard('a')
    2
    >>> sorted(ms)
    ['b']

    Otherwise, the multiplicity is subtracted from the one in the multiset:

    >>> ms = Multiset('aab')
    >>> ms.discard('a', 1)
    2
    >>> sorted(ms)
    ['a', 'b']

    In contrast to :meth:`remove`, this does not raise an error if the
    element is not in the multiset:

    >>> ms = Multiset('a')
    >>> ms.discard('b')
    0
    >>> sorted(ms)
    ['a']

    It is also not an error to remove more elements than are in the set:

    >>> ms.remove('a', 2)
    1
    >>> sorted(ms)
    []

    Args:
        element: The element to remove from the multiset.
        multiplicity: An optional multiplicity i.e. count of elements to remove.

    Returns:
        The multiplicity of the element in the multiset before the removal.
    """
    _elements = self._elements
    if element not in _elements:
        return 0
    old_multiplicity = _elements[element]
    if multiplicity is None or multiplicity >= old_multiplicity:
        # Remove every occurrence of the element.
        del _elements[element]
        self._total -= old_multiplicity
    elif multiplicity < 0:
        raise ValueError("Multiplicity must not be negative")
    elif multiplicity > 0:
        # Partial removal: decrease both the count and the cached total.
        _elements[element] -= multiplicity
        self._total -= multiplicity
    return old_multiplicity
Removes the `element` from the multiset. If multiplicity is ``None``, all occurrences of the element are removed: >>> ms = Multiset('aab') >>> ms.discard('a') 2 >>> sorted(ms) ['b'] Otherwise, the multiplicity is subtracted from the one in the multiset and the old multiplicity is removed: >>> ms = Multiset('aab') >>> ms.discard('a', 1) 2 >>> sorted(ms) ['a', 'b'] In contrast to :meth:`remove`, this does not raise an error if the element is not in the multiset: >>> ms = Multiset('a') >>> ms.discard('b') 0 >>> sorted(ms) ['a'] It is also not an error to remove more elements than are in the set: >>> ms.remove('a', 2) 1 >>> sorted(ms) [] Args: element: The element to remove from the multiset. multiplicity: An optional multiplicity i.e. count of elements to remove. Returns: The multiplicity of the element in the multiset before the removal.
def records():
    """Load test data fixture."""
    with db.session.begin_nested():
        for idx in range(20):
            # Create the record together with its registered persistent id.
            rec_uuid = uuid.uuid4()
            Record.create({
                'title': 'LHC experiment {}'.format(idx),
                'description': 'Data from experiment {}.'.format(idx),
                'type': 'data',
                'recid': idx
            }, id_=rec_uuid)
            PersistentIdentifier.create(
                pid_type='recid',
                pid_value=idx,
                object_type='rec',
                object_uuid=rec_uuid,
                status=PIDStatus.REGISTERED,
            )
    db.session.commit()
Load test data fixture.
def retrieve(self, operation, field=None):
    """Retrieve a position in this collection.

    :param operation: Name of an operation
    :type operation: :class:`Operation`
    :param field: Name of field for sort order
    :type field: str
    :return: The position for this operation
    :rtype: Mark
    :raises: NoTrackingCollection
    """
    stored = self._get(operation, field)
    if stored is None:
        # Nothing recorded yet: hand back an empty Mark instance.
        return Mark(collection=self.collection, operation=operation,
                    field=field)
    return Mark.from_dict(self.collection, stored)
Retrieve a position in this collection. :param operation: Name of an operation :type operation: :class:`Operation` :param field: Name of field for sort order :type field: str :return: The position for this operation :rtype: Mark :raises: NoTrackingCollection
def convert_quadratic_to_cubic_path(q0, q1, q2):
    """
    Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one.
    """
    # Cubic control points are interpolations of the quadratic ones:
    # c1 sits two thirds of the way from q0 to q1, and c2 is offset from
    # c1 by a third of the q0->q2 chord; the endpoints are unchanged.
    c0 = q0
    c1 = (q0[0] + 2. / 3 * (q1[0] - q0[0]),
          q0[1] + 2. / 3 * (q1[1] - q0[1]))
    c2 = (c1[0] + 1. / 3 * (q2[0] - q0[0]),
          c1[1] + 1. / 3 * (q2[1] - q0[1]))
    c3 = q2
    return c0, c1, c2, c3
Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one.
def missed_lines(self, filename):
    """
    Return a list of extrapolated uncovered line numbers for the
    file `filename` according to `Cobertura.line_statuses`.
    """
    statuses = extrapolate_coverage(self.line_statuses(filename))
    # Keep only the line numbers whose status is exactly False (uncovered).
    return [lineno for lineno, covered in statuses if covered is False]
Return a list of extrapolated uncovered line numbers for the file `filename` according to `Cobertura.line_statuses`.
def register(self):
    """
    Register a new user by POSTing all required data.

    User's `Authorization` header value is returned in `WWW-Authenticate`
    header.
    """
    user, created = self.Model.create_account(self._json_params)

    if user.api_key is None:
        raise JHTTPBadRequest('Failed to generate ApiKey for user')
    if not created:
        raise JHTTPConflict('Looks like you already have an account.')

    self.request._user = user
    # remember() produces the auth headers returned to the client.
    headers = remember(self.request, user.username)
    return JHTTPOk('Registered', headers=headers)
Register a new user by POSTing all required data. User's `Authorization` header value is returned in `WWW-Authenticate` header.
def store_equal(self):
    """
    Takes a tetrad class object and populates array with random
    quartets sampled equally among splits of the tree so that
    deep splits are not overrepresented relative to rare splits,
    like those near the tips.
    """
    with h5py.File(self.database.input, 'a') as io5:
        fillsets = io5["quartets"]

        # require guidetree
        if not os.path.exists(self.files.tree):
            raise IPyradWarningExit(
                "To use sampling method 'equal' requires a guidetree")
        tre = ete3.Tree(self.files.tree)
        tre.unroot()
        tre.resolve_polytomy(recursive=True)

        # randomly sample internal splits
        splits = [([self.samples.index(z.name) for z in i],
                   [self.samples.index(z.name) for z in j])
                  for (i, j) in tre.get_edges()]

        # only keep internal splits, not single tip edges
        splits = [i for i in splits if all([len(j) > 1 for j in i])]

        # how many min quartets should be equally sampled from each split
        squarts = self.params.nquartets // len(splits)

        # keep track of how many iterators are saturable
        saturable = 0

        # Turn each split into an iterable quartet sampler: exhaustive when
        # the split is small, random otherwise.
        qiters = []
        for idx, split in enumerate(splits):
            # if small number at this split then sample all possible sets;
            # we will exhaust this quickly and then switch to random for
            # the larger splits
            total = n_choose_k(len(split[0]), 2) * n_choose_k(len(split[1]), 2)
            if total < squarts * 2:
                qiter = (i + j for (i, j) in itertools.product(
                    itertools.combinations(split[0], 2),
                    itertools.combinations(split[1], 2)))
                saturable += 1
            else:
                # random sampler across this split; this is slower because
                # it can propose the same set repeatedly, so proposals are
                # checked against the 'sampled' set below.
                # (was xrange -- Python 2 only)
                qiter = (random_product(split[0], split[1])
                         for _ in range(self.params.nquartets))
            qiters.append((idx, qiter))

        # create infinite cycler of qiters
        qitercycle = itertools.cycle(qiters)

        # store visited quartets
        sampled = set()

        # keep filling quartets until nquartets are sampled or every
        # saturable iterator has been exhausted
        i = 0
        empty = set()
        edge_targeted = 0
        random_targeted = 0
        while i < self.params.nquartets:
            # grab the next iterator (was qitercycle.next() -- Python 2 only)
            cycle, qiter = next(qitercycle)
            try:
                qrtsamp = tuple(sorted(next(qiter)))
                if qrtsamp not in sampled:
                    sampled.add(qrtsamp)
                    edge_targeted += 1
                    i += 1
                    # print progress bar update to engine stdout
                    if not i % self._chunksize:
                        print(min(i, self.params.nquartets))
            except StopIteration:
                empty.add(cycle)
                if len(empty) == saturable:
                    break

        # If the array is not full then top up with random samples.
        # Was 'while i <= nquartets', an off-by-one that stored one extra
        # quartet and no longer matched the size of the 'quartets' dataset.
        while i < self.params.nquartets:
            newset = tuple(sorted(np.random.choice(
                range(len(self.samples)), 4, replace=False)))
            if newset not in sampled:
                sampled.add(newset)
                random_targeted += 1
                i += 1
                # print progress bar update to engine stdout
                if not i % self._chunksize:
                    print(min(i, self.params.nquartets))

        # store into database
        print(self.params.nquartets)
        fillsets[:] = np.array(tuple(sampled))
        del sampled
Takes a tetrad class object and populates array with random quartets sampled equally among splits of the tree so that deep splits are not overrepresented relative to rare splits, like those near the tips.
def stats(self, indices=None):
    """
    Retrieve the statistic of one or more indices
    (See :ref:`es-guide-reference-api-admin-indices-stats`)

    :keyword indices: an index or a list of indices
    """
    # Build the _stats endpoint path for the given indices and GET it.
    path = self.conn._make_path(indices, (), "_stats")
    return self.conn._send_request('GET', path)
Retrieve the statistic of one or more indices (See :ref:`es-guide-reference-api-admin-indices-stats`) :keyword indices: an index or a list of indices
def open(filename, frame='unspecified'):
    """Creates a PointCloudImage from a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg, .npy,
        or .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new image
        lies.

    Returns
    -------
    :obj:`PointCloudImage`
        The new PointCloudImage.
    """
    # Delegate file parsing to Image, then wrap the raw data with its frame.
    raw_data = Image.load_data(filename)
    return PointCloudImage(raw_data, frame)
Creates a PointCloudImage from a file. Parameters ---------- filename : :obj:`str` The file to load the data from. Must be one of .png, .jpg, .npy, or .npz. frame : :obj:`str` A string representing the frame of reference in which the new image lies. Returns ------- :obj:`PointCloudImage` The new PointCloudImage.
def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ if not self._values_list: # we want models return lambda rows: self.model._construct_instance(rows) elif self._flat_values_list: # the user has requested flattened list (1 value per row) return lambda row: row.popitem()[1] else: return lambda row: self._get_row_value_list(self._only_fields, row)
Returns a function that will be used to instantiate query results
def leave_command_mode(self, append_to_history=False):
    """
    Leave the command/prompt mode.
    """
    client_state = self.get_client_state()
    # Reset both input buffers; the prompt buffer always keeps its history.
    client_state.command_buffer.reset(append_to_history=append_to_history)
    client_state.prompt_buffer.reset(append_to_history=True)

    client_state.prompt_command = ''
    client_state.confirm_command = ''

    client_state.app.layout.focus_previous()
Leave the command/prompt mode.
def _merge_common_bands(rasters):
    # type: (List[_Raster]) -> List[_Raster]
    """Combine the common bands.
    """
    # Establish a stable band order from each raster's first band name.
    all_bands = IndexedSet([rs.band_names[0] for rs in rasters])

    def band_index(rs):
        return all_bands.index(rs.band_names[0])

    merged = []  # type: List[_Raster]
    # Group rasters sharing a band and collapse each group into one raster.
    for _, group in groupby(sorted(rasters, key=band_index), key=band_index):
        merged.append(reduce(_fill_pixels, group))
    return merged
Combine the common bands.
def load_preprocess_images(image_paths: List[str], image_size: tuple) -> List[np.ndarray]:
    """
    Load and pre-process the images specified with absolute paths.

    :param image_paths: List of images specified with paths.
    :param image_size: Tuple to resize the image to (Channels, Height, Width)
    :return: A list of loaded images (numpy arrays).
    """
    # Drop the channel count; only (Height, Width) matters for loading.
    target_size = image_size[1:]
    return [load_preprocess_image(path, target_size) for path in image_paths]
Load and pre-process the images specified with absolute paths. :param image_paths: List of images specified with paths. :param image_size: Tuple to resize the image to (Channels, Height, Width) :return: A list of loaded images (numpy arrays).
def filter_single_grain(self):
    '''
    This subroutine is to filter out single grains. It is kind of useless
    if you have tons of data still in the list. To work on there, you have
    other filters (filter_desc and filter_data) available!

    This filter gives an index to every grain, plots the most important
    information, and then asks you to pick a filter. No input necessary,
    input is given during the routine
    '''
    # Build a printable table: header row plus one indexed row per grain.
    my_index = 0
    my_grains = [['Index', 'Label', 'Type', 'Group', 'Meteorite',
                  'Mineralogy', 'C12/C13', 'd(Si29/Si30)', 'd(Si30/Si29)']]
    for it in range(len(self.data)):
        my_grains.append([my_index,
                          self.desc[it][self.descdict['Grain Label']],
                          self.desc[it][self.descdict['Type']],
                          self.desc[it][self.descdict['Group']],
                          self.desc[it][self.descdict['Meteorite']],
                          self.desc[it][self.descdict['Mineralogy']],
                          self.data[it][self.datadict['12c/13c']],
                          self.data[it][self.datadict['d(29si/28si)']],
                          self.data[it][self.datadict['d(30si/28si)']]])
        my_index += 1
    for prt_line in my_grains:
        print(prt_line)

    # Ask the user which grain indices should be kept.
    usr_input = ''
    usr_input = input('Select the grains by index that you want to use. \nPlease separate the indeces by a comma, e.g., 1 or 0,2,3,4\n')

    # process user index
    if usr_input == '':
        print('No data selected to filter.')
        return None
    elif len(usr_input) == 1:
        usr_index = [usr_input]
    else:
        usr_index = usr_input.split(',')
    for it in range(len(usr_index)):
        usr_index[it] = int(usr_index[it])

    # Copy only the selected rows into fresh arrays, then swap them in.
    desc_tmp = np.zeros((len(usr_index), len(self.header_desc)), dtype='|S1024')
    data_tmp = np.zeros((len(usr_index), len(self.header_data)))
    style_tmp = np.zeros((len(usr_index), len(self.header_style)), dtype='|S1024')
    for i in range(len(usr_index)):
        for j in range(len(self.header_desc)):
            desc_tmp[i][j] = self.desc[usr_index[i]][j]
        for k in range(len(self.header_data)):
            data_tmp[i][k] = self.data[usr_index[i]][k]
        for l in range(len(self.header_style)):
            style_tmp[i][l] = self.style[usr_index[i]][l]
    self.desc = desc_tmp
    self.data = data_tmp
    self.style = style_tmp
This subroutine is to filter out single grains. It is kind of useless if you have tons of data still in the list. To work on there, you have other filters (filter_desc and filter_data) available! This filter gives an index to every grain, plots the most important information, and then asks you to pick a filter. No input necessary, input is given during the routine
def mass_3d(self, r, kwargs, bool_list=None):
    """
    computes the mass within a 3d sphere of radius r

    :param r: radius (in angular units)
    :param kwargs: list of keyword arguments of lens model parameters
        matching the lens model classes
    :param bool_list: list of bools that are part of the output
    :return: mass (in angular units, modulo epsilon_crit)
    """
    bool_list = self._bool_list(bool_list)
    total_mass = 0
    for i, func in enumerate(self.func_list):
        if bool_list[i] is True:
            # Strip the centre coordinates before delegating to the profile.
            kwargs_i = {k: v for k, v in kwargs[i].items()
                        if k not in ['center_x', 'center_y']}
            total_mass += func.mass_3d_lens(r, **kwargs_i)
    return total_mass
computes the mass within a 3d sphere of radius r :param r: radius (in angular units) :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param bool_list: list of bools that are part of the output :return: mass (in angular units, modulo epsilon_crit)
def name(self):
    '''Returns the name of this template (if created from a file) or
    "string" if not'''
    template_file = self.mako_template.filename
    if template_file:
        return os.path.basename(template_file)
    return 'string'
Returns the name of this template (if created from a file) or "string" if not
def _reconstruct_object(typ, obj, axes, dtype):
    """Reconstruct an object given its type, raw value, and possibly empty
    (None) axes.

    Parameters
    ----------
    typ : object
        A type
    obj : object
        The value to use in the type constructor
    axes : dict
        The axes to use to construct the resulting pandas object

    Returns
    -------
    ret : typ
        An object of type ``typ`` with the value `obj` and possible axes
        `axes`.
    """
    # numpy dtype-like objects carry the constructor on ``.type``;
    # plain callables are used as-is.
    try:
        typ = typ.type
    except AttributeError:
        pass

    res_t = np.result_type(obj.dtype, dtype)

    # pandas objects take the dtype and axes directly in their constructor.
    if (not isinstance(typ, partial) and
            issubclass(typ, pd.core.generic.PandasObject)):
        return typ(obj, dtype=res_t, **axes)

    # special case for pathological things like ~True/~False
    if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
        ret_value = res_t.type(obj)
    else:
        ret_value = typ(obj).astype(res_t)
        # The condition is to distinguish 0-dim array (returned in case of
        # scalar) and 1 element array
        # e.g. np.array(0) and np.array([0])
        if len(obj.shape) == 1 and len(obj) == 1:
            if not isinstance(ret_value, np.ndarray):
                ret_value = np.array([ret_value]).astype(res_t)

    return ret_value
Reconstruct an object given its type, raw value, and possibly empty (None) axes. Parameters ---------- typ : object A type obj : object The value to use in the type constructor axes : dict The axes to use to construct the resulting pandas object Returns ------- ret : typ An object of type ``typ`` with the value `obj` and possible axes `axes`.
def _parse(self, r, length):
    """Raises BitReaderError"""

    def bits_left():
        # Remaining unread bits of the ``length``-byte payload.
        return length * 8 - r.get_position()

    self.audioObjectType = self._get_audio_object_type(r)
    self.samplingFrequency = self._get_sampling_freq(r)
    self.channelConfiguration = r.bits(4)

    self.sbrPresentFlag = -1
    self.psPresentFlag = -1
    # Object types 5/29 signal an explicit SBR (and, for 29, PS) extension.
    if self.audioObjectType in (5, 29):
        self.extensionAudioObjectType = 5
        self.sbrPresentFlag = 1
        if self.audioObjectType == 29:
            self.psPresentFlag = 1
        self.extensionSamplingFrequency = self._get_sampling_freq(r)
        self.audioObjectType = self._get_audio_object_type(r)
        if self.audioObjectType == 22:
            self.extensionChannelConfiguration = r.bits(4)
    else:
        self.extensionAudioObjectType = 0

    if self.audioObjectType in (1, 2, 3, 4, 6, 7, 17, 19, 20, 21, 22, 23):
        try:
            GASpecificConfig(r, self)
        except NotImplementedError:
            # unsupported, (warn?)
            return
    else:
        # unsupported
        return

    if self.audioObjectType in (
            17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 39):
        epConfig = r.bits(2)
        if epConfig in (2, 3):
            # unsupported
            return

    # Probe for an implicit SBR/PS sync extension trailing the config.
    if self.extensionAudioObjectType != 5 and bits_left() >= 16:
        syncExtensionType = r.bits(11)
        if syncExtensionType == 0x2b7:
            self.extensionAudioObjectType = self._get_audio_object_type(r)
            if self.extensionAudioObjectType == 5:
                self.sbrPresentFlag = r.bits(1)
                if self.sbrPresentFlag == 1:
                    self.extensionSamplingFrequency = \
                        self._get_sampling_freq(r)
                if bits_left() >= 12:
                    syncExtensionType = r.bits(11)
                    if syncExtensionType == 0x548:
                        self.psPresentFlag = r.bits(1)
            if self.extensionAudioObjectType == 22:
                self.sbrPresentFlag = r.bits(1)
                if self.sbrPresentFlag == 1:
                    self.extensionSamplingFrequency = \
                        self._get_sampling_freq(r)
                self.extensionChannelConfiguration = r.bits(4)
Raises BitReaderError
def version(self):
    """Return version from installed packages"""
    if not self.find:
        return ""
    return self.meta.sp + split_package(self.find)[1]
Return version from installed packages
def _set_logger(self): """change log format.""" self.logger.propagate = False hdl = logging.StreamHandler() fmt_str = '[querier][%(levelname)s] %(message)s' hdl.setFormatter(logging.Formatter(fmt_str)) self.logger.addHandler(hdl)
change log format.
def GetFileObjectByPathSpec(self, path_spec):
    """Retrieves a file-like object for a path specification.

    Args:
      path_spec (PathSpec): a path specification.

    Returns:
      FileIO: a file-like object or None if not available.
    """
    entry = self.GetFileEntryByPathSpec(path_spec)
    return entry.GetFileObject() if entry else None
Retrieves a file-like object for a path specification. Args: path_spec (PathSpec): a path specification. Returns: FileIO: a file-like object or None if not available.
def status(self, job_ids):
    ''' Get the status of a list of jobs identified by the job identifiers
    returned from the submit request.

    Args:
         - job_ids (list) : A list of job identifiers

    Returns:
         - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
           'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.

    Raises:
         - ExecutionProviderException or its subclasses
    '''
    statuses = []
    for job_id in job_ids:
        # One GCE API round-trip per job; the job_id doubles as the
        # instance name in this provider.
        instance = self.client.instances().get(instance=job_id, project=self.project_id, zone=self.zone).execute()
        # Map the raw GCE instance state onto the provider status
        # vocabulary and cache it on the tracked resource.
        self.resources[job_id]['status'] = translate_table[instance['status']]
        statuses.append(translate_table[instance['status']])
    return statuses
Get the status of a list of jobs identified by the job identifiers returned from the submit request. Args: - job_ids (list) : A list of job identifiers Returns: - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED', 'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list. Raises: - ExecutionProviderException or its subclasses
def get_connected_devices():
    """ Return an array of all mbed boards connected
    """
    all_daplinks = []
    all_interfaces = _get_interfaces()
    for interface in all_interfaces:
        try:
            new_daplink = DAPAccessCMSISDAP(None, interface=interface)
            all_daplinks.append(new_daplink)
        except DAPAccessIntf.TransferError:
            # Probing a board can fail (e.g. it is busy); log and keep
            # enumerating the remaining interfaces instead of aborting.
            logger = logging.getLogger(__name__)
            logger.error('Failed to get unique id', exc_info=session.Session.get_current().log_tracebacks)
    return all_daplinks
Return an array of all mbed boards connected
def set_app(name, site, settings=None):
    # pylint: disable=anomalous-backslash-in-string
    '''
    .. versionadded:: 2017.7.0

    Set the value of the setting for an IIS web application.

    .. note::
        This function only configures existing app.
        Params are case sensitive.

    :param str name: The IIS application.
    :param str site: The IIS site name.
    :param str settings: A dictionary of the setting names and their values.

    Available settings:

    - ``physicalPath`` - The physical path of the webapp
    - ``applicationPool`` - The application pool for the webapp
    - ``userName`` "connectAs" user
    - ``password`` "connectAs" password for user

    :rtype: bool

    Example of usage:

    .. code-block:: yaml

        site0-webapp-setting:
            win_iis.set_app:
                - name: app0
                - site: Default Web Site
                - settings:
                    userName: domain\\user
                    password: pass
                    physicalPath: c:\inetpub\wwwroot
                    applicationPool: appPool0
    '''
    # pylint: enable=anomalous-backslash-in-string
    ret = {'name': name,
           'changes': {},
           'comment': str(),
           'result': None}

    if not settings:
        ret['comment'] = 'No settings to change provided.'
        ret['result'] = True
        return ret

    ret_settings = {
        'changes': {},
        'failures': {},
    }

    # Diff requested settings against what IIS currently reports.
    current_settings = __salt__['win_iis.get_webapp_settings'](name=name, site=site, settings=settings.keys())
    for setting in settings:
        if str(settings[setting]) != str(current_settings[setting]):
            ret_settings['changes'][setting] = {'old': current_settings[setting],
                                                'new': settings[setting]}
    if not ret_settings['changes']:
        ret['comment'] = 'Settings already contain the provided values.'
        ret['result'] = True
        return ret
    elif __opts__['test']:
        # Dry-run mode: report the pending diff without applying it.
        ret['comment'] = 'Settings will be changed.'
        ret['changes'] = ret_settings
        return ret

    __salt__['win_iis.set_webapp_settings'](name=name, site=site, settings=settings)
    new_settings = __salt__['win_iis.get_webapp_settings'](name=name, site=site, settings=settings.keys())

    # Re-read after applying: anything that still differs is a failure.
    for setting in settings:
        if str(settings[setting]) != str(new_settings[setting]):
            ret_settings['failures'][setting] = {'old': current_settings[setting],
                                                 'new': new_settings[setting]}
            ret_settings['changes'].pop(setting, None)

    if ret_settings['failures']:
        ret['comment'] = 'Some settings failed to change.'
        ret['changes'] = ret_settings
        ret['result'] = False
    else:
        ret['comment'] = 'Set settings to contain the provided values.'
        ret['changes'] = ret_settings['changes']
        ret['result'] = True

    return ret
.. versionadded:: 2017.7.0 Set the value of the setting for an IIS web application. .. note:: This function only configures existing app. Params are case sensitive. :param str name: The IIS application. :param str site: The IIS site name. :param str settings: A dictionary of the setting names and their values. Available settings: - ``physicalPath`` - The physical path of the webapp - ``applicationPool`` - The application pool for the webapp - ``userName`` "connectAs" user - ``password`` "connectAs" password for user :rtype: bool Example of usage: .. code-block:: yaml site0-webapp-setting: win_iis.set_app: - name: app0 - site: Default Web Site - settings: userName: domain\\user password: pass physicalPath: c:\inetpub\wwwroot applicationPool: appPool0
def get_mock_personalization_dict():
    """Get a dict of personalization mock."""
    return {
        'to_list': [
            To("test1@example.com", "Example User"),
            To("test2@example.com", "Example User"),
        ],
        'cc_list': [
            To("test3@example.com", "Example User"),
            To("test4@example.com", "Example User"),
        ],
        'bcc_list': [To("test5@example.com"), To("test6@example.com")],
        'subject': ("Hello World from the Personalized "
                    "SendGrid Python Library"),
        'headers': [Header("X-Test", "test"), Header("X-Mock", "true")],
        'substitutions': [
            Substitution("%name%", "Example User"),
            Substitution("%city%", "Denver"),
        ],
        'custom_args': [
            CustomArg("user_id", "343"),
            CustomArg("type", "marketing"),
        ],
        'send_at': 1443636843,
    }
Get a dict of personalization mock.
def _process_execute_error(self, msg):
    """ Reimplemented for IPython-style traceback formatting.
    """
    content = msg['content']
    traceback = '\n'.join(content['traceback']) + '\n'
    if False:
        # FIXME: For now, tracebacks come as plain text, so we can't use
        # the html renderer yet.  Once we refactor ultratb to produce
        # properly styled tracebacks, this branch should be the default
        traceback = traceback.replace(' ', '&nbsp;')
        traceback = traceback.replace('\n', '<br/>')

        ename = content['ename']
        ename_styled = '<span class="error">%s</span>' % ename
        traceback = traceback.replace(ename, ename_styled)

        self._append_html(traceback)
    else:
        # This is the fallback for now, using plain text with ansi escapes
        self._append_plain_text(traceback)
Reimplemented for IPython-style traceback formatting.
def released(self, unit, lock, timestamp):
    '''Called on the leader when it has released a lock.

    By default, does nothing but log messages. Override if you
    need to perform additional housekeeping when a lock is released,
    for example recording timestamps.
    '''
    held_for = _utcnow() - timestamp
    self.msg('Leader released {} from {}, held {}'.format(lock, unit, held_for))
Called on the leader when it has released a lock. By default, does nothing but log messages. Override if you need to perform additional housekeeping when a lock is released, for example recording timestamps.
def p_suffix(self, length=None, elipsis=False):
    "Return the rest of the input"
    if length is None:
        return self.input[self.pos:]
    chunk = self.input[self.pos:self.pos + length]
    # A full-length chunk means more input may follow; mark it.
    if elipsis and len(chunk) == length:
        return chunk + "..."
    return chunk
Return the rest of the input
def on_message(self, ws, message):
    """
    Todo
    """
    m = json.loads(message)
    self.logger.debug(m)
    # Track the gateway sequence number for heartbeats/resumes.
    if m.get("s", 0):
        self.sequence = m["s"]
    if m["op"] == self.DISPATCH:
        if m["t"] == "READY":
            # Cache one-to-one DM channels keyed by channel id.
            for channel in m["d"]["private_channels"]:
                if len(channel["recipients"]) == 1:
                    self.channels[channel["id"]] = User(channel["recipients"][0])
                    self.logger.info("added channel for %s", self.channels[channel["id"]])
            self.session = m["d"]["session_id"]
            self.con_connect(User(m["d"]["user"]))
        elif m["t"] == "GUILD_CREATE":
            pass
        elif m["t"] == "MESSAGE_CREATE":
            # if not m["d"]["channel_id"] in self.channels:
            #    print("ch:")
            #    print(self.get("channels/"+m["d"]["channel_id"]))
            self.con_message(Message(m["d"]))
    elif m["op"] == self.HELLO:
        # Start the heartbeat thread at the interval the server requests.
        interval = int(m['d']['heartbeat_interval'] / 1000)
        self.h = Heartbeat(self, interval)
        self.h.daemon = True
        self.h.start()
    elif m["op"] == self.HEARTBEAT_ACK:
        pass
    else:
        self.logger.debug(m)
Todo
def pause_writing(self):
    '''Transport calls when the send buffer is full.'''
    if self.is_closing():
        return
    self._can_send.clear()
    self.transport.pause_reading()
Transport calls when the send buffer is full.
def abs(x, context=None):
    """ Return abs(x). """
    # NOTE: intentionally shadows the builtin ``abs`` -- this module
    # mirrors the function naming of the underlying MPFR library.
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_abs,
        (BigFloat._implicit_convert(x),),
        context,
    )
Return abs(x).
def angular_errors(hyp_axes):
    """
    Minimum and maximum angular errors
    corresponding to 1st and 2nd axes
    of PCA distribution.

    Ordered as [minimum, maximum] angular error.
    """
    # Not quite sure why this is sqrt but it is empirically correct
    semiaxes = N.sqrt(hyp_axes)
    last = semiaxes[-1]
    return tuple(N.arctan2(last, semiaxes[:-1]))
Minimum and maximum angular errors corresponding to 1st and 2nd axes of PCA distribution. Ordered as [minimum, maximum] angular error.
def add_on_connection_close_callback(self):
    """
    Add an on close callback that will be invoked by pika
    when RabbitMQ closes the connection to the publisher unexpectedly.
    """
    self._logger.debug('Adding connection close callback')
    handler = self.on_connection_closed
    self._connection.add_on_close_callback(handler)
Add an on close callback that will be invoked by pika when RabbitMQ closes the connection to the publisher unexpectedly.
def get_bootstrap_from_recipes(cls, recipes, ctx):
    '''Returns a bootstrap whose recipe requirements do not conflict with
    the given recipes.'''
    info('Trying to find a bootstrap that matches the given recipes.')
    bootstraps = [cls.get_bootstrap(name, ctx)
                  for name in cls.list_bootstraps()]
    acceptable_bootstraps = []
    for bs in bootstraps:
        if not bs.can_be_chosen_automatically:
            continue
        # Each bootstrap may have alternative dependency sets; any one
        # conflict-free set makes the bootstrap acceptable.
        possible_dependency_lists = expand_dependencies(bs.recipe_depends)
        for possible_dependencies in possible_dependency_lists:
            ok = True
            # Check conflicts in both directions: bootstrap deps against
            # the requested recipes, and the recipes' own conflict lists.
            for recipe in possible_dependencies:
                recipe = Recipe.get_recipe(recipe, ctx)
                if any([conflict in recipes for conflict in recipe.conflicts]):
                    ok = False
                    break
            for recipe in recipes:
                try:
                    recipe = Recipe.get_recipe(recipe, ctx)
                except ValueError:
                    conflicts = []
                else:
                    conflicts = recipe.conflicts
                if any([conflict in possible_dependencies
                        for conflict in conflicts]):
                    ok = False
                    break
            if ok and bs not in acceptable_bootstraps:
                acceptable_bootstraps.append(bs)
    info('Found {} acceptable bootstraps: {}'.format(
        len(acceptable_bootstraps),
        [bs.name for bs in acceptable_bootstraps]))
    if acceptable_bootstraps:
        info('Using the first of these: {}'
             .format(acceptable_bootstraps[0].name))
        return acceptable_bootstraps[0]
    return None
Returns a bootstrap whose recipe requirements do not conflict with the given recipes.
def get_diff_endpoints_from_commit_range(repo, commit_range):
    """Get endpoints of a diff given a commit range

    The resulting endpoints can be diffed directly::

        a, b = get_diff_endpoints_from_commit_range(repo, commit_range)
        a.diff(b)

    For details on specifying git diffs, see ``git diff --help``.
    For details on specifying revisions, see ``git help revisions``.

    Args:
        repo (git.Repo): Repo object initialized with project root
        commit_range (str): commit range as would be interpreted by ``git
            diff`` command. Unfortunately only patterns of the form ``a..b``
            and ``a...b`` are accepted. Note that the latter pattern finds the
            merge-base of a and b and uses it as the starting point for the
            diff.

    Returns:
        Tuple[git.Commit, git.Commit]: starting commit, ending commit (
            inclusive)

    Raises:
        ValueError: commit_range is empty or ill-formed

    See also:

        <https://stackoverflow.com/q/7251477>
    """
    if not commit_range:
        raise ValueError('commit_range cannot be empty')
    result = re_find(COMMIT_RANGE_REGEX, commit_range)
    if not result:
        raise ValueError(
            'Expected diff str of the form \'a..b\' or \'a...b\' (got {})'
            .format(commit_range))
    a, b = result['a'], result['b']
    a, b = repo.rev_parse(a), repo.rev_parse(b)
    if result['thirddot']:
        # ``a...b`` semantics: diff from the merge-base of a and b.
        a = one_or_raise(repo.merge_base(a, b))
    return a, b
Get endpoints of a diff given a commit range The resulting endpoints can be diffed directly:: a, b = get_diff_endpoints_from_commit_range(repo, commit_range) a.diff(b) For details on specifying git diffs, see ``git diff --help``. For details on specifying revisions, see ``git help revisions``. Args: repo (git.Repo): Repo object initialized with project root commit_range (str): commit range as would be interpreted by ``git diff`` command. Unfortunately only patterns of the form ``a..b`` and ``a...b`` are accepted. Note that the latter pattern finds the merge-base of a and b and uses it as the starting point for the diff. Returns: Tuple[git.Commit, git.Commit]: starting commit, ending commit ( inclusive) Raises: ValueError: commit_range is empty or ill-formed See also: <https://stackoverflow.com/q/7251477>
def _subset_by_support(orig_vcf, cmp_calls, data):
    """Subset orig_vcf to calls also present in any of the comparison callers.
    """
    cmp_vcfs = [x["vrn_file"] for x in cmp_calls]
    out_file = "%s-inensemble.vcf.gz" % utils.splitext_plus(orig_vcf)[0]
    if not utils.file_uptodate(out_file, orig_vcf):
        with file_transaction(data, out_file) as tx_out_file:
            # Require 50% reciprocal overlap (-f 0.5 -r) with PASS or
            # unfiltered calls from any comparison VCF; -wa keeps the
            # original record unmodified.
            cmd = "bedtools intersect -header -wa -f 0.5 -r -a {orig_vcf} -b "
            for cmp_vcf in cmp_vcfs:
                cmd += "<(bcftools view -f 'PASS,.' %s) " % cmp_vcf
            cmd += "| bgzip -c > {tx_out_file}"
            do.run(cmd.format(**locals()), "Subset calls by those present in Ensemble output")
    return vcfutils.bgzip_and_index(out_file, data["config"])
Subset orig_vcf to calls also present in any of the comparison callers.
def reverse_dependencies(self, ireqs):
    """
    Returns a lookup table of reverse dependencies for all the given ireqs.

    Since this is all static, it only works if the dependency cache
    contains the complete data, otherwise you end up with a partial view.
    This is typically no problem if you use this function after the entire
    dependency tree is resolved.
    """
    cache_keys = [self.as_cache_key(ireq) for ireq in ireqs]
    return self._reverse_dependencies(cache_keys)
Returns a lookup table of reverse dependencies for all the given ireqs. Since this is all static, it only works if the dependency cache contains the complete data, otherwise you end up with a partial view. This is typically no problem if you use this function after the entire dependency tree is resolved.
def prompt(test_input=None):
    """
    Prompt function that works for Python2 and Python3

    :param test_input: Value to be returned when testing
    :return: Value typed by user (or passed in argument when testing)
    """
    if test_input is not None:
        if isinstance(test_input, list):
            # Consume scripted answers one at a time; an exhausted list
            # behaves like a user pressing Enter (empty string).
            return test_input.pop(0) if test_input else ''
        return test_input
    # Coverage: interactive paths are not exercised under test
    try:
        return raw_input()
    except NameError:
        # Python 3 has no raw_input(); narrowed from a bare ``except``.
        return input()
Prompt function that works for Python2 and Python3 :param test_input: Value to be returned when testing :return: Value typed by user (or passed in argument when testing)
def drop_genes(self, build=None):
    """Delete the genes collection"""
    if not build:
        LOG.info("Dropping the hgnc_gene collection")
        self.hgnc_collection.drop()
        return
    LOG.info("Dropping the hgnc_gene collection, build %s", build)
    self.hgnc_collection.delete_many({'build': build})
Delete the genes collection
def check_array_struct(array):
    """
    Check to ensure arrays are symmetrical, for example:
    [[1, 2, 3], [1, 2]] is invalid
    """
    # If a list is transformed into a numpy array and the sub elements
    # of this array are still lists, then numpy failed to fully convert
    # the list, meaning it is not symmetrical (ragged).
    try:
        arr = np.array(array)
    except Exception:
        # Narrowed from a bare ``except``: still catches numpy's
        # conversion errors without swallowing SystemExit/KeyboardInterrupt.
        raise HydraError("Array %s is not valid."%(array,))
    # NOTE(review): only the first element is inspected; a ragged tail
    # such as [[1], 2, [3, 4]] may slip through -- confirm intended.
    if type(arr[0]) is list:
        raise HydraError("Array %s is not valid."%(array,))
Check to ensure arrays are symmetrical, for example: [[1, 2, 3], [1, 2]] is invalid
def stock2fa(stock):
    """
    convert stockholm to fasta

    :param stock: iterable of lines from a Stockholm alignment
    :return: dict mapping sequence id -> list of aligned sequence chunks
    """
    seqs = {}
    for line in stock:
        # Skip markup ('#'), continuation (' ') and too-short lines.
        if not line.startswith(('#', ' ')) and len(line) > 3:
            name, seq = line.strip().split()
            # Drop the trailing "/start-end" coordinates ...
            name = name.rsplit('/', 1)[0]
            # ... and any leading "<digit>|" accession prefix.
            # (raw string fixes the invalid ``\|`` escape warning)
            name = re.split(r'[0-9]\|', name, 1)[-1]
            seqs.setdefault(name, []).append(seq)
        # '//' terminates the alignment block.
        if line.startswith('//'):
            break
    return seqs
convert stockholm to fasta
def has_edge(self, edge):
    """
    Return whether an edge exists.

    @type  edge: tuple
    @param edge: Edge.

    @rtype:  boolean
    @return: Truth-value for edge existence.
    """
    source, target = edge
    return (source, target) in self.edge_properties
Return whether an edge exists. @type edge: tuple @param edge: Edge. @rtype: boolean @return: Truth-value for edge existence.
def get(self, column_name):
    """
    Retrieve a column from the list with name value :code:`column_name`

    :param str column_name: The name of the column to get
    :return: :class:`~giraffez.types.Column` with the specified name, or
        :code:`None` if it does not exist.
    """
    wanted = column_name.lower()
    return next((c for c in self.columns if c.name == wanted), None)
Retrieve a column from the list with name value :code:`column_name` :param str column_name: The name of the column to get :return: :class:`~giraffez.types.Column` with the specified name, or :code:`None` if it does not exist.
def unmount(self):
    """Unmounts the sftp system if it's currently mounted."""
    if not self.mounted:
        return
    # Try to unmount properly.
    # NOTE(review): mount_point_local is interpolated unquoted -- a path
    # containing spaces would break the command; confirm callers sanitize.
    cmd = 'fusermount -u %s' % self.mount_point_local
    shell_exec(cmd)
    # The filesystem is probably still in use.
    # kill sshfs and re-run this same command (which will work then).
    if self.mounted:
        self._kill()
        shell_exec(cmd)
    self._mount_point_local_delete()
Unmounts the sftp system if it's currently mounted.
def kitchen_merge(backend, source_kitchen, target_kitchen):
    """
    Merge two Kitchens
    """
    # Announce the merge on the CLI, then run it and pretty-print the result.
    click.secho('%s - Merging Kitchen %s into Kitchen %s' % (get_datetime(), source_kitchen, target_kitchen), fg='green')
    check_and_print(DKCloudCommandRunner.merge_kitchens_improved(backend.dki, source_kitchen, target_kitchen))
Merge two Kitchens
def remove_overlap(self, begin, end=None):
    """
    Removes all intervals overlapping the given point or range.

    Completes in O((r+m)*log n) time, where:
      * n = size of the tree
      * m = number of matches
      * r = size of the search range (this is 1 for a point)
    """
    if end is None:
        matches = self.at(begin)
    else:
        matches = self.overlap(begin, end)
    for interval in matches:
        self.remove(interval)
Removes all intervals overlapping the given point or range. Completes in O((r+m)*log n) time, where: * n = size of the tree * m = number of matches * r = size of the search range (this is 1 for a point)
def LookupChain(lookup_func_list):
    """Returns a *function* suitable for passing as the more_formatters
    argument to Template.

    NOTE: In Java, this would be implemented using the 'Composite' pattern.
    A *list* of formatter lookup function behaves the same as a *single*
    formatter lookup funcion.

    Note the distinction between formatter *lookup* functions and formatter
    functions here.
    """
    def MoreFormatters(formatter_name):
        # First lookup function that recognises the name wins.
        for lookup in lookup_func_list:
            found = lookup(formatter_name)
            if found is not None:
                return found
        return None

    return MoreFormatters
Returns a *function* suitable for passing as the more_formatters argument to Template. NOTE: In Java, this would be implemented using the 'Composite' pattern. A *list* of formatter lookup functions behaves the same as a *single* formatter lookup function. Note the distinction between formatter *lookup* functions and formatter functions here.
def wait_for_edge(self, pin, edge):
    """Wait for an edge.   Pin should be type IN.  Edge must be RISING,
    FALLING or BOTH.
    """
    # Blocks until the edge fires; translate the generic edge constant
    # to the backend-specific value via this adapter's mapping table.
    self.bbio_gpio.wait_for_edge(self.mraa_gpio.Gpio(pin), self._edge_mapping[edge])
Wait for an edge. Pin should be type IN. Edge must be RISING, FALLING or BOTH.
def attachviewers(self, profiles):
    """Attach viewers *and converters* to file, automatically scan all profiles for outputtemplate or inputtemplate"""
    if self.metadata:
        template = None
        # Find the template (input or output, depending on this file's
        # class) whose id matches the metadata.
        for profile in profiles:
            if isinstance(self, CLAMInputFile):
                for t in profile.input:
                    if self.metadata.inputtemplate == t.id:
                        template = t
                        break
            elif isinstance(self, CLAMOutputFile) and self.metadata and self.metadata.provenance:
                for t in profile.outputtemplates():
                    if self.metadata.provenance.outputtemplate_id == t.id:
                        template = t
                        break
            else:
                raise NotImplementedError #Is ok, nothing to implement for now
            if template:
                break
        # Copy the template's viewers and converters onto this file.
        if template and template.viewers:
            for viewer in template.viewers:
                self.viewers.append(viewer)
        if template and template.converters:
            for converter in template.converters:
                self.converters.append(converter)
Attach viewers *and converters* to file, automatically scan all profiles for outputtemplate or inputtemplate
def write_crc32(fo, bytes):
    """A 4-byte, big-endian CRC32 checksum"""
    # Mask to 32 bits so the value packs as an unsigned int on all platforms.
    checksum = crc32(bytes) & 0xFFFFFFFF
    fo.write(pack('>I', checksum))
A 4-byte, big-endian CRC32 checksum
def generate_entry_label(entry):
    """
    Generates a label for the pourbaix plotter

    Args:
        entry (PourbaixEntry or MultiEntry): entry to get a label for
    """
    if not isinstance(entry, MultiEntry):
        return latexify_ion(latexify(entry.name))
    return " + ".join(latexify_ion(e.name) for e in entry.entry_list)
Generates a label for the pourbaix plotter Args: entry (PourbaixEntry or MultiEntry): entry to get a label for
def get_atoms(structure, **kwargs):
    """
    Returns ASE Atoms object from pymatgen structure.

    Args:
        structure: pymatgen.core.structure.Structure
        **kwargs: other keyword args to pass into the ASE Atoms constructor

    Returns:
        ASE Atoms object
    """
    if not structure.is_ordered:
        raise ValueError("ASE Atoms only supports ordered structures")
    symbols = []
    positions = []
    for site in structure:
        symbols.append(str(site.specie.symbol))
        positions.append(site.coords)
    return Atoms(symbols=symbols, positions=positions, pbc=True,
                 cell=structure.lattice.matrix, **kwargs)
Returns ASE Atoms object from pymatgen structure. Args: structure: pymatgen.core.structure.Structure **kwargs: other keyword args to pass into the ASE Atoms constructor Returns: ASE Atoms object
def _parse_acl_config(self, acl_config): """Parse configured ACLs and rules ACLs are returned as a dict of rule sets: {<eos_acl1_name>: set([<eos_acl1_rules>]), <eos_acl2_name>: set([<eos_acl2_rules>]), ..., } """ parsed_acls = dict() for acl in acl_config['aclList']: parsed_acls[acl['name']] = set() for rule in acl['sequence']: parsed_acls[acl['name']].add(rule['text']) return parsed_acls
Parse configured ACLs and rules ACLs are returned as a dict of rule sets: {<eos_acl1_name>: set([<eos_acl1_rules>]), <eos_acl2_name>: set([<eos_acl2_rules>]), ..., }
def _gen_last_current_relation(self, post_id):
    '''
    Generate the relation for the post and last post viewed.
    '''
    last_post_id = self.get_secure_cookie('last_post_uid')
    if last_post_id:
        # Cookies come back as bytes; relation keys are str.
        last_post_id = last_post_id.decode('utf-8')
    # Record the current post as "last viewed" for the next request.
    self.set_secure_cookie('last_post_uid', post_id)
    # Only link the pair if the previous post still exists.
    if last_post_id and MPost.get_by_uid(last_post_id):
        self._add_relation(last_post_id, post_id)
Generate the relation for the post and last post viewed.
def update_attributes(self, updates):
    """Update attributes."""
    if not isinstance(updates, dict):
        updates = updates.to_dict()
    for sdk_key, spec_key in self._get_attributes_map().items():
        private_name = '_%s' % sdk_key
        # Only fill attributes that are still unset.
        if spec_key not in updates or hasattr(self, private_name):
            continue
        setattr(self, private_name, updates[spec_key])
Update attributes.
def deaccent(text):
    """
    Remove accentuation from the given string.
    """
    # Decompose characters, drop the combining marks (category 'Mn'),
    # then recompose what is left.
    decomposed = unicodedata.normalize("NFD", text)
    stripped = "".join(ch for ch in decomposed
                       if unicodedata.category(ch) != 'Mn')
    return unicodedata.normalize("NFC", stripped)
Remove accentuation from the given string.
def _handle_aui(self, data):
    """
    Handle AUI messages.

    :param data: RF message to parse
    :type data: string

    :returns: :py:class`~alarmdecoder.messages.AUIMessage`
    """
    message = AUIMessage(data)
    self.on_aui_message(message=message)
    return message
Handle AUI messages. :param data: RF message to parse :type data: string :returns: :py:class`~alarmdecoder.messages.AUIMessage`
def clear(self):
    """
    Clear the internal (private) variables of the class.
    """
    self.filename = ''
    self.filehandler = 0
    # Station name, identification and revision year:
    self.station_name = ''
    self.rec_dev_id = ''
    self.rev_year = 0000
    # Number and type of channels:
    self.TT = 0
    self.A = 0  # Number of analog channels.
    self.D = 0  # Number of digital channels.
    # Analog channel information:
    self.An = []
    self.Ach_id = []
    self.Aph = []
    self.Accbm = []
    self.uu = []
    self.a = []
    self.b = []
    self.skew = []
    self.min = []
    self.max = []
    self.primary = []
    self.secondary = []
    self.PS = []
    # Digital channel information:
    self.Dn = []
    self.Dch_id = []
    self.Dph = []
    self.Dccbm = []
    self.y = []
    # Line frequency:
    self.lf = 0
    # Sampling rate information:
    self.nrates = 0
    self.samp = []
    self.endsamp = []
    # Date/time stamps:
    # defined by: [dd,mm,yyyy,hh,mm,ss.ssssss]
    self.start = [00,00,0000,00,00,0.0]
    self.trigger = [00,00,0000,00,00,0.0]
    # Data file type:
    self.ft = ''
    # Time stamp multiplication factor:
    self.timemult = 0.0
    self.DatFileContent = ''
Clear the internal (private) variables of the class.
def process_function(self, call_node, definition):
    """Processes a user defined function when it is called.

    Increments self.function_call_index each time it is called, we can refer to it as N in the comments.
    Make e.g. save_N_LHS = assignment.LHS for each AssignmentNode. (save_local_scope)
    Create e.g. temp_N_def_arg1 = call_arg1_label_visitor.result for each argument.
    Visit the arguments if they're calls. (save_def_args_in_temp)
    Create e.g. def_arg1 = temp_N_def_arg1 for each argument. (create_local_scope_from_def_args)
    Visit and get function nodes. (visit_and_get_function_nodes)
    Loop through each save_N_LHS node and create an e.g.
    foo = save_1_foo or, if foo was a call arg, foo = arg_mapping[foo]. (restore_saved_local_scope)
    Create e.g. ~call_1 = ret_func_foo RestoreNode. (return_handler)

    Notes:
        Page 31 in the original thesis, but changed a little.
        We don't have to return the ~call_1 = ret_func_foo RestoreNode made in return_handler,
        because it's the last node anyway, that we return in this function.
        e.g. ret_func_foo gets assigned to visit_Return.

    Args:
        call_node(ast.Call) : The node that calls the definition.
        definition(LocalModuleDefinition): Definition of the function being called.

    Returns:
        Last node in self.nodes, probably the return of the function
        appended to self.nodes in return_handler.
    """
    # Unique index N for this call, used to namespace the save_N_* /
    # temp_N_* scratch variables created below.
    self.function_call_index += 1
    saved_function_call_index = self.function_call_index

    def_node = definition.node

    saved_variables, first_node = self.save_local_scope(
        def_node.lineno,
        saved_function_call_index
    )

    args_mapping, first_node = self.save_def_args_in_temp(
        call_node.args,
        Arguments(def_node.args),
        call_node.lineno,
        saved_function_call_index,
        first_node
    )
    # Visit the callee's body in the context of its own file.
    self.filenames.append(definition.path)
    self.create_local_scope_from_def_args(
        call_node.args,
        Arguments(def_node.args),
        def_node.lineno,
        saved_function_call_index
    )
    function_nodes, first_node = self.visit_and_get_function_nodes(
        definition,
        first_node
    )
    self.filenames.pop()  # Should really probably move after restore_saved_local_scope!!!
    self.restore_saved_local_scope(
        saved_variables,
        args_mapping,
        def_node.lineno
    )
    self.return_handler(
        call_node,
        function_nodes,
        saved_function_call_index,
        first_node
    )
    self.function_return_stack.pop()
    self.function_definition_stack.pop()

    return self.nodes[-1]
Processes a user defined function when it is called. Increments self.function_call_index each time it is called, we can refer to it as N in the comments. Make e.g. save_N_LHS = assignment.LHS for each AssignmentNode. (save_local_scope) Create e.g. temp_N_def_arg1 = call_arg1_label_visitor.result for each argument. Visit the arguments if they're calls. (save_def_args_in_temp) Create e.g. def_arg1 = temp_N_def_arg1 for each argument. (create_local_scope_from_def_args) Visit and get function nodes. (visit_and_get_function_nodes) Loop through each save_N_LHS node and create an e.g. foo = save_1_foo or, if foo was a call arg, foo = arg_mapping[foo]. (restore_saved_local_scope) Create e.g. ~call_1 = ret_func_foo RestoreNode. (return_handler) Notes: Page 31 in the original thesis, but changed a little. We don't have to return the ~call_1 = ret_func_foo RestoreNode made in return_handler, because it's the last node anyway, that we return in this function. e.g. ret_func_foo gets assigned to visit_Return. Args: call_node(ast.Call) : The node that calls the definition. definition(LocalModuleDefinition): Definition of the function being called. Returns: Last node in self.nodes, probably the return of the function appended to self.nodes in return_handler.
def _ge_from_lt(self, other): """Return a >= b. Computed by @total_ordering from (not a < b).""" op_result = self.__lt__(other) if op_result is NotImplemented: return NotImplemented return not op_result
Return a >= b. Computed by @total_ordering from (not a < b).
def find_egg_entry_point(self, object_type, name=None):
    """
    Return the ``(entry_point, protocol, entry_name)`` tuple for the
    entry point with the given ``name`` (defaults to ``'main'``).

    Searches every protocol listed in ``object_type.egg_protocols``,
    keeping at most one match per protocol-options group.

    :raises LookupError: if no entry point is found, or if more than one
        matching entry point exists.
    """
    if name is None:
        name = 'main'
    possible = []
    for protocol_options in object_type.egg_protocols:
        for protocol in protocol_options:
            # Ensure the distribution (and its dependencies) is on sys.path
            # before querying its entry points.
            pkg_resources.require(self.spec)
            entry = pkg_resources.get_entry_info(
                self.spec, protocol, name)
            if entry is not None:
                possible.append((entry.load(), protocol, entry.name))
                # Only the first matching protocol of this group is taken.
                break
    if not possible:
        # Better exception
        dist = pkg_resources.get_distribution(self.spec)
        # NOTE(review): `protocol_options` below is the leaked value from the
        # *last* iteration of the outer loop — confirm this is intended for
        # the error message (it only lists the final protocol group).
        raise LookupError(
            "Entry point %r not found in egg %r (dir: %s; protocols: %s; "
            "entry_points: %s)"
            % (name, self.spec, dist.location,
               ', '.join(_flatten(object_type.egg_protocols)),
               ', '.join(_flatten([
                   dictkeys(pkg_resources.get_entry_info(self.spec, prot, name) or {})
                   for prot in protocol_options] or '(no entry points)'))))
    if len(possible) > 1:
        raise LookupError(
            "Ambiguous entry points for %r in egg %r (protocols: %s)"
            % (name, self.spec, ', '.join(_flatten(protocol_options))))
    return possible[0]
Returns the (entry_point, protocol, entry_name) tuple for the entry point with the given ``name``.
def trim_args(kwds):
    """Drop arguments whose value is None or empty, plus a few blacklisted keys.

    Recipient-style keys ("to", "cc", "bcc", "attachments") are coerced to
    lists in the returned dict.
    """
    skip_keys = ("type", "types", "configure")
    skip_values = (None, ())
    cleaned = {}
    for key, value in kwds.items():
        if key in skip_keys or value in skip_values:
            continue
        cleaned[key] = value
    for key in ("to", "cc", "bcc", "attachments"):
        if key in cleaned:
            cleaned[key] = list(cleaned[key])
    return cleaned
Gets rid of args with value of None, as well as select keys.
def find(self, group=None, element=None, name=None, VR=None):
    """
    Search the parsed DICOM data elements using the supplied filters.

    :param group: Hex decimal for the group of a DICOM element e.g. 0x002
    :param element: Hex decimal for the element value of a DICOM element
        e.g. 0x0010
    :param name: Name of the DICOM element, e.g. "Modality"
    :param VR: Value Representation of the DICOM element, e.g. "PN"
    :return: a lazy ``filter`` iterator over the matching elements.  When
        ``name`` is given, the other filters are ignored.
    """
    elements = self.read()

    if name is not None:
        def matches_name(elem):
            return elem.name.lower() == name.lower()
        return filter(matches_name, elements)

    if group is not None:
        def matches_group(elem):
            return (elem.tag['group'] == group or
                    int(elem.tag['group'], 16) == group)
        elements = filter(matches_group, elements)

    if element is not None:
        def matches_element(elem):
            return (elem.tag['element'] == element or
                    int(elem.tag['element'], 16) == element)
        elements = filter(matches_element, elements)

    if VR is not None:
        def matches_vr(elem):
            return elem.VR.lower() == VR.lower()
        elements = filter(matches_vr, elements)

    return elements
Searches for data elements in the DICOM file given the filters supplied to this method. :param group: Hex decimal for the group of a DICOM element e.g. 0x002 :param element: Hex decimal for the element value of a DICOM element e.g. 0x0010 :param name: Name of the DICOM element, e.g. "Modality" :param VR: Value Representation of the DICOM element, e.g. "PN"
def get_snippet_by_name(cls, name):
    """Load and return the snippet with the given dotted name.

    ``name`` is in dotted format, e.g. ``topsnippet.something.wantedsnippet``;
    dots are mapped to path separators and the snippet is loaded from
    ``<snippets_dir>/<path>.yaml``.

    :raises exceptions.SnippetNotFoundException: if no matching yaml file
        exists under ``cls.snippets_dirs``.
    """
    # The dotted name maps directly onto the on-disk directory layout.
    name_with_dir_separators = name.replace('.', os.path.sep)
    loaded = yaml_loader.YamlLoader.load_yaml_by_relpath(cls.snippets_dirs,
                                                         name_with_dir_separators + '.yaml')
    if loaded:
        return cls._create_snippet(name, *loaded)
    raise exceptions.SnippetNotFoundException('no such snippet: {name}'.
                                              format(name=name_with_dir_separators))
name is in dotted format, e.g. topsnippet.something.wantedsnippet
def _check_inputs(z, m): """Check inputs are arrays of same length or array and a scalar.""" try: nz = len(z) z = np.array(z) except TypeError: z = np.array([z]) nz = len(z) try: nm = len(m) m = np.array(m) except TypeError: m = np.array([m]) nm = len(m) if (z < 0).any() or (m < 0).any(): raise ValueError('z and m must be positive') if nz != nm and nz > 1 and nm > 1: raise ValueError('z and m arrays must be either equal in length, \ OR of different length with one of length 1.') else: if type(z) != np.ndarray: z = np.array(z) if type(m) != np.ndarray: m = np.array(m) return z, m
Check inputs are arrays of same length or array and a scalar.
def parse(self, data):
    """
    Convert a NetJSON 'NetworkGraph' object to a NetworkX graph object,
    which is then returned.  Additionally checks for protocol version,
    revision and metric, storing them on ``self``.

    :param data: dict conforming to the NetJSON NetworkGraph format
    :raises ParserError: if the ``type`` is wrong or a required key
        (``protocol``, ``version``, ``metric``, ``nodes``, ``links``, or a
        link's ``source``/``target``/``cost``) is missing.
    :return: the populated graph created by ``self._init_graph()``
    """
    graph = self._init_graph()
    # ensure is NetJSON NetworkGraph object
    if 'type' not in data or data['type'] != 'NetworkGraph':
        raise ParserError('Parse error, not a NetworkGraph object')
    # ensure required keys are present
    required_keys = ['protocol', 'version', 'metric', 'nodes', 'links']
    for key in required_keys:
        if key not in data:
            raise ParserError('Parse error, "{0}" key not found'.format(key))
    # store metadata
    self.protocol = data['protocol']
    self.version = data['version']
    self.revision = data.get('revision')  # optional
    self.metric = data['metric']
    # create graph
    for node in data['nodes']:
        # node 'properties' are flattened into keyword attributes
        graph.add_node(node['id'],
                       label=node['label'] if 'label' in node else None,
                       local_addresses=node.get('local_addresses', []),
                       **node.get('properties', {}))
    for link in data['links']:
        try:
            source = link["source"]
            dest = link["target"]
            cost = link["cost"]
        except KeyError as e:
            raise ParserError('Parse error, "%s" key not found' % e)
        properties = link.get('properties', {})
        # link cost becomes the networkx edge weight
        graph.add_edge(source, dest, weight=cost, **properties)
    return graph
Converts a NetJSON 'NetworkGraph' object to a NetworkX Graph object, which is then returned.
Additionally checks for protocol version, revision and metric.
def get_smart_contract_event_by_height(self, height: int, is_full: bool = False) -> List[dict]:
    """
    Fetch the smart contract events attached to the block at the given
    height.

    :param height: a decimal height value.
    :param is_full: if True, return the raw JSON-RPC response dict instead
        of just its result list.
    :return: the information of smart contract event in dictionary form.
    """
    payload = self.generate_json_rpc_payload(RpcMethod.GET_SMART_CONTRACT_EVENT, [height, 1])
    response = self.__post(self.__url, payload)
    if is_full:
        return response
    # A null result from the node is normalised to an empty list.
    return response['result'] or list()
This interface is used to get the corresponding smart contract event based on the height of block. :param height: a decimal height value. :param is_full: :return: the information of smart contract event in dictionary form.
def _kp2(A, B): """Special case Kronecker tensor product of A[i] and B[i] at each time interval i for i = 0 .. N-1 Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0] """ N = A.shape[0] if B.shape[0] != N: raise(ValueError) newshape1 = A.shape[1]*B.shape[1] return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
Special case Kronecker tensor product of A[i] and B[i] at each time interval i for i = 0 .. N-1 Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
def create_storage_account(access_token, subscription_id, rgname, account_name, location,
                           storage_type='Standard_LRS'):
    '''Create a new storage account in the named resource group, with the named location.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        account_name (str): Name of the new storage account.
        location (str): Azure data center location. E.g. westus.
        storage_type (str): Premium or Standard, local or globally redundant.
            Defaults to Standard_LRS.

    Returns:
        HTTP response. JSON body of storage account properties.
    '''
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourcegroups/', rgname,
                        '/providers/Microsoft.Storage/storageAccounts/', account_name,
                        '?api-version=', STORAGE_API])
    storage_body = {
        'location': location,
        'sku': {'name': storage_type},
        'kind': 'Storage',
    }
    return do_put(endpoint, json.dumps(storage_body), access_token)
Create a new storage account in the named resource group, with the named location. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the new storage account. location (str): Azure data center location. E.g. westus. storage_type (str): Premium or Standard, local or globally redundant. Defaults to Standard_LRS. Returns: HTTP response. JSON body of storage account properties.
def add_event(
    request,
    template='swingtime/add_event.html',
    event_form_class=forms.EventForm,
    recurrence_form_class=forms.MultipleOccurrenceForm
):
    '''
    Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.

    Context parameters:

    ``dtstart``
        a datetime.datetime object representing the GET request value if
        present, otherwise None

    ``event_form``
        a form object for updating the event

    ``recurrence_form``
        a form object for adding occurrences
    '''
    dtstart = None
    if request.method == 'POST':
        event_form = event_form_class(request.POST)
        recurrence_form = recurrence_form_class(request.POST)
        # Both forms must validate before anything is saved.
        if event_form.is_valid() and recurrence_form.is_valid():
            event = event_form.save()
            recurrence_form.save(event)
            return http.HttpResponseRedirect(event.get_absolute_url())
    else:
        # GET: pre-populate the recurrence form with the requested start time.
        if 'dtstart' in request.GET:
            try:
                dtstart = parser.parse(request.GET['dtstart'])
            except(TypeError, ValueError) as exc:
                # TODO: A badly formatted date is passed to add_event
                logging.warning(exc)

        dtstart = dtstart or datetime.now()
        event_form = event_form_class()
        recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})

    # An invalid POST falls through to re-render the bound forms with errors.
    return render(
        request,
        template,
        {'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
    )
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s. Context parameters: ``dtstart`` a datetime.datetime object representing the GET request value if present, otherwise None ``event_form`` a form object for updating the event ``recurrence_form`` a form object for adding occurrences
def tai_jd(self, jd):
    """Build a `Time` from a TAI Julian date.

    Supply the International Atomic Time (TAI) as a Julian date:

    >>> t = ts.tai_jd(2456675.56640625)
    >>> t.tai
    2456675.56640625
    >>> t.tai_calendar()
    (2014, 1, 18, 1, 35, 37.5)
    """
    # Accepts a scalar or sequence of Julian dates.
    tai = _to_array(jd)
    # Time stores Terrestrial Time internally; tt_minus_tai is the module's
    # constant TT - TAI offset (presumably 32.184 s expressed in days —
    # confirm against its definition elsewhere in this module).
    t = Time(self, tai + tt_minus_tai)
    # Cache the TAI value so t.tai does not need to be recomputed.
    t.tai = tai
    return t
Build a `Time` from a TAI Julian date. Supply the International Atomic Time (TAI) as a Julian date: >>> t = ts.tai_jd(2456675.56640625) >>> t.tai 2456675.56640625 >>> t.tai_calendar() (2014, 1, 18, 1, 35, 37.5)
def reset_calibrators(self, parameter):
    """
    Reset all calibrators for the specified parameter to their original
    MDB value.

    :param parameter: qualified name of the parameter, interpolated into
        the REST URL.
    """
    req = mdb_pb2.ChangeParameterRequest()
    req.action = mdb_pb2.ChangeParameterRequest.RESET_CALIBRATORS
    # NOTE(review): `calib_info` is never used afterwards.  In Python
    # protobuf, merely *reading* a singular message field does not mark it
    # as set, so this line looks like a no-op — confirm whether an explicit
    # SetInParent() (or nothing at all) was intended.
    calib_info = req.defaultCalibrator
    url = '/mdb/{}/{}/parameters/{}'.format(
        self._instance, self._processor, parameter)
    # NOTE(review): the response is captured but discarded; this method
    # returns None.
    response = self._client.post_proto(url, data=req.SerializeToString())
Reset all calibrators for the specified parameter to their original MDB value.
def download_image(self, device_label, image_id, file_name):
    """ Fetch a smartcam image and stream it to a local file

    Args:
        device_label (str): device label of camera
        image_id (str): image id from image series
        file_name (str): path to file
    """
    try:
        resp = requests.get(
            urls.download_image(self._giid, device_label, image_id),
            headers={'Cookie': 'vid={}'.format(self._vid)},
            stream=True)
    except requests.exceptions.RequestException as ex:
        # Network-level failures are wrapped in the package's own error type.
        raise RequestError(ex)
    _validate_response(resp)
    with open(file_name, 'wb') as out:
        for block in resp.iter_content(chunk_size=1024):
            if block:  # skip keep-alive chunks
                out.write(block)
Download image taken by a smartcam Args: device_label (str): device label of camera image_id (str): image id from image series file_name (str): path to file
def _create(self, title, heads, refresh=None, path_start=None):
    """
    Internal create method, uses yattag to generate a html document with result data.

    :param title: Title of report
    :param heads: Headers for report; generated rows (date, pass rates)
        are added to this dict in place.
    :param refresh: refresh interval in seconds; if set, a
        HTTP-EQUIV="refresh" meta tag with this value is added to the report
    :param path_start: base path that log-file links are made relative to.
    :return: yattag document.
    """
    # TODO: Refactor to make less complex
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    # Augment the caller-supplied header rows with generated metadata.
    heads["Date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    heads["Pass rate"] = self.results.pass_rate()
    heads["Pass rate excluding retries"] = self.results.pass_rate(include_retries=False)
    with tag('html'):
        with tag('head'):
            doc.asis(self.head)
            if refresh:
                doc.asis('<META HTTP-EQUIV="refresh" CONTENT="' + str(refresh) + '">')
        with tag('body', id='body'):
            with tag('h1'):
                text(title)
            # Summary table: header rows plus aggregate counters.
            with tag('table'):
                for head in heads:
                    with tag('tr'):
                        with tag('th', width="100px"):
                            text(head)
                        with tag('td'):
                            text(heads[head])
                with tag('tr'):
                    with tag('th'):
                        text('Executed')
                    with tag('td'):
                        text(str(self.summary["count"]))
                with tag('tr'):
                    with tag('th'):
                        text('Pass:')
                    with tag('td'):
                        text(str(self.summary["pass"]))
                with tag('tr'):
                    with tag('th'):
                        text('Fails:')
                    with tag('td'):
                        text(str(self.summary["fail"]))
                with tag('tr'):
                    with tag('th'):
                        text('inconclusive:')
                    with tag('td'):
                        text(str(self.summary["inconclusive"]))
                with tag('tr'):
                    with tag('th'):
                        text('Skip:')
                    with tag('td'):
                        text(str(self.summary["skip"]))
                with tag('tr'):
                    with tag('th'):
                        text('Duration:')
                    with tag('td'):
                        text(self.duration_to_string(self.summary["duration"]))
                with tag('tr'):
                    with tag('th'):
                        text('{} version:'.format(get_fw_name()))
                    with tag('td'):
                        text(get_fw_version())
            # Per-test-case result table.
            with tag('table', style='border-collapse: collapse;'):
                with tag('tr'):
                    with tag('th'):
                        text("Test Case")
                    with tag('th'):
                        text("Verdict")
                    with tag('th'):
                        text("Fail Reason")
                    with tag('th'):
                        text("Skip Reason")
                    with tag('th'):
                        text("Retried")
                    with tag('th'):
                        text("Duration")
                for result in self.results:
                    # CSS class selects the row colouring defined in self.head.
                    if result.success:
                        klass = 'item_pass'
                    elif result.inconclusive:
                        klass = 'item_inconc'
                    else:
                        klass = 'item_fail'
                    with tag('tr', klass='item %s' % klass, onclick='showhide(this)'):
                        with tag('td', width="200px"):
                            text(result.get_tc_name())
                        with tag('td', width="100px"):
                            if result.success:
                                color = 'green'
                            elif result.failure:
                                color = 'red'
                            else:
                                color = 'black'
                            with tag('font', color=color):
                                text(result.get_verdict())
                        with tag('td', width="350px"):
                            text(hex_escape_str(result.fail_reason))
                        with tag('td', width="300px"):
                            text(result.skip_reason if result.skipped() else "")
                        with tag('td', width="50px"):
                            text("Yes" if result.retries_left != 0 else "No")
                        with tag('td', width="100px"):
                            text(str(result.duration))
                    # Hidden detail row, toggled by the onclick handler above.
                    with tag('tr', klass='info hidden'):
                        with tag('td', colspan="5"):
                            if hasattr(result, 'tc_git_info') and \
                                    result.tc_git_info and \
                                    "scm_link" in result.tc_git_info:
                                # add tc git info only when available
                                link = result.tc_git_info['scm_link']
                                with tag('a', href=link):
                                    text(link)
                                doc.stag('br')
                            for fil in result.logfiles:
                                filepath = os.path.relpath(fil, path_start)
                                with tag('a', href=filepath):
                                    text(filepath)
                                doc.stag('br')
    return doc.getvalue()
Internal create method, uses yattag to generate a html document with result data. :param title: Title of report :param heads: Headers for report :param refresh: If set to True, adds a HTTP-EQUIV="refresh" to the report :param path_start: path to file where this is report is to be stored. :return: yattag document.
def upload_file_and_send_file_offer(self, file_name, user_id, data=None,
                                    input_file_path=None,
                                    content_type='application/octet-stream',
                                    auto_open=False, prevent_share=False,
                                    scope='content/send'):
    """
    Upload a file of any type to store and return a FileId once file offer
    has been sent.  No user authentication required.

    :param file_name: name the file is offered under
    :param user_id: recipient of the file offer
    :param data: raw bytes of the file; used if ``input_file_path`` is not
        given
    :param input_file_path: path of a local file to read and upload
        (takes precedence over ``data``)
    :param content_type: MIME type sent with the upload
    :param auto_open: serialized as the ``autoOpen`` query flag
    :param prevent_share: serialized as the ``preventShare`` query flag
    :param scope: OAuth scope used when requesting the app token
    :raises ValueError: if neither ``data`` nor ``input_file_path`` is
        provided
    """
    if input_file_path:
        with open(input_file_path, 'rb') as f:
            data = f.read()

    if not data:
        raise ValueError('Either the data of a file or the path to a file must be provided')

    # Boolean flags are serialized as lowercase strings for the query string.
    params = {
        'fileName': file_name,
        'userId': user_id,
        'autoOpen': 'true' if auto_open else 'false',
        'preventShare': 'true' if prevent_share else 'false',
    }
    # NOTE(review): `urllib.urlencode` exists only on Python 2; Python 3
    # moved it to `urllib.parse.urlencode` — confirm the supported
    # interpreter version for this module.
    return _post(
        token=self.oauth.get_app_token(scope),
        uri='/user/media/file/send?' + urllib.urlencode(params),
        data=data,
        content_type=content_type
    )
Upload a file of any type to store and return a FileId once file offer has been sent. No user authentication required
def run(self, host, port, **options):
    """Run this server standalone for debugging purposes.

    .. WARNING::

        **Security vulnerability**

        This wraps the app in :class:`DebuggedJsonRpcApplication` with
        ``evalex`` enabled, which exposes an interactive evaluator to
        anyone who can reach the port.  In production, run
        :class:`Server` as a standard WSGI app with
        `uWSGI <https://uwsgi-docs.readthedocs.org/en/latest/>`_ or
        another similar WSGI server.

    .. versionadded:: 0.1.0
    """
    self.registry.debug = True
    debug_app = DebuggedJsonRpcApplication(self, evalex=True)
    run_simple(host, port, debug_app, use_reloader=True, **options)
For debugging purposes, you can run this as a standalone server. .. WARNING:: **Security vulnerability** This uses :class:`DebuggedJsonRpcApplication` to assist debugging. If you want to use this in production, you should run :class:`Server` as a standard WSGI app with `uWSGI <https://uwsgi-docs.readthedocs.org/en/latest/>`_ or another similar WSGI server. .. versionadded:: 0.1.0
def get_aws_s3_handle(config_map):
    """Return a boto3 S3 client and the bucket URL for the configured bucket.

    The client is created once and cached on the module-level AWS_CLIENT
    holder; later calls reuse the cached client.

    Added by cjshaw@mit.edu, Jan 9, 2015
    Added to aws_adapter build by birdland@mit.edu, Jan 25, 2015, and
    added support for Configuration
    May 25, 2017: Switch to boto3
    """
    url = 'https://' + config_map['s3_bucket'] + '.s3.amazonaws.com'

    if AWS_CLIENT.is_aws_s3_client_set():
        client = AWS_CLIENT.s3
    else:
        client = boto3.client(
            's3',
            aws_access_key_id=config_map['put_public_key'],
            aws_secret_access_key=config_map['put_private_key']
        )
        AWS_CLIENT.set_aws_s3_client(client)

    return client, url
Convenience function for getting AWS S3 objects Added by cjshaw@mit.edu, Jan 9, 2015 Added to aws_adapter build by birdland@mit.edu, Jan 25, 2015, and added support for Configuration May 25, 2017: Switch to boto3
def object_formatter(v, c, m, p):
    """Format object view link, or an empty string when unavailable."""
    endpoint = current_app.config['PIDSTORE_OBJECT_ENDPOINTS'].get(
        m.object_type)
    # No known endpoint for this object type, or no object attached.
    if not endpoint or not m.object_uuid:
        return ''
    return Markup('<a href="{0}">{1}</a>'.format(
        url_for(endpoint, id=m.object_uuid), _('View')))
Format object view link.
def _assert_is_color(value): """ Assert that the given value is a valid brightness. :param value: The value to check. """ if not isinstance(value, tuple) or len(value) != 3: raise ValueError("Color must be a RGB tuple.") if not all(0 <= x <= 255 for x in value): raise ValueError("RGB values of color must be between 0 and 255.")
Assert that the given value is a valid brightness. :param value: The value to check.
def plot_cost(scores=np.random.rand(100), thresh=0.5, noise=0):
    """Plot the cost function topology (contours for each of several targets).

    :param scores: 1-D array of scores swept over the 0..1 index grid
    :param thresh: scalar or sequence of target thresholds; anything else
        falls back to ``np.arange(0, 1, .2)``
    :param noise: unused  # NOTE(review): never read in the body — confirm
    """
    # NOTE(review): the default `scores` array is sampled ONCE at import
    # time (function-call default), so repeated no-arg calls reuse the same
    # random data — confirm this is intended.
    c = pd.DataFrame(index=np.arange(0, 1, 0.01))
    if isinstance(thresh, (int, float)):
        thresh = [thresh]
    elif not isinstance(thresh, (pd.Series, np.ndarray, list, tuple)):
        thresh = np.arange(0, 1, .2)
    # `cost_fun` and `spec_from_thresh` are module-level names defined
    # elsewhere in this file; cost_fun is configured via its .fun/.target
    # attributes before each evaluation.
    cost_fun.fun = spec_from_thresh
    for t in thresh:
        # Binarize scores against the current threshold (the 1.00001 factor
        # keeps the maximum score strictly below 1 before truncation).
        labels = (scores / t / scores.max() / 1.00001).astype(int)
        cost_fun.target = t
        c['target=0.{}'.format(int(t * 10))] = np.array(
            [cost_fun(x, labels, scores, verbose=True) for x in c.index])
    c.plot()
    plt.show(block=False)
Plot the cost function topology (contours for each of several targets)