Columns: code (string, 20-4.93k chars), docstring (string, 33-1.27k chars), source (string, 3 classes)
def flow2rgb(flow, color_wheel=None, unknown_thr=1e6):
    assert flow.ndim == 3 and flow.shape[-1] == 2
    if color_wheel is None:
        color_wheel = make_color_wheel()
    assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
    num_bins = color_wheel.shape[0]

    dx = flow[:, :, 0].copy()
    dy = flow[:, :, 1].copy()

    ignore_inds = (np.isnan(dx) | np.isnan(dy) |
                   (np.abs(dx) > unknown_thr) | (np.abs(dy) > unknown_thr))
    dx[ignore_inds] = 0
    dy[ignore_inds] = 0

    rad = np.sqrt(dx ** 2 + dy ** 2)
    if np.any(rad > np.finfo(float).eps):
        max_rad = np.max(rad)
        dx /= max_rad
        dy /= max_rad

    [h, w] = dx.shape
    rad = np.sqrt(dx ** 2 + dy ** 2)
    angle = np.arctan2(-dy, -dx) / np.pi

    bin_real = (angle + 1) / 2 * (num_bins - 1)
    bin_left = np.floor(bin_real).astype(int)
    bin_right = (bin_left + 1) % num_bins
    w = (bin_real - bin_left.astype(np.float32))[..., None]
    flow_img = (1 - w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
    small_ind = rad <= 1
    flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
    flow_img[np.logical_not(small_ind)] *= 0.75

    flow_img[ignore_inds, :] = 0

    return flow_img
Convert flow map to RGB image. Args: flow (ndarray): Array of optical flow. color_wheel (ndarray or None): Color wheel used to map flow field to RGB colorspace. Default color wheel will be used if not specified. unknown_thr (float): Values above this threshold will be marked as unknown and thus ignored. Returns: ndarray: RGB image that can be visualized.
codesearchnet
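A minimal usage sketch for the flow2rgb entry above, assuming the function is in scope with NumPy available; the tiny 3-row color wheel is a made-up stand-in for make_color_wheel():

import numpy as np

# Hypothetical 3-entry color wheel (one RGB row per bin, values in [0, 1]);
# the real make_color_wheel() returns a much finer wheel.
wheel = np.array([[1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0]])

# A 2x2 flow field holding one (dx, dy) pair per pixel.
flow = np.array([[[1.0, 0.0], [0.0, 1.0]],
                 [[-1.0, 0.0], [0.0, -1.0]]])

rgb = flow2rgb(flow, color_wheel=wheel)
print(rgb.shape)  # (2, 2, 3), float values in [0, 1]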
def find(self, selector, **kwargs):
    self.debug_log("Finding element with selector: %s" % selector)

    elements = self.find_all(selector, **kwargs)
    if len(elements):
        self.debug_log("find (%s): Element found" % selector)
        return elements[0]
    else:
        self.debug_log("find (%s): No element found" % selector)
        return None
Find an element with a selector Args: selector (str): the selector used to find the element Kwargs: wait_until_present (bool) wait_until_visible (bool) raise_exception (bool) Returns: None if no element was found proxy_element if an element was found Raises: this function might raise an exception depending on the raise_exception kwargs or the config proxy_driver:raise_exception
codesearchnet
def multiply(x1, x2):
    if any_symbolic_tensors((x1, x2)):
        return Multiply().symbolic_call(x1, x2)
    return backend.numpy.multiply(x1, x2)
Multiply arguments element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise product of `x1` and `x2`.
github-repos
def add_inner_graph_id(self, inner_graph_id):
    assert isinstance(inner_graph_id, str)
    self._inner_graph_ids.append(inner_graph_id)
Add the debugger-generated ID of a graph nested within this graph. Args: inner_graph_id: The debugger-generated ID of the nested inner graph.
github-repos
def get_service(self, uuid):
    if uuid in self.services:
        return self.services[uuid]
    if pp_hex(uuid) in self.services:
        return self.services[pp_hex(uuid)]
    return None
Lookup information about a given GATT service. Args: uuid (str): a string containing the hex-encoded service UUID Returns: None if an error occurs, otherwise a :class:`Service` object.
juraj-google-style
def _call_post_with_user_override(self, sap_user_id, url, payload):
    SAPSuccessFactorsEnterpriseCustomerConfiguration = apps.get_model(
        'sap_success_factors',
        'SAPSuccessFactorsEnterpriseCustomerConfiguration'
    )
    oauth_access_token, _ = SAPSuccessFactorsAPIClient.get_oauth_access_token(
        self.enterprise_configuration.sapsf_base_url,
        self.enterprise_configuration.key,
        self.enterprise_configuration.secret,
        self.enterprise_configuration.sapsf_company_id,
        sap_user_id,
        SAPSuccessFactorsEnterpriseCustomerConfiguration.USER_TYPE_USER
    )
    response = requests.post(
        url,
        data=payload,
        headers={
            'Authorization': 'Bearer {}'.format(oauth_access_token),
            'content-type': 'application/json'
        }
    )
    return response.status_code, response.text
Make a post request with an auth token acquired for a specific user to a SuccessFactors endpoint. Args: sap_user_id (str): The user to use to retrieve an auth token. url (str): The url to post to. payload (str): The json encoded payload to post.
codesearchnet
def pipe(self, format=None, renderer=None, formatter=None):
    if format is None:
        format = self._format
    data = text_type(self.source).encode(self._encoding)
    out = backend.pipe(self._engine, format, data, renderer, formatter)
    return out
Return the source piped through the Graphviz layout command. Args: format: The output format used for rendering (``'pdf'``, ``'png'``, etc.). renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...). formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...). Returns: Binary (encoded) stdout of the layout command. Raises: ValueError: If ``format``, ``renderer``, or ``formatter`` are not known. graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None. graphviz.ExecutableNotFound: If the Graphviz executable is not found. subprocess.CalledProcessError: If the exit status is non-zero.
codesearchnet
def zipf_distribution(nbr_symbols, alpha):
    tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)
    zeta = np.r_[0.0, np.cumsum(tmp)]
    return [x / zeta[-1] for x in zeta]
Helper function: Create a Zipf distribution. Args: nbr_symbols: number of symbols to use in the distribution. alpha: float, Zipf's Law Distribution parameter. Default = 1.5. Typically in the range [1.1-1.6] for modelling natural text. Returns: distr_map: list of float, Zipf's distribution over nbr_symbols.
juraj-google-style
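A short sampling sketch for zipf_distribution above: the returned list is a cumulative distribution of length nbr_symbols + 1 (starting at 0.0 and ending at 1.0), so uniform draws can be bisected into it:

import numpy as np

distr = zipf_distribution(10, 1.5)  # cumulative distribution, length 11
# Draw symbol ids in [0, 9]; the measure-zero u == 0.0 edge is ignored here.
u = np.random.uniform(size=5)
symbols = np.searchsorted(distr, u) - 1
print(symbols)  # e.g. [0 0 2 1 5] -- low ids dominate under Zipf's law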
def _ScanVolume(self, scan_context, scan_node, base_path_specs):
    if not scan_node or not scan_node.path_spec:
        raise errors.ScannerError('Invalid or missing scan node.')

    if scan_context.IsLockedScanNode(scan_node.path_spec):
        self._ScanEncryptedVolume(scan_context, scan_node)

        if scan_context.IsLockedScanNode(scan_node.path_spec):
            return

    if scan_node.IsVolumeSystemRoot():
        self._ScanVolumeSystemRoot(scan_context, scan_node, base_path_specs)
    elif scan_node.IsFileSystem():
        self._ScanFileSystem(scan_node, base_path_specs)
    elif scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
        path_spec = path_spec_factory.Factory.NewPathSpec(
            definitions.TYPE_INDICATOR_TSK, location='/',
            parent=scan_node.path_spec)
        base_path_specs.append(path_spec)
    else:
        for sub_scan_node in scan_node.sub_nodes:
            self._ScanVolume(scan_context, sub_scan_node, base_path_specs)
Scans a volume scan node for volume and file systems. Args: scan_context (SourceScannerContext): source scanner context. scan_node (SourceScanNode): volume scan node. base_path_specs (list[PathSpec]): file system base path specifications. Raises: ScannerError: if the format of or within the source is not supported or the scan node is invalid.
juraj-google-style
def ParsePathItem(item, opts=None):
    if item == os.path.curdir:
        return CurrentComponent()
    if item == os.path.pardir:
        return ParentComponent()

    recursion = PATH_RECURSION_REGEX.search(item)
    if recursion is None:
        return GlobComponent(item, opts)

    start, end = recursion.span()
    if not (start == 0 and end == len(item)):
        raise ValueError('malformed recursive component')

    if recursion.group('max_depth'):
        max_depth = int(recursion.group('max_depth'))
    else:
        max_depth = None

    return RecursiveComponent(max_depth=max_depth, opts=opts)
Parses a string path component to a `PathComponent` instance. Args: item: A path component string to be parsed. opts: A `PathOpts` object. Returns: `PathComponent` instance corresponding to given path fragment. Raises: ValueError: If the path item contains a recursive component fragment but cannot be parsed as such.
codesearchnet
def _CreateFolder(self, parent, name, visible=True, description=None):
    folder = ET.SubElement(parent, 'Folder')
    name_tag = ET.SubElement(folder, 'name')
    name_tag.text = name
    if description is not None:
        desc_tag = ET.SubElement(folder, 'description')
        desc_tag.text = description
    if not visible:
        visibility = ET.SubElement(folder, 'visibility')
        visibility.text = '0'
    return folder
Create a KML Folder element. Args: parent: The parent ElementTree.Element instance. name: The folder name as a string. visible: Whether the folder is initially visible or not. description: A description string or None. Returns: The folder ElementTree.Element instance.
juraj-google-style
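For context, a self-contained sketch of how a folder built with the _CreateFolder logic above slots into a KML document; the element names and the hidden-folder convention (visibility of '0') follow the code, while the surrounding kml/Document scaffolding is standard KML rather than anything taken from the source class:

import xml.etree.ElementTree as ET

kml = ET.Element('kml', xmlns='http://www.opengis.net/kml/2.2')
document = ET.SubElement(kml, 'Document')

# Inline the same logic as _CreateFolder for a hidden folder named 'Routes'.
folder = ET.SubElement(document, 'Folder')
ET.SubElement(folder, 'name').text = 'Routes'
ET.SubElement(folder, 'visibility').text = '0'  # not visible initially

print(ET.tostring(kml).decode())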
def _load_data(self, resource, default=DEFAULT_VALUE_SAFEGUARD, **kwargs):
    default_val = default if default != self.DEFAULT_VALUE_SAFEGUARD else {}
    try:
        return get_edx_api_data(
            api_config=CatalogIntegration.current(),
            resource=resource,
            api=self.client,
            **kwargs
        ) or default_val
    except (SlumberBaseException, ConnectionError, Timeout) as exc:
        LOGGER.exception(
            'Failed to load data from resource [%s] with kwargs [%s] due to: [%s]',
            resource, kwargs, str(exc)
        )
        return default_val
Load data from API client. Arguments: resource(string): type of resource to load default(any): value to return if API query returned empty result. Sensible values: [], {}, None etc. Returns: dict: Deserialized response from Course Catalog API
juraj-google-style
def extend_webfont_settings(webfont_settings):
    if not webfont_settings.get('fontdir_path', False):
        raise IcomoonSettingsError(
            "Webfont settings miss the required key item 'fontdir_path'")

    if not webfont_settings.get('csspart_path', False):
        webfont_settings['csspart_path'] = None

    return webfont_settings
Validate webfont settings and optionally fill in the missing ``csspart_path`` option. Args: webfont_settings (dict): Webfont settings (an item value from ``settings.ICOMOON_WEBFONTS``). Returns: dict: Webfont settings
juraj-google-style
def d_hkl(self, miller_index: Vector3Like) -> float:
    gstar = self.reciprocal_lattice_crystallographic.metric_tensor
    hkl = np.array(miller_index)
    return 1 / (dot(dot(hkl, gstar), hkl.T) ** (1 / 2))
Returns the distance between the hkl plane and the origin Args: miller_index ([h,k,l]): Miller index of plane Returns: d_hkl (float)
codesearchnet
def list_users():
    res = 0
    user_list = []
    dowhile = True
    try:
        while res or dowhile:
            dowhile = False
            (users, _, res) = win32net.NetUserEnum(
                None, 0, win32netcon.FILTER_NORMAL_ACCOUNT, res,
                win32netcon.MAX_PREFERRED_LENGTH)
            for user in users:
                user_list.append(user['name'])
        return user_list
    except win32net.error:
        pass
Return a list of all users on Windows Returns: list: A list of all users on the system CLI Example: .. code-block:: bash salt '*' user.list_users
codesearchnet
def register_controller(self, module, required=True, min_number=1):
    verify_controller_module(module)
    module_ref_name = module.__name__.split('.')[-1]
    if module_ref_name in self._controller_objects:
        raise signals.ControllerError(
            'Controller module %s has already been registered. It cannot be '
            'registered again.' % module_ref_name)
    module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME
    if module_config_name not in self.controller_configs:
        if required:
            raise signals.ControllerError(
                'No corresponding config found for %s' % module_config_name)
        logging.warning(
            'No corresponding config found for optional controller %s',
            module_config_name)
        return None
    try:
        original_config = self.controller_configs[module_config_name]
        controller_config = copy.deepcopy(original_config)
        objects = module.create(controller_config)
    except Exception:
        logging.exception(
            'Failed to initialize objects for controller %s, abort!',
            module_config_name)
        raise
    if not isinstance(objects, list):
        raise signals.ControllerError(
            'Controller module %s did not return a list of objects, abort.'
            % module_ref_name)
    actual_number = len(objects)
    if actual_number < min_number:
        module.destroy(objects)
        raise signals.ControllerError(
            'Expected to get at least %d controller objects, got %d.'
            % (min_number, actual_number))
    self._controller_objects[module_ref_name] = copy.copy(objects)
    logging.debug('Found %d objects for controller %s',
                  len(objects), module_config_name)
    self._controller_modules[module_ref_name] = module
    return objects
Loads a controller module and returns its loaded devices. This is to be used in a mobly test class. Args: module: A module that follows the controller module interface. required: A bool. If True, failing to register the specified controller module raises exceptions. If False, the objects failed to instantiate will be skipped. min_number: An integer that is the minimum number of controller objects to be created. Default is one, since you should not register a controller module without expecting at least one object. Returns: A list of controller objects instantiated from controller_module, or None if no config existed for this controller and it was not a required controller. Raises: ControllerError: * The controller module has already been registered. * The actual number of objects instantiated is less than `min_number`. * `required` is True and no corresponding config can be found. * Any other error occurred in the registration process.
github-repos
def find_all_build_files(dir: str) -> List[Tuple[str, str]]:
    build_file_dirs = []
    for root, _, files in os.walk(dir):
        for file in files:
            if file in BUILD_FILENAMES:
                root = root.strip('./')
                build_file_dirs.append((root, file))
    return build_file_dirs
List all the BUILD files. Returns: The list of (directory, filename) of all BUILD files.
github-repos
def __init__(self, config: PretrainedConfig, generation_config: GenerationConfig,
             device: torch.device, dtype: torch.dtype = torch.float16,
             layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]] = None,
             initial_prompt_shapes: Optional[List[List[int]]] = None) -> None:
    self.num_key_value_heads = (
        config.num_attention_heads
        if getattr(config, 'num_key_value_heads', None) is None
        else config.num_key_value_heads)
    # The "// config.num_attention_heads" divisor was truncated in the source
    # dump; restored from the standard head_dim formula.
    self.head_dim = (config.head_dim if hasattr(config, 'head_dim')
                     else config.hidden_size // config.num_attention_heads)
    self.num_hidden_layers = config.num_hidden_layers

    num_blocks = getattr(generation_config, 'num_blocks', None)
    block_size = getattr(generation_config, 'block_size', None)
    if num_blocks is None or block_size is None:
        logger.info('Calculating optimal block size and number...')
        num_blocks, block_size = compute_optimal_blocks(
            device, config, generation_config, initial_prompt_shapes or [],
            dtype, median_prefill_length=200)
        logger.info(f'Using calculated num_blocks={num_blocks}, block_size={block_size}')

    self.block_size = block_size
    self.num_blocks = num_blocks
    self.cache_shape = (self.num_key_value_heads, num_blocks, self.block_size, self.head_dim)
    self.dtype = dtype
    self.device = device

    self.key_cache: List[torch.Tensor] = []
    self.value_cache: List[torch.Tensor] = []
    for idx in range(config.num_hidden_layers):
        layer_device = layer_device_map[idx] if layer_device_map is not None else device
        new_layer_key_cache = torch.zeros(self.cache_shape, dtype=self.dtype, device=layer_device)
        new_layer_value_cache = torch.zeros(self.cache_shape, dtype=self.dtype, device=layer_device)
        torch._dynamo.mark_static_address(new_layer_key_cache)
        torch._dynamo.mark_static_address(new_layer_value_cache)
        self.key_cache.append(new_layer_key_cache)
        self.value_cache.append(new_layer_value_cache)

    self._free_blocks = deque(range(num_blocks))
    self._block_tables: Dict[str, List[int]] = {}
Initialize a paged attention cache for efficient memory usage. Args: config: Model configuration generation_config: Generation configuration containing cache parameters device: Device for the cache tensors dtype: Data type for the cache tensors layer_device_map: Optional mapping of layer indices to devices initial_prompt_shapes: Optional sample prompts to help calculate optimal cache size
github-repos
def percent_point(self, y, V):
    self.check_fit()
    if self.theta < 0:
        return V
    else:
        a = np.power(y, self.theta / (-1 - self.theta))
        b = np.power(V, self.theta)
        u = np.power((a + b - 1) / b, -1 / self.theta)
        return u
Compute the inverse of conditional cumulative distribution :math:`C(u|v)^{-1}` Args: y: `np.ndarray` value of :math:`C(u|v)`. V: `np.ndarray` given value of v.
juraj-google-style
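For reference, the theta > 0 branch of percent_point above transcribes to the following expression, which matches the standard Clayton-copula inverse conditional CDF (a reading of the code, not a statement from the source docstring):

u = \left(\frac{a + b - 1}{b}\right)^{-1/\theta},
\qquad a = y^{\theta/(-1-\theta)}, \quad b = V^{\theta}

which simplifies to u = \left(y^{-\theta/(1+\theta)} V^{-\theta} + 1 - V^{-\theta}\right)^{-1/\theta}.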
def potentially_ragged_concat(tensors):
    if len(tensors) == 1:
        return tensors[0]
    elif isinstance(tensors[0], tf.SparseTensor):
        return tf.sparse.concat(axis=0, sp_inputs=tensors)
    elif isinstance(tensors[0], tf.RaggedTensor):
        return tf.concat(tensors, axis=0)

    non_batch_shapes = tf.stack([tf.shape(tensor)[1:] for tensor in tensors])
    constant_dims = tf.math.reduce_all(
        non_batch_shapes == non_batch_shapes[:1], axis=0)
    if tf.math.reduce_all(constant_dims).numpy().item():
        if _is_scalar(tensors[0]):
            return tf.stack(tensors, axis=0)
        else:
            return tf.concat(tensors, axis=0)

    constant_inner_dimensions = (
        constant_dims.numpy().tolist()[::-1].index(False))
    if constant_inner_dimensions == 0:
        constant_inner_shape = None
    else:
        constant_inner_shape = tensors[0].shape[-constant_inner_dimensions:]
    return tf.ragged.constant(
        [tensor.numpy() for tensor in tensors],
        inner_shape=constant_inner_shape).merge_dims(0, 1)
Concats `Tensor`s along their first dimension. Args: tensors: List of `Tensor`s. Returns: Concatenation of the inputs along the first dimension -- of type `np.ndarray` if all input shapes are compatible, or `tf.RaggedTensor` if not.
github-repos
def ScanForWindowsVolume(self, source_path):
    windows_path_specs = self.GetBasePathSpecs(source_path)
    if (not windows_path_specs or
            self._source_type == definitions.SOURCE_TYPE_FILE):
        return False

    file_system_path_spec = windows_path_specs[0]
    self._file_system = resolver.Resolver.OpenFileSystem(file_system_path_spec)

    if file_system_path_spec.type_indicator == definitions.TYPE_INDICATOR_OS:
        mount_point = file_system_path_spec
    else:
        mount_point = file_system_path_spec.parent

    self._path_resolver = windows_path_resolver.WindowsPathResolver(
        self._file_system, mount_point)

    if not self._windows_directory:
        self._ScanFileSystemForWindowsDirectory(self._path_resolver)

    if not self._windows_directory:
        return False

    self._path_resolver.SetEnvironmentVariable(
        'SystemRoot', self._windows_directory)
    self._path_resolver.SetEnvironmentVariable(
        'WinDir', self._windows_directory)
    return True
Scans for a Windows volume. Args: source_path (str): source path. Returns: bool: True if a Windows volume was found. Raises: ScannerError: if the source path does not exist, or if the source path is not a file or directory, or if the format of or within the source file is not supported.
juraj-google-style
def match(self, path):
    this = self.segments
    that = path.split('/')
    current_var = None
    bindings = {}
    segment_count = self.segment_count
    j = 0
    for i in range(0, len(this)):
        if j >= len(that):
            break
        if this[i].kind == _TERMINAL:
            if this[i].literal == '*':
                bindings[current_var] = that[j]
                j += 1
            elif this[i].literal == '**':
                until = j + len(that) - segment_count + 1
                segment_count += len(that) - segment_count
                bindings[current_var] = '/'.join(that[j:until])
                j = until
            elif this[i].literal != that[j]:
                raise ValidationException(
                    "mismatched literal: '%s' != '%s'" % (
                        this[i].literal, that[j]))
            else:
                j += 1
        elif this[i].kind == _BINDING:
            current_var = this[i].literal
    if j != len(that) or j != segment_count:
        raise ValidationException(
            'match error: could not render from the path template: {}'
            .format(path))
    return bindings
Matches a fully qualified path template string. Args: path (str): A fully qualified path template string. Returns: dict: Var names to matched binding values. Raises: ValidationException: If path can't be matched to the template.
codesearchnet
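A hedged usage sketch for the match method above, assuming a google-gax style PathTemplate class in which unnamed '*' wildcards are bound to auto-generated variable names such as $0 and $1 (an assumption; the binding scheme is not shown in this entry):

template = PathTemplate('shelves/*/books/*')
print(template.match('shelves/s1/books/b2'))
# -> {'$0': 's1', '$1': 'b2'} under the assumed naming scheme

template.match('shelves/s1')  # raises ValidationException: too few segments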
async def receive(self, timeout: float = None) -> Union[Message, None]:
    if timeout:
        coro = self.queue.get()
        try:
            msg = await asyncio.wait_for(coro, timeout=timeout)
        except asyncio.TimeoutError:
            msg = None
    else:
        try:
            msg = self.queue.get_nowait()
        except asyncio.QueueEmpty:
            msg = None
    return msg
Receives a message for this behaviour. If timeout is not None, returns the received message or None once the timeout expires. Args: timeout (float): number of seconds until return Returns: spade.message.Message: a Message or None
codesearchnet
def _create_regexp_filter(regex):
    compiled_regex = re.compile(regex)

    def filter_fn(value):
        if not isinstance(value, six.string_types):
            raise error.HParamsError(
                'Cannot use a regexp filter for a value of type %s. Value: %s'
                % (type(value), value))
        return re.search(compiled_regex, value) is not None

    return filter_fn
Returns a boolean function that filters strings based on a regular exp. Args: regex: A string describing the regexp to use. Returns: A function taking a string and returns True if any of its substrings matches regex.
codesearchnet
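A small illustration of the filter factory above, assuming the (private) helper is in scope; note that matching is substring-based because it uses re.search:

contains_loss = _create_regexp_filter('loss')
print(contains_loss('train/loss_scale'))  # True -- 'loss' occurs as a substring
print(contains_loss('accuracy'))          # False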
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    success_url = self.URLS['dns'].format(self.domain_id)
    record_ids = self._get_matching_dns_entry_ids(
        identifier, rtype, name, content)
    LOGGER.debug('Record IDs to delete: %s', record_ids)

    success = True
    for rec_id in record_ids:
        delete_response = self.session.get(
            self.URLS['dns_delete_entry'].format(self.domain_id, rec_id))
        self._invalidate_records_cache()
        self._log('Delete DNS entry {}'.format(rec_id), delete_response)
        success = success and delete_response.url == success_url
    return success
Delete one or more DNS entries in the domain zone that match the given criteria. Args: [identifier] (str): An ID to match against DNS entry easyname IDs. [rtype] (str): A DNS rtype (e.g. A, TXT, MX, etc) to match against DNS entry types. [name] (str): A name to match against DNS entry names. [content] (str): A content to match against a DNS entry contents. Returns: bool: True if the record(s) were deleted successfully, False otherwise.
codesearchnet
def DeserializeExclusiveData(self, reader):
    self.Type = TransactionType.ClaimTransaction

    if self.Version != 0:
        raise Exception('Format Exception')

    numrefs = reader.ReadVarInt()
    claims = []
    for i in range(0, numrefs):
        c = CoinReference()
        c.Deserialize(reader)
        claims.append(c)
    self.Claims = claims

    if len(self.Claims) == 0:
        raise Exception('Format Exception')
Deserialize full object. Args: reader (neo.IO.BinaryReader): Raises: Exception: If the transaction type is incorrect or if there are no claims.
juraj-google-style
def items_sort(cls, items):
    class t(tuple):
        """Tuple with element-based sorting"""
        def __cmp__(self, other):
            for a, b in six.moves.zip_longest(self, other):
                if a != b:
                    if a is None:
                        return 1
                    if b is None:
                        return -1
                    return a - b
            return 0

        def __lt__(self, other):
            return self.__cmp__(other) < 0

        def __gt__(self, other):
            return self.__cmp__(other) > 0

        def __le__(self, other):
            return self.__cmp__(other) <= 0

        def __ge__(self, other):
            return self.__cmp__(other) >= 0

        def __eq__(self, other):
            return self.__cmp__(other) == 0

        def __ne__(self, other):
            return self.__cmp__(other) != 0

    def key_func(x):
        if x.indented:
            return t((int(x.parent_item.sort), int(x.sort)))
        return t((int(x.sort),))

    return sorted(items, key=key_func, reverse=True)
Sort list items, taking into account parent items. Args: items (list[gkeepapi.node.ListItem]): Items to sort. Returns: list[gkeepapi.node.ListItem]: Sorted items.
juraj-google-style
async def sync_services(self):
    services = {}
    servs = await self.list_services()
    for i, serv in enumerate(servs):
        info = await self.service_info(serv)
        status = await self.service_status(serv)
        messages = await self.get_messages(serv)
        headline = await self.get_headline(serv)

        services[serv] = states.ServiceState(
            info['short_name'], info['long_name'], info['preregistered'], i)
        services[serv].state = status['numeric_status']

        for message in messages:
            services[serv].post_message(
                message.level, message.message, message.count, message.created)

        if headline is not None:
            services[serv].set_headline(
                headline.level, headline.message, headline.created)

    return services
Poll the current state of all services. Returns: dict: A dictionary mapping service name to service status
codesearchnet
def _build_projection_expression(clean_table_keys):
    projection_expression = ''
    for key in clean_table_keys[:-1]:
        projection_expression += '{},'.format(key)
    projection_expression += clean_table_keys[-1]
    return projection_expression
Given cleaned up keys, this will return a projection expression for the dynamodb lookup. Args: clean_table_keys (list): keys without the data types attached Returns: str: A projection expression for the dynamodb lookup.
codesearchnet
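For illustration, the helper above simply joins its (list-valued) keys with commas:

print(_build_projection_expression(['StackName', 'Status', 'Region']))
# -> 'StackName,Status,Region'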
def blit(self, console: tcod.console.Console, x: float, y: float,
         bg_blend: int, scale_x: float, scale_y: float, angle: float) -> None:
    lib.TCOD_image_blit(self.image_c, _console(console), x, y, bg_blend,
                        scale_x, scale_y, angle)
Blit onto a Console using scaling and rotation. Args: console (Console): Blit destination Console. x (float): Console X position for the center of the Image blit. y (float): Console Y position for the center of the Image blit. The Image blit is centered on this position. bg_blend (int): Background blending mode to use. scale_x (float): Scaling along Image x axis. Set to 1 for no scaling. Must be over 0. scale_y (float): Scaling along Image y axis. Set to 1 for no scaling. Must be over 0. angle (float): Rotation angle in radians. (Clockwise?)
codesearchnet
def _collect_classes(m):
    from f311 import filetypes as ft
    from f311 import explorer as ex

    def _extend(classes, newclasses):
        classes.extend([class_ for class_ in newclasses
                        if class_ not in classes])

    file_classes = [class_ for class_ in a99.get_classes_in_module(m, ft.DataFile)
                    if class_.flag_collect]

    _extend(_classes_txt, [class_ for class_ in file_classes if class_.flag_txt])
    _extend(_classes_bin, [class_ for class_ in file_classes if not class_.flag_txt])
    _extend(_classes_sp, [class_ for class_ in file_classes
                          if issubclass(class_, ft.FileSpectrum)])
    _extend(_classes_file, file_classes)
    _extend(_classes_vis, a99.get_classes_in_module(m, ex.Vis))

    global _classes_file_superclass
    _classes_file_superclass = [cls.__bases__[0] for cls in _classes_file]
Adds entries to _classes_* Args: m: module object that must contain the following sub-modules: datatypes, vis
juraj-google-style
def as_list_data(self):
    element = ElementTree.Element(self.list_type)
    id_ = ElementTree.SubElement(element, 'id')
    id_.text = self.id
    name = ElementTree.SubElement(element, 'name')
    name.text = self.name
    return element
Return an Element to be used in a list. Most lists want an element with tag of list_type, and subelements of id and name. Returns: Element: list representation of object.
codesearchnet
def to_dict(self, rw=False):
    return {k: v for k, v in self.attributes.items()
            if v is not None and (not rw or k in self.rw_attr_keys)}
Returns relevant attributes as a dict. Args: rw (bool): if True, only return the read/write enabled object attributes
juraj-google-style
def ChangePassword(self, password_old, password_new):
    if not self.ValidatePassword(password_old):
        return False

    if isinstance(password_new, str):
        password_new = password_new.encode('utf-8')

    password_key = hashlib.sha256(password_new)
    self.SaveStoredData('PasswordHash', password_key)
    self.SaveStoredData('MasterKey',
                        AES.new(self._master_key, AES.MODE_CBC, self._iv))
    return True
Change the password used to protect the private key. Args: password_old (str): the current password used to encrypt the private key. password_new (str): the new password to be used to encrypt the private key. Returns: bool: whether the password has been changed
codesearchnet
def __init__(self, endpoint_name, sagemaker_session=None):
    super(MXNetPredictor, self).__init__(
        endpoint_name, sagemaker_session, json_serializer, json_deserializer)
Initialize an ``MXNetPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
juraj-google-style
def bbox_clip(bboxes, img_shape):
    assert bboxes.shape[-1] % 4 == 0
    clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype)
    clipped_bboxes[..., 0::2] = np.maximum(
        np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0)
    clipped_bboxes[..., 1::2] = np.maximum(
        np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0)
    return clipped_bboxes
Clip bboxes to fit the image shape. Args: bboxes (ndarray): Shape (..., 4*k) img_shape (tuple): (height, width) of the image. Returns: ndarray: Clipped bboxes.
codesearchnet
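A quick, self-contained check of bbox_clip above (x coordinates are clamped to [0, width - 1] and y coordinates to [0, height - 1]):

import numpy as np

boxes = np.array([[-5.0, 10.0, 250.0, 90.0]])  # (x1, y1, x2, y2)
print(bbox_clip(boxes, (100, 200)))  # img_shape is (height, width)
# -> [[  0.  10. 199.  90.]]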
def get_pb_ids(self) -> List[str]:
    values = DB.get_hash_value(self._key, 'processing_block_ids')
    return ast.literal_eval(values)
Return the list of PB ids associated with the SBI. Returns: list, Processing block ids
codesearchnet
def get_gruneisen_parameter(self, temperature=None, structure=None, quad=None):
    return np.trace(self.get_tgt(temperature, structure, quad)) / 3.0
Gets the single average gruneisen parameter from the TGT. Args: temperature (float): Temperature in kelvin; if not specified, the non-cv-normalized value is returned structure (Structure): Structure to be used in directional heat capacity determination, only necessary if temperature is specified quad (dict): quadrature for integration, should be dictionary with "points" and "weights" keys defaults to quadpy.sphere.Lebedev(19) as read from file
codesearchnet
def get_dependency_graph(self):
    from rez.vendor.pygraph.classes.digraph import digraph

    nodes = {}
    edges = set()
    for variant in self._resolved_packages:
        nodes[variant.name] = variant.qualified_package_name
        for request in variant.get_requires():
            if not request.conflict:
                edges.add((variant.name, request.name))

    g = digraph()
    node_color = '#F6F6F6'  # original hex literal truncated in the source dump; value assumed
    node_fontsize = 10
    attrs = [('fontsize', node_fontsize),
             ('fillcolor', node_color),
             ('style', 'filled')]

    for name, qname in nodes.iteritems():
        g.add_node(name, attrs=attrs + [('label', qname)])
    for edge in edges:
        g.add_edge(edge)

    return g
Generate the dependency graph. The dependency graph is a simpler subset of the resolve graph. It contains package name nodes connected directly to their dependencies. Weak references and conflict requests are not included in the graph. The dependency graph does not show conflicts. Returns: `pygraph.digraph` object.
codesearchnet
def list_container_services(access_token, subscription_id, resource_group):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourcegroups/', resource_group,
                        '/providers/Microsoft.ContainerService/ContainerServices',
                        '?api-version=', ACS_API])
    return do_get(endpoint, access_token)
List the container services in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON model.
codesearchnet
def _update_workflow_definition(pb_config: dict):
    known_workflows = get_workflows()
    workflow_id = pb_config['workflow']['id']
    workflow_version = pb_config['workflow']['version']
    if (workflow_id not in known_workflows or
            workflow_version not in known_workflows[workflow_id]):
        raise RuntimeError('Unknown workflow definition: {}:{}'
                           .format(workflow_id, workflow_version))

    workflow = get_workflow(workflow_id, workflow_version)
    for stage in workflow['stages']:
        stage['status'] = 'none'

    pb_config['workflow_parameters'] = pb_config['workflow']['parameters']
    pb_config['workflow_id'] = pb_config['workflow']['id']
    pb_config['workflow_version'] = pb_config['workflow']['version']
    pb_config['workflow_stages'] = workflow['stages']
    pb_config.pop('workflow', None)
Update the PB configuration workflow definition. Args: pb_config (dict): PB configuration dictionary Raises: RuntimeError, if the workflow definition (id, version) specified in the pb_config is not known.
codesearchnet
def add_connection(self, name, **kwargs):
    name = 'connection:{}'.format(name)
    self.add_section(name)
    for key, value in list(kwargs.items()):
        self.set(name, key, value)
    self.generate_tags()
Adds a connection to the configuration This method will add a connection to the configuration. The connection added is only available for the lifetime of the object and is not persisted. Note: If a call is made to load() or reload(), any connections added with this method must be re-added to the config instance Args: name (str): The name of the connection to add to the config. The name provided will automatically be prepended with the string connection: **kwargs (dict): The set of properties used to provide the node configuration
codesearchnet
def _getargspec(target):
    fullargspecs = getfullargspec(target)
    defaults = fullargspecs.defaults or ()
    if fullargspecs.kwonlydefaults:
        defaults += tuple(fullargspecs.kwonlydefaults.values())
    if not defaults:
        defaults = None
    argspecs = ArgSpec(
        args=fullargspecs.args + fullargspecs.kwonlyargs,
        varargs=fullargspecs.varargs,
        keywords=fullargspecs.varkw,
        defaults=defaults)
    return argspecs
A python3 version of getargspec. Calls `getfullargspec` and assigns args, varargs, varkw, and defaults to a python 2/3 compatible `ArgSpec`. The parameter name 'varkw' is changed to 'keywords' to fit the `ArgSpec` struct. Args: target: the target object to inspect. Returns: An ArgSpec with args, varargs, keywords, and defaults parameters from FullArgSpec.
github-repos
def from_preset(preset):
    if preset == 'vesta_2019':
        cut_offs = loadfn(os.path.join(_directory, 'vesta_cutoffs.yaml'))
        return CutOffDictNN(cut_off_dict=cut_offs)
    else:
        raise ValueError('Unrecognised preset: {}'.format(preset))
Initialise a CutOffDictNN according to a preset set of cut-offs. Args: preset (str): A preset name. The list of supported presets are: - "vesta_2019": The distance cut-offs used by the VESTA visualisation program. Returns: A CutOffDictNN using the preset cut-off dictionary.
juraj-google-style
def gradients(ys, xs, grad_ys=None):
    graph = ys[0].graph
    if not grad_ys:
        grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys]
    downstream = set(xs)
    for op in graph.operations:
        if op.has_gradient:
            if set(op.inputs) & downstream:
                downstream |= set(op.outputs)
    tensor_to_gradient = dict(zip(ys, grad_ys))
    for op in graph.operations[::-1]:
        grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs]
        if (op.has_gradient and any(grad_outputs)
                and (set(op.inputs) & downstream)):
            with tf.variable_scope(op.name + "/gradients"):
                input_grads = op.gradient(grad_outputs)
                for inp, grad in zip(op.inputs, input_grads):
                    if inp in downstream and grad is not None:
                        if inp in tensor_to_gradient:
                            tensor_to_gradient[inp] += grad
                        else:
                            tensor_to_gradient[inp] = grad
    return [tensor_to_gradient.get(x, None) for x in xs]
Compute gradients in dtf. Args: ys: a list of Tensors xs: a list of Tensors grad_ys: an optional list of Tensors Returns: grad_xs: a list of Tensors
juraj-google-style
def _build_vocab(filename, vocab_dir, vocab_name):
    vocab_path = os.path.join(vocab_dir, vocab_name)
    if not tf.gfile.Exists(vocab_path):
        with tf.gfile.GFile(filename, 'r') as f:
            data = f.read().split()
        counter = collections.Counter(data)
        count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
        words, _ = list(zip(*count_pairs))
        encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)
        encoder.store_to_file(vocab_path)
    else:
        encoder = text_encoder.TokenTextEncoder(vocab_path)
    return encoder
Reads a file to build a vocabulary. Args: filename: file to read list of words from. vocab_dir: directory where to save the vocabulary. vocab_name: vocab file name. Returns: text encoder.
codesearchnet
def dump(self, content, filepath, indent=4):
    with open(filepath, 'w') as fp:
        pyaml.dump(content, dst=fp, indent=indent)
Dump settings content to filepath. Args: content (str): Settings content. filepath (str): Settings file location.
juraj-google-style
def __init__(self, ctx):
    if ctx.options.use_fiddle_overlay:
        member_map = {
            'Config': overlay.add_name('Config', BuildableBuilder),
            'Partial': overlay.add_name('Partial', BuildableBuilder),
        }
    else:
        member_map = {}

    ast = ctx.loader.import_name('fiddle')
    super().__init__(ctx, 'fiddle', member_map, ast)
Initializes the FiddleOverlay. This function loads the AST for the fiddle module, which is used to access type information for any members that are not explicitly provided by the overlay. See get_attribute in attribute.py for how it's used. Args: ctx: An instance of context.Context.
github-repos
def _FormatInode(self, event):
    inode = event.inode
    if inode is None:
        if hasattr(event, 'pathspec') and hasattr(event.pathspec, 'image_inode'):
            inode = event.pathspec.image_inode

    if inode is None:
        inode = '-'

    return inode
Formats the inode. Args: event (EventObject): event. Returns: str: inode field.
codesearchnet
def torque_on(self):
    data = []
    data.append(0x0A)
    data.append(self.servoid)
    data.append(RAM_WRITE_REQ)
    data.append(TORQUE_CONTROL_RAM)
    data.append(0x01)
    data.append(0x60)
    send_data(data)
Enable the torques of Herkulex In this mode, position control and velocity control will work. Args: none
juraj-google-style
def _std(self):
    variance = tf.cond(
        self._count > 1,
        lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
        lambda: tf.ones_like(self._var_sum) * float('nan'))
    return tf.sqrt(variance + 0.0001)
Computes the current estimate of the standard deviation. Note that the standard deviation is not defined until at least two samples were seen. Returns: Tensor of the current standard deviation.
codesearchnet
def split_by_proportionally_distribute_labels(self, proportions={}, use_lengths=True):
    identifiers = {}
    for utterance in self.corpus.utterances.values():
        if use_lengths:
            identifiers[utterance.idx] = {
                l: int(d * 100)
                for l, d in utterance.label_total_duration().items()}
        else:
            identifiers[utterance.idx] = utterance.label_count()

    splits = utils.get_identifiers_splitted_by_weights(identifiers, proportions)
    return self._subviews_from_utterance_splits(splits)
Split the corpus into subsets, so the occurrence of the labels is distributed amongst the subsets according to the given proportions. Args: proportions (dict): A dictionary containing the relative size of the target subsets. The key is an identifier for the subset. use_lengths (bool): If True the lengths of the labels are considered for splitting proportionally, otherwise only the number of occurrences is taken into account. Returns: (dict): A dictionary containing the subsets with the identifier from the input as key.
codesearchnet
def getprop(self, prop_name):
    return self.shell(
        ['getprop', prop_name],
        timeout=DEFAULT_GETPROP_TIMEOUT_SEC).decode('utf-8').strip()
Get a property of the device. This is a convenience wrapper for "adb shell getprop xxx". Args: prop_name: A string that is the name of the property to get. Returns: A string that is the value of the property, or None if the property doesn't exist.
juraj-google-style
def _save_states(self, state, serialized_readers_entity):
    mr_id = state.key().id_or_name()
    fresh_state = model.MapreduceState.get_by_job_id(mr_id)
    if not self._check_mr_state(fresh_state, mr_id):
        return False
    if fresh_state.active_shards != 0:
        logging.warning(
            'Mapreduce %s already has active shards. Looks like spurious '
            'task execution.', mr_id)
        return None
    config = util.create_datastore_write_config(state.mapreduce_spec)
    db.put([state, serialized_readers_entity], config=config)
    return True
Run transaction to save state. Args: state: a model.MapreduceState entity. serialized_readers_entity: a model._HugeTaskPayload entity containing json serialized input readers. Returns: False if a fatal error is encountered and this task should be dropped immediately. True if transaction is successful. None if a previous attempt of this same transaction has already succeeded.
codesearchnet
def as_graph(self, depth=0):
    if depth in self._graph_cache:
        return self._graph_cache[depth]
    self._graph_cache[depth] = graph = Graph(self, depth=depth)
    return graph
Create a graph with self as node, cache it, return it. Args: depth (int): depth of the graph. Returns: Graph: an instance of Graph.
codesearchnet
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None,
                            already_has_special_tokens=False):
    if already_has_special_tokens:
        if token_ids_1 is not None:
            raise ValueError(
                'You should not supply a second sequence if the provided '
                'sequence of ids is already formatted with special tokens '
                'for the model.')
        return [1 if x in [self.sep_token_id, self.cls_token_id] else 0
                for x in token_ids_0]

    if token_ids_1 is not None:
        return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
    return [1] + [0] * len(token_ids_0) + [1]
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `encode` method. Args: token_ids_0 (`List[int]`): List of ids of the first sequence. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
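Worked outputs for the mask logic above (illustrative only; `tokenizer` stands in for any instance exposing this method):

# Single sequence of 3 ids -> [CLS] x x x [SEP]:
tokenizer.get_special_tokens_mask([5, 6, 7])
# -> [1, 0, 0, 0, 1]

# Sequence pair (3 and 2 ids) -> [CLS] a a a [SEP] [SEP] b b [SEP]:
tokenizer.get_special_tokens_mask([5, 6, 7], [8, 9])
# -> [1, 0, 0, 0, 1, 1, 0, 0, 1]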
def update_video_status(edx_video_id, status):
    try:
        video = _get_video(edx_video_id)
    except Video.DoesNotExist:
        error_message = (
            u'Video not found when trying to update video status with '
            u'edx_video_id: {0}'.format(edx_video_id))
        raise ValVideoNotFoundError(error_message)

    video.status = status
    video.save()
Update status for an existing video. Args: edx_video_id: ID of the video status: video status Raises: Raises ValVideoNotFoundError if the video cannot be retrieved.
codesearchnet
def VisitNamedType(self, node):
    return pytd.ClassType(node.name)
Converts a named type to a class type, to be filled in later. Args: node: The NamedType. This type only has a name. Returns: A ClassType. This ClassType will (temporarily) only have a name.
github-repos
def quadratic_jacobian_polynomial(nodes):
    jac_parts = _helpers.matrix_product(nodes, _QUADRATIC_JACOBIAN_HELPER)
    jac_at_nodes = np.empty((1, 6), order='F')
    jac_at_nodes[0, 0] = two_by_two_det(jac_parts[:, :2])
    jac_at_nodes[0, 1] = two_by_two_det(jac_parts[:, 2:4])
    jac_at_nodes[0, 2] = two_by_two_det(jac_parts[:, 4:6])
    jac_at_nodes[0, 3] = two_by_two_det(jac_parts[:, 6:8])
    jac_at_nodes[0, 4] = two_by_two_det(jac_parts[:, 8:10])
    jac_at_nodes[0, 5] = two_by_two_det(jac_parts[:, 10:])
    bernstein = _helpers.matrix_product(jac_at_nodes, _QUADRATIC_TO_BERNSTEIN)
    return bernstein
r"""Compute the Jacobian determinant of a quadratic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this. (However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface. Returns: numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis.
codesearchnet
def Match(self, event):
    if not self._matcher:
        return True

    self._decision = self._matcher.Matches(event)
    return self._decision
Determines if an event matches the filter. Args: event (EventObject): an event. Returns: bool: True if the event matches the filter.
juraj-google-style
def day_of_year(self):
    if self._day_of_year is None:
        cumul_days_in_month_nonleap = tf.math.cumsum(
            _DAYS_IN_MONTHS_NON_LEAP, exclusive=True)
        cumul_days_in_month_leap = tf.math.cumsum(
            _DAYS_IN_MONTHS_LEAP, exclusive=True)
        days_before_month_non_leap = tf.gather(
            cumul_days_in_month_nonleap, self.month() - 1)
        days_before_month_leap = tf.gather(
            cumul_days_in_month_leap, self.month() - 1)
        days_before_month = tf.where(
            date_utils.is_leap_year(self.year()),
            days_before_month_leap, days_before_month_non_leap)
        self._day_of_year = days_before_month + self.day()
    return self._day_of_year
Calculates the number of days since the beginning of the year. Returns: Tensor of int32 type with elements in range [1, 366]. January 1st yields "1". #### Example ```python dt = tff.datetime.dates_from_tuples([(2019, 1, 25), (2020, 3, 2)]) dt.day_of_year() # [25, 62] ```
github-repos
def next(self) -> 'ArrayEntry':
    try:
        newval, naft = self.after.pop()
    except IndexError:
        raise NonexistentInstance(self.json_pointer(), 'next of last') from None
    return ArrayEntry(
        self.index + 1, self.before.cons(self.value), naft, newval,
        self.parinst, self.schema_node, self.timestamp)
Return an instance node corresponding to the next entry. Raises: NonexistentInstance: If the receiver is the last entry of the parent array.
codesearchnet
def gene_filter(self, query, mongo_query):
    LOG.debug('Adding panel and genes-related parameters to the query')
    gene_query = []

    if query.get('hgnc_symbols') and query.get('gene_panels'):
        gene_query.append({'hgnc_symbols': {'$in': query['hgnc_symbols']}})
        gene_query.append({'panels': {'$in': query['gene_panels']}})
        mongo_query['$or'] = gene_query
    else:
        if query.get('hgnc_symbols'):
            hgnc_symbols = query['hgnc_symbols']
            mongo_query['hgnc_symbols'] = {'$in': hgnc_symbols}
            LOG.debug('Adding hgnc_symbols: %s to query' % ', '.join(hgnc_symbols))
        if query.get('gene_panels'):
            gene_panels = query['gene_panels']
            mongo_query['panels'] = {'$in': gene_panels}

    return gene_query
Adds gene-related filters to the query object Args: query(dict): a dictionary of query filters specified by the users mongo_query(dict): the query that is going to be submitted to the database Returns: mongo_query(dict): returned object contains gene and panel-related filters
codesearchnet
def Named(self, name):
    self._name = name
    return self
Adds a prefix to the subject, when it is displayed in error messages. This is especially useful in the context of types that have no helpful string representation (e.g., boolean). Writing AssertThat(foo).Named('foo').IsTrue() then results in a more reasonable error. Args: name: string, the name to display along with the actual value. Returns: self
github-repos
def add_cookie_header(self, request, referrer_host=None):
    new_request = convert_http_request(request, referrer_host)
    self._cookie_jar.add_cookie_header(new_request)

    request.fields.clear()
    for name, value in new_request.header_items():
        request.fields.add(name, value)
Wrapped ``add_cookie_header``. Args: request: An instance of :class:`.http.request.Request`. referrer_host (str): An hostname or IP address of the referrer URL.
juraj-google-style
def persist_as_png(structure_dict, filepath):
    graph = _create_graph(structure_dict)
    graph.write(filepath, format='png')
Saves pipeline diagram to disk as png file. Args: structure_dict (dict): dict returned by :func:`~steppy.base.Step.upstream_structure` filepath (str): filepath to which the png with pipeline visualization should be persisted
juraj-google-style
def download(self, url_or_urls):
    with self._downloader.tqdm():
        return _map_promise(self._download, url_or_urls)
Download given url(s). Args: url_or_urls: url or `list`/`dict` of urls to download and extract. Each url can be a `str` or `tfds.download.Resource`. Returns: downloaded_path(s): `str`, The downloaded paths matching the given input url_or_urls.
juraj-google-style
def forward(self, hidden_state):
    num_channels = hidden_state.shape[-1]

    x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
    q, ctx, gates = torch.split(
        x, (num_channels, num_channels, self.focal_level + 1), 1)

    ctx_all = 0
    for level in range(self.focal_level):
        ctx = self.focal_layers[level](ctx)
        ctx_all = ctx_all + ctx * gates[:, level:level + 1]
    ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
    ctx_all = ctx_all + ctx_global * gates[:, self.focal_level:]

    if self.normalize_modulator:
        ctx_all = ctx_all / (self.focal_level + 1)

    modulator = self.projection_context(ctx_all)
    x_out = q * modulator
    x_out = x_out.permute(0, 2, 3, 1).contiguous()
    if self.use_post_layernorm_in_modulation:
        x_out = self.layernorm(x_out)

    x_out = self.projection_out(x_out)
    x_out = self.projection_dropout(x_out)
    return x_out
Args: hidden_state: Input features with shape of (batch_size, height, width, num_channels)
github-repos
class BayesianDetectorConfig(PretrainedConfig):

    def __init__(self, watermarking_depth: Optional[int] = None,
                 base_rate: float = 0.5, **kwargs):
        self.watermarking_depth = watermarking_depth
        self.base_rate = base_rate
        self.model_name = None
        self.watermarking_config = None
        super().__init__(**kwargs)

    def set_detector_information(self, model_name, watermarking_config):
        self.model_name = model_name
        self.watermarking_config = watermarking_config
This is the configuration class to store the configuration of a [`BayesianDetectorModel`]. It is used to instantiate a Bayesian Detector model according to the specified arguments. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: watermarking_depth (`int`, *optional*): The number of tournament layers. base_rate (`float`, *optional*, defaults to 0.5): Prior probability P(w) that a text is watermarked.
github-repos
def predict_proba(self, a, b, **kwargs):
    return self.cds_score(b, a) - self.cds_score(a, b)
Infer causal relationships between 2 variables using the CDS statistic Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 Returns: float: Causation score (Value : 1 if a->b and -1 if b->a)
juraj-google-style
def get_parameters(params=None, path='', grad_only=True):
    global current_scope
    if params is None:
        params = OrderedDict()
    for k, v in iteritems(current_scope):
        if isinstance(v, dict):
            with parameter_scope(k):
                params = get_parameters(
                    params,
                    '/'.join([path, k]) if path else k,
                    grad_only=grad_only)
        else:
            assert isinstance(v, nn.Variable)
            if not grad_only or v.need_grad:
                params['/'.join([path, k]) if path else k] = v
    return params
Get parameter Variables under the current parameter scope. Args: params (dict): Internal use. User doesn't set it manually. path (str): Internal use. User doesn't set it manually. grad_only (bool): Retrieve all parameters under the current scope if False, while only parameters with need_grad=True are retrieved if True. Returns: dict: {:obj:`str` : :obj:`~nnabla.Variable`}
juraj-google-style
def move(self, x, y):
    self._cursor = self._normalizePoint(x, y)
Move the virtual cursor. Args: x (int): x-coordinate to place the cursor. y (int): y-coordinate to place the cursor. .. seealso:: :any:`get_cursor`, :any:`print_str`, :any:`write`
juraj-google-style
def parse_str_to_expression(fiql_str):
    nesting_lvl = 0
    last_element = None
    expression = Expression()
    for preamble, selector, comparison, argument in iter_parse(fiql_str):
        if preamble:
            for char in preamble:
                if char == '(':
                    if isinstance(last_element, BaseExpression):
                        raise FiqlFormatException(
                            '%s can not be followed by %s' % (
                                last_element.__class__, Expression))
                    expression = expression.create_nested_expression()
                    nesting_lvl += 1
                elif char == ')':
                    expression = expression.get_parent()
                    last_element = expression
                    nesting_lvl -= 1
                else:
                    if not expression.has_constraint():
                        raise FiqlFormatException(
                            '%s proceeding initial %s' % (Operator, Constraint))
                    if isinstance(last_element, Operator):
                        raise FiqlFormatException(
                            '%s can not be followed by %s' % (Operator, Operator))
                    last_element = Operator(char)
                    expression = expression.add_operator(last_element)
        if selector:
            if isinstance(last_element, BaseExpression):
                raise FiqlFormatException(
                    '%s can not be followed by %s' % (
                        last_element.__class__, Constraint))
            last_element = Constraint(selector, comparison, argument)
            expression.add_element(last_element)
    if nesting_lvl != 0:
        raise FiqlFormatException(
            'At least one nested expression was not correctly closed')
    if not expression.has_constraint():
        raise FiqlFormatException(
            "Parsed string '%s' contained no constraint" % fiql_str)
    return expression
Parse a FIQL formatted string into an ``Expression``. Args: fiql_str (string): The FIQL formatted string we want to parse. Returns: Expression: An ``Expression`` object representing the parsed FIQL string. Raises: FiqlFormatException: Unable to parse string due to incorrect formatting. Example: >>> expression = parse_str_to_expression( ... "name==bar,dob=gt=1990-01-01")
codesearchnet
def __init__(self, **kwargs):
    if kwargs:
        self.attributes = {}
        self.attributes.update(**kwargs)
    else:
        self.attributes = dict.fromkeys(self.__class__.disp_attr_keys)
Initializes class attributes Args: **kwargs: keyword arguments (may be an unpacked dict) used to populate the attributes
juraj-google-style
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor,
            position_embeddings: Optional[torch.Tensor] = None,
            reference_points=None, spatial_shapes=None,
            level_start_index=None, output_attentions: bool = False):
    residual = hidden_states
    hidden_states, attn_weights = self.self_attn(
        hidden_states=hidden_states,
        attention_mask=attention_mask,
        encoder_hidden_states=hidden_states,
        encoder_attention_mask=attention_mask,
        position_embeddings=position_embeddings,
        reference_points=reference_points,
        spatial_shapes=spatial_shapes,
        level_start_index=level_start_index,
        output_attentions=output_attentions)
    hidden_states = nn.functional.dropout(
        hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)

    residual = hidden_states
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = nn.functional.dropout(
        hidden_states, p=self.activation_dropout, training=self.training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = nn.functional.dropout(
        hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.final_layer_norm(hidden_states)

    if self.training:
        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(
                hidden_states, min=-clamp_value, max=clamp_value)

    outputs = (hidden_states,)
    if output_attentions:
        outputs += (attn_weights,)
    return outputs
Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. level_start_index (`torch.LongTensor`, *optional*): Level start index. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def insert(self, entity_id, property_uri, value):
    if not entity_id.startswith("http"):
        entity_uri = urllib.parse.urljoin(self.base_url, entity_id)
    else:
        entity_uri = entity_id
    if entity_uri.endswith("/"):
        entity_uri = entity_uri[:-1]
    if not entity_id.endswith("fcr:metadata"):
        entity_uri = "/".join([entity_uri, "fcr:metadata"])
    if not self.exists(entity_id):
        self.create(entity_id)
    # The SPARQL template literal was lost in the source dump; Template()
    # originally received an INSERT DATA template string here.
    sparql_template = Template()
    sparql = sparql_template.substitute(
        prefix=build_prefixes(self.namespaces),
        entity=entity_uri,
        prop_uri=property_uri,
        value_str=self.__value_format__(value))
    update_request = urllib.request.Request(
        entity_uri,
        data=sparql.encode(),
        method='PATCH',
        headers={'Content-Type': 'application/sparql-update'})
    try:
        response = urllib.request.urlopen(update_request)
    except urllib.error.HTTPError:
        print("Error trying patch {}, sparql=\n{}".format(entity_uri, sparql))
        return False
    if response.code < 400:
        return True
    return False
Method inserts a new entity's property in Fedora4 Repository Args: entity_id(string): Unique ID of Fedora object property_uri(string): URI of property value: Value of the property, can be literal or URI reference Returns: boolean: True if successful changed in Fedora, False otherwise
juraj-google-style
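A usage sketch for the insert() method above. The `Repository` class name, its constructor, and the entity path are illustrative assumptions, not taken from the source:

# Hypothetical setup -- `Repository` stands in for whatever class hosts insert().
repo = Repository(base_url='http://localhost:8080/rest/')
ok = repo.insert(
    'catalog/item-42',                 # entity_id, resolved against base_url
    'http://purl.org/dc/terms/title',  # property_uri
    'An example title')                # literal value
print('insert succeeded:', ok)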
def _recurse(self, matrix, m_list, indices, output_m_list=[]): if self._finished: return while m_list[-1][1] == 0: m_list = copy(m_list) m_list.pop() if not m_list: matrix_sum = np.sum(matrix) if matrix_sum < self._current_minimum: self.add_m_list(matrix_sum, output_m_list) return if m_list[-1][1] > len(indices.intersection(m_list[-1][2])): return if len(m_list) == 1 or m_list[-1][1] > 1: if self.best_case(matrix, m_list, indices) > self._current_minimum: return index = self.get_next_index(matrix, m_list[-1], indices) m_list[-1][2].remove(index) matrix2 = np.copy(matrix) m_list2 = deepcopy(m_list) output_m_list2 = copy(output_m_list) matrix2[index, :] *= m_list[-1][0] matrix2[:, index] *= m_list[-1][0] output_m_list2.append([index, m_list[-1][3]]) indices2 = copy(indices) indices2.remove(index) m_list2[-1][1] -= 1 self._recurse(matrix2, m_list2, indices2, output_m_list2) self._recurse(matrix, m_list, indices, output_m_list)
This method recursively finds the minimal permutations using a binary tree search strategy. Args: matrix: The current matrix (with some permutations already performed). m_list: The list of permutations still to be performed. indices: Set of indices which haven't had a permutation performed on them.
juraj-google-style
def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"):
    if kernel_size == 1:
        return dense(inputs, filters, name=name, use_bias=True)
    if padding == "SAME":
        assert kernel_size % 2 == 1
        first_offset = -((kernel_size - 1) // 2)
    else:
        assert padding == "LEFT"
        first_offset = -(kernel_size - 1)
    last_offset = first_offset + kernel_size - 1
    results = []
    padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]])
    for i in range(kernel_size):
        shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs
        shifted.set_shape(inputs.get_shape())
        results.append(
            dense(shifted, filters, use_bias=(i == 0), name=name + "_%d" % i))
    ret = tf.add_n(results)
    ret *= kernel_size**-0.5
    return ret
Version of conv1d that works on TPU (as of 11/2017). Args: inputs: a Tensor with shape [batch, length, input_depth]. filters: an integer. kernel_size: an integer. padding: a string - "SAME" or "LEFT". name: a string. Returns: a Tensor with shape [batch, length, filters].
juraj-google-style
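A shape-level sketch of calling the function above in TF1 graph mode, assuming the module-level dense() it uses is in scope:

import tensorflow as tf

inputs = tf.random_normal([8, 16, 32])  # [batch, length, input_depth]
out = tpu_conv1d(inputs, filters=64, kernel_size=3, padding="SAME")
print(out.shape)  # (8, 16, 64) -- length preserved by SAME padding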
def report_proto_path(self, trace_dir, summary_tag_name):
    filename = _TT_REPORT_PROTO + '.' + summary_tag_name.replace('/', '_')
    return os.path.join(trace_dir, filename)
Returns the path where report proto should be written. Args: trace_dir: String denoting the trace directory. summary_tag_name: Name of the unique tag that relates to the report. Returns: A string denoting the path to the report proto.
github-repos
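Illustrative call; `tracer` is a stand-in for the object exposing this method, and the filename prefix comes from the module constant _TT_REPORT_PROTO:

path = tracer.report_proto_path('/tmp/traces', 'train/step_0')
# -> '/tmp/traces/<_TT_REPORT_PROTO>.train_step_0'  (slashes become underscores)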
def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):
    total_cm = metric_variable([num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')
    predictions = math_ops.cast(predictions, dtypes.int64)
    labels = math_ops.cast(labels, dtypes.int64)
    num_classes = math_ops.cast(num_classes, dtypes.int64)
    if predictions.get_shape().ndims > 1:
        predictions = array_ops.reshape(predictions, [-1])
    if labels.get_shape().ndims > 1:
        labels = array_ops.reshape(labels, [-1])
    if weights is not None and weights.get_shape().ndims > 1:
        weights = array_ops.reshape(weights, [-1])
    current_cm = confusion_matrix.confusion_matrix(labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)
    update_op = state_ops.assign_add(total_cm, current_cm)
    return (total_cm, update_op)
Calculates a streaming confusion matrix. For estimation over a stream of data, the function creates an `update_op` operation. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since a confusion matrix of dimension = [num_classes, num_classes] will be allocated. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Returns: total_cm: A `Tensor` representing the confusion matrix. update_op: An operation that increments the confusion matrix.
github-repos
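A minimal TF1 sketch of the accumulate-then-read pattern; the metric variable is created in the local-variables collection, so it needs local_variables_initializer:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.placeholder(tf.int64, [None])
predictions = tf.placeholder(tf.int64, [None])
total_cm, update_op = _streaming_confusion_matrix(labels, predictions, num_classes=3)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, {labels: [0, 1, 2], predictions: [0, 2, 2]})
    print(sess.run(total_cm))  # accumulated 3x3 matrix: rows=labels, cols=predictions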
def __init__(self, json, Api): self.json = json self.Api = Api self.type = json["type"] self.content = json["content"] self.timestamp = json["origin_server_ts"] self.id = json["room_id"] if "sender" in json: self.mxid = json["sender"] else: self.mxid = json["user_id"]
Instantiates Event instance. Args: json(dict): Event json from homeserver. Api(func): Creates api for calling homeserver.
juraj-google-style
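Construction sketch with a hand-made event payload; `api` is a placeholder for the homeserver API factory the class expects:

sample = {
    'type': 'm.room.message',
    'content': {'msgtype': 'm.text', 'body': 'hello'},
    'origin_server_ts': 1490000000000,
    'room_id': '!abc123:example.org',
    'sender': '@alice:example.org',
}
event = Event(sample, api)
print(event.mxid, event.content['body'])  # @alice:example.org hello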
def run(self):
    target = getattr(self, '_Thread__target', getattr(self, '_target', None))
    args = getattr(self, '_Thread__args', getattr(self, '_args', None))
    kwargs = getattr(self, '_Thread__kwargs', getattr(self, '_kwargs', None))
    if target is not None:
        self._return = target(*args, **kwargs)
    return None
Runs the thread. Args: self (ThreadReturn): the ``ThreadReturn`` instance Returns: ``None``
codesearchnet
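A sketch of the class this run() presumably belongs to: a Thread subclass whose join() hands back the captured return value. The join() shown here is an assumption, and the example assumes run() above is defined at module scope:

import threading

class ThreadReturn(threading.Thread):
    run = run  # bind the function defined above as the run() method

    def join(self, *args):
        threading.Thread.join(self, *args)
        return getattr(self, '_return', None)

t = ThreadReturn(target=lambda x: x * 2, args=(21,))
t.start()
print(t.join())  # 42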
def CreateClass(cls, data_type_definition):
    cls._ValidateDataTypeDefinition(data_type_definition)
    class_definition = cls._CreateClassTemplate(data_type_definition)
    namespace = {
        '__builtins__': {'object': builtins.object, 'super': builtins.super},
        '__name__': '{0:s}'.format(data_type_definition.name)}
    if sys.version_info[0] >= 3:
        namespace['__builtins__']['__build_class__'] = builtins.__build_class__
    exec(class_definition, namespace)
    return namespace[data_type_definition.name]
Creates a new structure values class. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: class: structure values class.
codesearchnet
def declaration_path(decl): if not decl: return [] if not decl.cache.declaration_path: result = [decl.name] parent = decl.parent while parent: if parent.cache.declaration_path: result.reverse() decl.cache.declaration_path = parent.cache.declaration_path + \ result return decl.cache.declaration_path else: result.append(parent.name) parent = parent.parent result.reverse() decl.cache.declaration_path = result return result return decl.cache.declaration_path
Returns a list of parent declaration names. Args: decl (declaration_t): declaration for which the declaration path should be calculated. Returns: list[(str | basestring)]: list of names, where the first item is the top parent name and the last item is the input declaration's name.
juraj-google-style
def _collect_unused(self, start: GridQubit, used: Set[GridQubit]) -> Set[GridQubit]:
    def collect(n: GridQubit, visited: Set[GridQubit]):
        visited.add(n)
        for m in self._c_adj[n]:
            if m not in used and m not in visited:
                collect(m, visited)
    visited = set()
    collect(start, visited)
    return visited
Lists all the qubits that are reachable from the given qubit. Args: start: The first qubit for which connectivity should be calculated. It might be a member of the used set. used: Already used qubits, which cannot be used during the collection. Returns: Set of qubits that are reachable from the starting qubit without traversing any of the used qubits.
juraj-google-style
def scaled_dot_product_attention_simple(q, k, v, bias, name=None): with tf.variable_scope( name, default_name="scaled_dot_product_attention_simple"): scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2])) logits = tf.matmul(q * scalar, k, transpose_b=True) if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") if common_layers.should_generate_summaries(): tf.summary.image( "attention", tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1) return tf.matmul(weights, v)
Scaled dot-product attention. One head. One spatial dimension. Args: q: a Tensor with shape [batch, length_q, depth_k] k: a Tensor with shape [batch, length_kv, depth_k] v: a Tensor with shape [batch, length_kv, depth_v] bias: optional Tensor broadcastable to [batch, length_q, length_kv] name: an optional string Returns: A Tensor.
juraj-google-style
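Shape sketch in TF1 graph mode, following the dimensions in the docstring:

import tensorflow as tf

q = tf.random_normal([2, 5, 16])   # [batch, length_q, depth_k]
k = tf.random_normal([2, 7, 16])   # [batch, length_kv, depth_k]
v = tf.random_normal([2, 7, 32])   # [batch, length_kv, depth_v]
out = scaled_dot_product_attention_simple(q, k, v, bias=None)
print(out.shape)  # (2, 5, 32)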
def _validated_config_filename(self, name):
    dir_name = self._make_config_dir()
    filename = os.path.join(dir_name, name.split('.json')[0] + '.json')
    return filename
Make the config dir and return the full file path including extension. Args: name (str): Filename without dir or extension Returns: str: Full path including extension
codesearchnet
def select_by_value(self, value):
    self._selected_key = None
    self._selected_item = None
    for k in self.children:
        item = self.children[k]
        item.attributes['selected'] = False
        if value == item.get_value():
            self._selected_key = k
            self._selected_item = item
            self._selected_item.attributes['selected'] = True
Selects an item by the text content of the child. Args: value (str): Text content of the item that has to be selected.
codesearchnet
def market_close(self, session, mins) -> Session:
    if session not in self.exch:
        return SessNA
    end_time = self.exch[session][-1]
    return Session(shift_time(end_time, -int(mins) + 1), end_time)
Time intervals for market close. Args: session: [allday, day, am, pm, night] mins: minutes before close Returns: Session of start_time and end_time
codesearchnet
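Hypothetical call; `exchange` is an assumed instance whose exch mapping holds (start, end) times per session name, and the Session field names follow the docstring:

sess = exchange.market_close('day', 30)
if sess is not SessNA:
    print(sess.start_time, sess.end_time)  # the final 30 minutes of the day session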
def HandleGetBlocksMessageReceived(self, payload): if not self.leader.ServiceEnabled: return inventory = IOHelper.AsSerializableWithType(payload, 'neo.Network.Payloads.GetBlocksPayload.GetBlocksPayload') if not inventory: return blockchain = BC.Default() hash = inventory.HashStart[0] if not blockchain.GetHeader(hash): return hashes = [] hcount = 0 while hash != inventory.HashStop and hcount < 500: hash = blockchain.GetNextBlockHash(hash) if hash is None: break hashes.append(hash) hcount += 1 if hcount > 0: self.SendSerializedMessage(Message('inv', InvPayload(type=InventoryType.Block, hashes=hashes)))
Process a GetBlocksPayload payload. Args: payload (neo.Network.Payloads.GetBlocksPayload): the serialized payload naming the hash range of blocks being requested.
juraj-google-style
def setup_formatters(self, *args): formatters = [] col_offset = 0 if self.rownum: formatters.append(fmt.RowNumberFormatter.setup(0)) col_offset += 1 if self.timestamp: formatters.append(fmt.DatetimeFormatter.setup( datetime.datetime.now(), fmt='{:%Y-%m-%d %H:%M:%S.%f}'.format, col_width=26)) col_offset += 1 if self.time_diff: formatters.append(fmt.TimeDeltaFormatter.setup(0)) col_offset += 1 for coli, value in enumerate(args): fmt_class = type2fmt.get(type(value), fmt.GenericFormatter) kwargs = {} if self.default_colwidth is not None: kwargs['col_width'] = self.default_colwidth if coli in self.column_widths: kwargs['col_width'] = self.column_widths[coli] elif self.columns and self.columns[coli + col_offset] in self.column_widths: kwargs['col_width'] = self.column_widths[self.columns[coli + col_offset]] if fmt_class == fmt.FloatFormatter and self.float_format is not None: kwargs['fmt'] = self.float_format if coli in self.column_formatters: kwargs['fmt'] = self.column_formatters[coli] elif self.columns and self.columns[coli + col_offset] in self.column_formatters: kwargs['fmt'] = self.column_formatters[self.columns[coli + col_offset]] formatter = fmt_class.setup(value, **kwargs) formatters.append(formatter) self.formatters = formatters
Set up formatters by observing the first row. Args: *args: row cells
juraj-google-style
def _is_sequence_right_padded(mask):
    max_seq_length = tf.shape(mask)[1]
    count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)
    right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length)
    return tf.reduce_all(tf.equal(
        tf.cast(mask, dtype='bool'), tf.cast(right_padded_mask, dtype='bool')))
Check the mask tensor and see if it is right padded. cuDNN uses the sequence length param to skip the trailing timesteps. If the data is left padded, or not strictly right padded (has masked values in the middle of the sequence), then cuDNN won't work properly in those cases. Left padded data: [[False, False, True, True, True]]. Right padded data: [[True, True, True, False, False]]. Mixture of mask/unmasked data: [[True, False, True, False, False]]. Note that for the mixed data example above, the actual data the RNN should see are those 2 Trues (index 0 and 2); the index 1 False should be ignored and not pollute the internal states. Args: mask: the Boolean tensor with shape [batch, timestep] Returns: boolean scalar tensor, whether the mask is strictly right padded.
github-repos
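Eager-mode check with the three mask patterns from the docstring:

import tensorflow as tf

print(_is_sequence_right_padded(tf.constant([[True, True, True, False, False]])))   # True
print(_is_sequence_right_padded(tf.constant([[False, False, True, True, True]])))   # False
print(_is_sequence_right_padded(tf.constant([[True, False, True, False, False]])))  # False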
def transform(self, tables, table_metas=None, missing=None):
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing
    warnings.warn(DEPRECATION_MESSAGE.format('transform'), DeprecationWarning)
    transformed = {}
    for table_name in tables:
        table = tables[table_name]
        if table_metas is None:
            table_meta = self.table_dict[table_name][1]
        else:
            table_meta = table_metas[table_name]
        transformed[table_name] = self.transform_table(table, table_meta)
    return transformed
Apply all the saved transformers to `tables`. Args: tables(dict): mapping of table names to `tuple` where each tuple is of the form (`pandas.DataFrame`, `dict`). The `DataFrame` contains the table data and the `dict` the corresponding meta information. If not specified, the tables will be retrieved using the meta_file. table_metas(dict): Full metadata file for the dataset. missing(bool): Whether or not to use NullTransformer to handle missing values. Returns: dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).
codesearchnet
def file_digest(source):
    hash_sha256 = hashlib.sha256()
    should_close = False
    if isinstance(source, six.string_types):
        should_close = True
        source = open(source, 'rb')
    for chunk in iter(lambda: source.read(_BUFFER_SIZE), b''):
        hash_sha256.update(chunk)
    if should_close:
        source.close()
    return hash_sha256.hexdigest()
Calculates SHA256 digest of a file. Args: source: either a file-like object or a path to a file Returns: str: hex-encoded SHA256 digest
juraj-google-style
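Both call forms, using an in-memory buffer for the file-like case:

import io

print(file_digest(io.BytesIO(b'hello')))
# 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
print(file_digest('/etc/hostname'))  # same digest as `sha256sum /etc/hostname`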
def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1): def get_gpu_info(): gpu_info = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu"]).decode() gpu_info = gpu_info.split('\n') gpu_info_array = [] for line in gpu_info: if len(line) > 0: gpu_id, total_memory, free_memory, used_memory, gpu_util = line.split(',') gpu_memory_util = float(used_memory) / float(total_memory) gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id)) return(gpu_info_array) num_times_to_average = 5 current_array = [] for ind in range(num_times_to_average): current_array.append(get_gpu_info()) time.sleep(1) num_gpus = len(current_array[0]) avg_array = [(0, 0, str(x)) for x in range(num_gpus)] for ind in range(num_times_to_average): for gpu_ind in range(num_gpus): avg_array[gpu_ind] = (avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0], avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1], avg_array[gpu_ind][2]) for gpu_ind in range(num_gpus): avg_array[gpu_ind] = (float(avg_array[gpu_ind][0]) / num_times_to_average, float(avg_array[gpu_ind][1]) / num_times_to_average, avg_array[gpu_ind][2]) avg_array.sort() gpus_found = 0 gpus_to_use = "" free_memory = 1.0 for current_gpu in avg_array: if current_gpu[0] < max_gpu_utilization and (1 - current_gpu[1]) > min_free_memory: if gpus_found == 0: gpus_to_use = current_gpu[2] free_memory = 1 - current_gpu[1] else: gpus_to_use = gpus_to_use + "," + current_gpu[2] free_memory = min(free_memory, 1 - current_gpu[1]) gpus_found = gpus_found + 1 if gpus_found == num_gpu: break return gpus_to_use, free_memory
Get available GPUs according to utilization thresholds. Args: :max_gpu_utilization: percent utilization threshold to consider a GPU "free" :min_free_memory: percent free memory to consider a GPU "free" :num_gpu: number of requested GPUs Returns: A tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids, and minimum_free_memory is the lowest amount of free memory available on the available_gpus.
juraj-google-style
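Usage sketch; requires nvidia-smi on PATH and blocks for about five seconds while it averages five one-second utilization samples:

import os

gpus, free_mem = _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=2)
if gpus:
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus  # e.g. "0,3"
    print('pinned GPUs %s (min free memory %.0f%%)' % (gpus, free_mem * 100))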
def buid(valu=None):
    if valu is None:
        return os.urandom(32)
    byts = s_msgpack.en(valu)
    return hashlib.sha256(byts).digest()
A binary GUID like sequence of 32 bytes. Args: valu (object): Optional, if provided, the hash of the msgpack encoded form of the object is returned. This can be used to create stable buids. Notes: By default, this returns a random 32 byte value. Returns: bytes: A 32 byte value.
juraj-google-style
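The two modes side by side: random without an argument, deterministic with one:

assert buid() != buid()                      # fresh 32 random bytes each call
assert buid(('foo', 1)) == buid(('foo', 1))  # stable: sha256 of the msgpack bytes
assert len(buid(('foo', 1))) == 32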
def daylight_saving_start_day(self, value=None):
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str for field `daylight_saving_start_day`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma for field `daylight_saving_start_day`')
    self._daylight_saving_start_day = value
Corresponds to IDD Field `daylight_saving_start_day` Args: value (str): value for IDD Field `daylight_saving_start_day` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def get_images_by_catid(self, catid):
    self.logger.debug('Retrieving IDAHO metadata')
    footprint = self.catalog.get_strip_footprint_wkt(catid)
    try:
        footprint = from_wkt(footprint).geoms[0].wkt
    except Exception:
        pass
    if not footprint:
        # Log message reconstructed; the original string literal was lost.
        self.logger.debug('Footprint not found for catid %s' % catid)
        return None
    return self.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=footprint)
Retrieves the IDAHO image records associated with a given catid. Args: catid (str): The source catalog ID from the platform catalog. Returns: results (json): The full catalog-search response for IDAHO images within the catID.
juraj-google-style
def _indent(lines, prefix="  "):
    indented = []
    for line in lines.split("\n"):
        indented.append(prefix + line)
    return "\n".join(indented)
Indent some text. Note that this is present as ``textwrap.indent``, but not in Python 2. Args: lines (str): The newline delimited string to be indented. prefix (Optional[str]): The prefix to indent each line with. Default to two spaces. Returns: str: The newly indented content.
juraj-google-style
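A quick demonstration; every line, including blank ones, receives the prefix:

print(_indent('first\nsecond'))
#   first
#   second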