code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def name(self, name):
    """Update the security label's name.

    Updates both the locally cached data and the remote record via the
    ThreatConnect requests handler.

    Args:
        name: The new name to assign to the security label.

    Returns:
        The response from the update request.
    """
    self._data['name'] = name
    payload = self._base_request
    payload['name'] = name
    return self._tc_requests.update(payload, owner=self.owner)
Updates the security label's name. Args: name: The new name to assign to the security label.
codesearchnet
def __init__(self, mtf_graph, mesh_shape):
    """Initializer.

    Args:
        mtf_graph: an mtf.Graph, representing the Mesh TensorFlow
            computation of interest.
        mesh_shape: an mtf.Shape, representing the mesh of interest.
    """
    # Names of the MTF dimensions that are eligible for splitting across
    # the mesh, derived from the graph.
    self._splittable_mtf_dimension_names = self._initialize_splittable_dimensions(
        mtf_graph)
    # Per dimension name, the GCD of that dimension's sizes across the graph.
    self._mtf_dimension_name_to_size_gcd = (
        self._initialize_mtf_dimension_name_to_size_gcd(mtf_graph))
    # Size of each mesh dimension, keyed by mesh dimension name.
    self._mesh_dimension_name_to_size = self._initialize_mesh_dimension_name_to_size(
        mesh_shape)
Initializer. Args: mtf_graph: an mtf.Graph, representing the Mesh TensorFlow computation of interest. mesh_shape: an mtf.Shape, representing the mesh of interest.
juraj-google-style
def RetrievePluginAsset(self, run, plugin_name, asset_name):
    """Return the contents for a specific plugin asset from a run.

    Args:
        run: The string name of the run.
        plugin_name: The string name of a plugin.
        asset_name: The string name of an asset.

    Returns:
        The string contents of the plugin asset.

    Raises:
        KeyError: If the asset is not available.
    """
    return self.GetAccumulator(run).RetrievePluginAsset(plugin_name, asset_name)
Return the contents for a specific plugin asset from a run. Args: run: The string name of the run. plugin_name: The string name of a plugin. asset_name: The string name of an asset. Returns: The string contents of the plugin asset. Raises: KeyError: If the asset is not available.
codesearchnet
def ScanForFileSystem(self, source_path_spec):
    """Scans the path specification for a supported file system format.

    Args:
        source_path_spec (PathSpec): source path specification.

    Returns:
        PathSpec: file system path specification or None if no supported
            file system type was found.

    Raises:
        BackEndError: if the source cannot be scanned or more than one
            file system type is found.
    """
    # An APFS container always holds an APFS file system, so no type
    # detection is needed for that case.
    if source_path_spec.type_indicator == (
        definitions.TYPE_INDICATOR_APFS_CONTAINER):
        return path_spec_factory.Factory.NewPathSpec(
            definitions.TYPE_INDICATOR_APFS, location='/',
            parent=source_path_spec)
    try:
        type_indicators = analyzer.Analyzer.GetFileSystemTypeIndicators(
            source_path_spec, resolver_context=self._resolver_context)
    except RuntimeError as exception:
        raise errors.BackEndError((
            'Unable to process source path specification with error: '
            '{0!s}').format(exception))
    if not type_indicators:
        return None
    type_indicator = type_indicators[0]
    if len(type_indicators) > 1:
        # Multiple candidate types are only resolvable when the preferred
        # NTFS back-end is among them; otherwise the result is ambiguous.
        if definitions.PREFERRED_NTFS_BACK_END not in type_indicators:
            raise errors.BackEndError(
                'Unsupported source found more than one file system types.')
        type_indicator = definitions.PREFERRED_NTFS_BACK_END
    # NTFS uses a backslash as the root location.
    if type_indicator == definitions.TYPE_INDICATOR_NTFS:
        root_location = '\\'
    else:
        root_location = '/'
    file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
        type_indicator, location=root_location, parent=source_path_spec)
    if type_indicator == definitions.TYPE_INDICATOR_TSK:
        # Verify the TSK back-end can actually open the file system; fall
        # back to None when it cannot.
        try:
            file_system = resolver.Resolver.OpenFileSystem(
                file_system_path_spec, resolver_context=self._resolver_context)
            file_system.Close()
        except errors.BackEndError:
            file_system_path_spec = None
    return file_system_path_spec
Scans the path specification for a supported file system format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: file system path specification or None if no supported file system type was found. Raises: BackEndError: if the source cannot be scanned or more than one file system type is found.
juraj-google-style
def get(self, network_id, *args, **kwargs):
    """Get a network by its ID.

    Args:
        network_id (str): The ID of the network.
        verbose (bool): Retrieve the service details across the cluster
            in swarm mode.
        scope (str): Filter the network by scope (``swarm``, ``global``
            or ``local``).

    Returns:
        (:py:class:`Network`) The network.

    Raises:
        :py:class:`docker.errors.NotFound`
            If the network does not exist.
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    raw = self.client.api.inspect_network(network_id, *args, **kwargs)
    return self.prepare_model(raw)
Get a network by its ID. Args: network_id (str): The ID of the network. verbose (bool): Retrieve the service details across the cluster in swarm mode. scope (str): Filter the network by scope (``swarm``, ``global`` or ``local``). Returns: (:py:class:`Network`) The network. Raises: :py:class:`docker.errors.NotFound` If the network does not exist. :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def el_to_path_vector(el):
    """Convert `el` to a vector of foregoing elements.

    Args:
        el (obj): Double-linked HTMLElement instance.

    Returns:
        list: HTMLElements considered as the path from the root to `el`
            (root first, `el` last).
    """
    chain = [el]
    while el.parent:
        el = el.parent
        chain.append(el)
    chain.reverse()
    return chain
Convert `el` to vector of foregoing elements. Attr: el (obj): Double-linked HTMLElement instance. Returns: list: HTMLElements which considered as path from root to `el`.
codesearchnet
def find_replace(obj, find, replace):
    """Searches an object and performs a find and replace.

    Recurses into dicts (both keys and values) and lists; leaf values
    equal to ``find`` are replaced with ``replace``, and other leaves are
    passed through a string find/replace.

    Args:
        obj (object): The object to iterate and find/replace.
        find (str): The string to search for.
        replace (str): The string to replace with.

    Returns:
        object: The object with replaced strings.

    Raises:
        ArcRestHelperError: if an unexpected error occurs while traversing.
    """
    try:
        if isinstance(obj, dict):
            # Replace in both keys and values, recursively.
            return {find_replace(key, find, replace): find_replace(value, find, replace) for (key, value) in obj.items()}
        elif isinstance(obj, list):
            return [find_replace(element, find, replace) for element in obj]
        elif (obj == find):
            return unicode_convert(replace)
        else:
            try:
                return unicode_convert(find_replace_string(obj, find, replace))
            except:
                # NOTE(review): bare except silently falls back to the
                # original value on any failure; consider narrowing the
                # exception type.
                return unicode_convert(obj)
    except:
        (line, filename, synerror) = trace()
        raise ArcRestHelperError({'function': 'find_replace', 'line': line, 'filename': filename, 'synerror': synerror})
    finally:
        pass
Searches an object and performs a find and replace. Args: obj (object): The object to iterate and find/replace. find (str): The string to search for. replace (str): The string to replace with. Returns: object: The object with replaced strings.
codesearchnet
def contains(self, time: datetime.datetime, inclusive: bool = True) -> bool:
    """Does the interval contain a momentary time?

    Args:
        time: the ``datetime.datetime`` to check
        inclusive: use inclusive rather than exclusive range checks?

    Returns:
        bool: whether ``time`` lies within [start, end] (inclusive) or
            (start, end) (exclusive).
    """
    if inclusive:
        return self.start <= time <= self.end
    return self.start < time < self.end
Does the interval contain a momentary time? Args: time: the ``datetime.datetime`` to check inclusive: use inclusive rather than exclusive range checks?
codesearchnet
def _apply(self, ctx: ExtensionContext) -> Any:
    """Loads a yaml fragment from an external file.

    Args:
        ctx: The processing context.

    Returns:
        The external resource as a python dictionary. The fragment is
        already sent through the processor as well.
    """
    _, external_path = ctx.node
    # Pass the current document's path as the base for relative lookups,
    # but only when the document is an actual file on disk.
    return ctx.mentor.load_yaml(self.locator(
        external_path,
        cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None
    ))
Loads a yaml fragment from an external file. Args: ctx: The processing context. Returns: The external resource as a python dictionary. The fragment is already send through the processor as well.
juraj-google-style
def run_cmd(self, *args, **kwargs):
    """Run a command and return its output with Unix-style line endings.

    All ``\r\n`` sequences in the output are replaced with ``\n``.

    Args:
        timeout (float): optional timeout for the command execution,
            popped from ``kwargs`` before the command is launched.

    Returns:
        str: the decoded (UTF-8) stdout of the command.
    """
    timeout = kwargs.pop('timeout', None)
    process = self.raw_cmd(*args, **kwargs)
    raw_output = process.communicate(timeout=timeout)[0]
    return raw_output.decode('utf-8').replace('\r\n', '\n')
Run a command and return its output with Unix-style line endings (all \r\n already replaced with \n). Args: - timeout (float): timeout for the command execution, in seconds
codesearchnet
def _FormatHostname(self, event):
    """Formats the hostname.

    Args:
        event (EventObject): event.

    Returns:
        str: formatted hostname field.
    """
    hostname = self._output_mediator.GetHostname(event)
    return self._FormatField(hostname)
Formats the hostname. Args: event (EventObject): event. Returns: str: formatted hostname field.
juraj-google-style
def service_headline(self, short_name):
    """Get the headline stored for a service.

    Args:
        short_name (string): The short name of the service to get
            messages for.

    Returns:
        ServiceMessage: the headline or None if there is no headline.

    Raises:
        ArgumentError: if no service is registered under ``short_name``.
    """
    if short_name in self.services:
        return self.services[short_name]['state'].headline
    raise ArgumentError('Unknown service name', short_name=short_name)
Get the headline stored for a service. Args: short_name (string): The short name of the service to get messages for Returns: ServiceMessage: the headline or None if there is no headline
codesearchnet
def eval(self, session=None, feed_dict=None):
    """Compute and return the value of this random variable in a session.

    This is not a graph construction method; it does not add ops to the
    graph. It requires a session where the graph containing this variable
    has been launched. If no session is passed, the default session is
    used.

    Args:
        session: tf.BaseSession. The `tf.Session` to use to evaluate this
            random variable. If None, the default session is used.
        feed_dict: dict. A dictionary that maps `tf.Tensor` objects to
            feed values. See `tf.Session.run()` for a description of the
            valid feed values.

    Returns:
        Value of the random variable.
    """
    # Delegate straight to the underlying tensor's eval.
    return self.value.eval(feed_dict=feed_dict, session=session)
In a session, computes and returns the value of this random variable. This is not a graph construction method, it does not add ops to the graph. This convenience method requires a session where the graph containing this variable has been launched. If no session is passed, the default session is used. Args: session: tf.BaseSession. The `tf.Session` to use to evaluate this random variable. If none, the default session is used. feed_dict: dict. A dictionary that maps `tf.Tensor` objects to feed values. See `tf.Session.run()` for a description of the valid feed values. Returns: Value of the random variable. #### Examples ```python x = Normal(0.0, 1.0) with tf.Session() as sess: # Usage passing the session explicitly. print(x.eval(sess)) # Usage with the default session. The 'with' block # above makes 'sess' the default session. print(x.eval()) ```
codesearchnet
def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
    """Inspect these credentials for information.

    Args:
        name (bool): get the name associated with the credentials
        lifetime (bool): get the remaining lifetime for the credentials
        usage (bool): get the usage for the credentials
        mechs (bool): get the mechanisms associated with the credentials

    Returns:
        InquireCredResult: the information about the credentials, with
            None used when the corresponding argument was False.

    Raises:
        MissingCredentialsError
        InvalidCredentialsError
        ExpiredCredentialsError
    """
    res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
    # Wrap the raw name in a high-level Name object when one was returned.
    if (res.name is not None):
        res_name = names.Name(res.name)
    else:
        res_name = None
    return tuples.InquireCredResult(res_name, res.lifetime, res.usage, res.mechs)
Inspect these credentials for information This method inspects these credentials for information about them. Args: name (bool): get the name associated with the credentials lifetime (bool): get the remaining lifetime for the credentials usage (bool): get the usage for the credentials mechs (bool): get the mechanisms associated with the credentials Returns: InquireCredResult: the information about the credentials, with None used when the corresponding argument was False Raises: MissingCredentialsError InvalidCredentialsError ExpiredCredentialsError
codesearchnet
def get_storage_usage(access_token, subscription_id, location):
    """Return storage usage and quota information for a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        location (str): Azure region, e.g. ``westus``.

    Returns:
        HTTP response. JSON body of storage account usage.
    """
    endpoint = '{}/subscriptions/{}/providers/Microsoft.Storage/locations/{}/usages?api-version={}'.format(
        get_rm_endpoint(), subscription_id, location, STORAGE_API)
    return do_get(endpoint, access_token)
Returns storage usage and quota information for the specified subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure region, e.g. westus. Returns: HTTP response. JSON body of storage account usage.
juraj-google-style
def get_structure_from_name(self, structure_name):
    """Return a structure from a name.

    Args:
        structure_name (str): name of the structure

    Returns:
        Structure: the first structure whose name matches, or None when
            no structure has that name.
    """
    for structure in self.structures:
        if structure.name == structure_name:
            return structure
    return None
Return a structure from a name Args: structure_name (str): name of the structure Returns: Structure
juraj-google-style
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
    """A utility that returns number of image patches for a given image size.

    Args:
        height (`int`): Height of the input image.
        width (`int`): Width of the input image.
        images_kwargs (`dict`, *optional*):
            Any kwargs to override defaults of the image processor.

    Returns:
        `int`: Number of patches per image.
    """
    # Bug fix: the original dereferenced images_kwargs.get(...) directly,
    # which raised AttributeError whenever the argument was left at its
    # None default.
    if images_kwargs is None:
        images_kwargs = {}
    split_image = images_kwargs.get('split_image', None) or self.split_image
    # NOTE(review): max_image_size is resolved but unused below; kept for
    # interface parity with the processor -- confirm against callers.
    max_image_size = images_kwargs.get('max_image_size', None) or self.max_image_size
    resized_height, resized_width = select_best_resolution((height, width), self.split_resolutions)
    # When not splitting, the whole image is a single patch; otherwise the
    # patch count follows the selected resolution's height (upstream
    # convention -- confirm against select_best_resolution semantics).
    num_patches = 1 if not split_image else resized_height
    return num_patches
A utility that returns number of image patches for a given image size. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. images_kwargs (`dict`, *optional*) Any kwargs to override defaults of the image processor. Returns: `int`: Number of patches per image.
github-repos
def ricker(f, length, dt):
    """A Ricker wavelet.

    Args:
        f (float): frequency in Hz, e.g. 25 Hz.
        length (float): Length in s, e.g. 0.128.
        dt (float): sample interval in s, e.g. 0.001.

    Returns:
        tuple. time basis, amplitude values.
    """
    # Bug fix: the original wrapped the linspace endpoints in int(), which
    # truncates sub-second lengths (e.g. 0.128 s) to 0 and produces a
    # degenerate all-zero time axis. Only the sample count needs int().
    t = np.linspace(-length / 2, (length - dt) / 2, int(length / dt))
    # Standard Ricker formula: (1 - 2*pi^2*f^2*t^2) * exp(-pi^2*f^2*t^2).
    y = (1.0 - (2.0 * (np.pi ** 2) * (f ** 2) * (t ** 2))) * np.exp(
        (-(np.pi ** 2)) * (f ** 2) * (t ** 2))
    return (t, y)
A Ricker wavelet. Args: f (float): frequency in Hz, e.g. 25 Hz. length (float): Length in s, e.g. 0.128. dt (float): sample interval in s, e.g. 0.001. Returns: tuple. time basis, amplitude values.
codesearchnet
def debug_op(self):
    """Name of the debug op.

    Returns:
        (`str`) debug op name (e.g., `DebugIdentity`).
    """
    return self._debug_op
Name of the debug op. Returns: (`str`) debug op name (e.g., `DebugIdentity`).
github-repos
def _process_for_docstring(self, node, node_type):
    """Check for docstring quote consistency.

    Args:
        node: the AST node being visited.
        node_type: the type of node being operated on.
    """
    if node.doc is not None:
        if node_type == 'module':
            if not node.body:
                # Docstring-only module: every recorded triple quote
                # belongs to the docstring.
                for key in list(self._tokenized_triple_quotes.keys()):
                    quote_record = self._tokenized_triple_quotes.get(key)
                    if quote_record:
                        self._check_docstring_quotes(quote_record)
                        del self._tokenized_triple_quotes[key]
            else:
                # The module docstring precedes the first statement; scan
                # lines up to it and stop at the first recorded quote.
                for i in range(0, node.body[0].lineno):
                    quote_record = self._tokenized_triple_quotes.get(i)
                    if quote_record:
                        self._check_docstring_quotes(quote_record)
                        del self._tokenized_triple_quotes[i]
                        break
        else:
            if not node.body:
                # Body-less definition: locate the docstring line directly.
                lineno = self._find_docstring_line_for_no_body(node.fromlineno)
                quote_record = self._tokenized_triple_quotes.get(lineno)
                if quote_record:
                    self._check_docstring_quotes(quote_record)
                    del self._tokenized_triple_quotes[lineno]
            else:
                doc_row = self._find_docstring_line(node.fromlineno, node.tolineno)
                quote_record = self._tokenized_triple_quotes.get(doc_row)
                if quote_record:
                    self._check_docstring_quotes(quote_record)
                    del self._tokenized_triple_quotes[doc_row]
Check for docstring quote consistency. Args: node: the AST node being visited. node_type: the type of node being operated on.
juraj-google-style
def _handle_error_response(response_body):
    """Translates an error response into an exception.

    Args:
        response_body (str): The decoded response data.

    Raises:
        google.auth.exceptions.RefreshError
    """
    try:
        error_data = json.loads(response_body)
        error_details = '{}: {}'.format(error_data['error'], error_data.get('error_description'))
    except (KeyError, ValueError):
        # Not JSON, or missing the 'error' key: surface the raw body.
        error_details = response_body
    raise exceptions.RefreshError(error_details, response_body)
Translates an error response into an exception. Args: response_body (str): The decoded response data. Raises: google.auth.exceptions.RefreshError
codesearchnet
def _serialize_scalar_from_string_representation_factory(type_name, types, str_func=str):
    """Builds functions that leverage Python ``str()`` or similar functionality.

    Args:
        type_name (str): The name of the Ion type.
        types (Union[Sequence[type], type]): The Python types to validate for.
        str_func (Optional[Callable]): The function to convert the value
            with, defaults to ``str``.

    Returns:
        function: The function for serializing scalars of a given type to
            Ion text bytes.
    """
    def serialize(ion_event):
        value = ion_event.value
        validate_scalar_value(value, types)
        return six.b(str_func(value))
    # Give the closure a descriptive name for debugging/tracebacks.
    serialize.__name__ = '_serialize_' + type_name
    return serialize
Builds functions that leverage Python ``str()`` or similar functionality. Args: type_name (str): The name of the Ion type. types (Union[Sequence[type],type]): The Python types to validate for. str_func (Optional[Callable]): The function to convert the value with, defaults to ``str``. Returns: function: The function for serializing scalars of a given type to Ion text bytes.
juraj-google-style
def build_markdown_table(headers, rows, row_keys=None):
    """Build a lined-up markdown table.

    Args:
        headers (dict): A key -> value pairing of the headers.
        rows (list): List of dictionaries that contain all the keys
            listed in the headers.
        row_keys (list): A sorted list of keys to display.

    Returns:
        A valid Markdown Table as a string.
    """
    row_maxes = _find_row_maxes(headers, rows)
    if not row_keys:
        row_keys = [key for (key, value) in headers.items()]
    lines = [
        _build_row(headers, row_maxes, row_keys),
        _build_separator(row_maxes, row_keys),
    ]
    lines.extend(_build_row(row, row_maxes, row_keys) for row in rows)
    return '\n'.join(lines) + '\n'
Build a lined up markdown table. Args: headers (dict): A key -> value pairing fo the headers. rows (list): List of dictionaries that contain all the keys listed in the headers. row_keys (list): A sorted list of keys to display Returns: A valid Markdown Table as a string.
codesearchnet
def pre_scan(self, func=operator.add, seed=0):
    """An exclusive prefix sum.

    Returns the cumulative application of the supplied function up to but
    excluding the current element.

    Args:
        func: An optional binary function which is commutative - that is,
            the order of the arguments is unimportant. Defaults to a
            summing operator.
        seed: The first element of the prefix sum and therefore also the
            first element of the returned sequence.

    Returns:
        A Queryable such that the nth element is the sum of the first n-1
        elements of the source sequence.

    Raises:
        ValueError: If the Queryable has been closed.
        TypeError: If func is not callable.
    """
    if self.closed():
        raise ValueError('Attempt to call pre_scan() on a closed Queryable.')
    if (not is_callable(func)):
        raise TypeError('pre_scan() parameter func={0} is not callable'.format(repr(func)))
    return self._create(self._generate_pre_scan_result(func, seed))
An exclusive prefix sum which returns the cumulative application of the supplied function up to but excluding the current element. Args: func: An optional binary function which is commutative - that is, the order of the arguments is unimportant. Defaults to a summing operator. seed: The first element of the prefix sum and therefore also the first element of the returned sequence. Returns: A Queryable such that the nth element is the sum of the first n-1 elements of the source sequence. Raises: ValueError: If the Queryable has been closed. TypeError: If func is not callable.
codesearchnet
def serializable_value(self, obj):
    """Produce the value as it should be serialized.

    Sometimes it is desirable for the serialized value to differ from the
    ``__get__`` value, so that the ``__get__`` value appears simpler for
    user or developer convenience.

    Args:
        obj (HasProps) : the object to get the serialized attribute for

    Returns:
        JSON-like
    """
    raw = self.__get__(obj, obj.__class__)
    return self.property.serialize_value(raw)
Produce the value as it should be serialized. Sometimes it is desirable for the serialized value to differ from the ``__get__`` in order for the ``__get__`` value to appear simpler for user or developer convenience. Args: obj (HasProps) : the object to get the serialized attribute for Returns: JSON-like
juraj-google-style
def get_glob(path):
    """Process the input path, applying globbing and formatting.

    Note that this returns files AND directories that match the glob. No
    tilde expansion is done, but ``*``, ``?``, and character ranges
    expressed with ``[]`` will be correctly matched. When an iterable of
    paths is passed, the matches for every entry are combined into a
    single list.

    Args:
        path: Path-like string, or iterable (list or tuple) of paths.

    Returns:
        Combined list of paths found for the input glob(s).

    Raises:
        TypeError: when ``path`` is neither a string, a path-like object,
            nor a list/tuple.
    """
    if isinstance(path, str):
        return glob.glob(path, recursive=True)
    if isinstance(path, os.PathLike):
        return glob.glob(str(path), recursive=True)
    if isinstance(path, (list, tuple)):
        matches = []
        for entry in path:
            matches.extend(glob.glob(str(entry), recursive=True))
        return matches
    raise TypeError(f"path should be string, path-like or a list. Instead, it's a {type(path)}")
Process the input path, applying globbing and formatting. Do note that this will returns files AND directories that match the glob. No tilde expansion is done, but *, ?, and character ranges expressed with [] will be correctly matched. Escape all special characters ('?', '*' and '['). For a literal match, wrap the meta-characters in brackets. For example, '[?]' matches the character '?'. If passing in an iterable of paths, will expand matches for each path in the iterable. The function will return all the matches for each path glob expression combined into a single list. Args: path: Path-like string, or iterable (list or tuple ) of paths. Returns: Combined list of paths found for input glob.
codesearchnet
def format_map(self, format_string, mapping):
    """Format a string by a map.

    Args:
        format_string (str): A format string.
        mapping (dict): A map used to format the string.

    Returns:
        A formatted string.

    Raises:
        KeyError: if a key is not provided by the given map.
    """
    # Delegate to vformat; positional args are unused (None), so only
    # named fields from `mapping` can be referenced in the format string.
    return self.vformat(format_string, args=None, kwargs=mapping)
format a string by a map Args: format_string(str): A format string mapping(dict): A map to format the string Returns: A formatted string. Raises: KeyError: if key is not provided by the given map.
juraj-google-style
def _best_subset(self, n_qubits):
    """Computes the qubit mapping with the best connectivity.

    Args:
        n_qubits (int): Number of subset qubits to consider.

    Returns:
        ndarray: Array of qubits to use for best connectivity mapping.
    """
    # Bug fix: the original used the invalid syntax `cmap[(:, 0)]` (a
    # mangled NumPy column slice); restored to `cmap[:, 0]` / `cmap[:, 1]`.
    if n_qubits == 1:
        return np.array([0])
    device_qubits = self.coupling_map.size()
    cmap = np.asarray(self.coupling_map.get_edges())
    data = np.ones_like(cmap[:, 0])
    sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])),
                            shape=(device_qubits, device_qubits)).tocsr()
    best = 0
    best_map = None
    # Try each qubit as a BFS start; keep the n_qubits-sized neighbourhood
    # with the most internal connections.
    for k in range(sp_cmap.shape[0]):
        bfs = cs.breadth_first_order(sp_cmap, i_start=k, directed=False,
                                     return_predecessors=False)
        connection_count = 0
        sub_graph = []
        for i in range(n_qubits):
            node_idx = bfs[i]
            for j in range(sp_cmap.indptr[node_idx], sp_cmap.indptr[node_idx + 1]):
                node = sp_cmap.indices[j]
                for counter in range(n_qubits):
                    if node == bfs[counter]:
                        connection_count += 1
                        sub_graph.append([node_idx, node])
                        break
        if connection_count > best:
            best = connection_count
            best_map = bfs[0:n_qubits]
    # NOTE(review): sub_graph here is the one from the LAST BFS iteration,
    # not necessarily the one matching best_map -- preserved as-is from
    # the original; confirm against upstream before changing.
    # Renumber the chosen qubits 0..n_qubits-1 and reorder with reverse
    # Cuthill-McKee to reduce bandwidth.
    mapping = {}
    for edge in range(best_map.shape[0]):
        mapping[best_map[edge]] = edge
    new_cmap = [[mapping[c[0]], mapping[c[1]]] for c in sub_graph]
    rows = [edge[0] for edge in new_cmap]
    cols = [edge[1] for edge in new_cmap]
    data = [1] * len(rows)
    sp_sub_graph = sp.coo_matrix((data, (rows, cols)),
                                 shape=(n_qubits, n_qubits)).tocsr()
    perm = cs.reverse_cuthill_mckee(sp_sub_graph)
    best_map = best_map[perm]
    return best_map
Computes the qubit mapping with the best connectivity. Args: n_qubits (int): Number of subset qubits to consider. Returns: ndarray: Array of qubits to use for best connectivity mapping.
codesearchnet
def setErrorHandler(self, errorhandler):
    """Sets a new error handler.

    Args:
        errorhandler: The object handling AMPL errors and warnings.
    """
    class ErrorHandlerWrapper(ErrorHandler):
        # Wraps the user handler so exceptions raised inside it are
        # captured and can be re-raised later via check().
        def __init__(self, errorhandler):
            self.errorhandler = errorhandler
            self.last_exception = None

        def error(self, exception):
            # Translate low-level amplpython exceptions to the public type.
            if isinstance(exception, amplpython.AMPLException):
                exception = AMPLException(exception)
            try:
                self.errorhandler.error(exception)
            except Exception as e:
                self.last_exception = e

        def warning(self, exception):
            if isinstance(exception, amplpython.AMPLException):
                exception = AMPLException(exception)
            try:
                self.errorhandler.warning(exception)
            except Exception as e:
                self.last_exception = e

        def check(self):
            # Re-raise (and clear) any exception captured from the user
            # handler.
            if (self.last_exception is not None):
                (e, self.last_exception) = (self.last_exception, None)
                raise e

    errorhandler_wrapper = ErrorHandlerWrapper(errorhandler)

    class InnerErrorHandler(amplpython.ErrorHandler):
        # Bridge from the C++-side handler interface to the wrapper.
        def error(self, exception):
            errorhandler_wrapper.error(exception)

        def warning(self, exception):
            errorhandler_wrapper.warning(exception)

    # Keep references so the handlers are not garbage collected while the
    # C++ side still points at them.
    self._errorhandler = errorhandler
    self._errorhandler_inner = InnerErrorHandler()
    self._errorhandler_wrapper = errorhandler_wrapper
    lock_and_call((lambda : self._impl.setErrorHandler(self._errorhandler_inner)), self._lock)
Sets a new error handler. Args: errorhandler: The object handling AMPL errors and warnings.
codesearchnet
class XSoftmax(torch.autograd.Function):
    """Masked Softmax which is optimized for saving memory.

    Args:
        input (`torch.tensor`): The input tensor that will apply softmax.
        mask (`torch.IntTensor`): The mask matrix where 0 indicates that
            the element will be ignored in the softmax calculation.
        dim (int): The dimension that will apply softmax.
    """

    @staticmethod
    def forward(ctx, input, mask, dim):
        ctx.dim = dim
        # Invert the mask: positions where mask == 0 must be ignored.
        rmask = ~mask.to(torch.bool)
        # Fill ignored positions with the dtype minimum so they vanish
        # after softmax.
        output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
        output = torch.softmax(output, ctx.dim)
        # Zero the masked positions in-place (saves an extra tensor).
        output.masked_fill_(rmask, 0)
        ctx.save_for_backward(output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        output, = ctx.saved_tensors
        # Gradient is computed from the saved softmax output only.
        inputGrad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output)
        # No gradients for mask and dim.
        return (inputGrad, None, None)

    @staticmethod
    def symbolic(g, self, mask, dim):
        # ONNX export: reproduce masked_fill + softmax + masked_fill with
        # ONNX ops.
        import torch.onnx.symbolic_helper as sym_help
        from torch.onnx.symbolic_opset9 import masked_fill, softmax
        mask_cast_value = g.op('Cast', mask, to_i=sym_help.cast_pytorch_to_onnx['Long'])
        # r_mask = 1 - mask, cast to bool.
        r_mask = g.op('Cast', g.op('Sub', g.op('Constant', value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), to_i=sym_help.cast_pytorch_to_onnx['Bool'])
        output = masked_fill(g, self, r_mask, g.op('Constant', value_t=torch.tensor(torch.finfo(self.type().dtype()).min)))
        output = softmax(g, output, dim)
        return masked_fill(g, output, r_mask, g.op('Constant', value_t=torch.tensor(0, dtype=torch.bool)))
Masked Softmax which is optimized for saving memory Args: input (`torch.tensor`): The input tensor that will apply softmax. mask (`torch.IntTensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation. dim (int): The dimension that will apply softmax Example: ```python >>> import torch >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax >>> # Make a tensor >>> x = torch.randn([4, 20, 100]) >>> # Create a mask >>> mask = (x > 0).int() >>> # Specify the dimension to apply softmax >>> dim = -1 >>> y = XSoftmax.apply(x, mask, dim) ```
github-repos
def _compute_template(val: BaseValue) -> Sequence[BaseValue]:
    """Compute the precedence list of template parameters according to C3.

    1. If the base class list contains `typing.Generic`, then all the type
       parameters should be provided there and no other base needs to be
       parsed.
    2. If there is no `typing.Generic`, the precedence list is merged from
       all bases via C3.
    3. If `typing.Generic` exists, it must contain at least one type
       parameter and appear at most once; errors are reported otherwise.

    Args:
        val: The abstract.BaseValue to compute a template for.

    Returns:
        parsed type parameters

    Raises:
        GenericTypeError: if the type annotation for generic type is
            incorrect.
    """
    if isinstance(val, _abstract.PyTDClass):
        return [val.ctx.convert.constant_to_value(itm.type_param) for itm in val.pytd_cls.template]
    elif not isinstance(val, _abstract.InterpreterClass):
        return ()
    bases = [abstract_utils.get_atomic_value(base, default=val.ctx.convert.unsolvable) for base in val.bases()]
    template = []
    # Case 1: typing.Generic in bases provides all type parameters directly.
    for base in bases:
        if base.full_name == 'typing.Generic':
            if isinstance(base, _abstract.PyTDClass):
                raise abstract_utils.GenericTypeError(val, 'Cannot inherit from plain Generic')
            if template:
                raise abstract_utils.GenericTypeError(val, 'Cannot inherit from Generic[...] multiple times')
            for item in base.template:
                param = base.formal_type_parameters.get(item.name)
                template.append(param.with_scope(val.full_name))
    if template:
        # Verify every type variable used by the other bases appears in
        # Generic[...].
        for base in bases:
            if base.full_name != 'typing.Generic':
                if isinstance(base, _abstract.ParameterizedClass):
                    for item in base.template:
                        param = base.formal_type_parameters.get(item.name)
                        # NOTE(review): this isinstance checks `base`, not
                        # `param` -- looks like it should be `param`;
                        # confirm against upstream pytype.
                        if isinstance(base, _abstract.TypeParameter):
                            t = param.with_scope(val.full_name)
                            if t not in template:
                                raise abstract_utils.GenericTypeError(val, 'Generic should contain all the type variables')
    else:
        # Case 2: no typing.Generic; merge base templates with C3.
        seqs = []
        for base in bases:
            if isinstance(base, _abstract.ParameterizedClass):
                seq = []
                for item in base.template:
                    param = base.formal_type_parameters.get(item.name)
                    if isinstance(param, _abstract.TypeParameter):
                        seq.append(param.with_scope(val.full_name))
                seqs.append(seq)
        try:
            template.extend(mro.MergeSequences(seqs))
        except ValueError as e:
            raise abstract_utils.GenericTypeError(val, f'Illegal type parameter order in class {val.name}') from e
    return template
Compute the precedence list of template parameters according to C3. 1. For the base class list, if it contains `typing.Generic`, then all the type parameters should be provided. That means we don't need to parse extra base class and then we can get all the type parameters. 2. If there is no `typing.Generic`, parse the precedence list according to C3 based on all the base classes. 3. If `typing.Generic` exists, it must contain at least one type parameters. And there is at most one `typing.Generic` in the base classes. Report error if the check fails. Args: val: The abstract.BaseValue to compute a template for. Returns: parsed type parameters Raises: GenericTypeError: if the type annotation for generic type is incorrect
github-repos
def GetSystemConfigurationArtifact(self, session_identifier=CURRENT_SESSION):
    """Retrieves the knowledge base as a system configuration artifact.

    Args:
        session_identifier (Optional[str]): session identifier, where
            CURRENT_SESSION represents the active session.

    Returns:
        SystemConfigurationArtifact: system configuration artifact.
    """
    system_configuration = artifacts.SystemConfigurationArtifact()
    system_configuration.code_page = self.GetValue('codepage', default_value=self._codepage)
    system_configuration.hostname = self._hostnames.get(session_identifier, None)
    system_configuration.keyboard_layout = self.GetValue('keyboard_layout')
    system_configuration.operating_system = self.GetValue('operating_system')
    system_configuration.operating_system_product = self.GetValue('operating_system_product')
    system_configuration.operating_system_version = self.GetValue('operating_system_version')
    # A fixed reference date is used to derive the time zone name from the
    # tzinfo object.
    date_time = datetime.datetime(2017, 1, 1)
    time_zone = self._time_zone.tzname(date_time)
    # Python 2 compatibility: tzname may be bytes.
    if (time_zone and isinstance(time_zone, py2to3.BYTES_TYPE)):
        time_zone = time_zone.decode('ascii')
    system_configuration.time_zone = time_zone
    user_accounts = self._user_accounts.get(session_identifier, {})
    system_configuration.user_accounts = list(user_accounts.values())
    return system_configuration
Retrieves the knowledge base as a system configuration artifact. Args: session_identifier (Optional[str])): session identifier, where CURRENT_SESSION represents the active session. Returns: SystemConfigurationArtifact: system configuration artifact.
codesearchnet
def retry(retries=0, delay=timedelta(), conditions=[]):
    """A decorator for making a function that retries on failure.

    Args:
        retries (Integral): The number of times to retry if a failure
            occurs.
        delay (timedelta, optional, 0 seconds): A timedelta representing
            the amount of time to delay between retries.
        conditions (list): A list of retry conditions.
    """
    delay_in_seconds = delay.total_seconds()

    def decorator(function):
        """The actual decorator for retrying."""
        @wraps(function)
        def wrapper(*args, **kwargs):
            """The actual wrapper for retrying."""
            call = partial(function, *args, **kwargs)
            return retry_loop(retries, delay_in_seconds, conditions, call)
        return wrapper

    return decorator
A decorator for making a function that retries on failure. Args: retries (Integral): The number of times to retry if a failure occurs. delay (timedelta, optional, 0 seconds): A timedelta representing the amount of time to delay between retries. conditions (list): A list of retry conditions.
codesearchnet
def build(cls, **kwargs):
    """Similar to create, but the transaction is not committed.

    Args:
        **kwargs: The keyword arguments for the constructor.

    Returns:
        A model instance which has been added to the db session, but whose
        session transaction has not been committed yet.
    """
    instance = cls.new(**kwargs)
    return cls.add(instance, commit=False)
Similar to create. But the transaction is not committed Args: **kwargs : The keyword arguments for the constructor Returns: A model instance which has been added to db session. But session transaction has not been committed yet.
codesearchnet
def run_docker(self, commands):
    """Run App in Docker Container.

    Args:
        commands (dict): A dictionary of the CLI commands.

    Returns:
        int: The exit code of the subprocess command.
    """
    # The docker SDK is an optional dependency; fail with guidance if it
    # is missing.
    try:
        import docker
    except ImportError:
        print(
            '{}{}Could not import docker module (try "pip install docker").'.format(
                c.Style.BRIGHT, c.Fore.RED
            )
        )
        sys.exit(1)
    app_args_data = self.profile.get('profile_args').data
    install_json = self.profile.get('install_json')
    client = docker.from_env()
    app_dir = os.getcwd()
    ports = {}
    if self.args.vscd:
        # Expose the VS Code debug port when requested.
        ports = {'{}/tcp'.format(self.args.vscd_port): self.args.vscd_port}
    # Bind-mount the App's in/log/out/temp directories into the container
    # at the same paths, when they are configured.
    volumes = {}
    in_path = '{}/{}'.format(app_dir, app_args_data.get('tc_in_path'))
    if app_args_data.get('tc_in_path') is not None:
        volumes[in_path] = {'bind': in_path}
    log_path = '{}/{}'.format(app_dir, app_args_data.get('tc_log_path'))
    if app_args_data.get('tc_log_path') is not None:
        volumes[log_path] = {'bind': log_path}
    out_path = '{}/{}'.format(app_dir, app_args_data.get('tc_out_path'))
    if app_args_data.get('tc_out_path') is not None:
        volumes[out_path] = {'bind': out_path}
    temp_path = '{}/{}'.format(app_dir, app_args_data.get('tc_temp_path'))
    if app_args_data.get('tc_temp_path') is not None:
        volumes[temp_path] = {'bind': temp_path}
    volumes[app_dir] = {'bind': app_dir}
    # Image precedence: CLI arg, then profile, then install.json, then the
    # instance default.
    if self.args.docker_image is not None:
        docker_image = self.args.docker_image
    else:
        docker_image = self.profile.get(
            'dockerImage', install_json.get('dockerImage', self.docker_image)
        )
    status_code = 1
    try:
        self.container = client.containers.run(
            docker_image,
            entrypoint=commands.get('cli_command'),
            environment=['PYTHONPATH={}/lib_latest'.format(app_dir)],
            detach=True,
            ports=ports,
            remove=True,
            volumes=volumes,
            working_dir=app_dir,
        )
        # Block until the container exits and collect its status.
        results = self.container.wait()
        status_code = results.get('StatusCode')
        error = results.get('Error')
        if error:
            print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, error))
    except Exception as e:
        print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e))
        sys.exit()
    return self.run_exit_code(status_code)
Run App in Docker Container. Args: commands (dict): A dictionary of the CLI commands. Returns: int: The exit code of the subprocess command.
juraj-google-style
def get_assets(cls, lat, lon, begin=None, end=None):
    """Returns date and ids of flyovers.

    Args:
        lat: latitude float
        lon: longitude float
        begin: date instance
        end: date instance

    Returns:
        json
    """
    api = cls('planetary/earth/assets')
    return api.get_resource(lat=lat, lon=lon, begin=begin, end=end)
Returns date and ids of flyovers Args: lat: latitude float lon: longitude float begin: date instance end: date instance Returns: json
juraj-google-style
def __init__(self, redir_file, to_file):
    """Constructor.

    Args:
        redir_file: (file) The file object to redirect.
        to_file: (file) The file object `redir_file` should be
            redirected to.
    """
    self.redir_file = redir_file
    self._from_fd = redir_file.fileno()
    self._to_fd = to_file.fileno()
    # Keep an unbuffered OS-level duplicate of the original descriptor so
    # the original stream can still be written to (and restored) after
    # redirection.
    self.orig_file = os.fdopen(os.dup(self._from_fd), 'wb', 0)
Constructor Args: redir_file: (file) The file object to redirect to_file: (file) The file object `redir_file` should be redirected to.
juraj-google-style
def clean(exclude):
    """Remove all unnecessary files.

    Args:
        exclude (list[str]): A list of path patterns to exclude from
            deletion.

    Note:
        The ``pretend`` flag is read from the global context; when set, no
        files are deleted and only the would-be deletions are logged.
    """
    pretend = context.get('pretend', False)
    # Merge CLI excludes with the configured ones.
    exclude = list(exclude) + conf.get('clean.exclude', [])
    clean_patterns = conf.get('clean.patterns', [
        '*__pycache__*',
        '*.py[cod]',
        '*.swp',
    ])
    num_files = 0
    with util.timed_block() as t:
        files = fs.filtered_walk(conf.proj_path(), clean_patterns, exclude)
        for path in files:
            try:
                num_files += 1
                if not isdir(path):
                    log.info(' <91>[file] <90>{}', path)
                    not pretend and os.remove(path)
                else:
                    log.info(' <91>[dir] <90>{}', path)
                    not pretend and rmtree(path)
            except OSError:
                log.info("<33>Failed to remove <90>{}", path)
    if pretend:
        msg = "Would delete <33>{}<32> files. Took <33>{}<32>s"
    else:
        msg = "Deleted <33>{}<32> files in <33>{}<32>s"
    log.info(msg.format(num_files, t.elapsed_s))
Remove all unnecessary files. Args: pretend (bool): If set to **True**, do not delete any files, just show what would be deleted. exclude (list[str]): A list of path patterns to exclude from deletion.
juraj-google-style
def _RecurseOverObject(obj, factory, parent=None):
    """Recurses over a nested structure to look for changes in Suds objects.

    Args:
        obj: A parameter for a SOAP request field which is to be inspected
            and will be packed for Suds if an xsi_type is specified,
            otherwise will be left unaltered.
        factory: The suds.client.Factory object which can create instances
            of the classes generated from the WSDL.
        parent: The parent object that contains the obj parameter to be
            inspected.
    """
    if _IsSudsIterable(obj):
        # Copy before iterating since items may be removed/appended below.
        copy_of_obj = tuple(obj)
        for item in copy_of_obj:
            if _IsSudsIterable(item):
                if 'xsi_type' in item:
                    if isinstance(obj, tuple):
                        # NOTE(review): obj appears to be a (key, value)
                        # pair here; the packed value replaces the entry
                        # in the parent container -- confirm upstream.
                        parent[obj[0]] = _PackForSuds(obj[1], factory)
                    else:
                        # Replace the item in-place with its packed form.
                        obj.remove(item)
                        obj.append(_PackForSuds(item, factory))
                _RecurseOverObject(item, factory, obj)
Recurses over a nested structure to look for changes in Suds objects. Args: obj: A parameter for a SOAP request field which is to be inspected and will be packed for Suds if an xsi_type is specified, otherwise will be left unaltered. factory: The suds.client.Factory object which can create instances of the classes generated from the WSDL. parent: The parent object that contains the obj parameter to be inspected.
juraj-google-style
def run_graph(self, device, n, m, k, transpose_a, transpose_b, num_iters, dtype):
    """Run the graph and print its execution time.

    Args:
        device: String, the device to run on.
        n: tensor A's first dimension size.
        m: tensor A's second dimension size.
        k: tensor B's second dimension size.
        transpose_a: boolean value to show if tensor A is transposed.
        transpose_b: boolean value to show if tensor B is transposed.
        num_iters: number of iterations to run the benchmark.
        dtype: numpy data type of the input tensor.

    Returns:
        The duration of the run in seconds.
    """
    graph = ops.Graph()
    with graph.as_default():
        output = build_graph(device, n, m, k, transpose_a, transpose_b, dtype)
    with session_lib.Session(graph=graph) as session:
        variables.global_variables_initializer().run()
        # Warm-up runs, excluded from the timed measurement.
        for _ in range(500):
            session.run(output)
        start_time = time.time()
        for _ in range(num_iters):
            session.run(output)
        duration = time.time() - start_time
        # Two ops (multiply + add) per element of the n x m x k product.
        num_items = n * m * k * 2
        throughput = num_items * num_iters / duration / 1000000000.0
        print('%s %s input_info:%s %d %.4fsec, %.4fGitems/s.' % (device, str(dtype), str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' + str(transpose_a) + '.tb:' + str(transpose_b), num_iters, duration, throughput))
    name_template = 'matmul_{device}_{dtype}_input_info_{inputinfo}'
    self.report_benchmark(name=name_template.format(device=device, dtype=str(dtype).replace(' ', ''), inputinfo=str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' + str(transpose_a) + ',tb:' + str(transpose_b)).replace(' ', ''), iters=num_iters, wall_time=duration)
    return duration
Run the graph and print its execution time. Args: device: String, the device to run on. n: tensor A's first dimension size. m: tensor A's second dimension size. k: tensor B's second dimension size. transpose_a: boolean value to show if tensor A is transposed. transpose_b: boolean value to show if tensor B is transposed. num_iters: number of iterations to run the benchmark. dtype: numpy data type of the input tensor. Returns: The duration of the run in seconds.
github-repos
def find(lst, a, case_sensitive=True):
    """Return indices of a list whose elements match an object or list of
    objects.

    Args:
        lst: list of values
        a: object(s) to check equality
        case_sensitive: if the search should be case sensitive

    Returns:
        list: list of indices of lst which equal a
    """
    targets = force_list(a)
    haystack = lst
    if not case_sensitive:
        haystack = [item.lower() for item in haystack]
        targets = [t.lower() for t in targets]
    return [idx for idx, item in enumerate(haystack) if item in targets]
Return indices of a list which have elements that match an object or list of objects Args: lst: list of values a: object(s) to check equality case_sensitive: if the search should be case sensitive Returns: list: list of indicies of lst which equal a
juraj-google-style
def upsert_sweep(self, config):
    """Upsert a sweep object via the GraphQL API.

    Args:
        config (dict): sweep config; serialized to YAML for the mutation.

    Returns:
        str: the name of the upserted sweep.
    """
    # NOTE(review): the GraphQL mutation document appears to have been
    # stripped from this gql() call -- confirm against the original source.
    mutation = gql()

    def no_retry_400_or_404(e):
        # Retry anything that is not an HTTP error...
        if not isinstance(e, requests.HTTPError):
            return True
        # ...and any HTTP status other than 400/404.
        if e.response.status_code != 400 and e.response.status_code != 404:
            return True
        # 400/404 carry a user-actionable message; surface it, don't retry.
        body = json.loads(e.response.content)
        raise UsageError(body['errors'][0]['message'])
    response = self.gql(mutation, variable_values={
        'config': yaml.dump(config),
        'description': config.get("description"),
        'entityName': self.settings("entity"),
        'projectName': self.settings("project")}, check_retry_fn=no_retry_400_or_404)
    return response['upsertSweep']['sweep']['name']
Upsert a sweep object. Args: config (str): sweep config (will be converted to yaml)
juraj-google-style
def __init__(self, name, property):
    """Create a PropertyDescriptor for a basic Bokeh property.

    Args:
        name (str): the attribute name this descriptor manages.
        property (Property): the basic property instance to wrap.
    """
    super(BasicPropertyDescriptor, self).__init__(name)
    self.property = property
    # Mirror the wrapped property's docstring so help() on the
    # descriptor is as informative as on the property itself.
    self.__doc__ = property.__doc__
Create a PropertyDescriptor for basic Bokeh properties. Args: name (str) : The attribute name that this property is for property (Property) : A basic property to create a descriptor for
juraj-google-style
def _ungroup_and_make_mirrored(grouped_reduced, destinations, reduce_op, num_between_graph_workers=1):
    """Ungroup all-reduce results and wrap them as Mirrored values.

    Each all-reduced value is divided by the total replica count before
    wrapping when ``reduce_op`` is MEAN.

    Args:
        grouped_reduced: a list of lists; each sublist holds (value, None)
            pairs, one per device, as produced by
            cross_device_utils.aggregate_gradients_using*.
        destinations: a value whose devices the result is colocated with.
        reduce_op: how values were aggregated; `tf.distribute.ReduceOp.SUM`
            or `tf.distribute.ReduceOp.MEAN`.
        num_between_graph_workers: number of workers in between-graph
            replication (multiplies the per-worker device count).

    Returns:
        A list of Mirrored objects, one per gradient position.
    """
    num_replicas = len(get_devices_from(destinations)) * num_between_graph_workers
    # index[i] collects the per-device components for output position i.
    index = [[] for _ in range(len(grouped_reduced[0]))]
    for per_replica_reduced in grouped_reduced:
        for i, (v, _) in enumerate(per_replica_reduced):
            if reduce_op == reduce_util.ReduceOp.MEAN:
                # Divide on the value's own device to avoid a cross-device copy.
                with ops.device(v.device):
                    index[i].append(v / num_replicas)
            else:
                index[i].append(v)
    return [distribute_utils.regroup(v, wrap_class=value_lib.Mirrored) for v in index]
Ungroup results from all-reduce and make Mirrored objects. Each all-reduce result will be divided by the number of destinations before Mirrored objects are created if reduce_op is "mean". Args: grouped_reduced: a list of lists, each sublist has components for each device, paired with a None. It is the result from cross_device_utils.aggregate_gradients_using*. destinations: a value to colocate the result with. reduce_op: Indicates how values will be aggregated. Accepted values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`. num_between_graph_workers: number of workers in the between-graph replication. Returns: a list of Mirrored objects.
github-repos
def primal_and_adjoint_for_tracing(self, node):
    """Build the primal and adjoint of a traceable function call.

    Args:
        node: ast.Call node of a function we wish to trace through rather
            than transform.

    Returns:
        primal: new ast.Assign node to replace the original primal call.
        adjoint: new ast.Assign node that uses the VJP generated by the
            primal to compute the adjoint.
    """
    primal_template = grads.primals[tracing.Traceable]
    adjoint_template = grads.adjoints[tracing.Traceable]
    to_pack = node.args
    target = ast_.copy_node(self.orig_target)
    vjp = quoting.quote(self.namer.unique(('%s_grad' % node.func.id)))
    tmp = create.create_temp(quoting.quote('tmp'), self.namer)
    # Tracing does not support keyword arguments.
    assert (len(node.keywords) == 0)
    # Pack all positional args into one tuple so the template can forward them.
    primal = template.replace(primal_template, namer=self.namer, result=target, fn=node.func, tmp=tmp, vjp=vjp, args=gast.Tuple(elts=to_pack, ctx=gast.Load()))
    # One temp gradient per packed argument, unpacked by the adjoint template.
    dto_pack = gast.Tuple(elts=[create.create_temp_grad(arg, self.namer) for arg in to_pack], ctx=gast.Store())
    adjoint = template.replace(adjoint_template, namer=self.namer, result=target, vjp=vjp, dargs=dto_pack)
    return (primal, adjoint)
Build the primal and adjoint of a traceable function. Args: node: ast.Call node of a function we wish to trace, instead of transform Returns: primal: new ast.Assign node to replace the original primal call adjoint: new ast.Assign node using the VJP generated in primal to calculate the adjoint.
codesearchnet
def has_no_flat_neurites(neuron, tol=0.1, method='ratio'):
    """Check that a neuron has no flat neurites.

    Arguments:
        neuron (Neuron): the neuron object to test.
        tol (float): flatness tolerance.
        method (str): way of determining flatness, 'tolerance' or 'ratio',
            as described in neurom.check.morphtree.get_flat_neurites.

    Returns:
        CheckResult holding True when no flat neurite was found.
    """
    flat_neurites = get_flat_neurites(neuron, tol, method)
    return CheckResult(len(flat_neurites) == 0)
Check that a neuron has no flat neurites Arguments: neuron(Neuron): The neuron object to test tol(float): tolerance method(string): way of determining flatness, 'tolerance', 'ratio' \ as described in :meth:`neurom.check.morphtree.get_flat_neurites` Returns: CheckResult with result
codesearchnet
def get(self, item):
    """Look up *item* via ``__getitem__``, caching the result.

    Failed lookups (KeyError) are cached as ``None`` so repeated misses
    do not retry the underlying lookup.

    Args:
        item (str): name of package or module.

    Returns:
        Package/Module: the corresponding object, or None if not found.
    """
    # Bug fix: the original rebound ``item`` to the looked-up value before
    # storing, so the cache was keyed on the *result* object and never hit.
    if item not in self._item_cache:
        try:
            value = self[item]
        except KeyError:
            value = None
        self._item_cache[item] = value
    return self._item_cache[item]
Get item through ``__getitem__`` and cache the result. Args: item (str): name of package or module. Returns: Package/Module: the corresponding object.
juraj-google-style
def start(self, input_data, output_data, transform_resources, **kwargs):
    """Start the local transform job.

    Args:
        input_data (dict): describes the dataset to be transformed and
            where it is stored.
        output_data (dict): where to save the transform results.
        transform_resources (dict): compute instances for the job; only
            local / local_gpu instance types are supported here.
        **kwargs: additional arguments from the boto request object
            (e.g. BatchStrategy, MaxPayloadInMB, Environment).
    """
    self.transform_resources = transform_resources
    self.input_data = input_data
    self.output_data = output_data
    image = self.primary_container['Image']
    instance_type = transform_resources['InstanceType']
    instance_count = 1
    environment = self._get_container_environment(**kwargs)
    # Start the serving container and block until it answers requests.
    self.container = _SageMakerContainer(instance_type, instance_count, image, self.local_session)
    self.container.serve(self.primary_container['ModelDataUrl'], environment)
    serving_port = (get_config_value('local.serving_port', self.local_session.config) or 8080)
    _wait_for_serving_container(serving_port)
    # NOTE(review): this URL literal appears truncated ("http:") -- confirm
    # the full execution-parameters endpoint against the original source.
    endpoint_url = ('http:
    (response, code) = _perform_request(endpoint_url)
    if (code == 200):
        # Ask the container for its preferred execution parameters, but only
        # use them for settings the caller did not supply explicitly.
        execution_parameters = json.loads(response.read())
        for setting in ('BatchStrategy', 'MaxPayloadInMB'):
            if ((setting not in kwargs) and (setting in execution_parameters)):
                kwargs[setting] = execution_parameters[setting]
    kwargs.update(self._get_required_defaults(**kwargs))
    self.start_time = datetime.datetime.now()
    self.batch_strategy = kwargs['BatchStrategy']
    if ('Environment' in kwargs):
        self.environment = kwargs['Environment']
    self._perform_batch_inference(input_data, output_data, **kwargs)
    self.end_time = datetime.datetime.now()
    self.state = self._COMPLETED
Start the Local Transform Job Args: input_data (dict): Describes the dataset to be transformed and the location where it is stored. output_data (dict): Identifies the location where to save the results from the transform job transform_resources (dict): compute instances for the transform job. Currently only supports local or local_gpu **kwargs: additional arguments coming from the boto request object
codesearchnet
def create_saveable_object(name, key, factory, call_with_mapped_captures):
    """Create a SaveableObject, remapping tf.function captures if needed.

    When creating the frozen saver for SavedModel, the save and restore ops
    are placed in a separate graph, so the functions' captures must be
    mapped into that graph.

    Args:
        name: name of the SaveableObject factory.
        key: checkpoint key of this SaveableObject.
        factory: factory method for creating the SaveableObject.
        call_with_mapped_captures: helper that calls a tf.function while
            remapping the captures, or None when no remapping is needed.

    Returns:
        A SaveableObject.
    """
    if call_with_mapped_captures is None:
        return factory(name=key)
    if name == trackable_utils.SERIALIZE_TO_TENSORS_NAME:
        return factory(name=key, call_with_mapped_captures=call_with_mapped_captures)
    elif is_factory_for_restored_saveable_object(factory):
        # Wrap the concrete save/restore functions so that, when invoked,
        # they run with their captures mapped into the current graph.
        concrete_save_fn = factory.keywords['save_function']

        def save_fn(name):
            return call_with_mapped_captures(concrete_save_fn, [name])
        concrete_restore_fn = factory.keywords['restore_function']

        def restore_fn(*restored_tensors):
            return call_with_mapped_captures(concrete_restore_fn, restored_tensors)
        return factory(save_function=save_fn, restore_function=restore_fn, name=key)
    else:
        return factory(name=key)
Creates a SaveableObject while potentially in a different graph. When creating the frozen saver for SavedModel, the save and restore ops are placed in a separate graph. Since RestoredSaveableObject uses tf.functions to save and restore, the function captures must be mapped to the new graph. Args: name: Name of SaveableObject factory. key: Checkpoint key of this SaveableObject. factory: Factory method for creating the SaveableObject. call_with_mapped_captures: Helper that calls a tf.function while remapping the captures. Returns: a SaveableObject.
github-repos
def start_engine(self, **kwargs):
    """Initialize the workflow engine for the current request.

    Sets up ``self.current`` from the request kwargs, restores cached
    workflow state for existing tokens, runs authentication/permission
    checks, loads (or creates) the workflow, and applies any configured
    initial values.

    Args:
        **kwargs: request context; expected to include the workflow name
            (diagram name without the ".bpmn" suffix) and session/input data.
    """
    self.current = WFCurrent(**kwargs)
    self.wf_state = {'in_external': False, 'finished': False}
    if not self.current.new_token:
        # Existing token: restore the persisted workflow state from cache.
        self.wf_state = self.current.wf_cache.get(self.wf_state)
        self.current.workflow_name = self.wf_state['name']
        # Subject is the object this workflow instance operates on.
        if 'subject' in self.wf_state:
            self.current.input['id'] = self.wf_state['subject']
            self.current.task_data['object_id'] = self.wf_state['subject']
    self.check_for_authentication()
    self.check_for_permission()
    self.workflow = self.load_or_create_workflow()
    # Stash any submitted form in task_data under its declared form name.
    if 'form' in self.current.input:
        form = self.current.input['form']
        if 'form_name' in form:
            self.current.task_data[form['form_name']] = form
    start_init_values = self.workflow_spec.wf_properties.get('init', 'False') == 'True'
    if start_init_values:
        WFInit = get_object_from_path(settings.WF_INITIAL_VALUES)()
        WFInit.assign_wf_initial_values(self.current)
    log_msg = ("\n\n::::::::::: ENGINE STARTED :::::::::::\n"
               "\tWF: %s (Possible) TASK:%s\n"
               "\tCMD:%s\n"
               "\tSUBCMD:%s" % (
        self.workflow.name,
        self.workflow.get_tasks(Task.READY),
        self.current.input.get('cmd'), self.current.input.get('subcmd')))
    log.debug(log_msg)
    sys._zops_wf_state_log = log_msg
    self.current.workflow = self.workflow
Initializes the workflow with given request, response objects and diagram name. Args: session: input: workflow_name (str): Name of workflow diagram without ".bpmn" suffix. File must be placed under one of configured :py:attr:`~zengine.settings.WORKFLOW_PACKAGES_PATHS`
juraj-google-style
def _model_ready_for_local_init(self, sess: session.Session) -> Tuple[bool, Optional[str]]:
    """Check whether the model is ready to run local_init_op.

    Args:
        sess: a `Session` used to evaluate the readiness op.

    Returns:
        (is_ready, msg): is_ready is True when local_init_op can run;
        msg explains why it cannot (None when ready).
    """
    readiness_op = self._ready_for_local_init_op
    return _ready(readiness_op, sess, 'Model not ready for local init')
Checks if the model is ready to run local_init_op. Args: sess: A `Session`. Returns: A tuple (is_ready, msg), where is_ready is True if ready to run local_init_op and False otherwise, and msg is `None` if the model is ready to run local_init_op, a `String` with the reason why it is not ready otherwise.
github-repos
def list_objects(root_trackable):
    """Traverse the object graph and list all accessible objects.

    Looks for `Trackable` objects reachable from *root_trackable*; slot
    variables are included only if both their variable and optimizer are
    reachable (i.e. they would be saved with a checkpoint).

    Args:
        root_trackable: a `Trackable` whose dependencies are flattened.

    Returns:
        A flat list of objects.
    """
    graph_view = graph_view_lib.ObjectGraphView(root_trackable)
    return util.list_objects(graph_view)
Traverse the object graph and list all accessible objects. Looks for `Trackable` objects which are dependencies of `root_trackable`. Includes slot variables only if the variable they are slotting for and the optimizer are dependencies of `root_trackable` (i.e. if they would be saved with a checkpoint). Args: root_trackable: A `Trackable` object whose dependencies should be flattened. Returns: A flat list of objects.
github-repos
def setZeroResettableKWH(self, password='00000000'):
    """Serial call to zero the meter's resettable kWh registers.

    Args:
        password (str): optional meter password (default '00000000').

    Returns:
        bool: True when the meter ACKs (0x06) the command.
    """
    result = False
    self.setContext('setZeroResettableKWH')
    try:
        if (not self.requestA()):
            self.writeCmdMsg('Bad read CRC on setting')
        elif (not self.serialCmdPwdAuth(password)):
            self.writeCmdMsg('Password failure')
        else:
            # Fixed command frame; CRC is computed over the frame minus the
            # leading byte and appended before sending.
            # NOTE(review): str.decode('hex') is Python 2 only -- this module
            # appears to target py2; confirm before porting.
            req_str = '0157310230304433282903'
            req_str += self.calc_crc16(req_str[2:].decode('hex'))
            self.m_serial_port.write(req_str.decode('hex'))
            # 0x06 is the serial ACK byte.
            if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):
                self.writeCmdMsg('Success: 06 returned.')
                result = True
        self.serialPostEnd()
    except:
        # Deliberate best-effort: log any failure and fall through to
        # return False rather than propagate serial errors.
        ekm_log(traceback.format_exc(sys.exc_info()))
    self.setContext('')
    return result
Serial call to zero resettable kWh registers. Args: password (str): Optional password. Returns: bool: True on completion and ACK.
codesearchnet
def get_collection_ref(key) -> list[Any]:
    """Wrapper for `Graph.get_collection_ref()` using the default graph.

    Args:
        key: the collection key (e.g. one of the `GraphKeys` names).

    Returns:
        The live list backing the collection -- mutating it mutates the
        collection itself. An empty list if nothing was added under *key*.
    """
    default_graph = get_default_graph()
    return default_graph.get_collection_ref(key)
Wrapper for `Graph.get_collection_ref()` using the default graph. See `tf.Graph.get_collection_ref` for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. Note that this returns the collection list itself, which can be modified in place to change the collection. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility
github-repos
def containsParamSubset(self, params):
    """Test whether this element contains at least all of *params*.

    Args:
        params (dict/SpecialDict): subset of parameters to check for.

    Returns:
        bool: True when every key/value pair of ``params`` occurs in
        ``self.params`` with an equal value.
    """
    own = self.params
    return all(key in own and own[key] == value
               for key, value in params.items())
Test whether this element contains at least all `params`, or more. Args: params (dict/SpecialDict): Subset of parameters. Returns: bool: True if all `params` are contained in this element.
codesearchnet
def AddArguments(cls, argument_group):
    """Add the time-slice and filter command line arguments to a group.

    Takes an argument parser or argument group object and adds all the
    command line arguments this helper supports.

    Args:
        argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
            argparse group to extend.
    """
    argument_group.add_argument(
        '--slice', metavar='DATE', dest='slice', type=str, default='',
        action='store', help=(
            'Create a time slice around a certain date. This parameter, if '
            'defined will display all events that happened X minutes before '
            'and after the defined date. X is controlled by the parameter '
            '--slice_size but defaults to 5 minutes.'))

    argument_group.add_argument(
        '--slice_size', '--slice-size', dest='slice_size', type=int,
        default=5, action='store', help=(
            'Defines the slice size. In the case of a regular time slice it '
            'defines the number of minutes the slice size should be. In the '
            'case of the --slicer it determines the number of events before '
            'and after a filter match has been made that will be included in '
            'the result set. The default value is 5. See --slice or --slicer '
            'for more details about this option.'))

    argument_group.add_argument(
        '--slicer', dest='slicer', action='store_true', default=False, help=(
            'Create a time slice around every filter match. This parameter, '
            'if defined will save all X events before and after a filter '
            'match has been made. X is defined by the --slice_size '
            'parameter.'))

    # Positional, optional (nargs='?') event filter expression.
    argument_group.add_argument(
        'filter', nargs='?', action='store', metavar='FILTER', default=None,
        type=str, help=(
            'A filter that can be used to filter the dataset before it '
            'is written into storage. More information about the filters '
            'and how to use them can be found here: {0:s}').format(
                cls._DOCUMENTATION_URL))
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
juraj-google-style
def transition_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> Dict[(str, TensorFluent)]:
    """Return the complete transition fluent scope for `state` and `action`.

    Args:
        state (Sequence[tf.Tensor]): the current state fluents.
        action (Sequence[tf.Tensor]): the action fluents.

    Returns:
        A mapping from fluent names to rddl2tf TensorFluent objects.
    """
    # Merge order matches the original update() sequence: later entries
    # (state, then action) win on key collisions.
    return {**self.non_fluents_scope(),
            **self.state_scope(state),
            **self.action_scope(action)}
Returns the complete transition fluent scope for the current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
codesearchnet
def fallback_move(fobj, dest, src, count, BUFFER_SIZE=2 ** 16):
    """Move *count* bytes within *fobj* from offset *src* to offset *dest*.

    Uses buffered read()/write() calls. Overlapping ranges are handled by
    choosing the copy direction: front-to-back when moving data toward the
    start, back-to-front when moving it toward the end.

    Args:
        fobj (fileobj): seekable file object opened for reading and writing.
        dest (int): the destination offset.
        src (int): the source offset.
        count (int): the amount of data to move.
        BUFFER_SIZE (int): chunk size used per read/write.

    Raises:
        IOError: in case an operation on the fileobj fails.
        ValueError: in case invalid parameters were given.
    """
    # Give the error a message (the original raised a bare ValueError).
    if dest < 0 or src < 0 or count < 0:
        raise ValueError("dest, src and count must be non-negative")
    fobj.seek(0, 2)
    filesize = fobj.tell()
    if max(dest, src) + count > filesize:
        raise ValueError("area outside of file")
    if src > dest:
        # Moving toward the start: copy front-to-back so an overlapping
        # destination never clobbers source bytes not yet read.
        moved = 0
        while count - moved:
            this_move = min(BUFFER_SIZE, count - moved)
            fobj.seek(src + moved)
            buf = fobj.read(this_move)
            fobj.seek(dest + moved)
            fobj.write(buf)
            moved += this_move
        fobj.flush()
    else:
        # Moving toward the end (or dest == src): copy back-to-front.
        while count:
            this_move = min(BUFFER_SIZE, count)
            fobj.seek(src + count - this_move)
            buf = fobj.read(this_move)
            fobj.seek(dest + count - this_move)
            fobj.write(buf)
            count -= this_move
        fobj.flush()
Moves data around using read()/write(). Args: fobj (fileobj) dest (int): The destination offset src (int): The source offset count (int): The amount of data to move Raises: IOError: In case an operation on the fileobj fails ValueError: In case invalid parameters were given
juraj-google-style
def _gen_indicator_method(self, name, custom_class, value_count): method_name = name.replace(' ', '_').lower() tcex = self.tcex def method_1(owner, value1, **kwargs): return custom_class(tcex, value1, owner=owner, **kwargs) def method_2(owner, value1, value2, **kwargs): return custom_class(tcex, value1, value2, owner=owner, **kwargs) def method_3(owner, value1, value2, value3, **kwargs): return custom_class(tcex, value1, value2, value3, owner=owner, **kwargs) method = locals()['method_{}'.format(value_count)] setattr(self, method_name, method)
Dynamically generate custom Indicator methods. Args: name (str): The name of the method. custom_class (object): The class to add. value_count (int): The number of value parameters to support.
juraj-google-style
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
    """Extracts relevant install history entries from a plist.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        top_level (dict[str, object]): plist top-level key.
    """
    for entry in top_level:
        datetime_value = entry.get('date', None)
        package_identifiers = entry.get('packageIdentifiers', [])
        # Entries without a date or package list carry no usable event.
        if not datetime_value or not package_identifiers:
            continue
        display_name = entry.get('displayName', '<UNKNOWN>')
        display_version = entry.get('displayVersion', '<DISPLAY_VERSION>')
        process_name = entry.get('processName', '<PROCESS_NAME>')
        package_identifiers = ', '.join(package_identifiers)
        event_data = plist_event.PlistTimeEventData()
        event_data.desc = (
            'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: '
            '{3:s}.').format(
                display_name, display_version, process_name,
                package_identifiers)
        event_data.key = ''
        event_data.root = '/item'
        event = time_events.PythonDatetimeEvent(
            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts relevant install history entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. top_level (dict[str, object]): plist top-level key.
juraj-google-style
def binarize_sets(df, columns, cast=False, drop=True, min_freq=None):
    """Create dummy indicator columns for set-valued columns. Operates in place.

    Args:
        df: pandas DataFrame to modify.
        columns: either a dict mapping column -> {value: display name} or a
            plain collection of column names (values are then taken from the
            union of observed sets via the project helper ``util.union``).
        cast: whether to cast each cell to ``set`` first.
        drop: whether to drop the original set-valued columns afterwards.
        min_freq: if given, only keep dummies with at least this many hits.

    Returns:
        The mutated DataFrame (same object as ``df``).
    """
    for column in columns:
        d = df[column].dropna()
        if cast:
            d = d.apply(set)
        # Dict form supplies an explicit value -> name mapping; otherwise
        # enumerate every distinct value observed across the column.
        values = columns[column] if isinstance(columns, dict) else util.union(d)
        for value in values:
            name = values[value] if type(values) is dict else str(value)
            column_name = column + '_' + name.replace(' ', '_')
            series = d.apply(lambda c: value in c)
            # Rows dropped by dropna() above come back as NaN; treat as 0.
            series.fillna(0, inplace=True)
            if not min_freq or series.sum() >= min_freq:
                df[column_name] = series
    if drop:
        df.drop(list(columns), axis=1, inplace=True)
    return df
Create dummies for the elements of a set-valued column. Operates in place. Args: df: data frame columns: either a dictionary of column: values pairs or a collection of columns. cast: whether or not to cast values to set drop: whether or not to drop the binarized columns TODO: make interface same as binarize(). merge the two?
juraj-google-style
def _get_compile_args(self, user_metrics=True):
    """Return the arguments used to compile the model (for saving/cloning).

    Args:
        user_metrics: whether to return the user-supplied metrics or the
            built `Metric` objects. Defaults to the user-supplied metrics.

    Returns:
        Dictionary of arguments that were used when compiling the model.
    """
    self._assert_compile_was_called()
    saved_metrics = self.compiled_metrics._user_metrics
    saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics
    if not user_metrics:
        # Swap in the built Metric objects, but only where the user
        # actually supplied metrics in the first place.
        if saved_metrics is not None:
            saved_metrics = self.compiled_metrics._metrics
        if saved_weighted_metrics is not None:
            saved_weighted_metrics = self.compiled_metrics._weighted_metrics
    compile_args = {'optimizer': self.optimizer, 'loss': self.compiled_loss._user_losses, 'metrics': saved_metrics, 'weighted_metrics': saved_weighted_metrics, 'loss_weights': self.compiled_loss._user_loss_weights}
    return compile_args
Used for saving or cloning a Model. Args: user_metrics: Whether to return user-supplied metrics or `Metric` objects. Defaults to returning the user-supplied metrics. Returns: Dictionary of arguments that were used when compiling the model.
github-repos
def register_command(self, name: str, f: Callable):
    """Register an existing callable as the callback for *name*.

    Equivalent to the ``@command`` decorator, but usable for callables
    defined elsewhere (another module, before the bot existed, etc.).

    Args:
        name: the command that triggers the callback.
        f: callable invoked when the command is processed.
    """
    entry = (name, f)
    self._commands.append(entry)
Registers an existing callable object as a command callback This method can be used instead of the ``@command`` decorator. Both do the same thing, but this method is useful for registering callbacks for methods defined before or outside the scope of your bot object, allowing you to define methods in another file or wherever, import them, and register them. See the documentation for the ``@command`` decorator for more information on what your method will receive. Example: def process_hello(data): # do stuff # later, somewhere else, etc. pycord.register_command('hello', process_hello) Args: name: the command to trigger the callback (see ``@command`` documentation) f: callable that will be triggered on command processing
codesearchnet
def variant_case(store, case_obj, variant_obj):
    """Pre-process a case for the variant view.

    Mutates *case_obj* in place: collects existing BAM/BAI alignment files
    (nuclear and mitochondrial) per individual and, when possible, builds a
    region VCF covering the variant's gene(s).

    Args:
        store (scout.adapter.MongoAdapter): database adapter.
        case_obj (scout.models.Case): case to decorate.
        variant_obj (scout.models.Variant): variant whose genes define the
            region of interest.
    """
    case_obj['bam_files'] = []
    case_obj['mt_bams'] = []
    case_obj['bai_files'] = []
    case_obj['mt_bais'] = []
    case_obj['sample_names'] = []
    for individual in case_obj['individuals']:
        bam_path = individual.get('bam_file')
        mt_bam = individual.get('mt_bam')
        case_obj['sample_names'].append(individual.get('display_name'))
        # Only reference alignment files that actually exist on disk.
        if (bam_path and os.path.exists(bam_path)):
            case_obj['bam_files'].append(individual['bam_file'])
            case_obj['bai_files'].append(find_bai_file(individual['bam_file']))
        if (mt_bam and os.path.exists(mt_bam)):
            case_obj['mt_bams'].append(individual['mt_bam'])
            case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))
        else:
            LOG.debug('%s: no bam file found', individual['individual_id'])
    try:
        genes = variant_obj.get('genes', [])
        if (len(genes) == 1):
            # Single gene: slice the VCF on that gene directly.
            hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])
            if hgnc_gene_obj:
                vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)
                case_obj['region_vcf_file'] = vcf_path
            else:
                case_obj['region_vcf_file'] = None
        elif (len(genes) > 1):
            # Multiple genes: span from the first start to the last end on
            # the chromosome of the first gene.
            chrom = variant_obj['genes'][0]['common']['chromosome']
            start = min((gene['common']['start'] for gene in variant_obj['genes']))
            end = max((gene['common']['end'] for gene in variant_obj['genes']))
            vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)
            case_obj['region_vcf_file'] = vcf_path
    except (SyntaxError, Exception):
        # Best effort: the alignment view works without a region VCF.
        LOG.warning('skip VCF region for alignment view')
Pre-process case for the variant view. Adds information about files from case obj to variant Args: store(scout.adapter.MongoAdapter) case_obj(scout.models.Case) variant_obj(scout.models.Variant)
codesearchnet
def get_symmetrically_distinct_miller_indices(structure, max_index):
    """Return all symmetrically distinct Miller indices up to *max_index*.

    Analysis is based on the symmetry of the reciprocal lattice of the
    structure.

    Args:
        structure (Structure): input structure.
        max_index (int): maximum index; e.g. 1 yields (100), (110), (111)
            for a cubic structure, every other index being equivalent to
            one of these.
    """
    r = list(range(-max_index, max_index + 1))
    r.reverse()
    # All candidate hkl triples except (0, 0, 0).
    conv_hkl_list = [miller for miller in itertools.product(r, r, r) if any([i != 0 for i in miller])]
    sg = SpacegroupAnalyzer(structure)
    # Trigonal systems: work in the primitive setting so the reciprocal
    # symmetry operations apply, but report conventional-cell indices.
    if sg.get_crystal_system() == "trigonal":
        transf = sg.get_conventional_to_primitive_transformation_matrix()
        miller_list = [hkl_transformation(transf, hkl) for hkl in conv_hkl_list]
        prim_structure = SpacegroupAnalyzer(structure).get_primitive_standard_structure()
        symm_ops = get_recp_symmetry_operation(prim_structure)
    else:
        miller_list = conv_hkl_list
        symm_ops = get_recp_symmetry_operation(structure)
    unique_millers, unique_millers_conv = [], []

    def is_already_analyzed(miller_index):
        # True when some symmetry image of this index was already kept.
        for op in symm_ops:
            if in_coord_list(unique_millers, op.operate(miller_index)):
                return True
        return False
    for i, miller in enumerate(miller_list):
        # Reduce to the lowest-integer form, e.g. (2, 2, 0) -> (1, 1, 0).
        d = abs(reduce(gcd, miller))
        miller = tuple([int(i / d) for i in miller])
        if not is_already_analyzed(miller):
            if sg.get_crystal_system() == "trigonal":
                unique_millers.append(miller)
                d = abs(reduce(gcd, conv_hkl_list[i]))
                cmiller = tuple([int(i / d) for i in conv_hkl_list[i]])
                unique_millers_conv.append(cmiller)
            else:
                unique_millers.append(miller)
                unique_millers_conv.append(miller)
    return unique_millers_conv
Returns all symmetrically distinct indices below a certain max-index for a given structure. Analysis is based on the symmetry of the reciprocal lattice of the structure. Args: structure (Structure): input structure. max_index (int): The maximum index. For example, a max_index of 1 means that (100), (110), and (111) are returned for the cubic structure. All other indices are equivalent to one of these.
juraj-google-style
def get_bool(self, name, default=None):
    """Retrieve an environment variable value as ``bool``.

    The value is read via ``self.get_int`` and converted with ``bool``, so
    zero evaluates to False and non-zero to True.
    # NOTE(review): the documented handling of 'true'/'false' strings is not
    # visible here -- presumably implemented inside get_int; confirm.

    Args:
        name (str): the case-insensitive, unprefixed variable name.
        default: if provided, returned instead of raising when the
            variable does not exist.

    Returns:
        bool: the environment variable's value as a ``bool``.

    Raises:
        EnvironmentError: if the variable does not exist and ``default``
            was not provided.
    """
    if (name not in self):
        if (default is not None):
            return default
        raise EnvironmentError.not_found(self._prefix, name)
    return bool(self.get_int(name))
Retrieves an environment variable value as ``bool``. Integer values are converted as expected: zero evaluates to ``False``, and non-zero to ``True``. String values of ``'true'`` and ``'false'`` are evaluated case insensitive. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: bool: The environment variable's value as a ``bool``. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. ValueError: If the environment variable value could not be interpreted as a ``bool``.
codesearchnet
def build_batch(cls, size, **kwargs):
    """Build a batch of instances of the given class, with overridden attrs.

    Args:
        size (int): the number of instances to build.
        **kwargs: attribute overrides forwarded to each ``build`` call.

    Returns:
        list: the built instances.
    """
    batch = []
    for _ in range(size):
        batch.append(cls.build(**kwargs))
    return batch
Build a batch of instances of the given class, with overridden attrs. Args: size (int): the number of instances to build Returns: object list: the built instances
codesearchnet
def _scan(
    self,
    fs,
    dir_path,
    namespaces=None,
):
    """Yield `Info` objects for the resources in a directory.

    Arguments:
        fs (FS): a filesystem instance.
        dir_path (str): path to a directory on the filesystem.
        namespaces (list): additional namespaces to include in the
            `Info` objects.

    Returns:
        ~collections.Iterator: iterator of `Info` objects for resources
        within the given path.
    """
    try:
        for info in fs.scandir(dir_path, namespaces=namespaces):
            yield info
    except FSError as error:
        # The on_error hook decides whether to swallow the failure;
        # six.reraise preserves the original traceback when it declines.
        if not self.on_error(dir_path, error):
            six.reraise(type(error), error)
Get an iterator of `Info` objects for a directory path. Arguments: fs (FS): A filesystem instance. dir_path (str): A path to a directory on the filesystem. namespaces (list): A list of additional namespaces to include in the `Info` objects. Returns: ~collections.Iterator: iterator of `Info` objects for resources within the given path.
juraj-google-style
def make_parser():
    """Make the parser for xref command line args.

    Returns:
        A Parser object.
    """

    def add_kythe_field(parser, field):
        # Each kythe_* flag is one component of kythe's file-level vname proto.
        parser.add_argument('--' + field, dest=field, type=str, action='store', default='', help="Part of kythe's file-level vname proto.")
    parser = argparse.ArgumentParser(usage='%(prog)s [options] input')
    add_kythe_field(parser, 'kythe_corpus')
    add_kythe_field(parser, 'kythe_root')
    add_kythe_field(parser, 'kythe_path')
    parser.add_argument('--show-types', action='store_true', dest='show_types', default=None, help='Display inferred types.')
    parser.add_argument('--show-kythe', action='store_true', dest='show_kythe', default=None, help='Display kythe facts.')
    parser.add_argument('--show-spans', action='store_true', dest='show_spans', default=None, help='Display kythe spans.')
    parser.add_argument('--skip-stdlib', action='store_true', dest='skip_stdlib', default=None, help='Display inferred types.')
    # Wrap the parser so only a whitelisted subset of pytype's own
    # infrastructure/debug options is exposed alongside the basic ones.
    wrapper = datatypes.ParserWrapper(parser)
    pytype_config.add_basic_options(wrapper)
    with wrapper.add_only(['--imports_info', '--debug']):
        pytype_config.add_infrastructure_options(wrapper)
        pytype_config.add_debug_options(wrapper)
    wrapper.add_argument('input', metavar='input', nargs=1, help='A .py file to index')
    return XrefParser(parser, pytype_single_args=wrapper.actions)
Make parser for command line args. Returns: A Parser object.
github-repos
def kill(self, signal=None):
    """Kill or send a signal to the container.

    Args:
        signal (str or int): the signal to send; defaults to ``SIGKILL``.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # Delegate to the low-level API client, addressing this container by id.
    api_client = self.client.api
    return api_client.kill(self.id, signal=signal)
Kill or send a signal to the container. Args: signal (str or int): The signal to send. Defaults to ``SIGKILL`` Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def get_assigned_value(self, name):
    """Return the value explicitly assigned to field *name*.

    Unlike plain attribute access, this does not substitute the field's
    default when nothing has been set.

    Args:
        name: name of the field to read.

    Returns:
        The assigned value, or None when the field has not been set.

    Raises:
        AttributeError: if this message type has no field called *name*.
    """
    message_type = type(self)
    try:
        field = message_type.field_by_name(name)
    except KeyError:
        raise AttributeError('Message %s has no field %s' % (message_type.__name__, name))
    # Only explicitly assigned values live in the tag map; defaults do not.
    return self.__tags.get(field.number)
Get the assigned value of an attribute. Get the underlying value of an attribute. If value has not been set, will not return the default for the field. Args: name: Name of attribute to get. Returns: Value of attribute, None if it has not been set.
codesearchnet
def make_config_get(conf_path):
    """Return a config-getter bound to the project that owns *conf_path*.

    Args:
        conf_path (path-like): path to the project's conf file
            (i.e. foo.conf module).

    Returns:
        callable: ``config_get`` partially applied to the loaded config.
    """
    project_root = _get_project_root_from_conf_path(conf_path)
    project_config = load_config_in_dir(project_root)
    return partial(config_get, project_config)
Return a function to get configuration options for a specific project Args: conf_path (path-like): path to project's conf file (i.e. foo.conf module)
codesearchnet
async def add(self, useriden, query: str, reqs, incunit=None, incvals=None):
    """Persistently add an appointment.

    Args:
        useriden: iden of the requesting user.
        query (str): storm query to run.
        reqs: one or more dicts of the fixed aspects of the appointment;
            values may be single or multiple. May be an empty dict or None.
        incunit: the TimeUnit that changes for recurring appointments, or
            None for non-recurring ones.
        incvals: count of units of incunit, or explicit day of week/month;
            required when incunit is given, disallowed otherwise.

    Returns:
        The iden of the new appointment.
    """
    iden = s_common.guid()
    recur = (incunit is not None)
    # Monotonic index preserves creation order across appointments.
    indx = self._next_indx
    self._next_indx += 1
    if (reqs is None):
        reqs = {}
    if (not query):
        raise ValueError('empty query')
    if ((not reqs) and (incunit is None)):
        raise ValueError('at least one of reqs and incunit must be non-empty')
    if ((incunit is not None) and (incvals is None)):
        raise ValueError('incvals must be non-None if incunit is non-None')
    if isinstance(reqs, Mapping):
        reqs = [reqs]
    recs = []
    for req in reqs:
        # Expand multi-valued requirement dicts into every combination, and
        # pair each with every increment value (full cartesian product).
        reqdicts = self._dictproduct(req)
        if (not isinstance(incvals, Iterable)):
            incvals = (incvals,)
        recs.extend((ApptRec(rd, incunit, v) for (rd, v) in itertools.product(reqdicts, incvals)))
    appt = _Appt(iden, recur, indx, query, useriden, recs)
    self._addappt(iden, appt)
    (await self._storeAppt(appt))
    return iden
Persistently adds an appointment Args: query (str): storm query to run reqs (Union[None, Dict[TimeUnit, Union[int, Tuple[int]], List[...]): one or more dicts of the fixed aspects of the appointment. dict value may be a single or multiple. May be an empty dict or None. incunit (Union[None, TimeUnit]): the unit that changes for recurring, or None for non-recurring. It is an error for this value to match a key in reqs. incvals (Union[None, int, Iterable[int]]): count of units of incunit or explicit day of week or day of month. Not allowed for incunit == None, required for others (1 would be a typical value) Notes: For values in reqs that are lists and incvals if a list, all combinations of all values (the product) are used Returns: iden of new appointment
codesearchnet
def xor_bytes(a, b):
    """Byte-wise XOR of two equal-length bytes objects.

    Args:
        a (bytes): object 1.
        b (bytes): object 2.

    Returns:
        bytes: the XOR result.

    Raises:
        TypeError: if either operand is not bytes.
        ValueError: if the operands differ in length.
    """
    # Explicit raises instead of asserts: asserts vanish under ``python -O``.
    if not isinstance(a, bytes) or not isinstance(b, bytes):
        raise TypeError('xor_bytes expects bytes operands')
    if len(a) != len(b):
        raise ValueError('operands must have equal length')
    return bytes(x ^ y for x, y in zip(a, b))
XOR on two bytes objects Args: a (bytes): object 1 b (bytes): object 2 Returns: bytes: The XOR result
codesearchnet
def get_asset_path(self, filename):
    """Get the full system path of a given asset if it exists.

    Args:
        filename (str): name of a file in the assets folder.

    Returns:
        str: path to the target file.

    Raises:
        AssetNotFoundError: if the asset does not exist in the asset folder.
    """
    candidate = os.path.join(self._asset_path, filename)
    # Guard clause: fail fast when the asset is missing.
    if not os.path.exists(candidate):
        raise AssetNotFoundError(u('Cannot find asset: {0}').format(filename))
    return candidate
Get the full system path of a given asset if it exists. Otherwise it throws an error. Args: filename (str) - File name of a file in /assets folder to fetch the path for. Returns: str - path to the target file. Raises: AssetNotFoundError - if asset does not exist in the asset folder. Usage:: path = WTF_ASSET_MANAGER.get_asset_path("my_asset.png") # path = /your/workspace/location/WTFProjectName/assets/my_asset.png
codesearchnet
def distribute_tensor(tensor, layout):
    """Distribute the tensor based on the layout.

    Note that this function can be used both in eager context, or within a
    jitted function.

    Args:
        tensor: `jax.Array` that needs to be distributed.
        layout: `TensorLayout` for the created variable, or a JAX-supported
            layout instance (e.g. `jax.experimental.layout.Layout`,
            `jax.sharding.Sharding`).

    Returns:
        Distributed value.
    """
    from keras.src.distribution import TensorLayout
    if isinstance(layout, TensorLayout):
        layout = layout.backend_layout
    # Inside jit, placement must be expressed as a sharding constraint
    # rather than an eager device_put.
    if jax_utils.is_in_jax_tracing_scope():
        return jax.lax.with_sharding_constraint(tensor, layout)
    if isinstance(tensor, jax.Array):
        # Skip the transfer when the tensor already satisfies the layout.
        if isinstance(layout, jax.sharding.Sharding) and tensor.sharding.is_equivalent_to(layout, ndim=len(tensor.shape)):
            return tensor
        elif isinstance(layout, jax_layout.Layout):
            current_layout = getattr(tensor, 'layout', None)
            if current_layout == layout:
                return tensor
    return jax.device_put(tensor, layout)
Distribute the tensor based on the layout. Note that this function can be used both in eager context, or within a jitted function. Args: tensor: `jax.Array` that need to be distributed. layout: `TensorLayout` for the created variable, or a JAX-supported layout instance (e.g. `jax.experimental.layout.Layout`, `jax.sharding.Sharding`). Returns: Distributed value.
github-repos
def setTime(self, yy, mm, dd, hh, minutes, ss, password="00000000"):
    """Serial set time with day of week calculation.

    Args:
        yy (int): Last two digits of year.
        mm (int): Month 1-12.
        dd (int): Day 1-31.
        hh (int): Hour 0 to 23.
        minutes (int): Minutes 0 to 59.
        ss (int): Seconds 0 to 59.
        password (str): Optional meter password (must be 8 characters).

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setTime")
    try:
        # Field-range validation: each failure reports, clears context, bails.
        if mm < 1 or mm > 12:
            self.writeCmdMsg("Month must be between 1 and 12")
            self.setContext("")
            return result
        if dd < 1 or dd > 31:
            self.writeCmdMsg("Day must be between 1 and 31")
            self.setContext("")
            return result
        if hh < 0 or hh > 23:
            self.writeCmdMsg("Hour must be between 0 and 23, inclusive")
            self.setContext("")
            return result
        if minutes < 0 or minutes > 59:
            self.writeCmdMsg("Minutes must be between 0 and 59, inclusive")
            self.setContext("")
            return result
        if ss < 0 or ss > 59:
            self.writeCmdMsg("Seconds must be between 0 and 59, inclusive")
            self.setContext("")
            return result
        if len(password) != 8:
            self.writeCmdMsg("Invalid password length.")
            self.setContext("")
            return result
        if not self.request(False):
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                dt_buf = datetime.datetime(int(yy), int(mm), int(dd), int(hh), int(minutes), int(ss))
                ekm_log("Writing Date and Time " + dt_buf.strftime("%Y-%m-%d %H:%M"))
                # Meter expects ISO weekday (Mon=1..Sun=7) in the frame.
                dayofweek = dt_buf.date().isoweekday()
                ekm_log("Calculated weekday " + str(dayofweek))
                # Fixed command header for the set-time request — presumably
                # an EKM protocol opcode sequence; TODO confirm against spec.
                req_str = "015731023030363028"
                req_str += binascii.hexlify(str(yy)[-2:])
                req_str += binascii.hexlify(str(mm).zfill(2))
                req_str += binascii.hexlify(str(dd).zfill(2))
                req_str += binascii.hexlify(str(dayofweek).zfill(2))
                req_str += binascii.hexlify(str(hh).zfill(2))
                req_str += binascii.hexlify(str(minutes).zfill(2))
                req_str += binascii.hexlify(str(ss).zfill(2))
                # Frame trailer before the CRC16 over the payload (sans the
                # leading byte). NOTE: .decode("hex") is Python 2 only.
                req_str += "2903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                # "06" is the ASCII ACK byte from the meter.
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success(setTime): 06 returned.")
                    result = True
        self.serialPostEnd()
    except:
        # Bare except preserved from original: logs and falls through to
        # return False rather than propagating serial errors.
        ekm_log(traceback.format_exc(sys.exc_info()))
    self.setContext("")
    return result
Serial set time with day of week calculation. Args: yy (int): Last two digits of year. mm (int): Month 1-12. dd (int): Day 1-31 hh (int): Hour 0 to 23. minutes (int): Minutes 0 to 59. ss (int): Seconds 0 to 59. password (str): Optional password. Returns: bool: True on completion and ACK.
juraj-google-style
def dec(self, byts):
    """Unpack an envelope dict and decrypt its payload with AES-GCM.

    Args:
        byts (bytes): msgpack-encoded envelope with iv/asscd/data fields.

    Returns:
        bytes: The decrypted message, or None if decryption fails.
    """
    envelope = s_msgpack.un(byts)
    nonce = envelope.get('iv', b'')
    assoc = envelope.get('asscd', b'')
    ciphertext = envelope.get('data', b'')
    cipher = AESGCM(self.ekey)
    try:
        return cipher.decrypt(nonce, ciphertext, assoc)
    except Exception:
        # Decryption/authentication failure is reported as None, not raised.
        logger.exception('Error decrypting data')
        return None
Decode an envelope dict and decrypt the given bytes. Args: byts (bytes): Bytes to decrypt. Returns: bytes: Decrypted message.
codesearchnet
def Process(self, parser_mediator, **kwargs):
    """Evaluate whether this plugin should run for the data at hand.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        kwargs (dict[str, object]): plugin-specific evaluation arguments.

    Raises:
        ValueError: when there are unused keyword arguments.
    """
    # Guard clause: any leftover keyword argument is a caller error.
    if not kwargs:
        return
    raise ValueError('Unused keyword arguments: {0:s}.'.format(
        ', '.join(kwargs.keys())))
Evaluates if this is the correct plugin and processes data accordingly. The purpose of the process function is to evaluate if this particular plugin is the correct one for the particular data structure at hand. This function accepts one value to use for evaluation, that could be a registry key, list of table names for a database or any other criteria that can be used to evaluate if the plugin should be run or not. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. kwargs (dict[str, object]): Depending on the plugin they may require different sets of arguments to be able to evaluate whether or not this is the correct plugin. Raises: ValueError: when there are unused keyword arguments.
codesearchnet
def build_masked_loss(loss_function, mask_value):
    """Build a loss function that zeroes out masked target positions.

    Args:
        loss_function: The base loss function to wrap.
        mask_value: The target value to treat as masked.

    Returns:
        function: A loss acting like ``loss_function`` on masked inputs.
    """
    def masked_loss_function(y_true, y_pred):
        # 1.0 where the target is real, 0.0 where it equals the mask value.
        keep = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        return loss_function(y_true * keep, y_pred * keep)
    return masked_loss_function
Builds a loss function that masks based on targets Args: loss_function: The loss function to mask mask_value: The value to mask in the targets Returns: function: a loss function that acts like loss_function with masked inputs
codesearchnet
def div(numerator, denominator):
    """Return numerator / denominator with lenient zero handling.

    Instead of a ZeroDivisionError:
        0 / 0 == 0.0
        x / 0 == float('inf')
    This is not mathematically correct, but often practically OK.

    Args:
        numerator (float or int)
        denominator (float or int)

    Returns:
        float
    """
    try:
        return numerator / denominator
    except ZeroDivisionError:
        # Inside this handler the denominator is necessarily zero, so the
        # original trailing `else: return numerator / denominator` branch
        # was unreachable dead code (it would only re-raise); removed.
        if numerator == 0:
            return 0.
        return float('inf')
Returns numerator / denominator, but instead of a ZeroDivisionError: 0 / 0 = 0. x / 0 = float('inf') This is not mathematically correct, but often practically OK. Args: numerator (float or int) denominator (float or int) Returns: (float) Raises: -
juraj-google-style
def renumerate_stages(pipeline):
    """Renumber Pipeline Stage reference IDs to account for dependencies.

    Stage order is defined in the templates; the template ``refId`` value
    ('master', 'branch' or 'merge') dictates how a stage is wired:
        master - mainline required stage; subsequent stages depend on it.
        branch - runs in parallel to the current master stage.
        merge  - parallel, but other stages still depend on it (no-op here).

    Args:
        pipeline (dict): Completed Pipeline ready for renumeration.

    Returns:
        dict: Pipeline ready to be sent to Spinnaker.
    """
    stages = pipeline['stages']
    main_index = 0
    branch_index = 0
    previous_refid = ''
    for stage in stages:
        current_refid = stage['refId'].lower()
        if (current_refid == 'master'):
            # First master has no prerequisites; later ones depend on the
            # previous master stage.
            if (main_index == 0):
                stage['requisiteStageRefIds'] = []
            else:
                stage['requisiteStageRefIds'] = [str(main_index)]
            main_index += 1
            stage['refId'] = str(main_index)
        elif (current_refid == 'branch'):
            # Consecutive branch stages get increasing sub-indices; a new
            # branch run resets to 0.
            if (previous_refid == 'branch'):
                branch_index += 1
            else:
                branch_index = 0
            # Branch refIds live in the current master's 100-block,
            # e.g. master 2 -> branches 200, 201, ...
            stage['refId'] = str(((main_index * 100) + branch_index))
            stage['requisiteStageRefIds'] = [str(main_index)]
        elif (current_refid == 'merge'):
            # 'merge' stages keep whatever wiring they already have.
            pass
        previous_refid = current_refid
        LOG.debug('step=%(name)s\trefId=%(refId)s\trequisiteStageRefIds=%(requisiteStageRefIds)s', stage)
    return pipeline
Renumber Pipeline Stage reference IDs to account for dependencies. Stage order is defined in the templates. The ``refId`` field dictates if a stage should be mainline or parallel to other stages. * ``master`` - A mainline required stage. Other stages depend on it * ``branch`` - A stage that should be run in parallel to master stages. * ``merge`` - A stage that is parallel but other stages still depend on it. Args: pipeline (dict): Completed Pipeline ready for renumeration. Returns: dict: Pipeline ready to be sent to Spinnaker.
codesearchnet
def create(cls, five9, data, refresh=False):
    """Create a record on Five9.

    Args:
        five9 (five9.Five9): The authenticated Five9 remote.
        data (dict): A data dictionary that can be fed to ``deserialize``.
        refresh (bool, optional): Set to ``True`` to re-fetch the record
            from Five9 before returning it.

    Returns:
        BaseModel: The newly created record (fetched from Five9 when
        ``refresh`` is ``True``, otherwise the data sent to the server).
    """
    create_method = five9.configuration.createDisposition
    return cls._call_and_serialize(create_method, data, refresh)
Create a record on Five9. Args: five9 (five9.Five9): The authenticated Five9 remote. data (dict): A data dictionary that can be fed to ``deserialize``. refresh (bool, optional): Set to ``True`` to get the record data from Five9 before returning the record. Returns: BaseModel: The newly created record. If ``refresh`` is ``True``, this will be fetched from Five9. Otherwise, it's the data record that was sent to the server.
codesearchnet
def get_jwt_claims(self, auth_token):
    """Decode ``auth_token`` into JWT claims, with caching.

    Looks up the cache first; on a miss the token is decoded, its signature
    verified against the issuer's JWKS keys, and required claims checked.
    The verified claims are then stored in the cache.

    Args:
        auth_token: the auth token to be decoded.

    Returns:
        The decoded JWT claims (JSON object).

    Raises:
        UnauthenticatedException: when signature verification fails or
            required claims are missing.
    """
    def _decode_and_verify():
        # Unverified unpack first — we need the 'iss' claim to know which
        # issuer's keys to fetch before we can verify the signature.
        jwt_claims = jwt.JWT().unpack(auth_token).payload()
        _verify_required_claims_exist(jwt_claims)
        issuer = jwt_claims[u'iss']
        keys = self._jwks_supplier.supply(issuer)
        try:
            return jws.JWS().verify_compact(auth_token, keys)
        except (jwkest.BadSignature, jws.NoSuitableSigningKeys, jws.SignerAlgError) as exception:
            raise suppliers.UnauthenticatedException(u'Signature verification failed', exception)
    # Only runs _decode_and_verify on a cache miss.
    return self._cache.get_or_create(auth_token, _decode_and_verify)
Decodes the auth_token into JWT claims represented as a JSON object. This method first tries to look up the cache and returns the result immediately in case of a cache hit. When cache misses, the method tries to decode the given auth token, verify its signature, and check the existence of required JWT claims. When successful, the decoded JWT claims are loaded into the cache and then returned. Args: auth_token: the auth token to be decoded. Returns: The decoded JWT claims. Raises: UnauthenticatedException: When the signature verification fails, or when required claims are missing.
codesearchnet
def send(self, **req_kwargs):
    """Send an authenticated request, retrying on an expired access token.

    Args:
        **req_kwargs: Arbitrary keyword arguments to pass to Requests.

    Returns:
        dict: The parsed JSON response.

    Raises:
        APIException: If the server returns an error.
    """
    attempt = 0
    while True:
        response = self._send(**req_kwargs).json()
        if 'error' not in response:
            return response
        error = response['error']
        # Anything other than 401, or exhausting the retry budget, is fatal.
        if error['code'] != 401 or attempt >= self.RETRY_CNT:
            raise exception.APIException(error['code'], error)
        logger.info('Refreshing access token')
        self._auth.refresh()
        attempt += 1
Send an authenticated request to a Google API. Automatically retries if the access token has expired. Args: **req_kwargs: Arbitrary keyword arguments to pass to Requests. Return: dict: The parsed JSON response. Raises: APIException: If the server returns an error. LoginException: If :py:meth:`login` has not been called.
juraj-google-style
def netflix(es, ps, e0, l=0.0001):
    """Combine predictions with ridge-optimal weights to minimize RMSE.

    Args:
        es (list of float): RMSEs of the individual predictions.
        ps (list of np.array): the predictions themselves.
        e0 (float): RMSE of the all-zero prediction.
        l (float): ridge-regression lambda.

    Returns:
        tuple: (ensemble prediction (np.array), weights (np.array)).
    """
    num_models = len(es)
    num_rows = len(ps[0])
    preds = np.stack(ps).T
    rmses = np.array(es)
    # X^T y recovered from the reported RMSEs, without access to the labels.
    xty = 0.5 * (num_rows * e0 ** 2 + (preds ** 2).sum(axis=0) - num_rows * rmses ** 2)
    # Ridge-regularized normal equations, solved via pseudo-inverse.
    gram = preds.T.dot(preds) + l * num_rows * np.eye(num_models)
    w = np.linalg.pinv(gram).dot(xty)
    return (preds.dot(w), w)
Combine predictions with the optimal weights to minimize RMSE. Args: es (list of float): RMSEs of predictions ps (list of np.array): predictions e0 (float): RMSE of all zero prediction l (float): lambda as in the ridge regression Returns: Ensemble prediction (np.array) and weights (np.array) for input predictions
codesearchnet
def install_table(self, connection, table, logger=None):
    """Install all partitions of the table and create a view over their union.

    Args:
        connection: connection to the database which stores mpr data.
        table (orm.Table): table whose partitions should be installed.
        logger: optional logger for debug output.
    """
    queries = []
    query_tmpl = 'SELECT * FROM {}'
    for partition in table.partitions:
        partition.localize()
        installed_name = self.install(connection, partition)
        queries.append(query_tmpl.format(installed_name))
    query = 'CREATE VIEW {} AS {} '.format(
        table.vid, '\nUNION ALL\n'.join(queries))
    # Bug fix: logger defaults to None but was dereferenced unconditionally,
    # raising AttributeError for callers that omit it.
    if logger is not None:
        logger.debug('Creating view for table.\n table: {}\n query: {}'.format(table.vid, query))
    self._execute(connection, query, fetch=False)
Installs all partitions of the table and creates a view with the union of all partitions. Args: connection: connection to the database which stores mpr data. table (orm.Table):
juraj-google-style
def ast_to_html(self, ast, link_resolver):
    """Render a cmark AST capsule (as returned by `to_ast`) to HTML.

    Args:
        ast: PyCapsule, a capsule as returned by `to_ast`.
        link_resolver: hotdoc.core.links.LinkResolver instance used to
            resolve links during rendering.

    Returns:
        The rendered HTML string.
    """
    # cmark returns (html, diagnostics); only the HTML is needed here.
    html, _diagnostics = cmark.ast_to_html(ast, link_resolver)
    return html
See the documentation of `to_ast` for more information. Args: ast: PyCapsule, a capsule as returned by `to_ast` link_resolver: hotdoc.core.links.LinkResolver, a link resolver instance.
juraj-google-style
def getall(self, key, default=None):
    """Return the list of all values for the specified key.

    Arguments:
        key (object): Key.
        default (list): Value to return when the key does not exist;
            defaults to a fresh empty list.

    Returns:
        list: List of all values for the key if it exists, else ``default``
        (or a new empty list when no default was given).
    """
    if key in self.data:
        return self.data[key]
    # Bug fix: the original default was a mutable `[]` shared across all
    # calls — a caller mutating the returned list would corrupt the default
    # for every future call. Return a fresh list instead.
    return [] if default is None else default
Return the list of all values for the specified key. Arguments: key (object): Key default (list): Default value to return if the key does not exist, defaults to ``[]``, i.e. an empty list. Returns: list: List of all values for the specified key if the key exists, ``default`` otherwise.
codesearchnet
def __init__(self, replay_dir, data_dir, tmp_dir, cwd=None, env=None):
    """Initialize the runconfig with the various directories needed.

    Args:
        replay_dir: Where to find replays. Might not be accessible to SC2.
        data_dir: Where SC2 should find the data and battle.net cache.
        tmp_dir: The temporary directory. None is system default.
        cwd: Where to set the current working directory.
        env: What to pass as the environment variables.
    """
    self.replay_dir = replay_dir
    self.data_dir = data_dir
    self.tmp_dir = tmp_dir
    self.cwd = cwd
    self.env = env
Initialize the runconfig with the various directories needed. Args: replay_dir: Where to find replays. Might not be accessible to SC2. data_dir: Where SC2 should find the data and battle.net cache. tmp_dir: The temporary directory. None is system default. cwd: Where to set the current working directory. env: What to pass as the environment variables.
juraj-google-style
def AddTripDecoration(self, triplist, color="#fff"):
    """Append a decoration that highlights the given trips.

    NOTE(review): the default for `color` was garbled in the source (the
    text was truncated at the '#'); "#fff" is taken from the docstring
    example — confirm against the original file.

    Args:
        triplist: [Trip, Trip, ...] list of trips to highlight
            (class Trip is defined in transitfeed.py).
        color: Optional HTML color code string, e.g. "#fff".
    """
    tmpstr = self._DrawTrips(triplist, color)
    self._decorators.append(tmpstr)
Appends a decoration that highlights the given trips. Args: # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] # An optional string with a html color code color: "#fff"
codesearchnet
def _build_query_components(query: str, found: Dict[str, beam.PCollection], output_name: str, run: bool=True) -> Tuple[str, Union[Dict[str, beam.PCollection], beam.PCollection, beam.Pipeline], SqlChain]:
    """Build the components needed to apply the SqlTransform.

    Args:
        query: The SQL query to be executed by the magic.
        found: PCollections (by variable name) referenced by the query.
        output_name: The output variable name in the __main__ module.
        run: Whether to prepare components for a local run or not.

    Returns:
        The processed query; the source to apply the SqlTransform to (a dict
        of tagged PCollections, a single PCollection, or a fresh pipeline);
        and the SqlChain this magic belongs to.
    """
    if found:
        user_pipeline = ie.current_env().user_pipeline(next(iter(found.values())).pipeline)
        # Derive a fresh pipeline carrying the user pipeline's options.
        sql_pipeline = beam.Pipeline(options=user_pipeline._options)
        ie.current_env().add_derived_pipeline(user_pipeline, sql_pipeline)
        sql_source = {}
        if run:
            if has_source_to_cache(user_pipeline):
                # Streaming case: rebuild sources from the streaming cache.
                sql_source = pcolls_from_streaming_cache(user_pipeline, sql_pipeline, found)
            else:
                # Batch case: re-materialize each found PCollection from the
                # interactive cache into the derived pipeline.
                cache_manager = ie.current_env().get_cache_manager(user_pipeline, create_if_absent=True)
                for pcoll_name, pcoll in found.items():
                    cache_key = CacheKey.from_pcoll(pcoll_name, pcoll).to_str()
                    sql_source[pcoll_name] = unreify_from_cache(pipeline=sql_pipeline, cache_key=cache_key, cache_manager=cache_manager, element_type=pcoll.element_type)
        else:
            sql_source = found
        if len(sql_source) == 1:
            # With a single source, the query may reference it by the special
            # single-PCollection token; rewrite and unwrap the dict.
            query = replace_single_pcoll_token(query, next(iter(sql_source.keys())))
            sql_source = next(iter(sql_source.values()))
        node = SqlNode(output_name=output_name, source=set(found.keys()), query=query)
        chain = ie.current_env().get_sql_chain(user_pipeline, set_user_pipeline=True).append(node)
    else:
        # No referenced PCollections: the query runs on a brand-new pipeline.
        sql_source = beam.Pipeline()
        ie.current_env().add_user_pipeline(sql_source)
        node = SqlNode(output_name=output_name, source=sql_source, query=query)
        chain = ie.current_env().get_sql_chain(sql_source).append(node)
    return (query, sql_source, chain)
Builds necessary components needed to apply the SqlTransform. Args: query: The SQL query to be executed by the magic. found: The PCollections with variable names found to be used by the query. output_name: The output variable name in __main__ module. run: Whether to prepare components for a local run or not. Returns: The processed query to be executed by the magic; a source to apply the SqlTransform to: a dictionary of tagged PCollections, or a single PCollection, or the pipeline to execute the query; the chain of applied beam_sql magics this one belongs to.
github-repos
def validate_options(options):
    """Validate Google Cloud Storage options.

    Args:
        options: a str->basestring dict of options to pass to GCS.

    Raises:
        ValueError: if an option is not supported.
        TypeError: if an option key is not a str or a value is not a
            basestring.

    Note: Python 2 code (iteritems/basestring) — kept as-is.
    """
    if not options:
        return
    for key, value in options.iteritems():
        if not isinstance(key, str):
            raise TypeError('option %r should be a str.' % key)
        # Options are matched by prefix against the supported set.
        if not any(key.lower().startswith(valid) for valid in _GCS_OPTIONS):
            raise ValueError('option %s is not supported.' % key)
        if not isinstance(value, basestring):
            raise TypeError('value %r for option %s should be of type basestring.' % (value, key))
Validate Google Cloud Storage options. Args: options: a str->basestring dict of options to pass to Google Cloud Storage. Raises: ValueError: if option is not supported. TypeError: if option is not of type str or value of an option is not of type basestring.
codesearchnet
def events(self, institute, case=None, variant_id=None, level=None, comments=False, panel=None):
    """Fetch events from the database.

    Args:
        institute (dict): An institute.
        case (dict): A case.
        variant_id (str, optional): global variant id.
        level (str, optional): restrict comments to 'specific' or 'global'.
        comments (bool, optional): restrict events to comments only.
        panel (str): A panel name.

    Returns:
        pymongo.Cursor: Query result, newest first.
    """
    query = {}
    if variant_id:
        if comments:
            # Comments for a variant: global comments on the variant id from
            # any institute/case, plus case-specific comments for this one.
            LOG.debug("Fetching all comments for institute {0} case {1} variant {2}".format(
                institute['_id'], case['_id'], variant_id))
            query = {
                '$or': [
                    {
                        'category' : 'variant',
                        'variant_id' : variant_id,
                        'verb' : 'comment',
                        'level' : 'global'
                    },
                    {
                        'category' : 'variant',
                        'variant_id' : variant_id,
                        'institute' : institute['_id'],
                        'case' : case['_id'],
                        'verb' : 'comment',
                        'level' : 'specific'
                    }
                ]
            }
        else:
            # All variant events scoped to this institute and case.
            query['institute'] = institute['_id']
            query['category'] = 'variant'
            query['variant_id'] = variant_id
            query['case'] = case['_id']
    else:
        query['institute'] = institute['_id']
        if panel:
            query['panel'] = panel
        else:
            # Without a panel, fall back to case-level events.
            query['category'] = 'case'
        if case:
            query['case'] = case['_id']
        if comments:
            query['verb'] = 'comment'
    return self.event_collection.find(query).sort('created_at', pymongo.DESCENDING)
Fetch events from the database. Args: institute (dict): A institute case (dict): A case variant_id (str, optional): global variant id level (str, optional): restrict comments to 'specific' or 'global' comments (bool, optional): restrict events to include only comments panel (str): A panel name Returns: pymongo.Cursor: Query result
juraj-google-style
def parseConfig(cls, value):
    """Parse the config values in place.

    Args:
        value (dict): Dictionary which contains the checker config.

    Returns:
        dict: The checker config with parsed values.
    """
    if 'enabled' in value:
        # NOTE(review): bool() of any non-empty string is True, including
        # "false" — this mirrors the original behavior.
        value['enabled'] = bool(value['enabled'])
    if 'exclude_paths' in value:
        raw_paths = ast.literal_eval(value['exclude_paths'])
        value['exclude_paths'] = [entry.strip() for entry in raw_paths]
    return value
Parse the config values Args: value (dict): Dictionary which contains the checker config Returns: dict: The checker config with parsed values
codesearchnet
def build_polyline_dict(self, path, stroke_color='#FF0000', stroke_opacity=0.8, stroke_weight=2):
    """Set a dictionary with the javascript class Polyline parameters.

    NOTE(review): the default values after ``stroke_color='`` were truncated
    at the '#' in the source; the defaults here are reconstructed — confirm
    against the original file.

    Args:
        path (list): A list of latitude and longitude points for the polyline.
        stroke_color (str): Color of the polyline in hexadecimal notation.
        stroke_opacity (float): Opacity of the polyline; 0 is transparent.
        stroke_weight (int): Stroke girth in pixels.

    Raises:
        AttributeError: If ``path`` is not a list.
    """
    if not isinstance(path, list):
        # Error message typo ("logitudes") fixed.
        raise AttributeError('To build a map path a list of dictionaries of latitude and longitudes is required')
    polyline = {'path': path, 'stroke_color': stroke_color, 'stroke_opacity': stroke_opacity, 'stroke_weight': stroke_weight}
    return polyline
Set a dictionary with the javascript class Polyline parameters This function sets a default drawing configuration if the user just passes the polyline path, but also allows each parameter to be set individually if the user wishes. Args: path (list): A list of latitude and longitude points for the polyline stroke_color (str): Sets the color of the polyline using hexadecimal color notation stroke_opacity (float): Sets the opacity of the polyline in percentage. If stroke_opacity = 0, the polyline is transparent stroke_weight (int): Sets the stroke girth in pixels.
codesearchnet
def layer_norm(x, dim, epsilon=1e-06, name='layer_prepostprocess'):
    """Layer normalization over dimension dim.

    Args:
        x: a mtf.Tensor whose shape contains dim.
        dim: a mtf.Dimension to normalize over.
        epsilon: a floating point number added to the variance for stability.
        name: a string; variable scope.

    Returns:
        a mtf.Tensor with the same shape as x.
    """
    with tf.variable_scope((name + '/layer_norm')):
        # Learned per-dim gain and bias.
        scale = mtf.get_variable(x.mesh, 'layer_norm_scale', mtf.Shape([dim]), initializer=tf.ones_initializer(), activation_dtype=x.dtype)
        bias = mtf.get_variable(x.mesh, 'layer_norm_bias', mtf.Shape([dim]), initializer=tf.zeros_initializer(), activation_dtype=x.dtype)
        # Reducing over `dim` leaves the remaining shape for mean/variance.
        reduced_shape = (x.shape - dim)
        mean = mtf.reduce_mean(x, output_shape=reduced_shape)
        variance = mtf.reduce_mean(mtf.square((x - mean)), output_shape=reduced_shape)
        # Normalize, then apply the learned affine transform.
        norm_x = ((x - mean) * mtf.rsqrt((variance + epsilon)))
        return ((norm_x * scale) + bias)
Layer normalization over dimension dim. Args: x: a mtf.Tensor whose shape contains dim. dim: a mtf.Dimension epsilon: a floating point number name: a string. variable scope. Returns: a mtf.Tensor with same shape as x.
codesearchnet
def copy_cwl_files(from_dir=CWL_PATH, to_dir=None):
    """Copy cwl files to a directory where the cwl-runner can find them.

    Args:
        from_dir (str): Directory to copy files from (default: the cwl
            directory of nlppln).
        to_dir (str): Directory the files should be copied to (e.g., the
            CWL working directory).

    Returns:
        int: Number of cwl files copied.
    """
    # os.path.join handles a from_dir with or without a trailing separator,
    # unlike the original '{}{}*.cwl'.format(from_dir, os.sep) pattern.
    cwl_files = glob.glob(os.path.join(from_dir, '*.cwl'))
    if cwl_files:
        create_dirs(to_dir)
        for src in cwl_files:
            dest = os.path.join(to_dir, os.path.basename(src))
            shutil.copy2(src, dest)
    return len(cwl_files)
Copy cwl files to a directory where the cwl-runner can find them. Args: from_dir (str): Path to directory where to copy files from (default: the cwl directory of nlppln). to_dir (str): Path to directory where the files should be copied to (e.g., the CWL working directory).
juraj-google-style
def _generate_mark_code(rule_name): code = ''.join([i for i in str(rule_name) if i.isdigit()]) code = code.zfill(2) return code
Generates a two digit string based on a provided string Args: rule_name (str): A configured rule name 'pytest_mark3'. Returns: str: A two digit code based on the provided string '03'
codesearchnet
def retrieve_file_from_url(url):
    """Retrieve a file from a URL.

    Args:
        url: The URL to retrieve the file from.

    Returns:
        The absolute path of the downloaded file.

    Raises:
        CLIError: If the download fails or the body looks like an error.
    """
    try:
        alias_source, _ = urlretrieve(url)
        with open(alias_source, 'r') as f:
            content = f.read()
        # A body starting with three digits is treated as an error response
        # echoed into the file — presumably an HTTP status; confirm upstream.
        if content[:3].isdigit():
            raise CLIError(ALIAS_FILE_URL_ERROR.format(url, content.strip()))
    except CLIError:
        # Already wrapped above — re-raise untouched.
        raise
    except Exception as exception:
        raise CLIError(ALIAS_FILE_URL_ERROR.format(url, exception))
    return alias_source
Retrieve a file from a URL Args: url: The URL to retrieve the file from. Returns: The absolute path of the downloaded file.
juraj-google-style