code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def add(self, username, user_api, filename=None):
    """Add SSH public keys to a user's LDAP profile.

    Args:
        username: Username to attach SSH public keys to.
        user_api: User API used to look up the user's LDAP entry.
        filename: Filename containing keys to add (optional).

    Raises:
        ldap3.core.exceptions.LDAPNoSuchAttributeResult: if the user's
            entry does not carry the ldapPublicKey object class.
        Exception: propagated from SSHKey parsing when a key is malformed.
    """
    keys = API.__get_keys(filename)
    user = user_api.find(username)[0]
    distinguished_name = user.entry_dn
    if ('ldapPublicKey' not in user.objectClass):
        raise ldap3.core.exceptions.LDAPNoSuchAttributeResult(
            ('LDAP Public Key Object Class not found. '
             + 'Please ensure user was created correctly.'))
    # De-duplicate the keys, validate each one, then add it to the entry.
    # (Removed a leftover debug print and a try/except that only re-raised.)
    for key in list(set(keys)):
        SSHKey(key).parse()  # raises on a malformed key before any LDAP write
        operation = {'sshPublicKey': [(ldap3.MODIFY_ADD, [key])]}
        self.client.modify(distinguished_name, operation)
Add SSH public key to a user's profile. Args: username: Username to attach SSH public key to user_api: User API used to look up the user's LDAP entry filename: Filename containing keys to add (optional) Raises: ldap3.core.exceptions.LDAPNoSuchAttributeResult: ldapPublicKey isn't attached to objectClass
codesearchnet
def add_update_resources(self, resources, ignore_datasetid=False):
    """Add new or update existing resources of the dataset.

    Args:
        resources: List of resource ids or resource metadata
            (Resource objects or dictionaries).
        ignore_datasetid (bool): Whether to ignore dataset id in each
            resource. Defaults to False.

    Returns:
        None
    """
    if not isinstance(resources, list):
        raise HDXError('Resources should be a list!')
    for res in resources:
        self.add_update_resource(res, ignore_datasetid)
Add new or update existing resources with new metadata to the dataset Args: resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False. Returns: None
codesearchnet
def mv(src, dst):
    """Move a file or directory, overwriting an existing destination.

    Arguments:
        src (string): path to the source file or directory.
        dst (string): path to the destination file or directory.

    Raises:
        File404: if the source does not exist.
        IOError: if the move fails for any other reason.
    """
    if not exists(src):
        raise File404(src)
    try:
        shutil.move(src, dst)
    except Exception as e:
        # Chain the original exception so its traceback is preserved
        # (the original `raise IOError(str(e))` discarded the cause).
        raise IOError(str(e)) from e
Move a file or directory. If the destination already exists, this will attempt to overwrite it. Arguments: src (string): path to the source file or directory. dst (string): path to the destination file or directory. Raises: File404: if source does not exist. IOError: in case of error.
juraj-google-style
def AnalyzeClient(self, client):
    """Finds the client_id and keywords for a client.

    Args:
        client: A VFSGRRClient record to find keywords for.

    Returns:
        A tuple (client_id, keywords) where client_id is the client
        identifier and keywords is a list of keywords related to client.
    """
    client_id = self._ClientIdFromURN(client.urn)
    # "." is always indexed so every client matches a wildcard search.
    keywords = [self._NormalizeKeyword(client_id), "."]

    def TryAppend(prefix, keyword):
        # Index the keyword both bare and prefixed (e.g. "host:name").
        precondition.AssertType(prefix, Text)
        if keyword:
            keyword_string = self._NormalizeKeyword(Text(keyword))
            keywords.append(keyword_string)
            if prefix:
                keywords.append(prefix + ":" + keyword_string)

    def TryAppendPrefixes(prefix, keyword, delimiter):
        # Index every leading sub-sequence of a delimited keyword;
        # returns the number of segments found.
        if keyword is None:
            return 0
        TryAppend(prefix, keyword)
        segments = keyword.split(delimiter)
        for i in range(1, len(segments)):
            TryAppend(prefix, delimiter.join(segments[0:i]))
        return len(segments)

    def TryAppendIP(ip):
        TryAppend("ip", ip)
        # Four dot-separated segments means IPv4; otherwise try IPv6 colons.
        if TryAppendPrefixes("ip", str(ip), ".") == 4:
            return
        TryAppendPrefixes("ip", str(ip), ":")

    def TryAppendMac(mac):
        TryAppend("mac", mac)
        if len(mac) == 12:
            # Raw 12-hex-digit form: also index the colon-separated form.
            TryAppend("mac", ":".join([mac[i:i + 2] for i in range(0, 12, 2)]))

    s = client.Schema
    TryAppend("host", client.Get(s.HOSTNAME))
    TryAppendPrefixes("host", client.Get(s.HOSTNAME), "-")
    TryAppend("host", client.Get(s.FQDN))
    TryAppendPrefixes("host", client.Get(s.FQDN), ".")
    TryAppend("", client.Get(s.SYSTEM))
    TryAppend("", client.Get(s.UNAME))
    TryAppend("", client.Get(s.OS_RELEASE))
    TryAppend("", client.Get(s.OS_VERSION))
    TryAppend("", client.Get(s.KERNEL))
    TryAppend("", client.Get(s.ARCH))
    kb = client.Get(s.KNOWLEDGE_BASE)
    if kb:
        for user in kb.users:
            TryAppend("user", user.username)
            TryAppend("", user.full_name)
            if user.full_name:
                for name in user.full_name.split():
                    # Strip common punctuation from name parts.
                    TryAppend("", name.strip("\"'()"))
    for username in client.Get(s.USERNAMES, []):
        TryAppend("user", username)
    for interface in client.Get(s.INTERFACES, []):
        if interface.mac_address:
            TryAppendMac(interface.mac_address.human_readable_address)
        for ip in interface.GetIPAddresses():
            TryAppendIP(ip)
    # NOTE(review): MAC_ADDRESS / HOST_IPS look like legacy newline-joined
    # fields kept alongside INTERFACES — confirm they are still populated.
    if client.Get(s.MAC_ADDRESS):
        for mac in str(client.Get(s.MAC_ADDRESS)).split("\n"):
            TryAppendMac(mac)
    ip_list = client.Get(s.HOST_IPS, "")
    for ip in str(ip_list).split("\n"):
        TryAppendIP(ip)
    client_info = client.Get(s.CLIENT_INFO)
    if client_info:
        TryAppend("client", client_info.client_name)
        TryAppend("client", client_info.client_version)
        if client_info.labels:
            for label in client_info.labels:
                TryAppend("label", label)
    for label in client.GetLabelsNames():
        TryAppend("label", label)
    return client_id, keywords
Finds the client_id and keywords for a client. Args: client: A VFSGRRClient record to find keywords for. Returns: A tuple (client_id, keywords) where client_id is the client identifier and keywords is a list of keywords related to client.
juraj-google-style
def request_json(link, outfile, force_rerun_flag, outdir=None):
    """Download JSON from a web request, caching the result to disk.

    Args:
        link: URL of the web request.
        outfile: Name of the output file.
        force_rerun_flag: If True, redownload even if the file exists.
        outdir: Directory of the output file (defaults to the cwd).

    Returns:
        dict: contents of the JSON request.
    """
    outfile = op.join(outdir or '', outfile)
    if force_rerun(flag=force_rerun_flag, outfile=outfile):
        response = requests.get(link)
        my_dict = response.json()
        with open(outfile, 'w') as handle:
            json.dump(my_dict, handle)
        log.debug('Loaded and saved {} to {}'.format(link, outfile))
    else:
        # Cached copy exists and rerun was not forced; load it instead.
        with open(outfile, 'r') as handle:
            my_dict = json.load(handle)
        log.debug('Loaded {}'.format(outfile))
    return my_dict
Download a file in JSON format from a web request Args: link: Link to web request outfile: Name of output file outdir: Directory of output file force_rerun_flag: If true, redownload the file Returns: dict: contents of the JSON request
juraj-google-style
def delete(self, vid):
    """Deletes a VLAN from the running configuration.

    Args:
        vid (str): The VLAN ID to delete.

    Returns:
        True if the operation was successful otherwise False.
    """
    if not isvlan(vid):
        return False
    return self.configure('no vlan %s' % vid)
Deletes a VLAN from the running configuration Args: vid (str): The VLAN ID to delete Returns: True if the operation was successful otherwise False
codesearchnet
def get_student_certificates(self, username, course_ids=None):
    """Return a Certificates object with the user's certificates.

    Args:
        username (str): an edX user's username.
        course_ids (list): a list of edX course ids; defaults to the
            student's current enrollments.

    Returns:
        Certificates: object representing the student certificates.
    """
    if course_ids is None:
        # Fall back to every course the student is enrolled in.
        enrollments_client = CourseEnrollments(self.requester, self.base_url)
        course_ids = list(
            enrollments_client.get_student_enrollments().get_enrolled_course_ids())
    certificates = []
    for course_id in course_ids:
        try:
            certificates.append(self.get_student_certificate(username, course_id))
        except HTTPError as error:
            # A missing certificate (4xx) is expected; server errors are not.
            if error.response.status_code >= 500:
                raise
    return Certificates(certificates)
Returns a Certificates object with the user's certificates. Args: username (str): an edx user's username course_ids (list): a list of edX course ids. Returns: Certificates: object representing the student certificates for a course
juraj-google-style
def read(file_path):
    """Read a gmt file at the path specified by file_path.

    Args:
        file_path (string): path to gmt file.

    Returns:
        gmt (GMT object): list of dicts, where each dict corresponds to one
            line of the GMT file.
    """
    with open(os.path.expanduser(file_path), 'r') as f:
        lines = f.readlines()
    gmt = []
    for line_num, line in enumerate(lines):
        fields = line.split('\t')
        assert len(fields) > 2, (
            "Each line must have at least 3 tab-delimited items. " +
            "line_num: {}, fields: {}").format(line_num, fields)
        # Only the final field carries the trailing newline.
        fields[-1] = fields[-1].rstrip()
        entries = [entry for entry in fields[2:] if entry]
        assert len(set(entries)) == len(entries), (
            "There should not be duplicate entries for the same set. " +
            "line_num: {}, entries: {}").format(line_num, entries)
        gmt.append({SET_IDENTIFIER_FIELD: fields[0],
                    SET_DESC_FIELD: fields[1],
                    SET_MEMBERS_FIELD: entries})
    verify_gmt_integrity(gmt)
    return gmt
Read a gmt file at the path specified by file_path. Args: file_path (string): path to gmt file Returns: gmt (GMT object): list of dicts, where each dict corresponds to one line of the GMT file
juraj-google-style
def _create_extension(o, otype, fqdn, pmodule):
    """Creates an extension object to represent `o` that can have attributes
    set, but which behaves identically to the given object.

    Args:
        o: object to create an extension for; no checks are performed to see
            if an extension is actually required.
        otype (str): object type; e.g. "classes", "functions", "descriptors",
            "unknowns" or "builtins".
        fqdn (str): fully qualified name of the package that the object
            belongs to.
        pmodule: the parent module (or class) that `o` belongs to; used for
            setting the special __module__ attribute.
    """
    import types
    # Every extension carries a back-reference to the original object.
    xdict = {'__acornext__': o, '__doc__': o.__doc__}
    if (otype == 'classes'):
        classname = o.__name__
        try:
            if (fqdn in _explicit_subclasses):
                # Some classes need a hand-written subclass; evaluate it.
                xclass = eval(_explicit_subclasses[fqdn])
                xclass.__acornext__ = o
            else:
                # Dynamically subclass the original so isinstance still works.
                xclass = type(classname, (o,), xdict)
                xclass.__module__ = o.__module__
            return xclass
        except TypeError:
            # Some builtin/extension types cannot be subclassed; record the
            # object as final and return it unchanged.
            _final_objs.append(id(o))
            return o
    elif ((otype in ['functions', 'descriptors', 'unknowns']) or
          ((otype == 'builtins') and
           (isinstance(o, types.BuiltinFunctionType) or
            isinstance(o, types.BuiltinMethodType)))):
        def xwrapper(*args, **kwargs):
            # Delegate to the original callable; on failure, log the call
            # signature (argument types and keyword names) for debugging.
            try:
                return o(*args, **kwargs)
            except:
                targs = list(map(type, args))
                kargs = list(kwargs.keys())
                msg.err('xwrapper: {}({}, {})'.format(o, targs, kargs), 2)
                pass
        # Copy the extension attributes onto the wrapper.
        for (attr, val) in xdict.items():
            setattr(xwrapper, attr, val)
        failed = False
        setattr(xwrapper, '__getattribute__', _safe_getattr(xwrapper))
        failed = (not _update_attrs(xwrapper, o, ['__call__']))
        if ((otype in ['descriptors', 'unknowns']) and inspect.ismodule(pmodule)):
            if hasattr(o, '__objclass__'):
                setattr(xwrapper, '__module__', pmodule.__name__)
            elif (hasattr(o, '__class__') and (o.__class__ is not None)):
                setattr(xwrapper, '__module__', pmodule.__name__)
        # NOTE(review): when attribute copying fails, this implicitly
        # returns None — confirm callers handle that.
        if (not failed):
            return xwrapper
Creates an extension object to represent `o` that can have attributes set, but which behaves identically to the given object. Args: o: object to create an extension for; no checks are performed to see if extension is actually required. otype (str): object types; one of ["classes", "functions", "methods", "modules"]. fqdn (str): fully qualified name of the package that the object belongs to. pmodule: the parent module (or class) that `o` belongs to; used for setting the special __module__ attribute.
codesearchnet
def parse_arguments(argv):
    """Parse the command-line arguments.

    Args:
        argv: The raw arguments passed on the command line.

    Returns:
        The known parsed arguments as a namespace object.
    """
    parser = argparse.ArgumentParser(description='online-clustering')
    parser.add_argument(
        '-m', '--mode',
        help='Mode to run pipeline in.',
        choices=['local', 'cloud'],
        default='local')
    parser.add_argument(
        '-p', '--project',
        help='GCP project to run pipeline on.',
        default=cfg.PROJECT_ID)
    parsed, _ = parser.parse_known_args(args=argv)
    return parsed
Parses the arguments passed to the command line and returns them as an object Args: argv: The arguments passed to the command line. Returns: The arguments that are being passed in.
github-repos
def recursive_create_dir_v2(path):
    """Creates a directory and all parent/intermediate directories.

    It succeeds if path already exists and is writable.

    Args:
        path: string, name of the directory to be created.

    Raises:
        errors.OpError: If the operation fails.
    """
    _pywrap_file_io.RecursivelyCreateDir(compat.path_to_bytes(path))
Creates a directory and all parent/intermediate directories. It succeeds if path already exists and is writable. Args: path: string, name of the directory to be created Raises: errors.OpError: If the operation fails.
github-repos
def add_candidate_peer_endpoints(self, peer_endpoints):
    """Adds candidate endpoints to the list of endpoints to attempt to
    peer with.

    Args:
        peer_endpoints ([str]): A list of public uri's which the validator
            can attempt to peer with.
    """
    if not self._topology:
        # Topology not yet constructed; nothing to record.
        LOGGER.debug("Could not add peer endpoints to topology. "
                     "ConnectionManager does not exist.")
        return
    self._topology.add_candidate_peer_endpoints(peer_endpoints)
Adds candidate endpoints to the list of endpoints to attempt to peer with. Args: peer_endpoints ([str]): A list of public uri's which the validator can attempt to peer with.
juraj-google-style
def parent_nodes(self):
    """The parent `Node`s.

    Returns:
        all the `Node`s whose output this node immediately depends on.
    """
    deps = []
    for keras_tensor in self.arguments.keras_tensors:
        history = keras_tensor._keras_history
        if history.operation is not None:
            deps.append(history.operation._inbound_nodes[history.node_index])
    return deps
The parent `Node`s. Returns: all the `Node`s whose output this node immediately depends on.
github-repos
def sample(self, samples=[], bounds=None, **sample_values):
    """Samples element values at supplied coordinates.

    Supports sampling with an explicit list of coordinates, or a regular
    1D/2D grid given an integer or (rows, cols) tuple, optionally bounded.

    Args:
        samples: List of nd-coordinates to sample, or an int (1D) /
            two-tuple (2D) requesting a regular grid of samples.
        bounds: Bounds of the region to sample; two-tuple for 1D sampling
            and four-tuple (l, b, r, t) for 2D sampling.
        **sample_values: Coordinates specified as keyword pairs.

    Returns:
        A Table containing the sampled coordinates.
    """
    if util.config.future_deprecations:
        self.param.warning('The HoloMap.sample method is deprecated, for equivalent functionality use HoloMap.apply.sample().collapse().')
    dims = self.last.ndims
    # An int or tuple requests a regular grid rather than explicit points.
    if (isinstance(samples, tuple) or np.isscalar(samples)):
        if (dims == 1):
            # 1D: sample at the midpoints of `samples` equal-width bins.
            xlim = self.last.range(0)
            (lower, upper) = ((xlim[0], xlim[1]) if (bounds is None) else bounds)
            edges = np.linspace(lower, upper, (samples + 1))
            linsamples = [((l + u) / 2.0) for (l, u) in zip(edges[:(- 1)], edges[1:])]
        elif (dims == 2):
            # 2D: sample at the cell centers of a rows x cols grid.
            (rows, cols) = samples
            if bounds:
                (l, b, r, t) = bounds
            else:
                (l, r) = self.last.range(0)
                (b, t) = self.last.range(1)
            xedges = np.linspace(l, r, (cols + 1))
            yedges = np.linspace(b, t, (rows + 1))
            xsamples = [((lx + ux) / 2.0) for (lx, ux) in zip(xedges[:(- 1)], xedges[1:])]
            ysamples = [((ly + uy) / 2.0) for (ly, uy) in zip(yedges[:(- 1)], yedges[1:])]
            (Y, X) = np.meshgrid(ysamples, xsamples)
            linsamples = list(zip(X.flat, Y.flat))
        else:
            raise NotImplementedError('Regular sampling not implemented for elements with more than two dimensions.')
        # Snap the regular grid to the closest actual data coordinates,
        # dropping duplicates.
        samples = list(util.unique_iterator(self.last.closest(linsamples)))
    # Sample every element in the map at the resolved coordinates.
    sampled = self.clone([(k, view.sample(samples, closest=False, **sample_values)) for (k, view) in self.data.items()])
    from ..element import Table
    return Table(sampled.collapse())
Samples element values at supplied coordinates. Allows sampling of element with a list of coordinates matching the key dimensions, returning a new object containing just the selected samples. Supports multiple signatures: Sampling with a list of coordinates, e.g.: ds.sample([(0, 0), (0.1, 0.2), ...]) Sampling a range or grid of coordinates, e.g.: 1D: ds.sample(3) 2D: ds.sample((3, 3)) Sampling by keyword, e.g.: ds.sample(x=0) Args: samples: List of nd-coordinates to sample bounds: Bounds of the region to sample Defined as two-tuple for 1D sampling and four-tuple for 2D sampling. closest: Whether to snap to closest coordinates **kwargs: Coordinates specified as keyword pairs Keywords of dimensions and scalar coordinates Returns: A Table containing the sampled coordinates
codesearchnet
def set_geometry(self, geom):
    """A convenience function to set the geometry variables.

    Args:
        geom: A tuple containing (thet0, thet, phi0, phi, alpha, beta).
            See the Scatterer class documentation for a description of
            these angles.
    """
    thet0, thet, phi0, phi, alpha, beta = geom
    self.thet0 = thet0
    self.thet = thet
    self.phi0 = phi0
    self.phi = phi
    self.alpha = alpha
    self.beta = beta
A convenience function to set the geometry variables. Args: geom: A tuple containing (thet0, thet, phi0, phi, alpha, beta). See the Scatterer class documentation for a description of these angles.
juraj-google-style
def __init__(
    self, resolver_context, file_system, path_spec, is_root=False,
    is_virtual=False):
    """Initializes a file entry.

    Args:
        resolver_context (Context): resolver context.
        file_system (FileSystem): file system.
        path_spec (PathSpec): path specification.
        is_root (Optional[bool]): True if the file entry is the root file
            entry of the corresponding file system.
        is_virtual (Optional[bool]): True if the file entry is a virtual
            file.

    Raises:
        BackEndError: when the encrypted stream is missing.
    """
    encrypted_stream = resolver.Resolver.OpenFileObject(
        path_spec, resolver_context=resolver_context)
    if not encrypted_stream:
        # Bug fix: self.path_spec is not set until super().__init__ runs,
        # so referencing it here raised AttributeError; use the local
        # path_spec argument instead.
        raise errors.BackEndError(
            'Unable to open encrypted stream: {0:s}.'.format(
                path_spec.comparable))
    super(EncryptedStreamFileEntry, self).__init__(
        resolver_context, file_system, path_spec, is_root=is_root,
        is_virtual=is_virtual)
    self._encrypted_stream = encrypted_stream
    self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
Initializes a file entry. Args: resolver_context (Context): resolver context. file_system (FileSystem): file system. path_spec (PathSpec): path specification. is_root (Optional[bool]): True if the file entry is the root file entry of the corresponding file system. is_virtual (Optional[bool]): True if the file entry is a virtual file Raises: BackEndError: when the encrypted stream is missing.
juraj-google-style
def pretty_description(description, wrap_at=None, indent=0):
    """Return a pretty formatted string given some text.

    Args:
        description (str): string to format.
        wrap_at (int): maximum length of a line; None uses the console
            width, a negative value is an offset from the console width.
        indent (int): level of indentation (number of leading spaces).

    Returns:
        str: pretty formatted string.
    """
    if ((wrap_at is None) or (wrap_at < 0)):
        width = console_width(default=79)
        if (wrap_at is None):
            wrap_at = width
        else:
            # Negative wrap_at is treated as an offset from the console width.
            wrap_at += width
    indent = (' ' * indent)
    text_wrapper = textwrap.TextWrapper(width=wrap_at, replace_whitespace=False, initial_indent=indent, subsequent_indent=indent)
    # Normalize each input line, then strip leading/trailing blank lines.
    new_desc = []
    for line in description.split('\n'):
        new_desc.append(line.replace('\n', '').strip())
    while (not new_desc[0]):
        del new_desc[0]
    while (not new_desc[(- 1)]):
        del new_desc[(- 1)]
    # Remaining blank lines mark paragraph boundaries.
    separators = [i for (i, l) in enumerate(new_desc) if (not l)]
    paragraphs = []
    if separators:
        (start, end) = (0, separators[0])
        paragraphs.append(new_desc[start:end])
        for i in range((len(separators) - 1)):
            start = (end + 1)
            end = separators[(i + 1)]
            paragraphs.append(new_desc[start:end])
        paragraphs.append(new_desc[(end + 1):])
        # Fill each paragraph separately and join with blank lines.
        return '\n\n'.join((text_wrapper.fill(' '.join(p)) for p in paragraphs))
    return text_wrapper.fill(' '.join(new_desc))
Return a pretty formatted string given some text. Args: description (str): string to format. wrap_at (int): maximum length of a line. indent (int): level of indentation. Returns: str: pretty formatted string.
codesearchnet
def zero_state(self, batch_size, dtype=LayersConfig.tf_dtype):
    """Return zero-filled state tensor(s).

    Args:
        batch_size: int, float, or unit Tensor representing the batch size.
        dtype: data type of the returned tensor.

    Returns:
        Tensor of shape `[batch_size, shape[0], shape[1], num_features * 2]`
        filled with zeros.
    """
    state_shape = [batch_size, self.shape[0], self.shape[1],
                   self.num_features * 2]
    return tf.zeros(state_shape, dtype=dtype)
Return zero-filled state tensor(s). Args: batch_size: int, float, or unit Tensor representing the batch size. Returns: tensor of shape `[batch_size, shape[0], shape[1], num_features * 2]` filled with zeros
juraj-google-style
def _has_valid_tensors(self):
    """Checks if the input and output tensors have been initialized.

    Returns:
        bool: True only when both the input and output tensors are set.
    """
    # Bug fix: the original returned `self._output_tensors` itself when the
    # input tensors were set, so the result was not always a bool and an
    # empty (but initialized) output list read as invalid.
    return (self._input_tensors is not None
            and self._output_tensors is not None)
Checks if the input and output tensors have been initialized. Returns: Bool.
github-repos
def expired(self, cfgstr=None, product=None):
    """Check if a previously written stamp is still valid and if the
    expected result of that computation still exists.

    Args:
        cfgstr (str, optional): override the default cfgstr if specified.
        product (PathLike or Sequence[PathLike], optional): override the
            default product if specified.
    """
    products = self._rectify_products(product)
    certificate = self._get_certificate(cfgstr=cfgstr)
    if certificate is None:
        # No stamp was ever written (or it is unreadable).
        return True
    if products is None:
        # Nothing to verify beyond the stamp itself.
        return False
    if not all(map(os.path.exists, products)):
        # At least one expected product is missing.
        return True
    # Products exist; expire only if their content hash changed.
    return self._product_file_hash(products) != certificate.get('product_file_hash', None)
Check to see if a previously existing stamp is still valid and if the expected result of that computation still exists. Args: cfgstr (str, optional): override the default cfgstr if specified product (PathLike or Sequence[PathLike], optional): override the default product if specified
codesearchnet
def unpackVersion(ver):
    """Unpack a system normalized integer representing a software version
    into its component parts.

    Args:
        ver (int): System normalized integer value to unpack into a tuple.

    Returns:
        (int, int, int): A tuple containing the major, minor and patch
        values shifted out of the integer.
    """
    # Each component occupies a 20-bit field; define the mask locally so the
    # function is self-contained instead of depending on a module global.
    mask20 = 0xFFFFF
    major = (ver >> 40) & mask20
    minor = (ver >> 20) & mask20
    patch = ver & mask20
    return major, minor, patch
Unpack a system normalized integer representing a software version into its component parts. Args: ver (int): System normalized integer value to unpack into a tuple. Returns: (int, int, int): A tuple containing the major, minor and patch values shifted out of the integer.
juraj-google-style
def obj_from_dict(info, parent=None, default_args=None):
    """Initialize an object from a dict.

    The dict must contain the key "type", indicating the object type as a
    string or a type; remaining fields are constructor arguments.

    Args:
        info (dict): Object types and arguments.
        parent (:class:`module`): Module which may contain the expected
            object classes.
        default_args (dict, optional): Default arguments for initializing
            the object.

    Returns:
        any type: Object built from the dict.
    """
    assert isinstance(info, dict) and ('type' in info)
    assert isinstance(default_args, dict) or (default_args is None)
    args = info.copy()
    obj_type = args.pop('type')
    if mmcv.is_str(obj_type):
        # Resolve the name against the parent module, or treat it as a
        # fully qualified module name.
        obj_type = getattr(parent, obj_type) if parent is not None else sys.modules[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError('type must be a str or valid type, but got {}'.format(type(obj_type)))
    if default_args is not None:
        for name, value in default_args.items():
            args.setdefault(name, value)
    return obj_type(**args)
Initialize an object from dict. The dict must contain the key "type", which indicates the object type, it can be either a string or type, such as "list" or ``list``. Remaining fields are treated as the arguments for constructing the object. Args: info (dict): Object types and arguments. parent (:class:`module`): Module which may containing expected object classes. default_args (dict, optional): Default arguments for initializing the object. Returns: any type: Object built from the dict.
codesearchnet
def opt(parser: Union[(Parser, Sequence[Input])]) -> OptionalParser:
    """Optionally match a parser.

    An ``OptionalParser`` attempts to match ``parser``. On success it
    yields a one-element list with the parsed value; on failure it yields
    an empty list.

    Args:
        parser: Parser or literal.
    """
    target = lit(parser) if isinstance(parser, str) else parser
    return OptionalParser(target)
Optionally match a parser. An ``OptionalParser`` attempts to match ``parser``. If it succeeds, it returns a list of length one with the value returned by the parser as the only element. If it fails, it returns an empty list. Args: parser: Parser or literal
codesearchnet
def sun_rise_set(latitude, longitude, date, mode='rise', timezone=0, zenith=None):
    """Calculate sunrise or sunset for a specific location.

    Implements the algorithm from the Almanac for Computers, 1990,
    published by the Nautical Almanac Office, United States Naval
    Observatory.

    Args:
        latitude (float): Location's latitude.
        longitude (float): Location's longitude.
        date (datetime.date): Calculate rise or set for given date.
        mode (str): Which time to calculate ('rise' or 'set').
        timezone (int): Offset from UTC in minutes.
        zenith (str): Calculate rise/set events, or twilight times.

    Returns:
        datetime.time or None: The time for the given event in the
        specified timezone, or None if the event doesn't occur on the
        given date.

    Raises:
        ValueError: Unknown value for mode.
    """
    if (not date):
        date = datetime.date.today()
    zenith = ZENITH[zenith]
    # Day of the year.
    n = (date - datetime.date((date.year - 1), 12, 31)).days
    # Convert longitude to an hour value and pick an approximate time.
    lng_hour = (longitude / 15)
    if (mode == 'rise'):
        t = (n + ((6 - lng_hour) / 24))
    elif (mode == 'set'):
        t = (n + ((18 - lng_hour) / 24))
    else:
        raise ValueError(('Unknown mode value %r' % mode))
    # Sun's mean anomaly.
    m = ((0.9856 * t) - 3.289)
    # Sun's true longitude, normalized into [0, 360).
    l = (((m + (1.916 * math.sin(math.radians(m)))) + (0.02 * math.sin((2 * math.radians(m))))) + 282.634)
    l = (abs(l) % 360)
    # Sun's right ascension, adjusted into the same quadrant as l.
    ra = math.degrees(math.atan((0.91764 * math.tan(math.radians(l)))))
    l_quandrant = (math.floor((l / 90)) * 90)
    ra_quandrant = (math.floor((ra / 90)) * 90)
    ra = (ra + (l_quandrant - ra_quandrant))
    # Convert right ascension to hours.
    ra = (ra / 15)
    # Sun's declination.
    sin_dec = (0.39782 * math.sin(math.radians(l)))
    cos_dec = math.cos(math.asin(sin_dec))
    # Sun's local hour angle.
    # NOTE(review): the almanac formula uses cos(zenith) here; this code
    # applies math.radians(zenith) directly, which matches only if the
    # ZENITH table stores precomputed values — confirm against ZENITH.
    cos_h = ((math.radians(zenith) - (sin_dec * math.sin(math.radians(latitude)))) / (cos_dec * math.cos(math.radians(latitude))))
    if (cos_h > 1):
        # The sun never rises at this location on the given date.
        return None
    elif (cos_h < (- 1)):
        # The sun never sets at this location on the given date.
        return None
    if (mode == 'rise'):
        h = (360 - math.degrees(math.acos(cos_h)))
    else:
        h = math.degrees(math.acos(cos_h))
    h = (h / 15)
    # Local mean time of the event.
    t = (((h + ra) - (0.06571 * t)) - 6.622)
    # Adjust back to UTC, then to the requested timezone (in minutes).
    utc = (t - lng_hour)
    local_t = (utc + (timezone / 60))
    if (local_t < 0):
        local_t += 24
    elif (local_t > 23):
        local_t -= 24
    hour = int(local_t)
    if (hour == 0):
        minute = int((60 * local_t))
    else:
        minute = int((60 * (local_t % hour)))
    if (minute < 0):
        minute += 60
    return datetime.time(hour, minute)
Calculate sunrise or sunset for a specific location. This function calculates the time sunrise or sunset, or optionally the beginning or end of a specified twilight period. Source:: Almanac for Computers, 1990 published by Nautical Almanac Office United States Naval Observatory Washington, DC 20392 Args: latitude (float): Location's latitude longitude (float): Location's longitude date (datetime.date): Calculate rise or set for given date mode (str): Which time to calculate timezone (int): Offset from UTC in minutes zenith (str): Calculate rise/set events, or twilight times Returns: datetime.time or None: The time for the given event in the specified timezone, or ``None`` if the event doesn't occur on the given date Raises: ValueError: Unknown value for ``mode``
codesearchnet
def Uniform(cls, low: 'TensorFluent', high: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]:
    """Returns a TensorFluent for the Uniform sampling op with given low
    and high parameters.

    Args:
        low: The low parameter of the Uniform distribution.
        high: The high parameter of the Uniform distribution.
        batch_size: The size of the batch (optional).

    Returns:
        The Uniform distribution and a TensorFluent sample drawn from the
        distribution.

    Raises:
        ValueError: If parameters do not have the same scope.
    """
    if low.scope != high.scope:
        raise ValueError('Uniform distribution: parameters must have same scope!')
    dist = tf.distributions.Uniform(low.tensor, high.tensor)
    batch = low.batch or high.batch
    if batch or (batch_size is None):
        # Already batched (or no batch requested): draw a single sample.
        t = dist.sample()
    else:
        # Draw an explicit batch of samples.
        t = dist.sample(batch_size)
        batch = True
    return (dist, TensorFluent(t, low.scope.as_list(), batch=batch))
Returns a TensorFluent for the Uniform sampling op with given low and high parameters. Args: low: The low parameter of the Uniform distribution. high: The high parameter of the Uniform distribution. batch_size: The size of the batch (optional). Returns: The Uniform distribution and a TensorFluent sample drawn from the distribution. Raises: ValueError: If parameters do not have the same scope.
codesearchnet
def FindEndOfExpressionInLine(line, startpos, stack):
    """Find the position just after the end of current parenthesized expression.

    Args:
        line: a CleansedLines line.
        startpos: start searching at this position.
        stack: nesting stack at startpos.

    Returns:
        On finding matching end: (index just after matching end, None)
        On finding an unclosed expression: (-1, None)
        Otherwise: (-1, new stack at end of this line)
    """
    for i in xrange(startpos, len(line)):
        char = line[i]
        if (char in '([{'):
            # Found start of parenthesized expression, push to the stack.
            stack.append(char)
        elif (char == '<'):
            # Potential start of a template argument list.
            if ((i > 0) and (line[(i - 1)] == '<')):
                # Left shift operator: undo the tentative '<' push.
                if (stack and (stack[(- 1)] == '<')):
                    stack.pop()
                    if (not stack):
                        return ((- 1), None)
            elif ((i > 0) and Search('\\boperator\\s*$', line[0:i])):
                # operator< — not a template start, don't push.
                continue
            else:
                # Tentative start of a template argument list.
                stack.append('<')
        elif (char in ')]}'):
            # Found end of parenthesized expression. Any '<' still on the
            # stack was an operator, not a template start — pop them first.
            while (stack and (stack[(- 1)] == '<')):
                stack.pop()
            if (not stack):
                return ((- 1), None)
            if (((stack[(- 1)] == '(') and (char == ')')) or ((stack[(- 1)] == '[') and (char == ']')) or ((stack[(- 1)] == '{') and (char == '}'))):
                stack.pop()
                if (not stack):
                    return ((i + 1), None)
            else:
                # Mismatched parentheses.
                return ((- 1), None)
        elif (char == '>'):
            # Potential end of a template argument list.
            if ((i > 0) and ((line[(i - 1)] == '-') or Search('\\boperator\\s*$', line[0:(i - 1)]))):
                # Ignore "->" and operator functions.
                continue
            if stack:
                if (stack[(- 1)] == '<'):
                    stack.pop()
                    if (not stack):
                        return ((i + 1), None)
        elif (char == ';'):
            # End of statement: any pending '<' must have been an operator.
            while (stack and (stack[(- 1)] == '<')):
                stack.pop()
                if (not stack):
                    return ((- 1), None)
    # Did not find the end of the expression on this line.
    return ((- 1), stack)
Find the position just after the end of current parenthesized expression. Args: line: a CleansedLines line. startpos: start searching at this position. stack: nesting stack at startpos. Returns: On finding matching end: (index just after matching end, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at end of this line)
codesearchnet
def manual_shuffle(self, axis, shuffle_func, lengths):
    """Shuffle the partitions based on the `shuffle_func`.

    Args:
        axis: The axis to shuffle across.
        shuffle_func: The function to apply before splitting the result.
        lengths: The length of each partition to split the result into.

    Returns:
        A new BaseFrameManager object, the type of object that called this.
    """
    partitions = self.row_partitions if axis else self.column_partitions
    preprocessed = self.preprocess_func(shuffle_func)
    shuffled = np.array([partition.shuffle(preprocessed, lengths)
                         for partition in partitions])
    if axis:
        return self.__constructor__(shuffled)
    # Column-wise results come back transposed.
    return self.__constructor__(shuffled.T)
Shuffle the partitions based on the `shuffle_func`. Args: axis: The axis to shuffle across. shuffle_func: The function to apply before splitting the result. lengths: The length of each partition to split the result into. Returns: A new BaseFrameManager object, the type of object that called this.
juraj-google-style
def get_config():
    """Get the ConfigProto of Context.

    Returns:
        The ConfigProto of Context.
    """
    return context().config
Get the ConfigProto of Context. Returns: The ConfigProto of Context.
github-repos
def segment(self, eps, min_time):
    """In-place spatio-temporal segmentation of each segment.

    The number of segments may increase after this step.

    Args:
        eps: segmentation parameter forwarded to each segment.
        min_time: segmentation parameter forwarded to each segment.

    Returns:
        This track.
    """
    result = []
    for current in self.segments:
        for piece in current.segment(eps, min_time):
            result.append(Segment(piece))
    self.segments = result
    return self
In-place segmentation of segments Spatio-temporal segmentation of each segment The number of segments may increse after this step Returns: This track
codesearchnet
def _flat_structure(self):
    """Helper for setting `output_shapes` and `output_types` attrs of an op.

    Most dataset op constructors expect `output_shapes` and `output_types`
    arguments describing the flattened element structure; this generates
    them as a keyword-argument dictionary.

    Returns:
        A dictionary of keyword arguments that can be passed to a dataset
        op constructor.
    """
    return dict(output_shapes=self._flat_shapes,
                output_types=self._flat_types)
Helper for setting `output_shapes` and `output_types` attrs of an op. Most dataset op constructors expect `output_shapes` and `output_types` arguments that represent the flattened structure of an element. This helper function generates these attrs as a keyword argument dictionary, allowing `Dataset._variant_tensor` implementations to pass `**self._flat_structure` to the op constructor. Returns: A dictionary of keyword arguments that can be passed to a dataset op constructor.
github-repos
def open_workshared_model(self, model_path, central=False, detached=False,
                          keep_worksets=True, audit=False,
                          show_workset_config=1):
    """Append an open-workshared-model entry to the journal.

    This instructs Revit to open a workshared model.

    Args:
        model_path (str): full path to workshared model.
        central (bool): if True opens central model and not local.
        detached (bool): if True opens a detached model.
        keep_worksets (bool): if True keeps worksets when detaching.
        audit (bool): if True audits the model when opening.
        show_workset_config: workset configuration option for the dialog.
    """
    # Select the journal template matching the open options, then emit a
    # single formatted entry.
    if detached:
        if audit:
            template = (templates.CENTRAL_OPEN_DETACH_AUDIT
                        if keep_worksets
                        else templates.CENTRAL_OPEN_DETACH_AUDIT_DISCARD)
        else:
            template = (templates.CENTRAL_OPEN_DETACH
                        if keep_worksets
                        else templates.CENTRAL_OPEN_DETACH_DISCARD)
    elif central:
        template = (templates.CENTRAL_OPEN_AUDIT
                    if audit
                    else templates.CENTRAL_OPEN)
    else:
        template = (templates.WORKSHARED_OPEN_AUDIT
                    if audit
                    else templates.WORKSHARED_OPEN)
    self._add_entry(template.format(model_path=model_path,
                                    workset_config=show_workset_config))
Append a open workshared model entry to the journal. This instructs Revit to open a workshared model. Args: model_path (str): full path to workshared model central (bool): if True opens central model and not local detached (bool): if True opens a detached model keep_worksets (bool): if True keeps worksets when detaching audit (bool): if True audits the model when opening
juraj-google-style
def CmdVersion(self):
    """Obtain the version of the device and test transport format.

    Should be called at least once before CmdAuthenticate or CmdRegister
    so the object uses the proper transport for the device.

    Returns:
        The version of the U2F protocol in use.

    Raises:
        errors.ApduError: if the device reports a failure status.
    """
    self.logger.debug('CmdVersion')
    command = apdu.CommandApdu(0, apdu.CMD_VERSION, 0, 0)
    response = self.InternalSendApdu(command)
    if not response.IsSuccess():
        raise errors.ApduError(response.sw1, response.sw2)
    return response.body
Obtain the version of the device and test transport format. Obtains the version of the device and determines whether to use ISO 7816-4 or the U2f variant. This function should be called at least once before CmdAuthenticate or CmdRegister to make sure the object is using the proper transport for the device. Returns: The version of the U2F protocol in use.
codesearchnet
def dbInsert(self, def_buf, raw_a, raw_b):
    """Call overridden dbExec() with built insert statement.

    Args:
        def_buf (SerialBlock): Block of read buffer fields to write.
        raw_a (str): Hex string of raw A read.
        raw_b (str): Hex string of raw B read or empty.
    """
    self.dbExec(self.sqlInsert(def_buf, raw_a, raw_b))
Call overridden dbExec() with built insert statement. Args: def_buf (SerialBlock): Block of read buffer fields to write. raw_a (str): Hex string of raw A read. raw_b (str): Hex string of raw B read or empty.
juraj-google-style
def load(png_filename):
    """Import a png file into a numpy array.

    Arguments:
        png_filename (str): A string filename of a png datafile.

    Returns:
        A numpy array with data from the png file.

    Raises:
        ValueError: if the file cannot be opened as an image.
    """
    png_filename = os.path.expanduser(png_filename)
    try:
        img = Image.open(png_filename)
    except Exception as e:
        # The original had an unreachable bare `raise` after this statement;
        # chain the cause instead so the underlying error is preserved.
        raise ValueError(
            'Could not load file {0} for conversion.'.format(png_filename)) from e
    return numpy.array(img)
Import a png file into a numpy array. Arguments: png_filename (str): A string filename of a png datafile Returns: A numpy array with data from the png file
codesearchnet
def WritePreprocessingInformation(self, knowledge_base):
    """Writes preprocessing information.

    Args:
        knowledge_base (KnowledgeBase): used to store the preprocessing
            information.

    Raises:
        IOError: if the storage type does not support writing preprocessing
            information or when the storage writer is closed.
        OSError: if the storage type does not support writing preprocessing
            information or when the storage writer is closed.
    """
    self._RaiseIfNotWritable()
    # NOTE(review): knowledge_base is not used here; presumably subclasses
    # override this method to persist it — confirm.
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
        raise IOError('Preprocessing information not supported by storage type.')
Writes preprocessing information. Args: knowledge_base (KnowledgeBase): used to store the preprocessing information. Raises: IOError: if the storage type does not support writing preprocessing information or when the storage writer is closed. OSError: if the storage type does not support writing preprocessing information or when the storage writer is closed.
juraj-google-style
def _CreateComplexTypeFromData(
    self, elem_type, type_is_override, data, set_type_attrs):
    """Initialize a SOAP element with specific data.

    Args:
        elem_type: The type of the element to create.
        type_is_override: A boolean specifying if the type is being
            overridden.
        data: The data to hydrate the type with.
        set_type_attrs: A boolean indicating whether or not attributes that
            end in .Type should be set. This is only necessary for batch
            job service.

    Returns:
        A fully initialized SOAP element.
    """
    elem_arguments = dict(elem_type.elements)
    # Recursively pack each child value; 'xsi_type' is metadata rather than
    # a real element, so it is skipped.
    instantiated_arguments = {
        k: self._PackArgumentsHelper(elem_arguments[k], v, set_type_attrs)
        for k, v in data if k != 'xsi_type'}
    if set_type_attrs:
        # Locate a ".Type" attribute, if any, and pin it to the overriding
        # type's local name.
        found_type_attr = next((e_name for e_name, _ in elem_type.elements
                                if e_name.endswith('.Type')), None)
        if found_type_attr and type_is_override:
            instantiated_arguments[found_type_attr] = elem_type.qname.localname
    return elem_type(**instantiated_arguments)
Initialize a SOAP element with specific data. Args: elem_type: The type of the element to create. type_is_override: A boolean specifying if the type is being overridden. data: The data to hydrate the type with. set_type_attrs: A boolean indicating whether or not attributes that end in .Type should be set. This is only necessary for batch job service. Returns: An fully initialized SOAP element.
juraj-google-style
def __setitem__(self, key, value):
    """Set the element of the matrix at the position indexed by key.

    Args:
        key: tuple of (row_idx, column_idx).
        value: new value of the element of the matrix.

    Raises:
        IndexError: if key is invalid.
    """
    if not (isinstance(key, tuple) and len(key) == 2):
        raise IndexError('Invalid index: {0}'.format(key))
    row_idx, column_idx = key
    self._dim0.add(row_idx)
    self._dim1.add(column_idx)
    self._items[key] = value
Sets element of the matrix at position indexed by key. Args: key: tuple of (row_idx, column_idx) value: new value of the element of the matrix Raises: IndexError: if key is invalid.
juraj-google-style
def ParseBookmarkFolderRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a bookmark folder row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    title = self._GetRowValue(query_hash, row, 'title')
    event_data = FirefoxPlacesBookmarkFolderEventData()
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.title = (title or 'N/A')
    # Produce an event for the creation time, when present.
    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    # Produce an event for the last modification time, when present.
    timestamp = self._GetRowValue(query_hash, row, 'lastModified')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a bookmark folder row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
def find_elements_by_name(self, name, update=False) -> Elements:
    """Finds multiple elements by name.

    Args:
        name: The name of the elements to be found.
        update: If the interface has changed, this option should be True.

    Returns:
        A list with elements if any was found. An empty list if not.
    """
    return self.find_elements(by=By.NAME, value=name, update=update)
Finds multiple elements by name. Args: name: The name of the elements to be found. update: If the interface has changed, this option should be True. Returns: A list with elements if any was found. An empty list if not. Raises: NoSuchElementException - If the element wasn't found. Usage: elements = driver.find_elements_by_name('foo')
juraj-google-style
def Main(url, similarity_mode="TfIdfCosine", similarity_limit=0.75):
    """Entry point: scrape a PDF web page and print a top-3 summary.

    Args:
        url: PDF url.
        similarity_mode: Similarity filter to use; one of "TfIdfCosine",
            "Dice", "Jaccard" or "Simpson".
        similarity_limit: Cut-off threshold passed to the similarity filter.

    Raises:
        ValueError: If ``similarity_mode`` is not a supported mode.
    """
    web_scrape = WebScraping()
    # Read PDFs rather than HTML.
    web_scrape.readable_web_pdf = WebPDFReading()
    document = web_scrape.scrape(url)

    if similarity_mode == "TfIdfCosine":
        similarity_filter = TfIdfCosine()
    elif similarity_mode == "Dice":
        similarity_filter = Dice()
    elif similarity_mode == "Jaccard":
        similarity_filter = Jaccard()
    elif similarity_mode == "Simpson":
        similarity_filter = Simpson()
    else:
        # Previously raised a bare ValueError() with no message.
        raise ValueError('Unknown similarity_mode: {0}'.format(similarity_mode))

    nlp_base = NlpBase()
    nlp_base.tokenizable_doc = MeCabTokenizer()
    similarity_filter.nlp_base = nlp_base
    similarity_filter.similarity_limit = similarity_limit

    auto_abstractor = AutoAbstractor()
    auto_abstractor.tokenizable_doc = MeCabTokenizer()
    abstractable_doc = TopNRankAbstractor()
    result_dict = auto_abstractor.summarize(document, abstractable_doc, similarity_filter)

    # Print at most the first three summary sentences; a plain loop replaces
    # the original side-effecting list comprehension.
    for sentence in result_dict["summarize_result"][:3]:
        print(sentence)
Entry Point. Args: url: PDF url.
juraj-google-style
def encode(self, table: 'pd.DataFrame', query: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]:
    """Prepare a table and an optional query for the model.

    Only the ``input_ids`` are returned; token type IDs, attention masks,
    etc. are not included. Use ``encode_plus``/``__call__`` if you need the
    full encoding.

    Args:
        table (`pd.DataFrame`): Table containing tabular data. All cell
            values must be text; use ``.astype(str)`` to convert.
        query (`str` or `List[str]`): Question related to the table.

    Returns:
        List of token ids for the encoded table/query pair.
    """
    # Delegate the heavy lifting to encode_plus and keep only input_ids.
    full_encoding = self.encode_plus(
        table,
        query=query,
        add_special_tokens=add_special_tokens,
        padding=padding,
        truncation=truncation,
        max_length=max_length,
        return_tensors=return_tensors,
        **kwargs,
    )
    return full_encoding['input_ids']
Prepare a table and a string for the model. This method does not return token type IDs, attention masks, etc. which are necessary for the model to work correctly. Use that method if you want to build your processing on your own, otherwise refer to `__call__`. Args: table (`pd.DataFrame`): Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas dataframe to convert it to string. query (`str` or `List[str]`): Question related to a table to be encoded.
github-repos
def fog(x, severity=1):
    """Apply a fog corruption to an image.

    Fog is generated with the diamond-square (plasma fractal) algorithm and
    blended into the image.

    Args:
        x: numpy array, uncorrupted image, assumed to have uint8 pixels
            in [0, 255].
        severity: integer in [1, 5], severity of the corruption.

    Returns:
        numpy array, image with uint8 pixels in [0, 255] with added fog.
    """
    # (fog_strength, wibble_decay) pairs indexed by severity.
    c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
    x = np.array(x) / 255.
    max_val = x.max()
    # The plasma fractal needs a power-of-two map at least as large as the
    # image's longer side.
    mapsize = 512
    shape = x.shape
    max_length = max(shape[0], shape[1])
    if max_length > mapsize:
        mapsize = 2**int(np.ceil(np.log2(float(max_length))))
    tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1])
    tmp = tmp[:x.shape[0], :x.shape[1]]
    tmp = tmp[..., np.newaxis]
    x += c[0] * tmp
    # Renormalize so the brightest pixel keeps its original value, then clip.
    x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
    return around_and_astype(x_clip)
Fog corruption to images. Adding fog to images. Fog is generated by diamond-square algorithm. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Added fog.
juraj-google-style
def _get_bucket_statistics(self, bucket_name, bucket_region, storage_type, statistic, days):
    """Return a CloudWatch datapoint for an S3 bucket statistic.

    Args:
        bucket_name (str): The name of the bucket.
        bucket_region (str): AWS region the bucket lives in.
        storage_type (str): CloudWatch StorageType dimension value.
        statistic (str): The metric name to fetch from CloudWatch.
        days (int): Sample period for the statistic.

    Returns:
        The average value of the first datapoint, the string 'NO_DATA' when
        no datapoints were returned, or None (implicitly) when the
        CloudWatch call fails (the error is logged).
    """
    cw = self.session.client('cloudwatch', region_name=bucket_region)
    try:
        obj_stats = cw.get_metric_statistics(
            Namespace='AWS/S3',
            MetricName=statistic,
            Dimensions=[
                {
                    'Name': 'StorageType',
                    'Value': storage_type
                },
                {
                    'Name': 'BucketName',
                    'Value': bucket_name
                }
            ],
            # Daily granularity over the requested window.
            Period=86400,
            StartTime=datetime.utcnow() - timedelta(days=days),
            EndTime=datetime.utcnow(),
            Statistics=[
                'Average'
            ]
        )

        stat_value = obj_stats['Datapoints'][0]['Average'] if obj_stats['Datapoints'] else 'NO_DATA'

        return stat_value
    except Exception as e:
        # Best effort: log and fall through (returns None).
        self.log.error(
            'Could not get bucket statistic for account {} / bucket {} / {}'.format(self.account.account_name,
                                                                                    bucket_name, e))
    finally:
        del cw
Returns datapoints from cloudwatch for bucket statistics. Args: bucket_name `(str)`: The name of the bucket statistic `(str)`: The statistic you want to fetch from days `(int)`: Sample period for the statistic
juraj-google-style
def predict(self, X):
    """Predict targets for a feature matrix.

    Args:
        X (np.array of float): feature matrix for prediction.

    Returns:
        np.array: sigmoid-transformed predictions for each row of ``X``.
    """
    logger.info('predicting ...')
    raw_scores = self.predict_raw(X)
    # Map the first raw output column through the sigmoid.
    return sigm(raw_scores[:, 0])
Predict targets for a feature matrix. Args: X (np.array of float): feature matrix for prediction Returns: prediction (np.array)
juraj-google-style
def _setup_selection_range(self, f_start=None, f_stop=None, t_start=None, t_stop=None, init=False):
    """Clamp the time/frequency selection so it stays within the file limits.

    Missing or invalid bounds fall back to the file extents
    (``t_begin``/``t_end``, ``f_begin``/``f_end``); reversed bounds are
    swapped with a warning. Recomputes ``selection_shape`` at the end.

    Args:
        f_start: start frequency, or None for a default.
        f_stop: stop frequency, or None for a default.
        t_start: start time index, or None for a default.
        t_stop: stop time index, or None for a default.
        init (bool): True if called during ``__init__``; defaults then come
            from the file extents and fallback warnings are suppressed for
            values that were not explicitly given.
    """
    if (init is True):
        # No previous selection exists yet; default to the full file range.
        if (t_start is None):
            t_start = self.t_begin
        if (t_stop is None):
            t_stop = self.t_end
        if (f_start is None):
            f_start = self.f_begin
        if (f_stop is None):
            f_stop = self.f_end
    else:
        # Keep the previous selection for any bound not provided.
        if (f_start is None):
            f_start = self.f_start
        if (f_stop is None):
            f_stop = self.f_stop
        if (t_start is None):
            t_start = self.t_start
        if (t_stop is None):
            t_stop = self.t_stop

    # Swap reversed ranges instead of failing.
    if ((t_stop >= 0) and (t_start >= 0) and (t_stop < t_start)):
        (t_stop, t_start) = (t_start, t_stop)
        logger.warning('Given t_stop < t_start, assuming reversed values.')
    if (f_stop and f_start and (f_stop < f_start)):
        (f_stop, f_start) = (f_start, f_stop)
        logger.warning('Given f_stop < f_start, assuming reversed values.')

    # Clamp each bound to the file extents, warning unless the value was
    # simply absent during __init__.
    if ((t_start >= self.t_begin) and (t_start < self.t_end)):
        self.t_start = int(t_start)
    else:
        if ((init is False) or (t_start != None)):
            logger.warning(('Setting t_start = %f, since t_start not given or not valid.' % self.t_begin))
        self.t_start = self.t_begin
    if ((t_stop <= self.t_end) and (t_stop > self.t_begin)):
        self.t_stop = int(t_stop)
    else:
        if ((init is False) or t_stop):
            logger.warning(('Setting t_stop = %f, since t_stop not given or not valid.' % self.t_end))
        self.t_stop = self.t_end
    if ((f_start >= self.f_begin) and (f_start < self.f_end)):
        self.f_start = f_start
    else:
        if ((init is False) or f_start):
            logger.warning(('Setting f_start = %f, since f_start not given or not valid.' % self.f_begin))
        self.f_start = self.f_begin
    if ((f_stop <= self.f_end) and (f_stop > self.f_begin)):
        self.f_stop = f_stop
    else:
        if ((init is False) or f_stop):
            logger.warning(('Setting f_stop = %f, since f_stop not given or not valid.' % self.f_end))
        self.f_stop = self.f_end

    # Cache the shape implied by the final selection.
    self.selection_shape = self._calc_selection_shape()
Making sure the selection if time and frequency are within the file limits. Args: init (bool): If call during __init__
codesearchnet
def __init__(self, loss=None, predictions=None, metrics=None):
    """Constructor for SupervisedOutput (i.e. Train or Eval output).

    Args:
        loss: dict of Tensors or single Tensor representing calculated loss.
        predictions: dict of Tensors or single Tensor representing model
            predictions.
        metrics: dict of metric results keyed by name. Values may be
            instances of the `Metric` class, or (metric_value, update_op)
            tuples (or a single tuple); metric_value must be a Tensor and
            update_op a Tensor or Op.

    Raises:
        ValueError: if any of the outputs' dict keys are not strings or
            tuples of strings, or the values are not Tensors (or Operations
            in the case of update_op).
    """
    if loss is not None:
        loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)
        self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)
    if predictions is not None:
        pred_dict = self._wrap_and_check_outputs(predictions, self.PREDICTIONS_NAME)
        self._predictions = self._prefix_output_keys(pred_dict, self.PREDICTIONS_NAME)
    if metrics is not None:
        self._metrics = self._wrap_and_check_metrics(metrics)
Constructor for SupervisedOutput (ie, Train or Eval output). Args: loss: dict of Tensors or single Tensor representing calculated loss. predictions: dict of Tensors or single Tensor representing model predictions. metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of `Metric` class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. Raises: ValueError: if any of the outputs' dict keys are not strings or tuples of strings or the values are not Tensors (or Operations in the case of update_op).
github-repos
def reset(self):
    """Reset the task status.

    Mainly used if we made a silly mistake in the initial setup of the
    queue manager and we want to fix it and rerun the task.

    Returns:
        int: 0 (this implementation always succeeds).
    """
    # Remove all output artifacts of the previous run.
    self.output_file.remove()
    self.log_file.remove()
    self.stderr_file.remove()
    self.start_lockfile.remove()
    self.qerr_file.remove()
    self.qout_file.remove()
    if self.mpiabort_file.exists:
        self.mpiabort_file.remove()

    self.set_status(self.S_INIT, msg=('Reset on %s' % time.asctime()))
    self.num_restarts = 0
    self.set_qjob(None)

    # Mark the containers as not finalized so they will be re-processed.
    self.work.finalized = False
    self.flow.finalized = False
    return 0
Reset the task status. Mainly used if we made a silly mistake in the initial setup of the queue manager and we want to fix it and rerun the task. Returns: 0 on success, 1 if reset failed.
codesearchnet
def write(self, writer: WriteStream) -> None:
    """Write the object to the stream.

    Args:
        writer: The output stream; each raw part is written as bytes with
            one or more calls to ``writer.write``.
    """
    for raw_part in self._raw:
        writer.write(bytes(raw_part))
Write the object to the stream, with one or more calls to :meth:`~pymap.bytes.WriteStream.write`. Args: writer: The output stream.
juraj-google-style
def get_course_enrollments(self, enterprise_customer, days):
    """Get recent course enrollments for learners of an enterprise customer.

    Args:
        enterprise_customer (EnterpriseCustomer): Include course enrollments
            for learners of this enterprise customer.
        days (int): Include course enrollments created within this number
            of days.

    Returns:
        QuerySet: CourseEnrollment objects.
    """
    return CourseEnrollment.objects.filter(
        created__gt=datetime.datetime.now() - datetime.timedelta(days=days)
    ).filter(
        user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True)
    )
Get course enrollments for all the learners of given enterprise customer. Arguments: enterprise_customer (EnterpriseCustomer): Include Course enrollments for learners of this enterprise customer. days (int): Include course enrollment of this number of days. Returns: (list): A list of CourseEnrollment objects.
juraj-google-style
def _orthogonal_kernel(self, ksize, cin, cout):
    """Construct an orthogonal kernel for 3-D convolution.

    Args:
        ksize: Kernel size.
        cin: Number of input channels.
        cout: Number of output channels.

    Returns:
        An [ksize, ksize, ksize, cin, cout] orthogonal kernel.

    Raises:
        ValueError: If cin > cout.
    """
    if cin > cout:
        raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')
    # Keep only cin rows of an orthogonal cout x cout matrix.
    orth = self._orthogonal_matrix(cout)[0:cin, :]
    if ksize == 1:
        return array_ops.expand_dims(array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0), 0)
    # Build up a ksize^3 kernel by repeated block convolution of symmetric
    # projections.
    p = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout), self._symmetric_projection(cout))
    for _ in range(ksize - 2):
        temp = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout), self._symmetric_projection(cout))
        p = self._matrix_conv(p, temp)
    # Project each spatial position onto the cin-row orthogonal basis.
    for i in range(ksize):
        for j in range(ksize):
            for k in range(ksize):
                p[i, j, k] = math_ops.matmul(orth, p[i, j, k])
    return self._dict_to_tensor(p, ksize, ksize, ksize)
Construct orthogonal kernel for convolution. Args: ksize: Kernel size. cin: Number of input channels. cout: Number of output channels. Returns: An [ksize, ksize, ksize, cin, cout] orthogonal kernel. Raises: ValueError: If cin > cout.
github-repos
def __call__(self, data, dtype=None):
    """Serialize ``data`` into the request body in NPY format.

    Args:
        data (object): Data to be serialized. Can be a numpy array, list,
            file, or buffer.
        dtype: Optional dtype used when converting a list to an array.

    Returns:
        object: NPY serialized data used for the request.

    Raises:
        ValueError: If ``data`` is an empty array or list.
    """
    if isinstance(data, np.ndarray):
        if not data.size > 0:
            raise ValueError("empty array can't be serialized")
        return _npy_serialize(data)

    if isinstance(data, list):
        if not len(data) > 0:
            raise ValueError("empty array can't be serialized")
        return _npy_serialize(np.array(data, dtype))

    # File-like objects are passed through unchanged.
    if hasattr(data, 'read'):
        return data.read()

    # Fall back to converting anything else to an array first.
    return _npy_serialize(np.array(data))
Serialize data into the request body in NPY format. Args: data (object): Data to be serialized. Can be a numpy array, list, file, or buffer. Returns: object: NPY serialized data used for the request.
juraj-google-style
def __enter__(self) -> TestCheckWriter:
    """Context manager setup.

    Initializes ``self._worker_pool`` when ``self._worker_count`` is either
    None or an integer greater than 1 (in the former case the pool infers
    the worker count).

    Returns:
        self.

    Raises:
        RuntimeError: If this instance already has an active context
            manager.
    """
    if self._context_manager_active:
        raise RuntimeError('Tried to enter two simultaneous `with` blocks managing the same `TestCheckWriter` instance.')
    self._context_manager_active = True
    assert self._worker_pool is None
    if self._worker_count is None or self._worker_count > 1:
        self._worker_pool = multiprocessing.pool.ThreadPool(self._worker_count)
    return self
Context manager setup. Initializes `self._worker_pool` if `self._worker_count` is either `None` or an integer greater than 1. (In the former case, the worker count will be inferred.) Returns: `self`. Raises: RuntimeError: If this instance already has an active context manager.
github-repos
def shortest_undirected_path(self, physical_qubit1, physical_qubit2):
    """Return the shortest undirected path between two physical qubits.

    Args:
        physical_qubit1 (int): A physical qubit.
        physical_qubit2 (int): Another physical qubit.

    Returns:
        List: The shortest undirected path.

    Raises:
        CouplingError: When there is no path between the two qubits.
    """
    # View the coupling graph as undirected without copying it.
    undirected_view = self.graph.to_undirected(as_view=True)
    try:
        return nx.shortest_path(
            undirected_view, source=physical_qubit1, target=physical_qubit2)
    except nx.exception.NetworkXNoPath:
        raise CouplingError(
            "Nodes %s and %s are not connected"
            % (str(physical_qubit1), str(physical_qubit2)))
Returns the shortest undirected path between physical_qubit1 and physical_qubit2. Args: physical_qubit1 (int): A physical qubit physical_qubit2 (int): Another physical qubit Returns: List: The shortest undirected path Raises: CouplingError: When there is no path between physical_qubit1, physical_qubit2.
juraj-google-style
def get_path(*args, module=a99):
    """Return the absolute path of ``*args`` inside ``module``'s directory.

    Args:
        *args: Path components appended with ``os.path.join``.
        module: Python module anchoring the path; defaults to a99.

    Returns:
        str: Absolute path.
    """
    module_dir = os.path.split(module.__file__)[0]
    return os.path.abspath(os.path.join(module_dir, *args))
Returns the full path to a file or directory located relative to the given module's directory. Args: *args: path components appended to the module path with os.path.join() module: Python module, defaults to a99 Returns: path string
juraj-google-style
def build_graph(steps):
    """Build a dependency graph of steps.

    Args:
        steps (list): a list of :class:`Step` objects to execute.

    Returns:
        Graph: the populated dependency graph.
    """
    graph = Graph()
    # Register every step first so connect() can resolve names by step name.
    for step in steps:
        graph.add_step(step)
    # Wire forward (requires) and reverse (required_by) dependencies.
    for step in steps:
        for dependency in step.requires:
            graph.connect(step.name, dependency)
        for parent in step.required_by:
            graph.connect(parent, step.name)
    return graph
Builds a graph of steps. Args: steps (list): a list of :class:`Step` objects to execute.
juraj-google-style
def Var(self, mu=None):
    """Compute the variance of a PMF.

    Args:
        mu: the point around which the variance is computed;
            if omitted, computes the mean.

    Returns:
        float variance
    """
    if mu is None:
        mu = self.Mean()
    # dict.iteritems() was Python 2 only; items() works on both 2 and 3.
    return sum(p * (x - mu) ** 2 for x, p in self.d.items())
Computes the variance of a PMF. Args: mu: the point around which the variance is computed; if omitted, computes the mean Returns: float variance
juraj-google-style
def _EvaluateElementsDataSize(self, context):
    """Evaluate the elements data size.

    Uses a fixed size from the data type definition when available,
    otherwise evaluates the definition's size expression against the
    context values.

    Args:
        context (DataTypeMapContext): data type map context.

    Returns:
        int: elements data size.

    Raises:
        MappingError: if the elements data size cannot be determined or is
            negative.
    """
    elements_data_size = None
    if self._data_type_definition.elements_data_size:
        elements_data_size = self._data_type_definition.elements_data_size
    elif self._data_type_definition.elements_data_size_expression:
        expression = self._data_type_definition.elements_data_size_expression
        namespace = {}
        if (context and context.values):
            namespace.update(context.values)
        # Built-ins are disabled to limit what the expression can do; note
        # that eval of a definition-supplied expression is still only as
        # trustworthy as the definition source itself.
        namespace['__builtins__'] = {}
        try:
            elements_data_size = eval(expression, namespace)  # pylint: disable=eval-used
        except Exception as exception:
            raise errors.MappingError('Unable to determine elements data size with error: {0!s}'.format(exception))
    if ((elements_data_size is None) or (elements_data_size < 0)):
        raise errors.MappingError('Invalid elements data size: {0!s}'.format(elements_data_size))
    return elements_data_size
Evaluates elements data size. Args: context (DataTypeMapContext): data type map context. Returns: int: elements data size. Raises: MappingError: if the elements data size cannot be determined.
codesearchnet
def bbox_scaling(bboxes, scale, clip_shape=None):
    """Scale bboxes w.r.t. their centers.

    Args:
        bboxes (ndarray): Shape (..., 4) boxes as (x1, y1, x2, y2).
        scale (float): Scaling factor.
        clip_shape (tuple, optional): If given, clip the scaled boxes to
            this (h, w) boundary.

    Returns:
        ndarray: Scaled (and optionally clipped) bboxes.
    """
    if float(scale) == 1.0:
        scaled_bboxes = bboxes.copy()
    else:
        # Box sizes use the inclusive-pixel convention (+1).
        widths = bboxes[..., 2] - bboxes[..., 0] + 1
        heights = bboxes[..., 3] - bboxes[..., 1] + 1
        half_dw = (widths * (scale - 1)) * 0.5
        half_dh = (heights * (scale - 1)) * 0.5
        deltas = np.stack((-half_dw, -half_dh, half_dw, half_dh), axis=-1)
        scaled_bboxes = bboxes + deltas
    if clip_shape is None:
        return scaled_bboxes
    return bbox_clip(scaled_bboxes, clip_shape)
Scaling bboxes w.r.t the box center. Args: bboxes (ndarray): Shape(..., 4). scale (float): Scaling factor. clip_shape (tuple, optional): If specified, bboxes that exceed the boundary will be clipped according to the given shape (h, w). Returns: ndarray: Scaled bboxes.
juraj-google-style
def get_cpu_type():
    """Retrieve the CPU type/vendor string.

    Returns:
        bytes: name of the CPU, e.g. b'GenuineIntel'.
    """
    key = 'cpu_type'
    out, err = run_shell_cmd(cmds_all[PLATFORM][key])
    # Output looks like b'<field>: <value>'; keep the value part only.
    cpu_detected = out.split(b':')[1].strip()
    if err and FLAGS.debug:
        print('Error in detecting CPU type:\n %s' % str(err))

    return cpu_detected
Retrieves CPU (type) information. Returns: String that is name of the CPU. e.g. 'GenuineIntel'
github-repos
def add_service(self, name, long_name, preregistered=False, notify=True):
    """Add a service to the list of tracked services.

    Args:
        name (string): A unique short service name for the service.
        long_name (string): A longer, user friendly name for the service.
        preregistered (bool): Whether this service is an expected
            preregistered service.
        notify (bool): Send notifications about this service to all clients.

    Returns:
        awaitable: If ``notify`` is True, an awaitable for the
        notifications. Otherwise None.

    Raises:
        ArgumentError: If the short name is already taken.
    """
    if (name in self.services):
        raise ArgumentError('Could not add service because the long_name is taken', long_name=long_name)
    serv_state = states.ServiceState(name, long_name, preregistered)
    # NOTE(review): 600 is presumably a heartbeat timeout in seconds —
    # confirm against the consumer of 'heartbeat_threshold'.
    service = {'state': serv_state, 'heartbeat_threshold': 600}
    self.services[name] = service
    if notify:
        return self._notify_update(name, 'new_service', self.service_info(name))
    return None
Add a service to the list of tracked services. Args: name (string): A unique short service name for the service long_name (string): A longer, user friendly name for the service preregistered (bool): Whether this service is an expected preregistered service. notify (bool): Send notifications about this service to all clients Returns: awaitable: If notify is True, an awaitable for the notifications. Otherwise None.
codesearchnet
def t_seg(p1, p2, t, align=0):
    """Trim a segment from one or both ends.

    Args:
        p1, p2: point(x, y) endpoints of the segment.
        t: scaling factor (1 - trimmed segment / original segment).
        align: 1: trim p2, 2: trim p1, 0: trim both sides equally.

    Returns:
        Trimmed segment endpoints (p1, p2).
    """
    v = vector(p1, p2)
    if align == 1:
        return (p1, translate(p2, scale(v, -t)))
    if align == 2:
        return (translate(p1, scale(v, t)), p2)
    if align == 0:
        # Split the trim evenly between both endpoints.
        return (translate(p1, scale(v, t / 2)), translate(p2, scale(v, -t / 2)))
    # Preserve the original dict-dispatch behavior for unknown align values.
    raise KeyError(align)
trim segment Args: p1, p2: point(x, y) t: scaling factor (1 - trimed segment / original segment) align: 1: trim p2, 2: trim p1, 0: both side Return: trimmed segment(p1, p2)
juraj-google-style
def exit(self, code=None, msg=None):
    """Playbook wrapper on the TcEx exit method.

    Playbooks do not support partial failures, so exit code 3 (partial
    success) is converted to 0 before delegating to ``tcex.exit``.

    Args:
        code (Optional[int]): The exit code value for the app; defaults to
            the current ``tcex.exit_code``. Values other than 0/1/3 are
            coerced to 1.
        msg (Optional[str]): A message passed through to ``tcex.exit``.
    """
    if (code is None):
        code = self.tcex.exit_code
    if (code == 3):
        self.tcex.log.info(u'Changing exit code from 3 to 0.')
        code = 0
    elif (code not in [0, 1]):
        # Any other non-standard code is treated as failure.
        code = 1
    self.tcex.exit(code, msg)
Playbook wrapper on TcEx exit method Playbooks do not support partial failures so we change the exit method from 3 to 1 and call it a partial success instead. Args: code (Optional [integer]): The exit code value for the app.
codesearchnet
def transform(self, column):
    """Apply an exponential to the values to turn them into positive numbers.

    Args:
        column (pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame: Single-column frame with exponentiated values.
    """
    self.check_data_type()
    exponentiated = np.exp(column[self.col_name])
    return pd.DataFrame({self.col_name: exponentiated})
Applies an exponential to values to turn them positive numbers. Args: column (pandas.DataFrame): Data to transform. Returns: pd.DataFrame
juraj-google-style
def _RemoveUsers(self, remove_users):
    """Deprovision Linux user accounts not present in account metadata.

    Args:
        remove_users: list, the username strings of the Linux accounts to
            remove.
    """
    for username in remove_users:
        self.utils.RemoveUser(username)
        # Drop any cached SSH keys for the removed account.
        self.user_ssh_keys.pop(username, None)
    # Removed names are no longer tracked as invalid.
    self.invalid_users -= set(remove_users)
Deprovision Linux user accounts that do not appear in account metadata. Args: remove_users: list, the username strings of the Linux accounts to remove.
codesearchnet
def exit(self, code=None, msg=None):
    """Application exit method with proper exit code.

    Runs the standard ``sys.exit()`` with the exit code previously defined
    via ``exit_code`` or provided in this call.

    Args:
        code (Optional[int]): The exit code value for the app; must be one
            of 0, 1 or 3, otherwise it is coerced to 1.
        msg (Optional[str]): A message to log and add to message tc output.
    """
    if (msg is not None):
        # Codes 0/3 count as success-ish and log at info level.
        if ((code in [0, 3]) or ((code is None) and (self.exit_code in [0, 3]))):
            self.log.info(msg)
        else:
            self.log.error(msg)
        self.message_tc(msg)
    if (code is None):
        code = self.exit_code
    elif (code in [0, 1, 3]):
        pass
    else:
        self.log.error(u'Invalid exit code')
        code = 1
    if self.default_args.tc_aot_enabled:
        # Push the result code for App-on-Trigger execution.
        self.playbook.aot_rpush(code)
    self.log.info(u'Exit Code: {}'.format(code))
    sys.exit(code)
Application exit method with proper exit code The method will run the Python standard sys.exit() with the exit code previously defined via :py:meth:`~tcex.tcex.TcEx.exit_code` or provided during the call of this method. Args: code (Optional [integer]): The exit code value for the app. msg (Optional [string]): A message to log and add to message tc output.
codesearchnet
def __init__(self, base_core, input_shape=None, name="skip_connection_core"):
    """Construct a SkipConnectionCore.

    Args:
        base_core: Base RNNCore to wrap.
        input_shape: Shape of the input as tuple, excluding the batch size.
        name: Name of the module.
    """
    super(SkipConnectionCore, self).__init__(name=name)
    self._base_core = base_core
    self._input_shape = input_shape
Construct a SkipConnectionCore. Args: base_core: Base RNNCore to wrap. input_shape: Shape of the input as tuple, excluding the batch size. name: Name of the module.
juraj-google-style
def _inputs_valid(self, output_condition_uris):
    """Validate the transaction's inputs against a set of output conditions.

    Note:
        The number of ``output_condition_uris`` must equal the number of
        inputs this transaction has.

    Args:
        output_condition_uris (:obj:`list` of :obj:`str`): Output condition
            URIs to check the inputs against.

    Returns:
        bool: True if all inputs are valid.

    Raises:
        ValueError: If the counts of inputs and condition URIs differ.
    """
    if (len(self.inputs) != len(output_condition_uris)):
        raise ValueError('Inputs and output_condition_uris must have the same count')
    tx_dict = (self.tx_dict if self.tx_dict else self.to_dict())
    # The serialized form the inputs signed excludes signatures and the id.
    tx_dict = Transaction._remove_signatures(tx_dict)
    tx_dict['id'] = None
    tx_serialized = Transaction._to_str(tx_dict)

    def validate(i, output_condition_uri=None):
        """Validate a single input against its output condition URI."""
        return self._input_valid(self.inputs[i], self.operation, tx_serialized, output_condition_uri)
    return all((validate(i, cond) for (i, cond) in enumerate(output_condition_uris)))
Validates an Input against a given set of Outputs. Note: The number of `output_condition_uris` must be equal to the number of Inputs a Transaction has. Args: output_condition_uris (:obj:`list` of :obj:`str`): A list of Outputs to check the Inputs against. Returns: bool: If all Outputs are valid.
codesearchnet
def console_set_color_control(
    con: int, fore: Tuple[int, int, int], back: Tuple[int, int, int]
) -> None:
    """Configure one of libtcod's color controls.

    Args:
        con (int): Color control constant to modify.
        fore (Tuple[int, int, int]): Foreground color as an (r, g, b)
            sequence or Color instance.
        back (Tuple[int, int, int]): Background color as an (r, g, b)
            sequence or Color instance.
    """
    lib.TCOD_console_set_color_control(con, fore, back)
Configure :any:`color controls`. Args: con (int): :any:`Color control` constant to modify. fore (Union[Tuple[int, int, int], Sequence[int]]): An (r, g, b) sequence or Color instance. back (Union[Tuple[int, int, int], Sequence[int]]): An (r, g, b) sequence or Color instance.
juraj-google-style
def list_cover(list1, list2):
    """Return, for each position in ``list1``, whether that item is in ``list2``.

    Args:
        list1 (list): items to test.
        list2 (list): collection to test membership against.

    Returns:
        list: booleans, True where ``list1`` intersects ``list2``.
    """
    # Build the lookup set once for O(1) membership tests.
    lookup = set(list2)
    return [item in lookup for item in list1]
r""" returns boolean for each position in list1 if it is in list2 Args: list1 (list): list2 (list): Returns: list: incover_list - true where list1 intersects list2 CommandLine: python -m utool.util_list --test-list_cover Example: >>> # DISABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> # build test data >>> list1 = [1, 2, 3, 4, 5, 6] >>> list2 = [2, 3, 6] >>> # execute function >>> incover_list = list_cover(list1, list2) >>> # verify results >>> result = str(incover_list) >>> print(result) [False, True, True, False, False, True]
codesearchnet
def put_member(self, name: InstanceName, value: Value, raw: bool = False) -> "InstanceNode":
    """Return the receiver's member ``name`` with a new value.

    If the member is permitted by the schema but doesn't exist, it is
    created. The receiver is not mutated; a copy with the new member value
    is returned.

    Args:
        name: Instance name of the member.
        value: New value of the member.
        raw: Flag to be set if ``value`` is raw (it is then cooked via the
            member's schema node).

    Raises:
        NonexistentSchemaNode: If member ``name`` is not permitted by the
            schema.
        InstanceValueError: If the receiver's value is not an object.
    """
    if not isinstance(self.value, ObjectValue):
        raise InstanceValueError(self.json_pointer(), "member of non-object")
    csn = self._member_schema_node(name)
    newval = self.value.copy()
    newval[name] = csn.from_raw(value, self.json_pointer()) if raw else value
    return self._copy(newval)._member(name)
Return receiver's member with a new value. If the member is permitted by the schema but doesn't exist, it is created. Args: name: Instance name of the member. value: New value of the member. raw: Flag to be set if `value` is raw. Raises: NonexistentSchemaNode: If member `name` is not permitted by the schema. InstanceValueError: If the receiver's value is not an object.
juraj-google-style
def probe_async(self, callback):
    """Send advertisements for all connected devices.

    Args:
        callback (callable): Called when the probe operation has completed,
            with signature ``callback(adapter_id, success, failure_reason)``
            where ``success`` is a bool and ``failure_reason`` is None on
            success, otherwise a reason string.
    """
    def _on_finished(_name, control_info, exception):
        if exception is not None:
            callback(self.id, False, str(exception))
            return

        self._control_info = control_info

        try:
            info = {
                'connection_string': "direct",
                'uuid': control_info.uuid,
                'signal_strength': 100
            }

            self._trigger_callback('on_scan', self.id, info, self.ExpirationTime)
        finally:
            # Always report completion, even if the scan callback raised.
            callback(self.id, True, None)

    self._control_thread.command(JLinkControlThread.FIND_CONTROL, _on_finished, self._device_info.ram_start, self._device_info.ram_size)
Send advertisements for all connected devices. Args: callback (callable): A callback for when the probe operation has completed. callback should have signature callback(adapter_id, success, failure_reason) where: success: bool failure_reason: None if success is True, otherwise a reason for why we could not probe
juraj-google-style
def get_day_end(config):
    """Get the day end time for a full 24h day given the configured start.

    Args:
        config (dict): Config dict; only ``day_start`` is used.

    Returns:
        datetime.time: One second before the day start.
    """
    start = datetime.datetime.combine(datetime.date.today(), config['day_start'])
    # A full 24h day ends one second before the next day starts.
    end = start - datetime.timedelta(seconds=1)
    return end.time()
Get the day end time given the day start. This assumes a full 24h day. Args: config (dict): Config dict. Needed to extract ``day_start``. Note: This is merely a convenience function so we do not have to deduce this from ``day_start`` by hand all the time.
codesearchnet
def to_voxels(array):
    """Convert a boolean mask array to its list of voxel coordinates.

    Args:
        array (numpy.ndarray): A numpy nd array. This must be boolean!

    Returns:
        numpy.ndarray: one n-tuple of indices per truthy element.

    Raises:
        ValueError: If ``array`` is not a numpy.ndarray.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # ndarray subclasses such as masked arrays.
    if not isinstance(array, numpy.ndarray):
        raise ValueError("array argument must be of type numpy.ndarray")
    return numpy.argwhere(array)
Converts an array to its voxel list. Arguments: array (numpy.ndarray): A numpy nd array. This must be boolean! Returns: A list of n-tuples
juraj-google-style
def getsize(self, path):
    """Return the file object size in bytes.

    Args:
        path: path to the file object.

    Returns:
        int: file size in bytes.

    Raises:
        os.error: If the path cannot be resolved, or a trailing path
            separator is used on a non-directory.
    """
    try:
        file_obj = self.filesystem.resolve(path)
        if (self.filesystem.ends_with_path_separator(path) and (S_IFMT(file_obj.st_mode) != S_IFDIR)):
            # A trailing separator on a non-directory is invalid; Windows
            # reports EINVAL, POSIX reports ENOTDIR.
            error_nr = (errno.EINVAL if self.filesystem.is_windows_fs else errno.ENOTDIR)
            self.filesystem.raise_os_error(error_nr, path)
        return file_obj.st_size
    except IOError as exc:
        raise os.error(exc.errno, exc.strerror)
Return the file object size in bytes. Args: path: path to the file object. Returns: file size in bytes.
codesearchnet
def assign(var, new_val, assign_fn=assign_slice):
    """Assign a new value to a variable.

    Args:
        var: either a Variable operation or its output Tensor.
        new_val: a Tensor.
        assign_fn: a function from
            (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation.

    Returns:
        an Operation.

    Raises:
        ValueError: if var is not a Variable and var.operation is not a
            Variable.
    """
    if isinstance(var, Tensor):
        # Allow passing the Variable's output Tensor directly.
        var = var.operation
    if not isinstance(var, Variable):
        raise ValueError("var must be a mtf.Variable or its output Tensor.")
    return Assign([var], [new_val], assign_fn=assign_fn)
Assign a new value to a variable. Args: var: either a Variable operation or its output Tensor. new_val: a Tensor assign_fn: a function from (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation Returns: an Operation Raises: ValueError: if var is not a Variable and var.operation is not a Variable
juraj-google-style
def path_to_string(path):
    """Convert ``os.PathLike`` objects to their string representation.

    Anything that is not path-like (e.g. an open file object) is returned
    unchanged, allowing passthrough.

    Args:
        path: `PathLike` object that represents a path.

    Returns:
        The string form of ``path`` if it is path-like, else ``path``
        itself.
    """
    return os.fspath(path) if isinstance(path, os.PathLike) else path
Convert `PathLike` objects to their string representation. If given a non-string typed path object, converts it to its string representation. If the object passed to `path` is not among the above, then it is returned unchanged. This allows e.g. passthrough of file objects through this function. Args: path: `PathLike` object that represents a path Returns: A string representation of the path argument, if Python support exists.
github-repos
def get_cuda_version_default():
    """Retrieve the default CUDA version.

    The default version is the one found in the `/usr/local/cuda/`
    installation. stderr is silenced by default; setting FLAGS.debug will
    not enable it (remove `2> /dev/null` from `cmds_linux['cuda_ver_dflt']`
    to enable stderr).

    Two retrieval methods are tried in order:
      1) `nvcc`; if unavailable,
      2) the version file (`version.txt`) in the CUDA install directory.

    Returns:
        str: the default CUDA version, e.g. '10.1'.
    """
    key = 'cuda_ver_dflt'
    out = ''
    cmd_list = cmds_all[PLATFORM.lower()][key]
    # NOTE(review): the loop does not break after a successful command, so a
    # later method can overwrite `out` — confirm this is intended.
    for i, cmd in enumerate(cmd_list):
        try:
            out, err = run_shell_cmd(cmd)
            if not out:
                raise Exception(err)
        except Exception as e:
            if FLAGS.debug:
                print('\nWarning: Encountered issue while retrieving default CUDA version. (%s) Trying a different method...\n' % e)
            if i == len(cmd_list) - 1:
                if FLAGS.debug:
                    print('Error: Cannot retrieve CUDA default version.\nStopping...')
            else:
                pass
    return out.strip('\n')
Retrieves default CUDA version. Default version is the version found in `/usr/local/cuda/` installation. stderr is silenced by default. Setting FLAGS.debug mode will not enable it. Remove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable stderr. It iterates through two types of version retrieval method: 1) Using `nvcc`: If `nvcc` is not available, then it uses next method. 2) Read version file (`version.txt`) found in CUDA install directory. Returns: String that is the default CUDA version. e.g. '10.1'
github-repos
def _randomize_speed(base_speed: int, sigma: int = None) -> int:
    """Create a random variation of a wind speed.

    Args:
        base_speed: base wind speed.
        sigma: sigma value for the gaussian variation; defaults to a
            quarter of ``base_speed``.

    Returns:
        Random wind speed, clamped to the range [0, 50].
    """
    if sigma is None:
        int_sigma = int(base_speed / 4)
    else:
        int_sigma = sigma
    val = MissionWeather._gauss(base_speed, int_sigma)
    if val < 0:
        return 0
    return min(val, 50)
Creates a variation in wind speed Args: base_speed: base wind speed sigma: sigma value for gaussian variation Returns: random wind speed
juraj-google-style
def export_aliases(export_path=None, exclusions=None):
    """Export all registered aliases to an INI configuration file.

    Args:
        export_path: The path of the alias configuration file to export to;
            defaults to ALIAS_FILE_NAME in the current directory.
        exclusions: Space-separated aliases excluded from export.

    Raises:
        CLIError: If an exclusion does not name a registered alias.
    """
    if not export_path:
        export_path = os.path.abspath(ALIAS_FILE_NAME)

    alias_table = get_alias_table()
    # Validate and remove each excluded alias before writing the file.
    for exclusion in exclusions or []:
        if exclusion not in alias_table.sections():
            raise CLIError(ALIAS_NOT_FOUND_ERROR.format(exclusion))
        alias_table.remove_section(exclusion)

    _commit_change(alias_table, export_path=export_path, post_commit=False)
    logger.warning(POST_EXPORT_ALIAS_MSG, export_path)
Export all registered aliases to a given path, as an INI configuration file. Args: export_path: The path of the alias configuration file to export to. exclusions: Space-separated aliases excluded from export.
juraj-google-style
def dispatch(self, inp):
    """Send the inputs to the experts.

    Args:
        inp: a `Tensor` of shape `[batch, length, depth]`.

    Returns:
        a tensor with shape [batch, num_experts, expert_capacity, depth].
    """
    # Flatten batch and length so each position can be gathered by its flat
    # index.
    inp = tf.reshape(inp, [(self._batch * self._length), (- 1)])
    ret = tf.gather(inp, self._flat_indices)
    return ret
Send the inputs to the experts. Args: inp: a `Tensor` of shape "[batch, length, depth]` Returns: a tensor with shape [batch, num_experts, expert_capacity, depth]
codesearchnet
def GetBasePathSpecs(self, source_path):
    """Determine the base path specifications for a source.

    Args:
        source_path (str): source path.

    Returns:
        list[PathSpec]: path specifications.

    Raises:
        ScannerError: if the source path does not exist, is not a file or
            directory, or if the format of or within the source file is
            not supported.
    """
    if (not source_path):
        raise errors.ScannerError('Invalid source path.')
    # Paths starting with \\.\ are Windows device paths and cannot be
    # checked with os.path.exists.
    if ((not source_path.startswith('\\\\.\\')) and (not os.path.exists(source_path))):
        raise errors.ScannerError('No such device, file or directory: {0:s}.'.format(source_path))
    scan_context = source_scanner.SourceScannerContext()
    scan_context.OpenSourcePath(source_path)
    try:
        self._source_scanner.Scan(scan_context)
    except (ValueError, errors.BackEndError) as exception:
        raise errors.ScannerError('Unable to scan source with error: {0!s}'.format(exception))
    self._source_path = source_path
    self._source_type = scan_context.source_type
    # Non storage-media sources resolve directly to the root node path spec.
    if (self._source_type not in [definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE, definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE]):
        scan_node = scan_context.GetRootScanNode()
        return [scan_node.path_spec]
    # Walk down single-child chains to the first branching node.
    scan_node = scan_context.GetRootScanNode()
    while (len(scan_node.sub_nodes) == 1):
        scan_node = scan_node.sub_nodes[0]
    base_path_specs = []
    if (scan_node.type_indicator != definitions.TYPE_INDICATOR_TSK_PARTITION):
        self._ScanVolume(scan_context, scan_node, base_path_specs)
    else:
        # Scan each TSK partition separately.
        partition_identifiers = self._GetTSKPartitionIdentifiers(scan_node)
        for partition_identifier in partition_identifiers:
            location = '/{0:s}'.format(partition_identifier)
            sub_scan_node = scan_node.GetSubNodeByLocation(location)
            self._ScanVolume(scan_context, sub_scan_node, base_path_specs)
    return base_path_specs
Determines the base path specifications. Args: source_path (str): source path. Returns: list[PathSpec]: path specifications. Raises: ScannerError: if the source path does not exists, or if the source path is not a file or directory, or if the format of or within the source file is not supported.
codesearchnet
def ask_to_proceed_with_overwrite(filepath):
    """Prompt the user about overwriting a file.

    Args:
        filepath: the path to the file that would be overwritten.

    Returns:
        True if we can proceed with the overwrite, False otherwise.
    """
    answer = input('[WARNING] %s already exists - overwrite? [y/n]' % filepath).strip().lower()
    # Keep asking until an unambiguous answer is given.
    while answer not in ('y', 'n'):
        answer = input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
    if answer == 'n':
        return False
    print('[TIP] Next time specify overwrite=True!')
    return True
Produces a prompt asking about overwriting a file. Args: filepath: the path to the file to be overwritten. Returns: True if we can proceed with overwrite, False otherwise.
github-repos
def call_servo(examples, serving_bundle):
    """Send an RPC request to the Servomatic prediction service.

    Args:
        examples: A list of examples that matches the model spec.
        serving_bundle: A `ServingBundle` object that contains the
            information to make the serving request.

    Returns:
        A ClassificationResponse or RegressionResponse proto, or the
        converted predict response for predict-mode requests.
    """
    # NOTE(review): this line appears truncated in the extracted source; the
    # original presumably built an 'http://<inference address>' URL — confirm.
    parsed_url = urlparse(('http:
    channel = implementations.insecure_channel(parsed_url.hostname, parsed_url.port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    # Choose the request type matching the serving mode.
    if serving_bundle.use_predict:
        request = predict_pb2.PredictRequest()
    elif (serving_bundle.model_type == 'classification'):
        request = classification_pb2.ClassificationRequest()
    else:
        request = regression_pb2.RegressionRequest()
    request.model_spec.name = serving_bundle.model_name
    if (serving_bundle.model_version is not None):
        request.model_spec.version.value = serving_bundle.model_version
    if (serving_bundle.signature is not None):
        request.model_spec.signature_name = serving_bundle.signature
    if serving_bundle.use_predict:
        # Predict requests carry serialized tf.Examples as a string tensor.
        request.inputs[serving_bundle.predict_input_tensor].CopyFrom(tf.compat.v1.make_tensor_proto(values=[ex.SerializeToString() for ex in examples], dtype=types_pb2.DT_STRING))
    else:
        request.input.example_list.examples.extend(examples)
    # 30-second RPC deadline on all calls.
    if serving_bundle.use_predict:
        return common_utils.convert_predict_response(stub.Predict(request, 30.0), serving_bundle)
    elif (serving_bundle.model_type == 'classification'):
        return stub.Classify(request, 30.0)
    else:
        return stub.Regress(request, 30.0)
Send an RPC request to the Servomatic prediction service. Args: examples: A list of examples that matches the model spec. serving_bundle: A `ServingBundle` object that contains the information to make the serving request. Returns: A ClassificationResponse or RegressionResponse proto.
codesearchnet
def is_connectable(host: str, port: Union[int, str]) -> bool:
    """Try to connect to ``(host, port)`` to see if it is connectable.

    Args:
        host: The host to connect to.
        port: The port to connect to.

    Returns:
        True if a TCP connection could be established within one second,
        False otherwise.
    """
    socket_ = None
    try:
        socket_ = socket.create_connection((host, port), 1)
        result = True
    except OSError:
        # OSError covers socket.timeout as well as refused/unreachable
        # connections, which previously escaped as uncaught exceptions
        # instead of yielding False.
        result = False
    finally:
        if socket_:
            socket_.close()
    return result
Tries to connect to the device to see if it is connectable. Args: host: The host to connect. port: The port to connect. Returns: True or False.
juraj-google-style
def to_dict(self):
    """Convert this object into a dictionary.

    Returns:
        dict: A dict with the same information as this object.
    """
    return {
        'commands': self.commands,
        'configs': self.configs,
        'short_name': self.name,
        'versions': {
            'module': self.module_version,
            'api': self.api_version,
        },
    }
Convert this object into a dictionary. Returns: dict: A dict with the same information as this object.
codesearchnet
def _row_partitions_for_uniform_shape(shape, rank):
    """Return row partitions for the given uniform shape Tensor.

    Args:
        shape: A vector describing a uniform shape.
        rank: The number of dimensions to generate row partitions for.

    Returns:
        A tuple of (rank - 1) `RowPartition`s with uniform row length.
    """
    # The cumulative product gives the total number of values at each
    # nesting depth.
    shape_cumprod = math_ops.cumprod(shape[:rank])
    return tuple([RowPartition.from_uniform_row_length(uniform_row_length=shape[i + 1], nvals=shape_cumprod[i + 1], nrows=shape_cumprod[i]) for i in range(rank - 1)])
Returns row partitions for the given shape Tensor. Args: shape: A vector describing a uniform shape. rank: The number of dimensions to generate row partitions for Returns: A list of (rank-1) `RowPartition`s with uniform row length.
github-repos
def shell_call(command, **kwargs):
    """Calls shell command with argument substitution.

    Args:
        command: command represented as a list. Each element of the list is
            one token of the command, e.g. "cp a b" becomes ['cp', 'a', 'b'].
            Any token of the exact form '${NAME}' is replaced by the value
            of kwargs['NAME'] when that key is present.
        **kwargs: dictionary with argument substitutions.

    Returns:
        Output of the command (bytes).

    Raises:
        subprocess.CalledProcessError: if the command's return value is
            not zero.
    """
    cmd_variable_re = re.compile(r'^\$\{(\w+)\}$')
    command = list(command)
    # Substitute ${NAME} tokens in place before executing.
    for i, token in enumerate(command):
        m = cmd_variable_re.match(token)
        if m:
            var_id = m.group(1)
            if var_id in kwargs:
                command[i] = kwargs[var_id]
    # Lazy %-style args avoid formatting when debug logging is disabled.
    logging.debug('Executing shell command: %s', ' '.join(command))
    return subprocess.check_output(command)
Calls shell command with argument substitution. Args: command: command represented as a list. Each element of the list is one token of the command. For example "cp a b" becomes ['cp', 'a', 'b'] If any element of the list looks like '${NAME}' then it will be replaced by value from **kwargs with key 'NAME'. **kwargs: dictionary with argument substitution Returns: output of the command Raises: subprocess.CalledProcessError if command return value is not zero This function is useful when you need to do variable substitution prior running the command. Below are few examples of how it works: shell_call(['cp', 'a', 'b'], a='asd') calls command 'cp a b' shell_call(['cp', '${a}', 'b'], a='asd') calls command 'cp asd b', '${a}; was replaced with 'asd' before calling the command
codesearchnet
def filter_data(self, field, filter_value, filter_operator, field_converter=None):
    """Filter the indexed data on the given field.

    Args:
        field (string): The field to filter on.
        filter_value (string | list): The value to match.
        filter_operator (string): The operator for comparison.
        field_converter (method): A method used to convert the field
            before comparison.

    Returns:
        (set): Matching data objects; empty when no index exists for
        ``field``.
    """
    index = self._indexes.get(field)
    if index is None:
        # No index for this field -> nothing can match.
        return set()
    matches = self._index_filter(
        index, filter_value, filter_operator, field_converter)
    return set(matches)
Filter the data given the provided. Args: field (string): The field to filter on. filter_value (string | list): The value to match. filter_operator (string): The operator for comparison. field_converter (method): A method used to convert the field before comparison. Returns: (set): List of matching data objects
juraj-google-style
def QA_fetch_get_sz_margin(date):
    """Return Shenzhen margin-trading data for one trading day.

    Args:
        date (str): Date in 'YYYY-MM-DD' format.

    Returns:
        pandas.DataFrame: Margin data tagged with the date and the 'sz'
        exchange marker, or None when ``date`` is not a Shanghai-calendar
        trading day.
    """
    # Non-trading days have no data to fetch.
    if date not in trade_date_sse:
        return None
    frame = pd.read_excel(_sz_url.format(date))
    return frame.assign(date=date).assign(sse='sz')
Return Shenzhen margin-trading data. Arguments: date {str YYYY-MM-DD} -- date in 'YYYY-MM-DD' format Returns: pandas.DataFrame -- margin data for the given trading day
juraj-google-style
def validate(self, value):
    """Run every child validator against ``value``.

    NOTE(review): the surrounding documentation describes OR semantics
    ("True if at least one of the validators ... return True"), but this
    implementation raises if ANY validator fails -- i.e. AND semantics.
    Confirm which behavior is intended.

    Args:
        value: The value to validate.

    Returns:
        ``value`` when no validator raised.

    Raises:
        ValidatorException: aggregate of every individual validator error.
    """
    errors = []
    # Track which validators accepted the value.
    self._used_validator = []
    for val in self._validators:
        try:
            val.validate(value)
            self._used_validator.append(val)
        except ValidatorException as e:
            errors.append(e)
        except Exception as e:
            # Wrap unexpected failures so callers see a uniform type.
            errors.append(ValidatorException('Unknown Error', e))
    if (len(errors) > 0):
        raise ValidatorException.from_list(errors)
    return value
validate function from OrValidator Returns: True if at least one of the validators' validate functions returns True
codesearchnet
def configureDevicesForMultiDeviceTest(self, num_devices):
    """Configures number of logical devices for multi-device tests.

    Splits the first physical CPU into ``num_devices`` logical devices.
    If invoked in a GPU-enabled runtime, the last returned name refers to
    a GPU device; otherwise all names refer to logical CPU devices.

    Args:
        num_devices: The number of devices to configure.

    Returns:
        A list of device names to use for a multi-device test.
    """
    physical_cpus = config.list_physical_devices('CPU')
    physical_gpus = config.list_physical_devices('GPU')
    logical_configs = [
        context.LogicalDeviceConfiguration() for _ in range(num_devices)
    ]
    config.set_logical_device_configuration(physical_cpus[0], logical_configs)
    names = ['/device:CPU:%d' % index for index in range(num_devices - 1)]
    if physical_gpus:
        names.append('/device:GPU:0')
    else:
        names.append('/device:CPU:%d' % (num_devices - 1))
    return names
Configures number of logical devices for multi-device tests. It returns a list of device names. If invoked in GPU-enabled runtime, the last device name will be for a GPU device. Otherwise, all device names will be for a CPU device. Args: num_devices: The number of devices to configure. Returns: A list of device names to use for a multi-device test.
github-repos
def peek(self, key, indices=None, name=None):
    """Peeks at staging area data associated with the key.

    If the key is not in the staging area, this blocks until the
    associated (key, value) is inserted.

    Args:
        key: Key associated with the required data.
        indices: Partial list of tensors to retrieve (optional). A list of
            integer or string indices. String indices are only valid if
            the staging area has names associated with it.
        name: A name for the operation (optional).

    Returns:
        The created op.
    """
    if name is None:
        name = '%s_pop' % self._name
    indices, dtypes = self._get_indices_and_dtypes(indices)
    # Place the peek op together with the staging area's other ops.
    with ops.colocate_with(self._coloc_op):
        result = self._peek_fn(key, shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit)
    return self._get_return_value(result, indices)
Peeks at staging area data associated with the key. If the key is not in the staging area, it will block until the associated (key, value) is inserted. Args: key: Key associated with the required data indices: Partial list of tensors to retrieve (optional). A list of integer or string indices. String indices are only valid if the Staging Area has names associated with it. name: A name for the operation (optional) Returns: The created op
github-repos
def _ParseRecurseKeys(self, parser_mediator, root_key): for registry_key in root_key.RecurseKeys(): if parser_mediator.abort: break self._ParseKey(parser_mediator, registry_key)
Parses the Registry keys recursively. Args: parser_mediator (ParserMediator): parser mediator. root_key (dfwinreg.WinRegistryKey): root Windows Registry key.
codesearchnet
def add_squashed_change(self, path, data):
    """Register a squashed change to a particular path.

    Args:
        path (list): The path of what has changed, relative from Block.
        data (object): The new data.

    Raises:
        AssertionError: If called while changes are not being squashed.
    """
    # `assert` statements are stripped under `python -O`; raise the same
    # exception type explicitly so this precondition always holds.
    if not self._squashed_count:
        raise AssertionError("Called while not squashing changes")
    # Drop the leading path element (the Block name) before recording.
    self._squashed_changes.append([path[1:], data])
Register a squashed change to a particular path Args: path (list): The path of what has changed, relative from Block data (object): The new data
juraj-google-style
def assert_child_key_has_value(self, parent, child, caller):
    """Assert that context[parent][child] exists and has a value.

    Args:
        parent: Parent key in the context.
        child: Validate this sub-key of parent exists AND isn't None.
        caller: string. Calling function name - used to construct error
            messages.

    Raises:
        KeyNotInContextError: parent or child key doesn't exist.
        KeyInContextHasNoValueError: context[parent][child] is None.
        ContextError: context[parent] is not iterable.
        AssertionError: parent or child argument is falsy.
    """
    assert parent, 'parent parameter must be specified.'
    assert child, 'child parameter must be specified.'
    # Parent itself must exist and have a non-None value first.
    self.assert_key_has_value(parent, caller)
    try:
        child_exists = (child in self[parent])
    except TypeError as err:
        # A non-iterable parent value cannot contain children; re-raise
        # with context, preserving the original cause.
        raise ContextError(f"context['{parent}'] must be iterable and contain '{child}' for {caller}. {err}") from err
    if child_exists:
        if (self[parent][child] is None):
            raise KeyInContextHasNoValueError(f"context['{parent}']['{child}'] must have a value for {caller}.")
    else:
        raise KeyNotInContextError(f"context['{parent}']['{child}'] doesn't exist. It must exist for {caller}.")
Assert that context contains key that has child which has a value. Args: parent: parent key child: validate this sub-key of parent exists AND isn't None. caller: string. calling function name - this used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None
codesearchnet
def copy_file_content(self, file_id, source_file):
    """Copy file content from source file to target file.

    Args:
        file_id (str): The UUID of the file whose content is written.
        source_file (str): The UUID of the file whose content is copied.

    Returns:
        None

    Raises:
        StorageArgumentException: Invalid arguments.
        StorageForbiddenException: Server response code 403.
        StorageNotFoundException: Server response code 404.
        StorageException: Other 400-600 error codes.
    """
    # Both identifiers must be well-formed UUIDs before hitting the API.
    if not is_valid_uuid(file_id):
        raise StorageArgumentException(
            'Invalid UUID for file_id: {0}'.format(file_id))
    if not is_valid_uuid(source_file):
        raise StorageArgumentException(
            'Invalid UUID for source_file: {0}'.format(source_file))
    endpoint = 'file/{}/content/'.format(file_id)
    request = self._authenticated_request.to_endpoint(endpoint)
    # X-Copy-From instructs the storage service to copy server-side.
    request.with_headers({'X-Copy-From': source_file}).put()
Copy file content from source file to target file. Args: file_id (str): The UUID of the file whose content is written. source_file (str): The UUID of the file whose content is copied. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
codesearchnet
def get_permalink(*, url: str, template_params: dict[str, Any] | tuple[tuple[str, Any, Any], ...]) -> str:
    """Get the permalink for the current colab.

    Args:
        url: The base URL. A 'go/' short link is expanded to a full URL.
        template_params: A dict of name to value. Can also be a tuple of
            (name, value, default) triples, in which case only entries
            whose value differs from the default are kept (to make the
            url shorter).

    Returns:
        The permalink string.
    """
    # NOTE(review): the two f-string literals below appear truncated
    # (likely by text extraction) and are not valid Python as written;
    # reconstruct them from the original source before relying on this
    # function.
    if url.startswith('go/'):
        url = f'http:
    if not isinstance(template_params, dict):
        # Tuple form: keep only parameters that differ from their default.
        template_params = {name: value for name, value, default in template_params if value != default}
    # Serialize then percent-encode the parameters for URL embedding.
    template_params = json_std.dumps(template_params)
    template_params = urllib.parse.quote(template_params)
    return f'{url}
Get the permalink for the current colab. Args: url: The base URL. template_params: A dict of name to value. Can also be a list of (name, value, default) tuples, in which case only the value != default are added (to make the url shorter). Returns: The permalink.
github-repos
def translate_job_state(code):
    """Translate the (numeric, string-encoded) state of a job.

    Args:
        code (str): Job state code, one of '0'..'6'.

    Returns:
        str: Human-readable description of the state, or '' for an
        unknown code.
    """
    # Dict dispatch replaces seven independent `if` comparisons; unknown
    # codes fall through to '' exactly as before.
    descriptions = {
        "0": "Queued",
        "1": "Scheduled",
        "2": "Processing",
        "3": "Finished",
        "4": "Error",
        "5": "Canceled",
        "6": "Canceling",
    }
    return descriptions.get(code, "")
Auxiliary function to translate the (numeric, string-encoded) state of a job. Args: code (str): A valid state code to translate. Returns: str. Human-readable description of the job state, or an empty string for unknown codes.
juraj-google-style
def _get_args_to_parse(args, sys_argv):
    """Return the arguments that should be parsed.

    Args:
        args: Explicit argument list, or None to fall back to the
            command line.
        sys_argv: Arguments of the command line, i.e. ``sys.argv``.

    Returns:
        ``args`` when it is not None, otherwise ``sys_argv[1:]`` (which
        may be an empty list).
    """
    if args is None:
        to_parse = sys_argv[1:]
    else:
        to_parse = args
    _LOG.debug('Parsing arguments: %s', to_parse)
    return to_parse
Return the given arguments if it is not None else sys.argv if it contains something, an empty list otherwise. Args: args: argument to be parsed sys_argv: arguments of the command line i.e. sys.argv
codesearchnet
def member_del(self, repl_id, member_id):
    """Remove a member from a replica set (reconfigures the replica).

    Args:
        repl_id: Replica set identity.
        member_id: Member index to remove.

    Returns:
        The result of the underlying replica ``member_del`` call.
    """
    replica = self[repl_id]
    outcome = replica.member_del(member_id)
    # Store the reconfigured replica back under the same identity.
    self[repl_id] = replica
    return outcome
remove member from replica set (reconfig replica) Args: repl_id - replica set identity member_id - member index
juraj-google-style