code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def RunPlugins( cls, artifacts_registry, file_system, mount_point, knowledge_base):
    """Runs the preprocessing plugins.

    Args:
        artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts
            definitions registry.
        file_system (dfvfs.FileSystem): file system to be preprocessed.
        mount_point (dfvfs.PathSpec): mount point path specification that
            refers to the base location of the file system.
        knowledge_base (KnowledgeBase): to fill with preprocessing
            information.
    """
    # First pass: collect preprocessing information from the file system.
    searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)
    cls.CollectFromFileSystem( artifacts_registry, knowledge_base, searcher, file_system)
    # NOTE(review): knowledge_base is guarded against None here, but it is
    # used unconditionally above and below -- confirm whether None is a
    # supported value for this argument.
    environment_variables = None
    if knowledge_base:
        environment_variables = knowledge_base.GetEnvironmentVariables()
    # Second pass: collect from the Windows Registry, resolving registry
    # file paths via environment variables gathered in the first pass.
    registry_file_reader = FileSystemWinRegistryFileReader( file_system, mount_point, environment_variables=environment_variables)
    win_registry = dfwinreg_registry.WinRegistry( registry_file_reader=registry_file_reader)
    searcher = registry_searcher.WinRegistrySearcher(win_registry)
    cls.CollectFromWindowsRegistry( artifacts_registry, knowledge_base, searcher)
    # Final pass: derive values from what is already in the knowledge base.
    cls.CollectFromKnowledgeBase(knowledge_base)
    if not knowledge_base.HasUserAccounts():
        logger.warning('Unable to find any user accounts on the system.')
Runs the preprocessing plugins. Args: artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts definitions registry. file_system (dfvfs.FileSystem): file system to be preprocessed. mount_point (dfvfs.PathSpec): mount point path specification that refers to the base location of the file system. knowledge_base (KnowledgeBase): to fill with preprocessing information.
juraj-google-style
def get_channel_id(turn_context: TurnContext) -> str:
    """Get the Channel Id from the current Activity on the Turn Context.

    Args:
        turn_context (TurnContext): The Turn Context to retrieve the
            Activity's Channel Id from.

    Returns:
        str: The Channel Id from the Turn Context's Activity, or an empty
            string when the Activity has no Channel Id.
    """
    channel_id = turn_context.activity.channel_id
    # Normalize a missing channel id to the empty string.
    return '' if channel_id is None else channel_id
Get the Channel Id from the current Activity on the Turn Context. Args: turn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from. Returns: str: The Channel Id from the Turn Context's Activity.
codesearchnet
def convert_snapshot(self, shift, instruction):
    """Return converted `Snapshot`.

    Args:
        shift (int): Offset time.
        instruction (Snapshot): snapshot instruction.

    Returns:
        dict: Dictionary of required parameters.
    """
    # t0 is the instruction's start time shifted by the supplied offset.
    return self._qobj_model(
        name='snapshot',
        t0=shift + instruction.start_time,
        label=instruction.name,
        type=instruction.type)
Return converted `Snapshot`. Args: shift(int): Offset time. instruction (Snapshot): snapshot instruction. Returns: dict: Dictionary of required parameters.
juraj-google-style
def __format_error(self, error_list_tag):
    """Format this error into a JSON response.

    Args:
        error_list_tag: A string specifying the name of the tag to use for
            the error list.

    Returns:
        A dict containing the reformatted JSON error response.
    """
    detail = {'domain': self.domain(), 'reason': self.reason(), 'message': self.message()}
    # Merge in any optional extra fields defined by the concrete error.
    detail.update(self.extra_fields() or {})
    return {
        'error': {
            error_list_tag: [detail],
            'code': self.status_code(),
            'message': self.message(),
        }
    }
Format this error into a JSON response. Args: error_list_tag: A string specifying the name of the tag to use for the error list. Returns: A dict containing the reformatted JSON error response.
juraj-google-style
def _ParseCmdItem(self, cmd_input, template_file=None):
    """Creates Texttable with output of command.

    Args:
        cmd_input: String, Device response.
        template_file: File object, template to parse with.

    Returns:
        TextTable containing command output.
    """
    fsm = textfsm.TextFSM(template_file)
    # Cache the template's key columns the first time a template is parsed.
    if not self._keys:
        self._keys = set(fsm.GetValuesByAttrib('Key'))
    result_table = texttable.TextTable()
    result_table.header = fsm.header
    for record in fsm.ParseText(cmd_input):
        result_table.Append(record)
    return result_table
Creates Texttable with output of command. Args: cmd_input: String, Device response. template_file: File object, template to parse with. Returns: TextTable containing command output. Raises: CliTableError: A template was not found for the given command.
codesearchnet
def _ParseShellItemPathSegment(self, shell_item):
    """Parses a shell item path segment.

    Args:
        shell_item (pyfwsi.item): shell item.

    Returns:
        str: shell item path segment.
    """
    path_segment = None
    if isinstance(shell_item, pyfwsi.root_folder):
        # Prefer a human readable description of the shell folder; fall back
        # to the raw GUID in braces.
        description = shell_folder_ids.DESCRIPTIONS.get(shell_item.shell_folder_identifier, None)
        if description:
            path_segment = description
        else:
            path_segment = '{{{0:s}}}'.format(shell_item.shell_folder_identifier)
        path_segment = '<{0:s}>'.format(path_segment)
    elif isinstance(shell_item, pyfwsi.volume):
        if shell_item.name:
            path_segment = shell_item.name
        elif shell_item.identifier:
            path_segment = '{{{0:s}}}'.format(shell_item.identifier)
    elif isinstance(shell_item, pyfwsi.file_entry):
        # Prefer the long name from an extension block over the short name.
        long_name = ''
        for extension_block in shell_item.extension_blocks:
            if isinstance(extension_block, pyfwsi.file_entry_extension):
                long_name = extension_block.long_name
        if long_name:
            path_segment = long_name
        elif shell_item.name:
            path_segment = shell_item.name
    elif isinstance(shell_item, pyfwsi.network_location):
        if shell_item.location:
            path_segment = shell_item.location
    # NOTE(review): dead branch -- class type 0 is deliberately left
    # unhandled and falls through to the generic unknown marker below.
    if ((path_segment is None) and (shell_item.class_type == 0)):
        pass
    if (path_segment is None):
        path_segment = '<UNKNOWN: 0x{0:02x}>'.format(shell_item.class_type)
    return path_segment
Parses a shell item path segment. Args: shell_item (pyfwsi.item): shell item. Returns: str: shell item path segment.
codesearchnet
def GetRequestXML(self, method, *args):
    """Get the raw SOAP XML for a request.

    Args:
        method: The method name.
        *args: A list of arguments to be passed to the method.

    Returns:
        An element containing the raw XML that would be sent as the request.
    """
    packed_args = self._PackArguments(method, args, set_type_attrs=True)
    soap_headers = self._GetZeepFormattedSOAPHeaders()
    # Build the message without sending it over the wire.
    return self.zeep_client.create_message(
        self.zeep_client.service, method, *packed_args,
        _soapheaders=soap_headers)
Get the raw SOAP XML for a request. Args: method: The method name. *args: A list of arguments to be passed to the method. Returns: An element containing the raw XML that would be sent as the request.
juraj-google-style
def __init__(self, parent_xid, relationship):
    """Initialize Class Properties.

    .. warning:: This code is not complete and may require some update to
        the API.

    Args:
        parent_xid (str): The external id of the parent Indicator.
        relationship: The relationship to the parent Indicator.
    """
    # Generate a fresh external id for this child indicator.
    self.xid = str(uuid.uuid4())
    self._children = []
    # Payload describing the link between this indicator and its parent.
    self._action_data = {
        'indicatorXid': self.xid,
        'relationship': relationship,
        'parentIndicatorXid': parent_xid,
    }
Initialize Class Properties. .. warning:: This code is not complete and may require some update to the API. Args: parent_xid (str): The external id of the parent Indicator. relationship: ???
juraj-google-style
def start_at(self, document_fields):
    """Start query at a cursor with this collection as parent.

    See :meth:`~.firestore_v1beta1.query.Query.start_at` for more
    information on this method.

    Args:
        document_fields: a document snapshot or a dictionary/list/tuple of
            fields representing a query results cursor.

    Returns:
        ~.firestore_v1beta1.query.Query: A query with cursor.
    """
    # Delegate to a new Query rooted at this collection.
    return query_mod.Query(self).start_at(document_fields)
Start query at a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.start_at` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor.
codesearchnet
def _AddCampaignsToGroup(client, campaign_group_id, campaign_ids):
    """Adds multiple campaigns to a campaign group.

    Args:
        client: an AdWordsClient instance.
        campaign_group_id: an integer ID for the campaign group.
        campaign_ids: a list of integer IDs for campaigns.
    """
    campaign_service = client.GetService('CampaignService', version='v201809')
    # One SET operation per campaign, pointing it at the group.
    operations = []
    for campaign_id in campaign_ids:
        operations.append({
            'operator': 'SET',
            'operand': {
                'id': campaign_id,
                'campaignGroupId': campaign_group_id,
            },
        })
    campaign_service.mutate(operations)
    print('The following campaign IDs were added to the campaign group with ID "%d":\n\t%s'
          % (campaign_group_id, campaign_ids))
Adds multiple campaigns to a campaign group. Args: client: an AdWordsClient instance. campaign_group_id: an integer ID for the campaign group. campaign_ids: a list of integer IDs for campaigns.
codesearchnet
def check(self, digest):
    """Check the integrity of the file with the given digest.

    Args:
        digest: digest of the file to check.

    Returns:
        True if the file is not corrupted, False otherwise.
    """
    path = self.get_file_path(digest)
    if self._calc_digest(path) == digest:
        return True
    # Digest mismatch: report corruption and signal failure.
    self.logger.warning("found corrupted file: '{0}'".format(path))
    return False
Check the integrity of the file with the given digest Args: digest -- digest of the file to check Returns: True if the file is not corrupted
codesearchnet
def children(self, sourcepath, recursive=True):
    """Recursively find all children that are imported from the given
    source path.

    Args:
        sourcepath (str): Source file path to search for.

    Keyword Arguments:
        recursive (bool): Switch to enable recursive finding. Default True.

    Returns:
        set: Set of found children paths.
    """
    # Bug fix: the caller-supplied ``recursive`` flag was previously
    # ignored -- the call hard-coded ``recursive=True``.
    return self._get_recursive_dependancies(
        self._CHILDREN_MAP, sourcepath, recursive=recursive)
Recursively find all children that are imported from the given source path. Args: sourcepath (str): Source file path to search for. Keyword Arguments: recursive (bool): Switch to enable recursive finding (if True). Default to True. Returns: set: Set of found children paths.
codesearchnet
def create_constructor_args(cls, proto_list: List[fra.ForwardRateAgreement], config: ForwardRateAgreementConfig=None) -> Dict[str, Any]:
    """Creates a dictionary to initialize ForwardRateAgreement.

    The keys of the output dictionary are unique identifiers of the batched
    instruments; each value provides initialization arguments for
    ForwardRateAgreement.

    Args:
        proto_list: A list of protos for which the initialization arguments
            are constructed.
        config: An instance of `ForwardRateAgreementConfig`.

    Returns:
        A possibly nested dictionary of constructor arguments.
    """
    fra_data = proto_utils.from_protos_v2(proto_list, config)
    # Convert every batched entry into its tensor representation.
    return {key: proto_utils.tensor_repr(value) for key, value in fra_data.items()}
Creates a dictionary to initialize ForwardRateAgreement. The output dictionary is such that the instruments can be initialized as follows: ``` initializer = create_constructor_args(proto_list, config) fras = [ForwardRateAgreement(**data) for data in initializer.values()] ``` The keys of the output dictionary are unique identifiers of the batched instruments. This is useful for identifying an existing graph that could be reused for the instruments without the need of rebuilding the graph. Args: proto_list: A list of protos for which the initialization arguments are constructed. config: An instance of `ForwardRateAgreementConfig`. Returns: A possibly nested dictionary such that each value provides initialization arguments for the ForwardRateAgreement.
github-repos
def pull_release(self, name, version, destfolder='.', force=False):
    """Download and unpack a released iotile component by name and version
    range.

    Args:
        name (string): The name of the component to download.
        version (SemanticVersionRange): The valid versions of the component.
        destfolder (string): The folder into which to unpack the result,
            defaults to the current working directory.
        force (bool): Forcibly overwrite whatever is currently in the folder
            that would be fetched.

    Raises:
        ExternalError: If the destination folder exists and force is not
            specified.
        ArgumentError: If the specified component could not be found with
            the required version.
    """
    unique_id = name.replace('/', '_')
    depdict = {
        'name': name,
        'unique_id': unique_id,
        'required_version': version,
        'required_version_string': str(version),
    }
    destdir = os.path.join(destfolder, unique_id)
    if os.path.exists(destdir):
        if not force:
            raise ExternalError('Output directory exists and force was not specified, aborting', output_directory=destdir)
        # Caller asked for overwrite: clear the stale output first.
        shutil.rmtree(destdir)
    if self.update_dependency(None, depdict, destdir) != 'installed':
        raise ArgumentError('Could not find component to satisfy name/version combination')
Download and unpack a released iotile component by name and version range If the folder that would be created already exists, this command fails unless you pass force=True Args: name (string): The name of the component to download version (SemanticVersionRange): The valid versions of the component to fetch destfolder (string): The folder into which to unpack the result, defaults to the current working directory force (bool): Forcibly overwrite whatever is currently in the folder that would be fetched. Raises: ExternalError: If the destination folder exists and force is not specified ArgumentError: If the specified component could not be found with the required version
codesearchnet
def run(self, args):
    """Erases the device connected to the J-Link.

    Args:
        self (EraseCommand): the ``EraseCommand`` instance.
        args (Namespace): the arguments passed on the command-line.

    Returns:
        ``None``
    """
    jlink = self.create_jlink(args)
    erased_count = jlink.erase()
    print('Bytes Erased: %d' % erased_count)
Erases the device connected to the J-Link. Args: self (EraseCommand): the ``EraseCommand`` instance args (Namespace): the arguments passed on the command-line Returns: ``None``
codesearchnet
def write_float(self, registeraddress, value, numberOfRegisters=2):
    """Write a floating point number to the slave.

    Floats are stored in two or more consecutive 16-bit registers in the
    slave. Uses Modbus function code 16.

    Args:
        registeraddress (int): The slave register start address.
        value (float or int): The value to store in the slave.
        numberOfRegisters (int): The number of registers allocated for the
            float. Can be 2 or 4.

    Returns:
        None

    Raises:
        ValueError, TypeError, IOError
    """
    _checkNumerical(value, description='input value')
    _checkInt(numberOfRegisters, minvalue=2, maxvalue=4, description='number of registers')
    # Function code 16: write multiple registers.
    self._genericCommand(
        16,
        registeraddress,
        value,
        numberOfRegisters=numberOfRegisters,
        payloadformat='float')
Write a floating point number to the slave. Floats are stored in two or more consecutive 16-bit registers in the slave. Uses Modbus function code 16. For discussion on precision, number of registers and on byte order, see :meth:`.read_float`. Args: * registeraddress (int): The slave register start address (use decimal numbers, not hex). * value (float or int): The value to store in the slave * numberOfRegisters (int): The number of registers allocated for the float. Can be 2 or 4. Returns: None Raises: ValueError, TypeError, IOError
codesearchnet
def get_membership(self, uuid=None):
    """Get membership data based on uuid.

    Args:
        uuid (str): optional uuid. defaults to self.cuuid.

    Returns:
        dict: membership json.
    """
    group_id = self.get_group_id(uuid=uuid)
    # Resolve the group id into its membership endpoint.
    endpoint = 'group/{group_id}/member'.format(group_id=group_id)
    return self.get(endpoint, params=None)
Get membership data based on uuid. Args: uuid (str): optional uuid. defaults to self.cuuid Raises: PyLmodUnexpectedData: No data was returned. requests.RequestException: Exception connection error Returns: dict: membership json
juraj-google-style
def _execute_and_process_stdout(self, args, shell, handler) -> bytes:
    """Executes adb commands and processes the stdout with a handler.

    Args:
        args: string or list of strings, program arguments. See
            subprocess.Popen() documentation.
        shell: bool, True to run this command through the system shell,
            False to invoke it directly. See subprocess.Popen() docs.
        handler: func, a function to handle adb stdout line by line.

    Returns:
        The stderr of the adb command run if exit code is 0.

    Raises:
        AdbError: The adb command exit code is not 0.
    """
    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, bufsize=1)
    # stdout is consumed line-by-line by the handler, so only a placeholder
    # is logged unless unexpected output shows up after EOF.
    out = '[elided, processed via handler]'
    try:
        while True:
            line = proc.stdout.readline()
            if line:
                handler(line)
            else:
                # Empty read means stdout reached EOF.
                break
    finally:
        # Always drain remaining output and collect stderr, even if the
        # handler raised mid-stream.
        unexpected_out, err = proc.communicate()
        if unexpected_out:
            out = '[unexpected stdout] %s' % unexpected_out
            for line in unexpected_out.splitlines():
                handler(line)
    ret = proc.returncode
    logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(args), out, err, ret)
    if ret == 0:
        return err
    else:
        raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)
Executes adb commands and processes the stdout with a handler. Args: args: string or list of strings, program arguments. See subprocess.Popen() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Popen() docs. handler: func, a function to handle adb stdout line by line. Returns: The stderr of the adb command run if exit code is 0. Raises: AdbError: The adb command exit code is not 0.
github-repos
def StaticAdd(cls, queue_urn, rdf_value, mutation_pool=None):
    """Adds an rdf value to the queue.

    Does not require that the queue be locked, or even open. NOTE: The
    caller is responsible for ensuring that the queue exists and is of the
    correct type.

    Args:
        queue_urn: The urn of the queue to add to.
        rdf_value: The rdf value to add to the queue.
        mutation_pool: A MutationPool object to write to.

    Raises:
        ValueError: rdf_value has unexpected type, or mutation_pool is None.
    """
    # Validate inputs before touching the queue.
    if not isinstance(rdf_value, cls.rdf_type):
        raise ValueError("This collection only accepts values of type %s." % cls.rdf_type.__name__)
    if mutation_pool is None:
        raise ValueError("Mutation pool can't be none.")
    now_us = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
    # Coerce plain strings into RDFURN instances.
    if not isinstance(queue_urn, rdfvalue.RDFURN):
        queue_urn = rdfvalue.RDFURN(queue_urn)
    mutation_pool.QueueAddItem(queue_urn, rdf_value, now_us)
Adds an rdf value the queue. Adds an rdf value to a queue. Does not require that the queue be locked, or even open. NOTE: The caller is responsible for ensuring that the queue exists and is of the correct type. Args: queue_urn: The urn of the queue to add to. rdf_value: The rdf value to add to the queue. mutation_pool: A MutationPool object to write to. Raises: ValueError: rdf_value has unexpected type.
juraj-google-style
def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
    """Initializes an nsrlsvr analyzer thread.

    Args:
        hash_queue (Queue.queue): contains hashes to be analyzed.
        hash_analysis_queue (Queue.queue): queue the analyzer will append
            HashAnalysis objects to.
    """
    super(NsrlsvrAnalyzer, self).__init__(hash_queue, hash_analysis_queue, **kwargs)
    # Server endpoint; configured after construction.
    self._host = None
    self._port = None
    # Number of hashes sent to nsrlsvr per query batch.
    self.hashes_per_batch = 100
Initializes an nsrlsvr analyzer thread. Args: hash_queue (Queue.queue): contains hashes to be analyzed. hash_analysis_queue (Queue.queue): that the analyzer will append HashAnalysis objects this queue.
juraj-google-style
def search(self, search_phrase, limit=None):
    """Finds datasets by search phrase.

    Args:
        search_phrase (str or unicode): phrase to search for.
        limit (int, optional): how many results to return. None means
            without limit.

    Returns:
        list of DatasetSearchResult instances.
    """
    query, query_params = self._make_query_from_terms(search_phrase, limit=limit)
    # Keep the rendered query around for debugging/inspection.
    self._parsed_query = (str(query), query_params)
    assert isinstance(query, TextClause)
    datasets = {}
    def make_result(vid=None, b_score=0, p_score=0):
        # Build an empty result carrier for one dataset vid.
        res = DatasetSearchResult()
        res.b_score = b_score
        res.p_score = p_score
        res.partitions = set()
        res.vid = vid
        return res
    if query_params:
        results = self.execute(query, **query_params)
        for result in results:
            vid, dataset_score = result
            datasets[vid] = make_result(vid, b_score=dataset_score)
    logger.debug('Extending datasets with partitions.')
    # Partition matches may reference datasets the dataset query missed;
    # create stub results for those and accumulate partition scores.
    for partition in self.backend.partition_index.search(search_phrase):
        if partition.dataset_vid not in datasets:
            datasets[partition.dataset_vid] = make_result(partition.dataset_vid)
        datasets[partition.dataset_vid].p_score += partition.score
        datasets[partition.dataset_vid].partitions.add(partition)
    return list(datasets.values())
Finds datasets by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of DatasetSearchResult instances.
juraj-google-style
def set(self, time):
    """Sets time in seconds since Epoch.

    Args:
        time (:obj:`float`): time in seconds since Epoch
            (see time.time()).

    Returns:
        None
    """
    self._time = time
    # Split into whole seconds and the nanosecond remainder.
    whole_seconds = int(time)
    self._pb.sec = whole_seconds
    self._pb.nsec = int((time - whole_seconds) * 10 ** 9)
Sets time in seconds since Epoch Args: time (:obj:`float`): time in seconds since Epoch (see time.time()) Returns: None
juraj-google-style
def IsLinearOutputModule(cls, name):
    """Determines if a specific output class is a linear output module.

    Args:
        name (str): name of the output module.

    Returns:
        bool: True if the output module is linear, False otherwise.
    """
    lookup = name.lower()
    # Consider enabled modules first, then disabled ones.
    output_class = (
        cls._output_classes.get(lookup, None) or
        cls._disabled_output_classes.get(lookup, None))
    if output_class:
        return issubclass(output_class, interface.LinearOutputModule)
    return False
Determines if a specific output class is a linear output module. Args: name (str): name of the output module. Returns: bool: True if the output module is linear, False otherwise.
codesearchnet
def parse_func_attrs(attributes, allowlist=None):
    """Convert the keyword arguments into function_def attributes.

    Currently only support primitive types: bool, int, float and string.

    Args:
        attributes: the dictionary of attributes.
        allowlist: set of attribute names allowed.

    Returns:
        A dict of attributes where the key is the name of attribute and the
        value is the AttrValue proto.

    Raises:
        ValueError: If the kwargs contains unallowlisted name or unsupported
            value types.
    """
    allowlist = allowlist or MONOMORPHIC_FUNCTION_ALLOWLIST
    parsed = {}
    for key, value in attributes.items():
        # Reject anything outside the allowlist before converting.
        if key not in allowlist:
            raise ValueError(f'Allowlist does not support `{key}` as an attribute.')
        parsed[key] = _parse_func_attr_value(key, value)
    return parsed
Convert the keyword arguments into function_def attributes. Currently only support primitive types: bool, int, float and string. Args: attributes: the dictionary of attributes. allowlist: set of attribute names allowed. Returns: A dict of attributes where the key is the name of attribute and the value is the AttrValue proto. Raises: ValueError: If the kwargs contains unallowlisted name or unsupported value types.
github-repos
def remove(package_name):
    """Removes a holodeck package.

    Args:
        package_name (str): the name of the package to remove.

    Raises:
        HolodeckException: If the package name is unknown.
    """
    if package_name not in packages:
        raise HolodeckException("Unknown package name " + package_name)
    # Delete every installed copy that matches the package name.
    for pkg_config, pkg_path in _iter_packages():
        if pkg_config["name"] == package_name:
            shutil.rmtree(pkg_path)
Removes a holodeck package. Args: package_name (str): the name of the package to remove
juraj-google-style
def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a message row from the database.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)
    event_data = TangoAndroidMessageEventData()
    event_data.message_identifier = self._GetRowValue( query_hash, row, 'msg_id')
    event_data.direction = self._GetRowValue(query_hash, row, 'direction')
    # Creation time, interpreted as a Java timestamp.
    # NOTE(review): JavaTime implies milliseconds since epoch -- confirm
    # against the Tango database schema.
    timestamp = self._GetRowValue(query_hash, row, 'create_time')
    if timestamp:
        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    # Send time produces a second event sharing the same event data.
    timestamp = self._GetRowValue(query_hash, row, 'send_time')
    if timestamp:
        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_SENT)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a message row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
juraj-google-style
def write_edges(edges: Mapping[(str, Any)], filename: str, jsonlines: bool=False, gzipflag: bool=False, yaml: bool=False):
    """Write edges to file.

    Args:
        edges (Mapping[str, Any]): in edges JSON Schema format.
        filename (str): filename to write.
        jsonlines (bool): output in JSONLines format?
        gzipflag (bool): create gzipped file?
        yaml (bool): create yaml file?
    """
    # TODO: not yet implemented -- this is intentionally a no-op stub.
    pass
Write edges to file Args: edges (Mapping[str, Any]): in edges JSON Schema format filename (str): filename to write jsonlines (bool): output in JSONLines format? gzipflag (bool): create gzipped file? yaml (bool): create yaml file?
codesearchnet
def isdir(self, path, follow_symlinks=True):
    """Determine if path identifies a directory.

    Args:
        path: Path to filesystem object.
        follow_symlinks: If True, symlinks are resolved before the check.

    Returns:
        `True` if path points to a directory.

    Raises:
        TypeError: if path is None.
    """
    # Delegate the mode test to the shared type-check helper.
    return self._is_of_type(path, S_IFDIR, follow_symlinks)
Determine if path identifies a directory. Args: path: Path to filesystem object. Returns: `True` if path points to a directory (following symlinks). Raises: TypeError: if path is None.
codesearchnet
def patch_fromText(self, textline):
    """Parse a textual representation of patches and return a list of patch
    objects.

    NOTE: Python 2 only -- relies on ``unicode`` and ``urllib.unquote``.

    Args:
        textline: Text representation of patches.

    Returns:
        Array of Patch objects.

    Raises:
        ValueError: If invalid input.
    """
    if (type(textline) == unicode):
        # Patches should be composed of a subset of ascii chars, Unicode not
        # required. If this encode raises UnicodeEncodeError, the input is invalid.
        textline = textline.encode('ascii')
    patches = []
    if (not textline):
        return patches
    text = textline.split('\n')
    while (len(text) != 0):
        # Each patch starts with a unified-diff style "@@ -a,b +c,d @@" header.
        m = re.match('^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$', text[0])
        if (not m):
            raise ValueError(('Invalid patch string: ' + text[0]))
        patch = patch_obj()
        patches.append(patch)
        patch.start1 = int(m.group(1))
        # Missing length means 1; explicit 0 keeps the 1-based start as-is.
        if (m.group(2) == ''):
            patch.start1 -= 1
            patch.length1 = 1
        elif (m.group(2) == '0'):
            patch.length1 = 0
        else:
            patch.start1 -= 1
            patch.length1 = int(m.group(2))
        patch.start2 = int(m.group(3))
        if (m.group(4) == ''):
            patch.start2 -= 1
            patch.length2 = 1
        elif (m.group(4) == '0'):
            patch.length2 = 0
        else:
            patch.start2 -= 1
            patch.length2 = int(m.group(4))
        del text[0]
        # Consume body lines until the next "@@" header or end of input.
        while (len(text) != 0):
            if text[0]:
                sign = text[0][0]
            else:
                sign = ''
            line = urllib.unquote(text[0][1:])
            line = line.decode('utf-8')
            if (sign == '+'):
                patch.diffs.append((self.DIFF_INSERT, line))
            elif (sign == '-'):
                patch.diffs.append((self.DIFF_DELETE, line))
            elif (sign == ' '):
                patch.diffs.append((self.DIFF_EQUAL, line))
            elif (sign == '@'):
                # Start of next patch header.
                break
            elif (sign == ''):
                # Blank line? Whatever.
                pass
            else:
                raise ValueError(("Invalid patch mode: '%s'\n%s" % (sign, line)))
            del text[0]
    return patches
Parse a textual representation of patches and return a list of patch objects. Args: textline: Text representation of patches. Returns: Array of Patch objects. Raises: ValueError: If invalid input.
codesearchnet
def _build(self):
    """Connects the TrainableTensor module into the graph.

    Returns:
        A Tensor of shape as determined in the constructor.
    """
    if 'w' not in self._initializers:
        # Default initializer: truncated normal with stddev scaled by the
        # inverse square root of the tensor's total element count.
        stddev = 1 / math.sqrt(np.prod(self._shape))
        self._initializers['w'] = tf.truncated_normal_initializer(stddev=stddev)
    self._w = tf.get_variable(
        'w',
        shape=self._shape,
        dtype=self._dtype,
        initializer=self._initializers['w'],
        partitioner=self._partitioners.get('w', None),
        regularizer=self._regularizers.get('w', None))
    return self._w
Connects the TrainableTensor module into the graph. Returns: A Tensor of shape as determined in the constructor.
codesearchnet
def _update_token(self, request):
    """Updates credentials with a new access_token representing the
    impersonated account.

    Args:
        request (google.auth.transport.requests.Request): Request object to
            use for refreshing credentials.
    """
    # Refresh the source credentials so they can authorize the IAM call.
    self._source_credentials.refresh(request)
    payload = {
        "delegates": self._delegates,
        "scope": self._target_scopes,
        "lifetime": str(self._lifetime) + "s",
    }
    request_headers = {'Content-Type': 'application/json'}
    self._source_credentials.apply(request_headers)
    self.token, self.expiry = _make_iam_token_request(
        request=request,
        principal=self._target_principal,
        headers=request_headers,
        body=payload)
Updates credentials with a new access_token representing the impersonated account. Args: request (google.auth.transport.requests.Request): Request object to use for refreshing credentials.
juraj-google-style
def __init__(self, full_shape, var_offset):
    """Constructor.

    Args:
        full_shape: Tuple or list of `int` indicating the full combined
            shape of the partitioned variables.
        var_offset: Tuple or list of `int` specifying offset of this
            partition with respect to the full variable for each dimension.

    Raises:
        TypeError: If `full_shape` or `var_offset` is not a sequence.
        ValueError: If `full_shape` or `var_offset` differ in length, or if
            `var_offset` exceeds `full_shape` in any dimension.
    """
    # Both inputs must be plain sequences (not e.g. TensorShape or scalars).
    if not isinstance(full_shape, (list, tuple)):
        raise TypeError('`full_shape` must be a sequence (like tuple or list) instead of ' + type(full_shape).__name__)
    if not isinstance(var_offset, (list, tuple)):
        raise TypeError('`var_offset` must be a sequence (like tuple or list) instead of ' + type(var_offset).__name__)
    if len(var_offset) != len(full_shape):
        raise ValueError('Expected equal length, but `var_offset` is of length {} while full_shape is of length {}.'.format(len(var_offset), len(full_shape)))
    # Every per-dimension offset has to fall inside the full shape.
    for dim_offset, dim_size in zip(var_offset, full_shape):
        if dim_offset < 0 or dim_offset >= dim_size:
            raise ValueError('Expected 0 <= offset < shape but found offset={}, shape={} for var_offset={}, full_shape={}'.format(dim_offset, dim_size, var_offset, full_shape))
    self._full_shape = full_shape
    self._var_offset = var_offset
Constructor. Args: full_shape: Tuple or list of `int` indicating the full combined shape of the partitioned variables. var_offset: Tuple or list of `int` specifying offset of this partition with respect to the full variable for each dimension. Raises: TypeError: If `full_shape` or `var_offset` is not a sequence. ValueError: If `full_shape` or `var_offset` differ in length. If `var_offset` exceeds `full_shape` in any dimension.
github-repos
def label(self):
    """Provide access to the notification label.

    Returns:
        str: The notification label.
    """
    driver = self.selenium
    # Browser-UI elements are only readable from the chrome context.
    with driver.context(driver.CONTEXT_CHROME):
        return self.root.get_attribute('label')
Provide access to the notification label. Returns: str: The notification label
codesearchnet
def get_imagery(cls, lat, lon, date=None, dim=None, cloud_score=False):
    """Returns satellite image.

    Args:
        lat: latitude float.
        lon: longitude float.
        date: date instance of available date from `get_assets`.
        dim: width and height of image in degrees as float.
        cloud_score: boolean to calculate the percentage of the image
            covered by clouds.

    Returns:
        json
    """
    instance = cls('planetary/earth/imagery')
    # Forward all query parameters as resource filters.
    return instance.get_resource(
        lat=lat,
        lon=lon,
        date=date,
        dim=dim,
        cloud_score=cloud_score)
Returns satellite image Args: lat: latitude float lon: longitude float date: date instance of available date from `get_assets` dim: width and height of image in degrees as float cloud_score: boolean to calculate the percentage of the image covered by clouds Returns: json
juraj-google-style
def get_extra_managed_storage_volume_paths(self, start=0, count=(- 1), filter='', sort=''):
    """Gets the list of extra managed storage volume paths.

    Args:
        start: The first item to return, using 0-based indexing. Defaults
            to 0 (start with the first available item).
        count: The number of resources to return. A count of -1 requests
            all items.
        filter (list or str): A general filter/query string to narrow the
            list of items returned. The default is no filter.
        sort: The sort order of the returned data set. By default, sorted
            by create time with the oldest entry first.

    Returns:
        list: A list of extra managed storage volume paths.
    """
    # The repair endpoint filtered to the relevant alert-fix type.
    repair_uri = (self.URI + '/repair?alertFixType=ExtraManagedStorageVolumePaths')
    return self._client.get_all(start, count, filter=filter, sort=sort, uri=repair_uri)
Gets the list of extra managed storage volume paths. Args: start: The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the first available item. count: The number of resources to return. A count of -1 requests all items. The actual number of items in the response might differ from the requested count if the sum of start and count exceeds the total number of items. filter (list or str): A general filter/query string to narrow the list of items returned. The default is no filter; all resources are returned. sort: The sort order of the returned data set. By default, the sort order is based on create time with the oldest entry first. Returns: list: A list of extra managed storage volume paths.
codesearchnet
def build(self, var_list):
    """Initialize optimizer variables.

    Nadam optimizer has 2 types of variables: momentums and velocities.

    Args:
        var_list: list of model variables to build Nadam variables on.
    """
    if self.built:
        return
    # Infer the working dtype from the first variable; fall back to the
    # backend default when the list is empty.
    if var_list:
        dtype = var_list[0].dtype
    else:
        dtype = backend.floatx()
    super().build(var_list)
    self._momentums, self._velocities = self.add_optimizer_variables(var_list, ['momentum', 'velocity'])
    # Running product used by Nadam's momentum-schedule correction.
    self._u_product = backend.Variable(1.0, dtype=dtype)
Initialize optimizer variables. Nadam optimizer has 2 types of variables: momentums and velocities. Args: var_list: list of model variables to build Nadam variables on.
github-repos
def make_basket_put_payoff(strikes: types.RealTensor, dtype: tf.DType=None, name: str=None) -> Callable[[types.RealTensor], types.RealTensor]:
    """Produces a callable from samples to payoff of a simple basket put
    option.

    Args:
        strikes: A `Tensor` of `dtype` consistent with `samples` and shape
            `[num_samples, batch_size]`.
        dtype: Optional `dtype`. If supplied, represents the `dtype` for
            `strikes` and for the input of the output payoff callable.
            Default value: `None` (inferred from `strikes`).
        name: Python `str` name prefixed to Ops created by this function.
            Default value: `None`, mapped to 'put_valuer'.

    Returns:
        A callable mapping samples and a current time to a `Tensor` of
        shape `[num_samples, batch_size]`.
    """
    with tf.name_scope(name or 'put_valuer'):
        strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')
        # Bind strikes and dtype so the returned callable only needs samples.
        return functools.partial(_put_valuer, strikes=strikes, dtype=dtype or strikes.dtype)
Produces a callable from samples to payoff of a simple basket put option. Args: strikes: A `Tensor` of `dtype` consistent with `samples` and shape `[num_samples, batch_size]`. dtype: Optional `dtype`. Either `tf.float32` or `tf.float64`. If supplied, represents the `dtype` for the 'strikes' as well as for the input argument of the output payoff callable. Default value: `None`, which means that the `dtype` inferred from `strikes` is used. name: Python `str` name prefixed to Ops created by the callable created by this function. Default value: `None` which is mapped to the default name 'put_valuer' Returns: A callable from `Tensor` of shape `[batch_size, num_samples, num_exercise_times, dim]` and a scalar `Tensor` representing current time to a `Tensor` of shape `[num_samples, batch_size]`.
github-repos
def wait_for_import(self, connection_id, wait_interval):
    """Wait until connection state is no longer ``IMPORT_CONFIGURATION``.

    Args:
        connection_id (str): Heroku Connect connection to monitor.
        wait_interval (int): How frequently to poll in seconds.

    Raises:
        CommandError: If fetch connection information fails.
    """
    self.stdout.write(self.style.NOTICE('Waiting for import'), ending='')
    state = utils.ConnectionStates.IMPORT_CONFIGURATION
    # Poll until the connection leaves the import state, printing a dot
    # per poll as progress feedback.
    while state == utils.ConnectionStates.IMPORT_CONFIGURATION:
        self.stdout.write(self.style.NOTICE('.'), ending='')
        time.sleep(wait_interval)
        try:
            connection = utils.get_connection(connection_id)
        except requests.HTTPError as e:
            raise CommandError('Failed to fetch connection information.') from e
        state = connection['state']
    self.stdout.write(self.style.NOTICE(' Done!'))
Wait until connection state is no longer ``IMPORT_CONFIGURATION``. Args: connection_id (str): Heroku Connect connection to monitor. wait_interval (int): How frequently to poll in seconds. Raises: CommandError: If fetch connection information fails.
codesearchnet
def can_process_matrix(entry, matrix_tags):
    """Check given matrix tags to be in the given list of matrix tags.

    Args:
        entry (dict): matrix item (in yaml).
        matrix_tags (list): represents --matrix-tags defined by user in
            command line.

    Returns:
        bool: True when matrix entry can be processed.
    """
    # No tag filter means every entry is processable.
    if not matrix_tags:
        return True
    entry_tags = entry.get('tags', ())
    return any(tag in entry_tags for tag in matrix_tags)
Check given matrix tags to be in the given list of matric tags. Args: entry (dict): matrix item (in yaml). matrix_tags (list): represents --matrix-tags defined by user in command line. Returns: bool: True when matrix entry can be processed.
codesearchnet
def _ParseHeader(self, parser_mediator, structure):
    """Parses a log header.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        structure (pyparsing.ParseResults): structure of tokens derived
            from a line of a text file.
    """
    _, month, day, hours, minutes, seconds, year = structure.date_time
    # Month is parsed as a name; map it to its 1-based number (0 = unknown).
    month = timelib.MONTH_DICT.get(month.lower(), 0)
    time_elements_tuple = (year, month, day, hours, minutes, seconds)
    try:
        date_time = dfdatetime_time_elements.TimeElements( time_elements_tuple=time_elements_tuple)
        # XChat logs timestamps in the machine's local time zone.
        date_time.is_local_time = True
    except ValueError:
        parser_mediator.ProduceExtractionWarning( 'invalid date time value: {0!s}'.format(structure.date_time))
        return
    self._last_month = month
    event_data = XChatLogEventData()
    # BEGIN/END headers bracket a logging session; the year from the
    # header seeds year tracking for subsequent (year-less) log lines.
    if structure.log_action[0] == 'BEGIN':
        self._xchat_year = year
        event_data.text = 'XChat start logging'
    elif structure.log_action[0] == 'END':
        self._xchat_year = None
        event_data.text = 'XChat end logging'
    else:
        logger.debug('Unknown log action: {0:s}.'.format( ' '.join(structure.log_action)))
        return
    event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_ADDED, time_zone=parser_mediator.timezone)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a log header. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file.
juraj-google-style
def softplus(x):
    """Softplus activation function.

    It is defined as `f(x) = log(exp(x) + 1)`, where `log` is the natural
    logarithm and `exp` is the exponential function.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.
    """
    # Symbolic tensors are routed through the graph op; eager inputs go
    # straight to the backend implementation.
    if any_symbolic_tensors((x,)):
        return Softplus().symbolic_call(x)
    return backend.nn.softplus(x)
Softplus activation function. It is defined as `f(x) = log(exp(x) + 1)`, where `log` is the natural logarithm and `exp` is the exponential function. Args: x: Input tensor. Returns: A tensor with the same shape as `x`. Example: >>> x = keras.ops.convert_to_tensor([-0.555, 0.0, 0.555]) >>> keras.ops.softplus(x) array([0.45366603, 0.6931472, 1.008666], dtype=float32)
github-repos
def get_data_dirs(__pkg: str) -> List[str]:
    """Return all data directories for given package.

    Args:
        __pkg: Package name.

    Returns:
        Existing data directories for the package, user directory first.
    """
    candidates = [user_data(__pkg)]
    # Append the package subdirectory of every XDG data dir.
    search_path = getenv('XDG_DATA_DIRS', '/usr/local/share/:/usr/share/')
    for base in search_path.split(':'):
        candidates.append(path.expanduser(path.sep.join([base, __pkg])))
    return [d for d in candidates if path.isdir(d)]
Return all data directories for given package. Args: __pkg: Package name
juraj-google-style
def get_object(self, dn, filter, attributes, _connection=None):
    """Gets an object at the specified dn and returns it.

    Args:
        dn (str): The dn of the object to find.
        filter (str): The LDAP syntax search filter.
        attributes (list): A list of LDAP attributes to get when searching.
        _connection (ldap3.Connection): A connection object to use when
            searching. If not given, a temporary connection will be
            created, and destroyed after use.

    Returns:
        dict: A dictionary of the object info from LDAP.
    """
    connection = _connection
    if not connection:
        # No connection supplied: create and bind a temporary one.
        connection = self._make_connection(
            bind_user=self.config.get('LDAP_BIND_USER_DN'),
            bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'))
        connection.bind()
    connection.search(search_base=dn, search_filter=filter, attributes=attributes)
    data = None
    if len(connection.response) > 0:
        data = connection.response[0]['attributes']
        data['dn'] = connection.response[0]['dn']
    # Only tear down connections this call created itself.
    if not _connection:
        self.destroy_connection(connection)
    return data
Gets an object at the specified dn and returns it. Args: dn (str): The dn of the object to find. filter (str): The LDAP syntax search filter. attributes (list): A list of LDAP attributes to get when searching. _connection (ldap3.Connection): A connection object to use when searching. If not given, a temporary connection will be created, and destroyed after use. Returns: dict: A dictionary of the object info from LDAP
codesearchnet
def output_csv(filehandle: TextIO, values: Iterable[str]) -> None:
    """Write a line of CSV. POOR; does not escape things properly.
    DEPRECATED.

    Args:
        filehandle: file to write to.
        values: values to join with commas.
    """
    # Naive join -- values containing commas or quotes are not escaped.
    filehandle.write(','.join(values) + '\n')
Write a line of CSV. POOR; does not escape things properly. DEPRECATED. Args: filehandle: file to write to values: values
codesearchnet
def squeeze(input: ragged_tensor.Ragged, axis=None, name=None):
    """Ragged-compatible squeeze.

    If `input` is a `tf.Tensor`, delegates to `tf.squeeze`. For a
    `tf.RaggedTensor`, removes the listed dimensions, asserting at runtime
    that each squeezed dimension has size 1 everywhere.

    Args:
        input: A potentially ragged tensor.
        axis: Required for ragged inputs: int or list/tuple of ints naming
            the dimensions to squeeze. It is an error to squeeze a
            dimension that is not 1.
        name: A name for the operation (optional).

    Returns:
        A potentially ragged tensor with the requested dimensions removed.
    """
    with ops.name_scope(name, 'RaggedSqueeze', [input]):
        input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input)
        if isinstance(input, tensor.Tensor):
            return array_ops.squeeze(input, axis, name)
        if axis is None:
            raise ValueError('Ragged.squeeze must have an axis argument.')
        if isinstance(axis, int):
            axis = [axis]
        elif not isinstance(axis, (list, tuple)) or not all(isinstance(d, int) for d in axis):
            raise TypeError('Axis must be a list or tuple of integers.')
        dense_dims = []
        ragged_dims = []
        # Normalize negative axes, then partition into ragged vs. dense
        # (uniform inner) dimensions.
        axis = [array_ops.get_positive_axis(d, input.shape.ndims, 'axis[%d]' % i, 'rank(input)') for i, d in enumerate(axis)]
        for dim in axis:
            if dim > input.ragged_rank:
                dense_dims.append(dim - input.ragged_rank)
            else:
                ragged_dims.append(dim)
        # Build runtime assertions that each squeezed ragged dimension is 1.
        assertion_list = []
        scalar_tensor_one = constant_op.constant(1, dtype=input.row_splits.dtype)
        for i, r in enumerate(input.nested_row_lengths()):
            if i + 1 in ragged_dims:
                assertion_list.append(control_flow_assert.Assert(math_ops.reduce_all(math_ops.equal(r, scalar_tensor_one)), ['the given axis (axis = %d) is not squeezable!' % (i + 1)]))
        if 0 in ragged_dims:
            scalar_tensor_two = constant_op.constant(2, dtype=dtypes.int32)
            assertion_list.append(control_flow_assert.Assert(math_ops.equal(array_ops.size(input.row_splits), scalar_tensor_two), ['the given axis (axis = 0) is not squeezable!']))
        # NOTE: removed a dead `squeezed_rt = None` assignment that was
        # immediately overwritten.
        squeezed_rt = control_flow_ops.with_dependencies(assertion_list, input.flat_values)
        if dense_dims:
            squeezed_rt = array_ops.squeeze(squeezed_rt, dense_dims)
        # NOTE: removed a duplicate re-initialization of this list.
        remaining_row_splits = []
        for i, row_split in enumerate(input.nested_row_splits):
            if i + 1 not in ragged_dims:
                remaining_row_splits.append(row_split)
        if remaining_row_splits and 0 in ragged_dims:
            remaining_row_splits.pop(0)
        squeezed_rt = RaggedTensor.from_nested_row_splits(squeezed_rt, remaining_row_splits)
        # If every ragged dimension (including the outermost) was squeezed,
        # the result is dense with a leading size-1 dimension to drop.
        if set(range(0, input.ragged_rank + 1)).issubset(set(ragged_dims)):
            squeezed_rt = array_ops.squeeze(squeezed_rt, [0], name)
        return squeezed_rt
Ragged compatible squeeze. If `input` is a `tf.Tensor`, then this calls `tf.squeeze`. If `input` is a `tf.RaggedTensor`, then this operation takes `O(N)` time, where `N` is the number of elements in the squeezed dimensions. Args: input: A potentially ragged tensor. The input to squeeze. axis: An optional list of ints. Defaults to `None`. If the `input` is ragged, it only squeezes the dimensions listed. It fails if `input` is ragged and axis is []. If `input` is not ragged it calls tf.squeeze. Note that it is an error to squeeze a dimension that is not 1. It must be in the range of [-rank(input), rank(input)). name: A name for the operation (optional). Returns: A potentially ragged tensor. Contains the same data as input, but has one or more dimensions of size 1 removed.
github-repos
def sample(self, qubits: List[ops.Qid], repetitions: int=1):
    """Sample measurement outcomes from the current wave function.

    Does not collapse the wave function. ``True`` corresponds to the
    ``|1>`` state; the outer index of the result is the repetition and the
    inner index follows the supplied qubit order.
    """
    indices = [self.qubit_map[qubit] for qubit in qubits]
    return self._stepper.sample_measurements(indices=indices, repetitions=repetitions)
Samples from the wave function at this point in the computation. Note that this does not collapse the wave function. Returns: Measurement results with True corresponding to the `|1>` state. The outer list is for repetitions, and the inner corresponds to measurements ordered by the supplied qubits.
codesearchnet
def fill(self, background_shape, img):
    """Return a background image of ``background_shape`` derived from ``img``.

    Args:
        background_shape (tuple): target (h, w) shape.
        img: source image.

    Returns:
        A background image produced by the concrete ``_fill`` implementation.
    """
    shape = tuple(background_shape)
    return self._fill(shape, img)
Return a proper background image of background_shape, given img. Args: background_shape (tuple): a shape (h, w) img: an image Returns: a background image
juraj-google-style
def _get_lp_matrix(spin_states, nodes, edges, offset_weight, gap_weight): if len(spin_states) == 0: return None n_states = len(spin_states) m_linear = len(nodes) m_quadratic = len(edges) matrix = np.empty((n_states, m_linear + m_quadratic + 2)) if spin_states.ndim == 1: spin_states = np.expand_dims(spin_states, 1) matrix[:, :m_linear] = spin_states node_indices = dict(zip(nodes, range(m_linear))) for j, (u, v) in enumerate(edges): u_ind = node_indices[u] v_ind = node_indices[v] matrix[:, j + m_linear] = np.multiply(matrix[:, u_ind], matrix[:, v_ind]) matrix[:, -2] = offset_weight matrix[:, -1] = gap_weight return matrix
Creates an linear programming matrix based on the spin states, graph, and scalars provided. LP matrix: [spin_states, corresponding states of edges, offset_weight, gap_weight] Args: spin_states: Numpy array of spin states nodes: Iterable edges: Iterable of tuples offset_weight: Numpy 1-D array or number gap_weight: Numpy 1-D array or a number
juraj-google-style
def __init__(self, parameters, confirms=True):
    """Create a new factory for protocol objects.

    Each protocol instance created from this factory starts by declaring
    the exchanges, queues, and bindings configured elsewhere, so every new
    broker connection begins from the same declared state.

    Args:
        parameters (pika.ConnectionParameters): The connection parameters.
        confirms (bool): If true, attempt to turn on the publish confirms
            extension.
    """
    self._parameters = parameters
    self.confirms = confirms
    self.protocol = FedoraMessagingProtocolV2
    self._client = None
    self._client_deferred = defer.Deferred()
    self._consumers = {}
Create a new factory for protocol objects. Any exchanges, queues, or bindings provided here will be declared and set up each time a new protocol instance is created. In other words, each time a new connection is set up to the broker, it will start with the declaration of these objects. Args: parameters (pika.ConnectionParameters): The connection parameters. confirms (bool): If true, attempt to turn on publish confirms extension.
juraj-google-style
def LSTMCell(weights, m_prev, c_prev, x, pad):
    """Unroll a single clipped-activation LSTM cell forward by one step.

    Args:
        weights: Weight matrix for all four gates.
        m_prev: Previous m states, shape [batch_size, num_nodes].
        c_prev: Previous c states, shape [batch_size, num_nodes].
        x: Input, shape [batch_size, num_inputs].
        pad: [batch_size, 1] of 0/1; 1 means this step is padding and the
            previous (m, c) states are passed through unchanged.

    Returns:
        The next (m, c) states, each [batch_size, num_nodes].
    """
    gate_inputs = math_ops.matmul(array_ops.concat([x, m_prev], 1), weights)
    cell_in, gate_in, gate_forget, gate_out = array_ops.split(
        value=gate_inputs, num_or_size_splits=4, axis=1)
    cell_in = math_ops.tanh(cell_in)
    gate_in = math_ops.sigmoid(gate_in)
    gate_forget = math_ops.sigmoid(gate_forget)
    gate_out = math_ops.sigmoid(gate_out)
    c_new = Clip(Clip(gate_forget * c_prev) + Clip(gate_in * cell_in))
    m_new = Clip(gate_out * c_new)
    # Padded positions keep the previous states.
    c_new = c_prev * pad + c_new * (1.0 - pad)
    m_new = m_prev * pad + m_new * (1.0 - pad)
    return (m_new, c_new)
Unrolls a single LSTM cell with clipped activations forward by one step. Args: weights: Weight matrix with shape LSTMCellWeightsShape. m_prev: Previous m states with shape [batch_size, num_nodes]. c_prev: Previous c states with shape [batch_size, num_nodes]. x: Input with shape [batch_size, num_inputs]. pad: Padding with shape [batch_size, 1]. Each padding value is either 0 or 1, where 1 indicates padding; i.e. the input is shorter than the sequence length, and the (m, c) states should simply be passed through from the previous states. Returns: The next (m, c) states, each with shape [batch_size, num_nodes].
github-repos
def mark_as_done(self, **kwargs):
    """Mark this todo as done on the server.

    Args:
        **kwargs: Extra options forwarded to the server (e.g. ``sudo``).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabTodoError: If the server failed to perform the request.
    """
    endpoint = '%s/%s/mark_as_done' % (self.manager.path, self.id)
    response = self.manager.gitlab.http_post(endpoint, **kwargs)
    self._update_attrs(response)
Mark the todo as done. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTodoError: If the server failed to perform the request
codesearchnet
def get_all_configs():
    """Run all machine-configuration detectors and classify the results.

    Returns:
        Tuple of (all configurations found, missing configurations,
        configurations with warnings, dict of all configurations as JSON
        data).
    """
    # NOTE: every detector is invoked eagerly while building this dict;
    # the values are already results, not callables.
    all_functions = collections.OrderedDict([('Platform', get_platform()), ('CPU', get_cpu_type()), ('CPU arch', get_cpu_arch()), ('Distribution', get_distrib()), ('Distribution version', get_distrib_version()), ('GPU', get_gpu_type()[1]), ('GPU count', get_gpu_count()), ('CUDA version (default)', get_cuda_version_default()), ('CUDA versions (all)', get_cuda_version_all()), ('CUDA compute capability', get_cuda_compute_capability(get_gpu_type()[1])), ('cuDNN version', get_cudnn_version()), ('GCC version', get_gcc_version()), ('Python version (default)', get_python_version()), ('GNU C Lib (glibc) version', get_glibc_version()), ('libstdc++ version', get_libstdcpp_version()), ('CPU ISA (min requirement)', get_cpu_isa_version())])
    configs_found = []
    json_data = {}
    missing = []
    warning = []
    for config, call_func in all_functions.items():
        # `call_func` is the detector's (already computed) result.
        ret_val = call_func
        if not ret_val:
            # Empty/falsy result: render as red "Missing" in the table.
            configs_found.append([config, '\x1b[91m\x1b[1mMissing\x1b[0m'])
            missing.append([config])
            json_data[config] = ''
        elif ret_val == 'unknown':
            # Detector ran but could not classify: yellow warning.
            configs_found.append([config, '\x1b[93m\x1b[1mUnknown type\x1b[0m'])
            warning.append([config, ret_val])
            json_data[config] = 'unknown'
        elif 'ISA' in config:
            # ISA detectors return a pair; ret_val[1] holds the missing
            # requirements (presumably a delimited string — TODO confirm).
            if not ret_val[1]:
                configs_found.append([config, ret_val[0]])
                json_data[config] = ret_val[0]
            else:
                configs_found.append([config, '\x1b[91m\x1b[1mMissing ' + str(ret_val[1][1:-1]) + '\x1b[0m'])
                missing.append([config, '\n\t=> Found %s but missing %s' % (str(ret_val[0]), str(ret_val[1]))])
                json_data[config] = ret_val[0]
        else:
            configs_found.append([config, ret_val])
            json_data[config] = ret_val
    return (configs_found, missing, warning, json_data)
Runs all functions for detecting user machine configurations. Returns: Tuple (List of all configurations found, List of all missing configurations, List of all configurations found with warnings, Dict of all configurations)
github-repos
def read_message(self, timeout):
    """Read one AdbMessage from this transport.

    Performs up to two USB reads under the reader lock: one for the fixed
    header and, when the header announces a payload, one for the data.

    Args:
        timeout: timeouts.PolledTimeout covering the entire read.

    Returns:
        The ADB message read from the device.

    Raises:
        usb_exceptions.AdbProtocolError: connection lost or a malformed
            header.
    """
    with self._reader_lock:
        raw_header = self._transport.read(struct.calcsize(AdbMessage.HEADER_STRUCT_FORMAT), timeout.remaining_ms)
        if (not raw_header):
            raise usb_exceptions.AdbProtocolError('Adb connection lost')
        try:
            raw_message = RawAdbMessage(*struct.unpack(AdbMessage.HEADER_STRUCT_FORMAT, raw_header))
        except struct.error as exception:
            raise usb_exceptions.AdbProtocolError('Unable to unpack ADB command (%s): %s (%s)', AdbMessage.HEADER_STRUCT_FORMAT, raw_header, exception)
        if (raw_message.data_length > 0):
            # If the timeout expired between header and data, still try to
            # read the payload with a short 10ms grace timeout so the
            # stream does not desynchronize.
            if timeout.has_expired():
                _LOG.warning('Timed out between AdbMessage header and data, reading data anyway with 10ms timeout')
                timeout = timeouts.PolledTimeout.from_millis(10)
            data = self._transport.read(raw_message.data_length, timeout.remaining_ms)
        else:
            data = ''
        return raw_message.to_adb_message(data)
Read an AdbMessage from this transport. Args: timeout: Timeout for the entire read operation, in the form of a timeouts.PolledTimeout instance. Note that for packets with a data payload, two USB reads are performed. Returns: The ADB message read from the device. Raises: UsbReadFailedError: There's an error during read, including timeout. AdbProtocolError: A message is incorrectly formatted. AdbTimeoutError: timeout is already expired, or expires before we read the entire message, specifically between reading header and data packets.
codesearchnet
def _to_tensor(x, dtype):
    """Convert ``x`` (numpy array, list, or tensor) to a tensor of ``dtype``.

    Args:
        x: An object to be converted.
        dtype: The destination type.

    Returns:
        A tensor.
    """
    converted = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
    return converted
Convert the input `x` to a tensor of type `dtype`. Args: x: An object to be converted (numpy array, list, tensors). dtype: The destination type. Returns: A tensor.
github-repos
def format_error_message(exception_message, task_exception=False):
    """Improve the formatting of an exception thrown by a remote function.

    Removes the uninformative traceback frames (lines 1-2) that wrap every
    remotely executed task, keeping the header line and the user frames.
    (Also removes a stray dead ``pass`` statement from the original.)

    Args:
        exception_message (str): A message generated by
            ``traceback.format_exc()``.
        task_exception (bool): True when the exception came from a remote
            task, in which case the wrapper frames are stripped.

    Returns:
        str: The formatted exception message.
    """
    lines = exception_message.split("\n")
    if task_exception:
        # Keep the header (line 0) and everything from line 3 on.
        lines = lines[0:1] + lines[3:]
    return "\n".join(lines)
Improve the formatting of an exception thrown by a remote function. This method takes a traceback from an exception and makes it nicer by removing a few uninformative lines and adding some space to indent the remaining lines nicely. Args: exception_message (str): A message generated by traceback.format_exc(). Returns: A string of the formatted exception message.
juraj-google-style
def robust_zscore(mat, ctrl_mat=None, min_mad=0.1):
    """Robustly z-score a pandas df along the rows.

    Args:
        mat (pandas df): Matrix of data to z-score.
        ctrl_mat (pandas df): Optional matrix from which medians and MADs
            are computed (e.g. vehicle control); defaults to ``mat`` itself.
        min_mad (float): Floor for the MAD; tiny MADs would otherwise blow
            the z-scores up.

    Returns:
        pandas df: z-scored data, rounded to ``rounding_precision``.
    """
    reference = ctrl_mat if ctrl_mat is not None else mat
    medians = reference.median(axis=1)
    median_devs = abs(reference.subtract(medians, axis=0))
    centered = mat.subtract(medians, axis='index')
    mads = median_devs.median(axis=1).clip(lower=min_mad)
    # 1.4826 scales the MAD to be a consistent estimator of the standard
    # deviation under normality.
    zscore_df = centered.divide(mads * 1.4826, axis='index')
    return zscore_df.round(rounding_precision)
Robustly z-score a pandas df along the rows. Args: mat (pandas df): Matrix of data that z-scoring will be applied to ctrl_mat (pandas df): Optional matrix from which to compute medians and MADs (e.g. vehicle control) min_mad (float): Minimum MAD to threshold to; tiny MAD values will cause z-scores to blow up Returns: zscore_df (pandas_df): z-scored data
juraj-google-style
def Equals(self, other):
    """Test for equality with another input reference.

    Two references are equal when they share the same previous hash
    (compared by byte representation) and the same previous index.

    Args:
        other (obj): Object to compare against; may be None.

    Returns:
        bool: True if ``other`` equals self.
    """
    if other is None:
        return False
    return (other.PrevHash.ToBytes() == self.PrevHash.ToBytes()
            and other.PrevIndex == self.PrevIndex)
Test for equality. Args: other (obj): Returns: bool: True `other` equals self.
codesearchnet
def decode(self, ids):
    """Transform a sequence of samples into a waveform file.

    Args:
        ids: sequence of sample values to be written.

    Returns:
        str: Path to the temporary file where the waveform was saved.
    """
    fd, tmp_file_path = tempfile.mkstemp()
    # mkstemp leaves an OS-level descriptor open; the original
    # implementation leaked it on every call. wavfile.write opens the
    # path itself, so the descriptor can be closed immediately.
    os.close(fd)
    wavfile.write(tmp_file_path, self._sample_rate, np.asarray(ids))
    return tmp_file_path
Transform a sequence of float32 into a waveform. Args: ids: list of integers to be converted. Returns: Path to the temporary file where the waveform was saved. Raises: ValueError: if the ids are not of the appropriate size.
codesearchnet
def grid_destroy_from_ids(oargrid_jobids):
    """Destroy all the jobs with the given ids.

    Args:
        oargrid_jobids (list): ``(site, oar_job_id)`` tuples identifying
            the jobs on each site.
    """
    for job in grid_reload_from_ids(oargrid_jobids):
        job.delete()
    logger.info('Killing the jobs %s' % oargrid_jobids)
Destroy all the jobs with corresponding ids Args: oargrid_jobids (list): the ``(site, oar_job_id)`` list of tuple identifying the jobs for each site.
codesearchnet
def _parallel_part_functions(fns: Sequence[PartWithMatchFn], part: _T, with_default_output: bool=False, with_always_output: bool=False) -> AsyncIterable[_T]:
    """Execute every matching part function on `part` concurrently.

    Unlike chaining, each fn runs on exactly `part`; the resulting async
    iterables are concatenated in the order of `fns`. Must be called in an
    async context: tasks are scheduled eagerly via `_eager_run_fn`.

    Args:
        fns: (fn, match_fn) pairs; only fns whose match_fn accepts `part`
            are executed.
        part: the part to execute the functions on.
        with_default_output: when True, yield `part` if no fn produced any
            output.
        with_always_output: when True, always yield `part` at the end,
            regardless of fn output (supersedes `with_default_output`).

    Returns:
        A non-blocking AsyncIterable over the concatenated results.
    """
    # Schedule all matching fns immediately; results are consumed lazily.
    c_iters = [_eager_run_fn(fn, part) for fn, match_fn in fns if match_fn(part)]

    async def result_iter():
        has_output = False
        for c_iter in c_iters:
            async for c in c_iter:
                has_output = True
                yield c
        # Trailing `part` per the default/always-output flags.
        if with_always_output or (not has_output and with_default_output):
            yield part
    return result_iter()
Executes each part function in a sequence of part functions concurrently. This method is similar to `_chain_part_functions` except that all of the PartFns are exectued on exactly `part` instead of being chained together. The resulting AsyncIterables returned by call each fn are concatenated together in the provided fns order. This must be called called in an async context. It immediately schedules tasks on the event loop to execute each fn in fns on on the part. Args: fns: the part functions to execute on the part. part: the part to execute the function on. with_default_output: When True if the resulting Iterable is empty `part` will be yielded. with_always_output: When True the input part will be yielded regardless of the output of the fns. This is a stronger condition than `with_default_output`. When `with_always_output` is True, `with_default_output` is basically ignored. Returns: An AsyncIterable that can be used to retrieve the results. NOTE: this method is non-blocking.
github-repos
def init(images, num_channels, dim='2d', stride=2, kernel_size=7, maxpool=True, training=True, scope='init'):
    """Standard ResNet initial block used as the first RevNet block.

    Args:
        images: Input image tensor.
        num_channels: Output depth of the initial convolution.
        dim: '2d' or '3d'.
        stride: Stride for the convolution and pool layer.
        kernel_size: Size of the initial convolution filter.
        maxpool: If true, apply a maxpool after the convolution.
        training: True for train phase, False for eval phase.
        scope: Optional variable scope for the block.

    Returns:
        Two output activations, split along the configured channel axis.
    """
    conv_fn = CONFIG[dim]['conv']
    pool_fn = CONFIG[dim]['max_pool']
    with tf.variable_scope(scope):
        out = conv_fn(images, num_channels, kernel_size, strides=stride, padding='SAME', activation=None)
        out = tf.layers.batch_normalization(out, training=training)
        out = tf.nn.relu(out)
        if maxpool:
            out = pool_fn(out, pool_size=3, strides=stride)
        first_half, second_half = tf.split(out, 2, axis=CONFIG[dim]['split_axis'])
        return (first_half, second_half)
Standard ResNet initial block used as first RevNet block. Args: images: [N, H, W, 3] tensor of input images to the model. num_channels: Output depth of convolutional layer in initial block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. stride: stride for the convolution and pool layer. kernel_size: Size of the initial convolution filter maxpool: If true, apply a maxpool after the convolution training: True for train phase, False for eval phase. scope: Optional scope for the init block. Returns: Two [N, H, W, C] output activations from input images.
codesearchnet
def physical_name(self):
    """str: The physical name of the seat.

    This is usually set by the system or lower levels of the stack; for
    udev-created contexts it matches the value passed to ``assign_seat``.
    """
    name_ptr = self._libinput.libinput_seat_get_physical_name(self._handle)
    return string_at(name_ptr).decode()
The physical name of the seat. For libinput contexts created from udev, this is always the same value as passed into :meth:`~libinput.LibInputUdev.assign_seat` and all seats from that context will have the same physical name. The physical name of the seat is one that is usually set by the system or lower levels of the stack. In most cases, this is the base filter for devices - devices assigned to seats outside the current seat will not be available to the caller. Returns: str: The physical name of this seat.
codesearchnet
def load_many(self, fobjs=None):
    """Load as many files as there are pages.

    Args:
        fobjs: a single filename or DataFile, or a list of them (``None``
            entries are skipped).
    """
    if fobjs is None:
        return
    # A bare string is itself iterable, so the plain hasattr() check used
    # previously would wrongly iterate a single filename character by
    # character; wrap scalars (including strings) explicitly.
    if isinstance(fobjs, str) or not hasattr(fobjs, "__iter__"):
        fobjs = [fobjs]
    for index, (fobj, page) in enumerate(zip(fobjs, self.pages)):
        if fobj is None:
            continue
        if isinstance(fobj, str):
            self.load_filename(fobj, index)
        elif isinstance(fobj, ft.DataFile):
            self.load(fobj, index)
        else:
            raise TypeError("Invalid object of class '{}'".format(fobj.__class__.__name__))
Loads as many files as the number of pages Args: fobjs: [filename or DataFile obj, ...]
juraj-google-style
def extract_attribute_array(self, data_array, var_name):
    """Extract per-timestep values from a 2D array on the object's grid.

    ``data_array`` must share dimensions with the grid used to identify
    the object; one value at (i[t], j[t]) is appended per timestep under
    ``self.attributes[var_name]``.

    Args:
        data_array: 2D numpy array.
        var_name: Attribute key to store the extracted values under.
    """
    # `in` on the dict directly; calling .keys() was redundant.
    if var_name not in self.attributes:
        self.attributes[var_name] = []
    for t in range(self.times.size):
        self.attributes[var_name].append(data_array[self.i[t], self.j[t]])
Extracts data from a 2D array that has the same dimensions as the grid used to identify the object. Args: data_array: 2D numpy array
codesearchnet
def make_iaf_stack(total_event_size, num_hidden_layers=2, seed=None, dtype=tf.float32):
    """Create a stacked IAF bijector operating on vector-valued events.

    Builds three IAF layers interleaved with order-reversing permutations
    so every dimension can condition on every other across the stack.

    Args:
        total_event_size: Number of dimensions to operate over.
        num_hidden_layers: Hidden layers in each IAF's MADE network.
        seed: Random seed for the initializers.
        dtype: DType for the variables.

    Returns:
        bijector: The composed bijector.
    """
    seed = tfd.SeedStream(seed, 'make_iaf_stack')

    def make_iaf():
        """Create one IAF layer."""
        # Small variance scaling keeps the initial flow close to identity;
        # seed is reduced mod 2**31 - 1 to fit the initializer's range.
        initializer = tf.compat.v2.keras.initializers.VarianceScaling((2 * 0.01), seed=(seed() % ((2 ** 31) - 1)))
        made = tfb.AutoregressiveLayer(params=2, event_shape=[total_event_size], hidden_units=([total_event_size] * num_hidden_layers), activation=tf.nn.elu, kernel_initializer=initializer, dtype=dtype)

        def shift_and_scale(x):
            # Pin the trailing event dimension so the MADE layer can build.
            x.set_shape(x.shape.merge_with((([None] * (x.shape.ndims - 1)) + [total_event_size])))
            return tf.unstack(made(x), num=2, axis=(- 1))
        return tfb.Invert(tfb.MaskedAutoregressiveFlow(shift_and_scale))

    def make_swap():
        """Create a dimension-reversing permutation bijector."""
        permutation = list(reversed(range(total_event_size)))
        return tfb.Permute(permutation)
    bijector = make_iaf()
    bijector = make_swap()(bijector)
    bijector = make_iaf()(bijector)
    bijector = make_swap()(bijector)
    bijector = make_iaf()(bijector)
    bijector = make_swap()(bijector)
    return bijector
Creates an stacked IAF bijector. This bijector operates on vector-valued events. Args: total_event_size: Number of dimensions to operate over. num_hidden_layers: How many hidden layers to use in each IAF. seed: Random seed for the initializers. dtype: DType for the variables. Returns: bijector: The created bijector.
codesearchnet
def context_managers(self, kwargs):
    """Return context managers for running the test combination.

    The test combination runs under all context managers returned by all
    ``TestCombination`` instances; this base implementation contributes
    none.

    Args:
        kwargs: Arguments and their values passed to the test combination
            (unused here).

    Returns:
        A list of instantiated context managers (empty).
    """
    del kwargs  # Accepted for interface compatibility only.
    return list()
Return context managers for running the test combination. The test combination will run under all context managers that all `TestCombination` instances return. Args: kwargs: Arguments and their values that are passed to the test combination. Returns: A list of instantiated context managers.
github-repos
def synthetic_source(self, value):
    """Set the synthetic_source property.

    Storing the default value removes the explicit entry instead, so only
    non-default values are persisted.

    Args:
        value (string): the property value.
    """
    key = 'ai.operation.syntheticSource'
    if value == self._defaults[key] and key in self._values:
        del self._values[key]
    else:
        self._values[key] = value
The synthetic_source property. Args: value (string). the property value.
juraj-google-style
def Dump(self, output):
    """Serialize this IncrementalUploadHelper into a file-like object.

    Args:
        output: file-like object the YAML status is written to.

    Raises:
        GoogleAdsError: If a YAMLError occurs while writing to the file.
    """
    state = {
        'current_content_length': self._current_content_length,
        'is_last': self._is_last,
        'server': self._request_builder.GetServer(),
        'upload_url': self._upload_url,
        'version': self._request_builder.GetVersion(),
    }
    try:
        yaml.dump(state, output)
    except yaml.YAMLError as e:
        raise googleads.errors.GoogleAdsError(
            'Error dumping IncrementalUploadHelper to file: %s' % str(e))
Serialize the IncrementalUploadHelper and store in file-like object. Args: output: a file-like object where the status of the IncrementalUploadHelper will be written. Raises: GoogleAdsError: If a YAMLError occurs while writing to the file.
codesearchnet
def _DeserializeAttributeContainer(self, container_type, serialized_data): if (not serialized_data): return None if self._serializers_profiler: self._serializers_profiler.StartTiming(container_type) try: serialized_string = serialized_data.decode('utf-8') except UnicodeDecodeError as exception: raise IOError('Unable to decode serialized data: {0!s}'.format(exception)) attribute_container = self._serializer.ReadSerialized(serialized_string) if self._serializers_profiler: self._serializers_profiler.StopTiming(container_type) return attribute_container
Deserializes an attribute container. Args: container_type (str): attribute container type. serialized_data (bytes): serialized attribute container data. Returns: AttributeContainer: attribute container or None. Raises: IOError: if the serialized data cannot be decoded. OSError: if the serialized data cannot be decoded.
codesearchnet
def is_github_repo_owner_the_official_one(context, repo_owner):
    """Check whether ``repo_owner`` matches the configured official owner.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        repo_owner (str): the repo owner to verify.

    Raises:
        ConfigError: when no official owner was configured.

    Returns:
        bool: True when ``repo_owner`` matches the configured owner.
    """
    official_repo_owner = context.config['official_github_repos_owner']
    if official_repo_owner:
        return official_repo_owner == repo_owner
    raise ConfigError(
        'This worker does not have a defined owner for official GitHub repositories. '
        'Given "official_github_repos_owner": {}'.format(official_repo_owner))
Given a repo_owner, check if it matches the one configured to be the official one. Args: context (scriptworker.context.Context): the scriptworker context. repo_owner (str): the repo_owner to verify Raises: scriptworker.exceptions.ConfigError: when no official owner was defined Returns: bool: True when ``repo_owner`` matches the one configured to be the official one
juraj-google-style
def _ExpectedKeysForEntry(self, entry): return [entry.name]
Generate a list of expected cache keys for this type of map. Args: entry: A SshkeyMapEntry Returns: A list of strings
github-repos
def FindHeader(self, header):
    """Check whether a header has already been included.

    Args:
        header: header to check.

    Returns:
        Line number of the previous occurrence, or -1 if the header has
        not been seen before.
    """
    for section in self.include_list:
        for include_name, line_number in section:
            if include_name == header:
                return line_number
    return -1
Check if a header has already been included. Args: header: header to check. Returns: Line number of previous occurrence, or -1 if the header has not been seen before.
juraj-google-style
def power_spectral_density(x, time_step, freq_range=None):
    """Return the single-sided PSD of time trace ``x``.

    Args:
        x (array): time trace sampled at intervals ``time_step``.
        time_step (float): sampling interval of x.
        freq_range (array or tuple): optional [f_min, f_max]; only the
            spectrum within this range is returned.

    Returns:
        tuple: (frequencies, power spectral density).
    """
    num_samples = len(x)
    psd = 2 * np.abs(np.fft.rfft(x)) ** 2 / num_samples * time_step
    freqs = np.fft.rfftfreq(len(x), time_step)
    if freq_range is not None:
        in_range = np.all([freqs >= freq_range[0], freqs <= freq_range[1]], axis=0)
        psd = psd[in_range]
        freqs = freqs[in_range]
    return freqs, psd
returns the *single sided* power spectral density of the time trace x which is sampled at intervals time_step Args: x (array): timetrace time_step (float): sampling interval of x freq_range (array or tuple): frequency range in the form [f_min, f_max] to return only the spectrum within this range Returns:
juraj-google-style
def set_flat(self, new_weights):
    """Set all network weights from a flat array.

    Note: this sets all weights at once; the length of the array must
    match ``get_flat_size``.

    Args:
        new_weights (np.ndarray): Flat array containing weights.
    """
    self._check_sess()
    shapes = [variable.get_shape().as_list() for variable in self.variables.values()]
    arrays = unflatten(new_weights, shapes)
    feed = {self.placeholders[name]: array
            for name, array in zip(self.variables.keys(), arrays)}
    self.sess.run(list(self.assignment_nodes.values()), feed_dict=feed)
Sets the weights to new_weights, converting from a flat array. Note: You can only set all weights in the network using this function, i.e., the length of the array must match get_flat_size. Args: new_weights (np.ndarray): Flat array containing weights.
codesearchnet
def getStreamNetworkAsWkt(self, session, withNodes=True):
    """Retrieve the stream network geometry in Well Known Text format.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
            object bound to a PostGIS enabled database.
        withNodes (bool, optional): Include nodes. Defaults to True.

    Returns:
        str: Well Known Text string.
    """
    wkt_list = []
    for link in self.streamLinks:
        wkt_link = link.getAsWkt(session)
        if wkt_link:
            wkt_list.append(wkt_link)
        if withNodes:
            for node in link.nodes:
                wkt_node = node.getAsWkt(session)
                if wkt_node:
                    wkt_list.append(wkt_node)
    # 'GEOMETRYCOLLECTION' is the valid WKT keyword; the previous
    # 'GEOMCOLLECTION' spelling produced unparseable WKT.
    return 'GEOMETRYCOLLECTION ({0})'.format(', '.join(wkt_list))
Retrieve the stream network geometry in Well Known Text format. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database withNodes (bool, optional): Include nodes. Defaults to False. Returns: str: Well Known Text string.
juraj-google-style
def refresh(self, refresh_binary=True):
    """Perform a GET request and refresh this resource's state.

    Args:
        refresh_binary (bool): when True and the resource is a
            NonRDFSource, also refresh the binary payload.

    Returns:
        None
    """
    updated_self = self.repo.get_resource(self.uri)
    # The repository must report the same resource type we instantiated.
    if not isinstance(self, type(updated_self)):
        raise Exception('Instantiated %s, but repository reports this resource is %s' % (type(updated_self), type(self)))
    if updated_self:
        self.status_code = updated_self.status_code
        self.rdf.data = updated_self.rdf.data
        self.headers = updated_self.headers
        self.exists = updated_self.exists
        # NonRDFSource resources carry no parseable RDF body.
        if type(self) != NonRDFSource:
            self._parse_graph()
        self.versions = SimpleNamespace()
        if type(updated_self) == NonRDFSource and refresh_binary:
            self.binary.refresh(updated_self)
        # Optional subclass hook.
        if hasattr(self, '_post_refresh'):
            self._post_refresh()
        del updated_self
    else:
        # Fix: the original format string contained '%s' but passed no
        # argument, logging the literal placeholder text.
        logger.debug('resource %s not found, dumping values', self.uri)
        self._empty_resource_attributes()
Performs GET request and refreshes RDF information for resource. Args: None Returns: None
codesearchnet
def object_hook(obj):
    """JSON deserialization hook with `__type` class hinting.

    If a `__type` hint names a schema class with `from_json`, the object
    is reconstructed through it; otherwise single-key markers (' t', ' u',
    ' b', ' m', ' d') are decoded to tuple/UUID/bytes/Markup/datetime.
    Falls back to returning the dict unchanged; on error, logs and
    implicitly returns None.

    Args:
        obj: Object to be deserialized.

    Returns:
        Deserialized object or the original dict.
    """
    try:
        if '__type' in obj:
            obj_type = obj['__type']
            cls = getattr(cloud_inquisitor.schema, obj_type)
            if hasattr(cls, 'from_json'):
                return cls.from_json(obj)
        # Marker keys are prefixed with a space to avoid clashing with
        # real data keys.
        key, value = next(iter(obj.items()))
        if key == ' t':
            return tuple(value)
        elif key == ' u':
            return uuid.UUID(value)
        elif key == ' b':
            return b64decode(value)
        elif key == ' m':
            return Markup(value)
        elif key == ' d':
            return parse_date(value)
        return obj
    except Exception:
        # Deliberate best-effort: log and fall through (returns None).
        log.exception('Error during data deserialization')
Checks to see if the `__type`-hinting field is available in the object being de-serialized. If present, and the class referenced has a `from_json` function it will return the generated object, else a standard dict will be returned

Args:
    obj: Object to be deserialized

Returns:
    Deserialized object or regular python object
juraj-google-style
def read(self, size=-1):
    """Read and return up to ``size`` bytes.

    Uses at most one call to the underlying raw stream's read method per
    buffer. A read of exactly one buffer size is served from the preload
    queue; other sizes go through ``readinto``.

    Args:
        size (int): Number of bytes to read. -1 reads to end of stream.

    Returns:
        bytes: Object content.
    """
    if not self._readable:
        raise UnsupportedOperation('read')
    # At end of stream: nothing left to return.
    if self._seek == self._size:
        return b''
    if size == self._buffer_size:
        # Fast path: hand out the next preloaded buffer.
        queue_index = self._seek
        if queue_index == 0:
            self._preload_range()
        with handle_os_exceptions():
            buffer = self._read_queue.pop(queue_index).result()
        buffer_size = self._buffer_size
        # Schedule the read that keeps `_max_buffers` buffers in flight.
        index = queue_index + buffer_size * self._max_buffers
        if index < self._size:
            self._read_queue[index] = self._workers.submit(
                self._read_range, index, index + buffer_size)
            self._seek += buffer_size
        else:
            self._seek = self._size
        return buffer
    # Slow path: delegate to readinto with an appropriately sized buffer.
    if size != -1:
        buffer = bytearray(size)
    else:
        buffer = bytearray()
    read_size = self.readinto(buffer)
    return memoryview(buffer)[:read_size].tobytes()
Read and return up to size bytes, with at most one call to the underlying raw stream’s. Use at most one call to the underlying raw stream’s read method. Args: size (int): Number of bytes to read. -1 to read the stream until end. Returns: bytes: Object content
juraj-google-style
def local_batch_predict(model_dir, csv_file_pattern, output_dir, output_format, batch_size=100):
    """Batch predict with a SavedModel over CSV input files.

    Writes one results file per input CSV (named
    ``predict_results_<basename>.<format>``) plus a
    ``predict_results_schema.json`` describing the outputs.

    Args:
        model_dir: Directory containing a SavedModel (saved_model.pb).
        csv_file_pattern: glob pattern of CSV files to predict on.
        output_dir: path of the output directory (created if needed).
        output_format: 'csv' or 'json'.
        batch_size: rows fed to the session per run; larger is faster but
            uses more memory.

    Raises:
        ValueError: if no input files match the pattern.
    """
    file_io.recursive_create_dir(output_dir)
    csv_files = file_io.get_matching_files(csv_file_pattern)
    if (len(csv_files) == 0):
        raise ValueError(('No files found given ' + csv_file_pattern))
    with tf.Graph().as_default(), tf.Session() as sess:
        (input_alias_map, output_alias_map) = _tf_load_model(sess, model_dir)
        # The model is assumed to take a single CSV-line string input —
        # TODO confirm for multi-input models.
        csv_tensor_name = list(input_alias_map.values())[0]
        output_schema = _get_output_schema(sess, output_alias_map)
        for csv_file in csv_files:
            output_file = os.path.join(output_dir, ((('predict_results_' + os.path.splitext(os.path.basename(csv_file))[0]) + '.') + output_format))
            with file_io.FileIO(output_file, 'w') as f:
                prediction_source = _batch_csv_reader(csv_file, batch_size)
                for batch in prediction_source:
                    # Drop empty lines and trailing newlines before feeding.
                    batch = [l.rstrip() for l in batch if l]
                    predict_results = sess.run(fetches=output_alias_map, feed_dict={csv_tensor_name: batch})
                    formatted_results = _format_results(output_format, output_schema, predict_results)
                    f.write(('\n'.join(formatted_results) + '\n'))
        file_io.write_string_to_file(os.path.join(output_dir, 'predict_results_schema.json'), json.dumps(output_schema, indent=2))
Batch Predict with a specified model. It does batch prediction, saves results to output files and also creates an output schema file. The output file names are input file names prepended by 'predict_results_'. Args: model_dir: The model directory containing a SavedModel (usually saved_model.pb). csv_file_pattern: a pattern of csv files as batch prediction source. output_dir: the path of the output directory. output_format: csv or json. batch_size: Larger batch_size improves performance but may cause more memory usage.
codesearchnet
def concatenate_context_input(context_input, sequence_input):
    """Replicate ``context_input`` across all timesteps of ``sequence_input``.

    Expands dimension 1 of ``context_input``, tiles it over the padded
    sequence length, and appends it to ``sequence_input`` on dimension 2.

    Args:
        context_input: float32 `Tensor` of shape `[batch_size, d1]`.
        sequence_input: float32 `Tensor` of shape
            `[batch_size, padded_length, d0]`.

    Returns:
        float32 `Tensor` of shape `[batch_size, padded_length, d0 + d1]`.

    Raises:
        ValueError: If `sequence_input` does not have rank 3 or
            `context_input` does not have rank 2.
    """
    rank_check_seq = check_ops.assert_rank(sequence_input, 3, message='sequence_input must have rank 3', data=[array_ops.shape(sequence_input)])
    type_check_seq = check_ops.assert_type(sequence_input, dtypes.float32, message='sequence_input must have dtype float32; got {}.'.format(sequence_input.dtype))
    rank_check_ctx = check_ops.assert_rank(context_input, 2, message='context_input must have rank 2', data=[array_ops.shape(context_input)])
    type_check_ctx = check_ops.assert_type(context_input, dtypes.float32, message='context_input must have dtype float32; got {}.'.format(context_input.dtype))
    with ops.control_dependencies([rank_check_seq, type_check_seq, rank_check_ctx, type_check_ctx]):
        padded_length = array_ops.shape(sequence_input)[1]
        tiled_context = array_ops.tile(array_ops.expand_dims(context_input, 1), array_ops.concat([[1], [padded_length], [1]], 0))
        return array_ops.concat([sequence_input, tiled_context], 2)
Replicates `context_input` across all timesteps of `sequence_input`. Expands dimension 1 of `context_input` then tiles it `sequence_length` times. This value is appended to `sequence_input` on dimension 2 and the result is returned. Args: context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`. sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size, padded_length, d0]`. Returns: A `Tensor` of dtype `float32` and shape `[batch_size, padded_length, d0 + d1]`. Raises: ValueError: If `sequence_input` does not have rank 3 or `context_input` does not have rank 2.
github-repos
def mean_area_distance(item_a, item_b, max_value):
    """Absolute difference of the time-mean areas of two tracks.

    Args:
        item_a: STObject from the first set in TrackMatcher.
        item_b: STObject from the second set in TrackMatcher.
        max_value: Maximum distance used as a scaling value.

    Returns:
        Scaled distance value.
    """
    mean_a = np.mean([item_a.size(t) for t in item_a.times])
    mean_b = np.mean([item_b.size(t) for t in item_b.times])
    return np.abs(mean_a - mean_b) / float(max_value)
Absolute difference in the means of the areas of each track over time. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
codesearchnet
def corrcoef(x):
    """Compute the Pearson correlation coefficient matrix.

    Args:
        x: A 2D tensor of shape `(N, D)` — N variables, D observations.

    Returns:
        A tensor of shape `(N, N)` with the correlation matrix.
    """
    if not any_symbolic_tensors((x,)):
        return backend.numpy.corrcoef(x)
    return Corrcoef().symbolic_call(x)
Compute the Pearson correlation coefficient matrix. Args: x: A 2D tensor of shape `(N, D)`, where N is the number of variables and D is the number of observations. Returns: A tensor of shape `(N, N)` representing the correlation matrix.
github-repos
def rasterize(layer, rast):
    """Return a Raster burned from the layer's features.

    Arguments:
    layer -- Layer to rasterize
    rast -- Raster providing the target affine, size, and sref
    """
    # In-memory target raster matching the reference raster's geometry.
    driver = ImageDriver('MEM')
    r2 = driver.raster(driver.ShortName, rast.size)
    r2.affine = rast.affine
    sref = rast.sref
    # Fall back to WGS84 when the reference has no spatial reference id.
    if not sref.srid:
        sref = SpatialReference(4326)
    r2.sref = sref
    # Copy features into a memory layer so attribute ids can be burned.
    ml = MemoryLayer(sref, layer.GetGeomType())
    ml.load(layer)
    status = gdal.RasterizeLayer(
        r2.ds, (1,), ml.layer, options=['ATTRIBUTE=%s' % ml.id])
    ml.close()
    return r2
Returns a Raster from layer features. Arguments: layer -- Layer to rasterize rast -- Raster with target affine, size, and sref
juraj-google-style
def get_local_filter_directives(ast, current_schema_type, inner_vertex_fields):
    """Collect all filter directives that apply to the current field.

    Some vertex field filter operators apply to the *outer* scope (the scope
    containing the vertex field they are written on) rather than the inner
    scope. Those are collected here from the inner vertex fields, while
    ordinary filters are collected directly from the current AST node.

    Args:
        ast: GraphQL AST object for which to load local filters.
        current_schema_type: GraphQLType, the schema type at the current
            AST location.
        inner_vertex_fields: list of inner AST objects representing vertex
            fields within the current field, or None when processing a
            property field.

    Returns:
        list of FilterOperationInfo objects. If field_ast is an
        InlineFragment, field_name is set to None.

    Raises:
        GraphQLCompilationError: if an outer-scope vertex field operator is
            placed on a property field or on a type coercion.
    """
    result = []
    # Pass 1: filters written directly on the current AST node.
    if ast.directives:
        for directive_obj in ast.directives:
            if (directive_obj.name.value == 'filter'):
                filtered_field_name = get_ast_field_name_or_none(ast)
                if is_filter_with_outer_scope_vertex_field_operator(directive_obj):
                    # Outer-scope operators are only *validated* here; they are
                    # collected when the parent scope processes this node via
                    # its own inner_vertex_fields (pass 2 below).
                    if (not is_vertex_field_type(current_schema_type)):
                        raise GraphQLCompilationError(u'Found disallowed filter on a property field: {} {} {}'.format(directive_obj, current_schema_type, filtered_field_name))
                    elif isinstance(ast, InlineFragment):
                        raise GraphQLCompilationError(u'Found disallowed filter on a type coercion: {} {}'.format(directive_obj, current_schema_type))
                    else:
                        # Legal placement; deliberately not collected here.
                        pass
                else:
                    operation = FilterOperationInfo(directive=directive_obj, field_name=filtered_field_name, field_type=current_schema_type, field_ast=ast)
                    result.append(operation)
    # Pass 2: outer-scope operators written on this field's inner vertex
    # fields apply to *this* scope, so collect them here.
    if inner_vertex_fields:
        for inner_ast in inner_vertex_fields:
            for directive_obj in inner_ast.directives:
                if is_filter_with_outer_scope_vertex_field_operator(directive_obj):
                    filtered_field_name = get_ast_field_name(inner_ast)
                    filtered_field_type = get_vertex_field_type(current_schema_type, filtered_field_name)
                    operation = FilterOperationInfo(directive=directive_obj, field_name=filtered_field_name, field_type=filtered_field_type, field_ast=inner_ast)
                    result.append(operation)
    return result
Get all filter directives that apply to the current field. This helper abstracts away the fact that some vertex field filtering operators apply on the inner scope (the scope of the inner vertex field on which they are applied), whereas some apply on the outer scope (the scope that contains the inner vertex field). See filters.py for more information. Args: ast: a GraphQL AST object for which to load local filters, from the graphql library current_schema_type: GraphQLType, the schema type at the current AST location inner_vertex_fields: a list of inner AST objects representing vertex fields that are within the current field. If currently processing a property field (i.e. there are no inner vertex fields), this argument may be set to None. Returns: list of FilterOperationInfo objects. If the field_ast field is of type InlineFragment, the field_name field is set to None.
codesearchnet
def get_cursor(self):
    """Return the virtual cursor position as an (x, y) tuple.

    An x coordinate past the right edge wraps onto following rows; when
    the resulting y falls off the bottom and scrollMode is 'scroll', it is
    clamped to the last row.

    Returns:
        Tuple[int, int]: The (x, y) coordinate where printing continues.
    """
    x, y = self._cursor
    width, height = self.parent.get_size()
    if x >= width:
        # Equivalent to repeatedly subtracting width while advancing rows.
        overflow_rows, x = divmod(x, width)
        y += overflow_rows
    if y >= height and self.scrollMode == 'scroll':
        y = height - 1
    return (x, y)
Return the virtual cursor position. The cursor can be moved with the :any:`move` method. Returns: Tuple[int, int]: The (x, y) coordinate of where :any:`print_str` will continue from. .. seealso:: :any:`move`
codesearchnet
def scan_file(path):
    """Scan `path` for viruses using the ``clamscan`` program.

    Args:
        path (str): Relative or absolute path of the file/directory to scan.

    Returns:
        dict: ``{filename: ("FOUND", "virus type")}`` or an empty dict.

    Raises:
        AssertionError: When the file doesn't exist.
    """
    path = os.path.abspath(path)
    # Explicit raise instead of `assert` so the check survives `python -O`,
    # while keeping the AssertionError type that callers are documented
    # to expect.
    if not os.path.exists(path):
        raise AssertionError("Unreachable file '%s'." % path)
    # clamscan exits with 1 when a virus is found; treat that as success
    # (the finding is reported in the parsed output, not as an error).
    result = sh.clamscan(path, no_summary=True, infected=True, _ok_code=[0, 1])
    return _parse_result(result)
Scan `path` for viruses using ``clamscan`` program. Args: path (str): Relative or absolute path of file/directory you need to scan. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict. Raises: AssertionError: When the internal file doesn't exists.
codesearchnet
def ParseBookmarkAnnotationRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a bookmark annotation row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)

    event_data = FirefoxPlacesBookmarkAnnotationEventData()
    event_data.content = self._GetRowValue(query_hash, row, 'content')
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.title = self._GetRowValue(query_hash, row, 'title')
    event_data.url = self._GetRowValue(query_hash, row, 'url')

    # Both timestamp columns produce the same event shape; only the column
    # name and the event's time description differ.
    timestamp_columns = (
        ('dateAdded', definitions.TIME_DESCRIPTION_ADDED),
        ('lastModified', definitions.TIME_DESCRIPTION_MODIFICATION))
    for column_name, time_description in timestamp_columns:
        timestamp = self._GetRowValue(query_hash, row, column_name)
        if timestamp:
            date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
            event = time_events.DateTimeValuesEvent(date_time, time_description)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a bookmark annotation row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
def lstat(self, entry_path, dir_fd=None):
    """Return the os.stat-like result for `entry_path` without following symlinks.

    Args:
        entry_path: Path of the filesystem object to inspect.
        dir_fd: If not `None`, the file descriptor of a directory that
            `entry_path` is relative to (Python 3.3+ semantics).

    Returns:
        The FakeStatResult object for `entry_path` itself (not its target).

    Raises:
        OSError: if the filesystem object doesn't exist.
    """
    resolved_path = self._path_with_dir_fd(entry_path, self.lstat, dir_fd)
    return self.filesystem.stat(resolved_path, follow_symlinks=False)
Return the os.stat-like tuple for entry_path, not following symlinks. Args: entry_path: path to filesystem object to retrieve. dir_fd: If not `None`, the file descriptor of a directory, with `entry_path` being relative to this directory. New in Python 3.3. Returns: the FakeStatResult object corresponding to `entry_path`. Raises: OSError: if the filesystem object doesn't exist.
codesearchnet
def write_config(config, filename=None):
    """Write the provided configuration to a specific location.

    Args:
        config (dict): a dictionary with the configuration to write.
        filename (str): name of the file that will store the new
            configuration. Defaults to ``None``; when falsy, the
            user's ``~/.bigchaindb`` default path is used.
    """
    target = filename or CONFIG_DEFAULT_PATH
    with open(target, 'w') as handle:
        json.dump(config, handle, indent=4)
Write the provided configuration to a specific location. Args: config (dict): a dictionary with the configuration to load. filename (str): the name of the file that will store the new configuration. Defaults to ``None``. If ``None``, the HOME of the current user and the string ``.bigchaindb`` will be used.
juraj-google-style
def value_text(tensor, is_repr=False) -> AnyStr:
    """Either the NumPy value or a custom TensorFlow formatting of `tensor`.

    Custom formatting is used for custom device tensors, e.g. parallel
    tensors with multiple components on different devices.

    Args:
        tensor: The tensor to format.
        is_repr: Controls the style/verbosity of formatting; when True the
            result is prefixed with its origin ("value=" or "numpy=").

    Returns:
        The formatted tensor.
    """
    if tensor._prefer_custom_summarizer():
        prefix = 'value='
        text = tensor._summarize_value()
    else:
        prefix = 'numpy='
        text = numpy_text(tensor, is_repr=is_repr)
    if is_repr:
        text = prefix + text
    return text
Either the NumPy value or a custom TensorFlow formatting of `tensor`. Custom formatting is used for custom device tensors, e.g. parallel tensors with multiple components on different devices. Args: tensor: The tensor to format. is_repr: Controls the style/verbosity of formatting. Returns: The formatted tensor.
github-repos
def set_fore(self, x: int, y: int, r: int, g: int, b: int, char: str) -> None:
    """Set the character and foreground color of one cell.

    Args:
        x (int): X position to change.
        y (int): Y position to change.
        r (int): Red foreground color, from 0 to 255.
        g (int): Green foreground color, from 0 to 255.
        b (int): Blue foreground color, from 0 to 255.
        char (AnyStr): A single character str or bytes object.
    """
    # Cells are stored row-major in flat parallel arrays.
    index = y * self.width + x
    self.fore_r[index] = r
    self.fore_g[index] = g
    self.fore_b[index] = b
    self.char[index] = ord(char)
Set the character and foreground color of one cell. Args: x (int): X position to change. y (int): Y position to change. r (int): Red foreground color, from 0 to 255. g (int): Green foreground color, from 0 to 255. b (int): Blue foreground color, from 0 to 255. char (AnyStr): A single character str or bytes object.
codesearchnet
def lowercase_term_id(term_id: str) -> str:
    """Lowercase the term value while leaving the namespace prefix intact.

    Args:
        term_id (str): term identifier with namespace prefix,
            e.g. MESH:Atherosclerosis

    Returns:
        str: lowercased value, e.g. MESH:atherosclerosis
    """
    namespace, value = term_id.split(":", maxsplit=1)
    return ":".join((namespace, value.lower()))
Lowercase the term value (not the namespace prefix) Args: term_id (str): term identifier with namespace prefix, e.g. MESH:Atherosclerosis Returns: str: lowercased, e.g. MESH:atherosclerosis
juraj-google-style
def then_by(self, key_selector=identity):
    """Add a subsequent ascending ordering with an optional key.

    Note: This method uses deferred execution.

    Args:
        key_selector: A unary function mapping an element to its sort key.

    Returns:
        An OrderedQueryable over the sorted items.

    Raises:
        ValueError: If the OrderedQueryable is closed().
        TypeError: If key_selector is not callable.
    """
    if self.closed():
        raise ValueError('Attempt to call then_by() on a closed OrderedQueryable.')
    if not is_callable(key_selector):
        raise TypeError('then_by() parameter key_selector={key_selector} is not callable'.format(key_selector=repr(key_selector)))
    # NOTE(review): -1 is the direction sentinel consumed by the class's
    # sort machinery (ascending here, per the documented contract).
    self._funcs.append((-1, key_selector))
    return self
Introduce subsequent ordering to the sequence with an optional key. The returned sequence will be sorted in ascending order by the selected key. Note: This method uses deferred execution. Args: key_selector: A unary function the only positional argument to which is the element value from which the key will be selected. The return value should be the key from that element. Returns: An OrderedQueryable over the sorted items. Raises: ValueError: If the OrderedQueryable is closed(). TypeError: If key_selector is not callable.
codesearchnet
def verify(token, key, algorithms, verify=True):
    """Verify a JWS string's signature and return its payload.

    Args:
        token (str): A signed JWS to be verified.
        key (str or dict): Key to verify the payload with; can be an
            individual JWK or a JWK set.
        algorithms (str or list): Valid algorithms for verification.
        verify (bool): When False, skip the signature check and only
            return the decoded payload.

    Returns:
        str: The str representation of the payload, assuming the
        signature is valid.

    Raises:
        JWSError: If there is an exception verifying the token.
    """
    parsed_header, claims, signing_input, signature = _load(token)
    if verify:
        _verify_signature(signing_input, parsed_header, signature, key, algorithms)
    return claims
Verifies a JWS string's signature. Args: token (str): A signed JWS to be verified. key (str or dict): A key to attempt to verify the payload with. Can be individual JWK or JWK set. algorithms (str or list): Valid algorithms that should be used to verify the JWS. Returns: str: The str representation of the payload, assuming the signature is valid. Raises: JWSError: If there is an exception verifying a token. Examples: >>> token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8' >>> jws.verify(token, 'secret', algorithms='HS256')
codesearchnet
def _prompt_split_image(self, aspect_ratio, num_patches_per_chunk): img_string = '<|image_start|>' ratio_h, ratio_w = aspect_ratio if ratio_h * ratio_w > 1: for yy in range(ratio_h): for xx in range(ratio_w): img_string += '<|patch|>' * num_patches_per_chunk if xx < ratio_w - 1: img_string += '<|tile_x_separator|>' img_string += '<|tile_y_separator|>' img_string += '<|image|>' img_string += '<|patch|>' * num_patches_per_chunk img_string += '<|image_end|>' return img_string
Create a structured string representation of image tokens Args: aspect_ratio: Tuple (ratio_h, ratio_w) giving the tile grid layout num_patches_per_chunk: Number of patch tokens per tile/chunk Returns: String with appropriate image tokens
github-repos
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):
    """Change some fields of a dataset.

    Use ``fields`` to specify which fields to update. At least one field
    must be provided. If a field is listed in ``fields`` and is ``None``
    in ``dataset``, it will be deleted.

    If ``dataset.etag`` is not ``None``, the update will only succeed if
    the dataset on the server has the same ETag. Thus reading a dataset
    with ``get_dataset``, changing its fields, and then passing it to
    ``update_dataset`` will ensure that the changes will only be saved if
    no modifications to the dataset occurred since the read.

    Args:
        dataset (google.cloud.bigquery.dataset.Dataset): The dataset
            to update.
        fields (Sequence[str]): The properties of ``dataset`` to change
            (e.g. "friendly_name").
        retry (google.api_core.retry.Retry, optional): How to retry
            the RPC.

    Returns:
        google.cloud.bigquery.dataset.Dataset: The modified ``Dataset``
        instance.
    """
    partial = dataset._build_resource(fields)
    # Send the ETag (when known) so the server rejects the update if the
    # dataset changed since it was read — optimistic concurrency control.
    headers = {'If-Match': dataset.etag} if dataset.etag is not None else None
    api_response = self._call_api(
        retry, method='PATCH', path=dataset.path, data=partial, headers=headers)
    return Dataset.from_api_repr(api_response)
Change some fields of a dataset. Use ``fields`` to specify which fields to update. At least one field must be provided. If a field is listed in ``fields`` and is ``None`` in ``dataset``, it will be deleted. If ``dataset.etag`` is not ``None``, the update will only succeed if the dataset on the server has the same ETag. Thus reading a dataset with ``get_dataset``, changing its fields, and then passing it to ``update_dataset`` will ensure that the changes will only be saved if no modifications to the dataset occurred since the read. Args: dataset (google.cloud.bigquery.dataset.Dataset): The dataset to update. fields (Sequence[str]): The properties of ``dataset`` to change (e.g. "friendly_name"). retry (google.api_core.retry.Retry, optional): How to retry the RPC. Returns: google.cloud.bigquery.dataset.Dataset: The modified ``Dataset`` instance.
codesearchnet
async def _send(self, request_bytes, body_bytes, h11_connection):
    """Serialize the request head, optional body, and end-of-message,
    then transmit them over the socket.

    Args:
        request_bytes: The request-head object passed to h11 for
            serialization. NOTE(review): despite the `_bytes` names,
            these presumably are h11 event objects — confirm with caller.
        body_bytes: The body object, or None when the request has no body.
        h11_connection: h11 connection used to serialize each event.
    """
    send_all = self.sock.send_all
    await send_all(h11_connection.send(request_bytes))
    if body_bytes is not None:
        await send_all(h11_connection.send(body_bytes))
    # Always terminate the message so the peer knows the request is done.
    await send_all(h11_connection.send(h11.EndOfMessage()))
Takes a request head and body, combines them, then sends them off into the ether. Args: request_bytes: The serialized request head. body_bytes: The request body, or None when there is no body.
juraj-google-style
def set_secondary_ips(self, name, vrid, secondary_ips, run=True):
    """Configure the secondary_ip property of the vrrp.

    Notes:
        set_secondary_ips takes a list of secondary ip addresses which are
        to be set on the virtual router. An empty list will remove any
        existing secondary ip addresses from the vrrp. A list containing
        addresses will configure the virtual router with only the addresses
        specified in the list - any existing addresses not included in the
        list will be removed.

    Args:
        name (string): The interface to configure.
        vrid (integer): The vrid number for the vrrp to be managed.
        secondary_ips (list): A list of secondary ip addresses to be
            assigned to the virtual router.
        run (boolean): Set to True to execute the command, False to
            return a string with the formatted command.

    Returns:
        If run is True, returns True if the command executed successfully,
        error if failure. If run is False, returns the formatted command
        string which can be passed to the node.

    Raises:
        ValueError: If any entry in secondary_ips is not a dotted-quad
            ip address string.
    """
    cmds = []
    curr_sec_ips = []
    vrrps = self.get(name)
    if vrrps and vrid in vrrps:
        curr_sec_ips = vrrps[vrid]['secondary_ip']

    # Validate every address up front (compile the pattern once instead of
    # per-iteration) so a bad entry aborts before any commands are built.
    ip_format = re.compile(r'^\d+\.\d+\.\d+\.\d+$')
    for sec_ip in secondary_ips:
        if not isinstance(sec_ip, str) or not ip_format.match(sec_ip):
            raise ValueError("vrrp property 'secondary_ip' must be a "
                             "list of properly formatted ip address strings")

    # Diff the desired set against the current set: addresses in both are
    # kept, current-only addresses are removed, desired-only are added.
    intersection = list(set(curr_sec_ips) & set(secondary_ips))
    remove = list(set(curr_sec_ips) - set(intersection))
    add = list(set(secondary_ips) - set(intersection))

    for sec_ip in remove:
        cmds.append('no vrrp %d ip %s secondary' % (vrid, sec_ip))
    for sec_ip in add:
        cmds.append('vrrp %d ip %s secondary' % (vrid, sec_ip))
    # Sort for deterministic command ordering (set iteration is unordered).
    cmds = sorted(cmds)

    if run:
        result = self.configure_interface(name, cmds)
        if result is False:
            return self.error
        return result
    return cmds
Configure the secondary_ip property of the vrrp Notes: set_secondary_ips takes a list of secondary ip addresses which are to be set on the virtual router. An empty list will remove any existing secondary ip addresses from the vrrp. A list containing addresses will configure the virtual router with only the addresses specified in the list - any existing addresses not included in the list will be removed. Args: name (string): The interface to configure. vrid (integer): The vrid number for the vrrp to be managed. secondary_ips (list): A list of secondary ip addresses to be assigned to the virtual router. run (boolean): Set to True to execute the command, False to return a string with the formatted command. Returns: If run is True, returns True if the command executed successfully, error if failure. If run is False, returns the formatted command string which can be passed to the node
codesearchnet
def _compute_edges(self):
    """Compute the three boundary curves of the current surface.

    Returns:
        Tuple[~curve.Curve, ~curve.Curve, ~curve.Curve]: The edges of
        the surface.
    """
    degree = self._degree
    edge_nodes = _surface_helpers.compute_edge_nodes(self._nodes, degree)
    # _copy=False: the freshly computed node arrays are not shared, so the
    # curves may take ownership without copying.
    return tuple(
        _curve_mod.Curve(nodes, degree, _copy=False) for nodes in edge_nodes)
Compute the edges of the current surface. Returns: Tuple[~curve.Curve, ~curve.Curve, ~curve.Curve]: The edges of the surface.
codesearchnet
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
    """Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.

    Instead of specifying noise_shape, this function takes broadcast_dims -
    a list of dimension numbers in which noise_shape should be 1. The random
    keep/drop tensor has dimensionality 1 along these dimensions.

    Args:
        x: a floating point tensor.
        keep_prob: A scalar Tensor with the same type as x. The probability
            that each element is kept.
        broadcast_dims: an optional list of integers - the dimensions along
            which to broadcast the keep/drop flags.
        **kwargs: keyword arguments to tf.nn.dropout other than
            "noise_shape".

    Returns:
        Tensor of the same shape as x.
    """
    assert 'noise_shape' not in kwargs
    if broadcast_dims:
        shape = tf.shape(x)
        ndims = len(x.get_shape())
        # Resolve negative axis indices relative to the tensor's rank.
        axes = [dim + ndims if dim < 0 else dim for dim in broadcast_dims]
        # Noise shape is 1 on broadcast axes (mask shared across that axis)
        # and the full dynamic size elsewhere.
        kwargs['noise_shape'] = [
            1 if axis in axes else shape[axis] for axis in range(ndims)]
    return tf.nn.dropout(x, keep_prob, **kwargs)
Like tf.nn.dropout but takes broadcast_dims instead of noise_shape. Instead of specifying noise_shape, this function takes broadcast_dims - a list of dimension numbers in which noise_shape should be 1. The random keep/drop tensor has dimensionality 1 along these dimensions. Args: x: a floating point tensor. keep_prob: A scalar Tensor with the same type as x. The probability that each element is kept. broadcast_dims: an optional list of integers the dimensions along which to broadcast the keep/drop flags. **kwargs: keyword arguments to tf.nn.dropout other than "noise_shape". Returns: Tensor of the same shape as x.
codesearchnet