code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __init__(self, feature_set='spe+', feature_model='strict'):
    """Construct a Sonority object.

    Args:
        feature_set (str): feature set to be used by ``FeatureTable``.
        feature_model (str): 'strict' or 'permissive' feature model.
    """
    model_classes = {
        'strict': _panphon.FeatureTable,
        'permissive': permissive.PermissiveFeatureTable,
    }
    self.fm = model_classes[feature_model](feature_set=feature_set)
Construct a Sonority object Args: feature_set (str): features set to be used by `FeatureTable` feature_model (str): 'strict' or 'permissive' feature model
juraj-google-style
def scale_out(self, blocks=1):
    """Scale out by launching ``blocks`` new blocks via the execution provider.

    Args:
        blocks (int): number of blocks to provision. Defaults to 1.

    Returns:
        list: external ids of the launched blocks, or ``None`` when no
        execution provider is available (and ``blocks`` > 0).

    Raises:
        ScalingFailed: when the provider fails to provision a block.
    """
    launched = []
    for _ in range(blocks):
        if not self.provider:
            logger.error('No execution provider available')
            launched = None
            continue
        # External block ids are sequential, based on current block count.
        block_id = str(len(self.blocks))
        cmd = self.launch_cmd.format(block_id=block_id)
        internal_block = self.provider.submit(cmd, 1, 1)
        logger.debug('Launched block {}->{}'.format(block_id, internal_block))
        if not internal_block:
            raise ScalingFailed(self.provider.label, 'Attempts to provision nodes via provider has failed')
        launched.append(block_id)
        self.blocks[block_id] = internal_block
    return launched
Scales out the number of blocks by "blocks" Returns: List of launched block ids, or None if no execution provider is available Raises: ScalingFailed: if the provider fails to provision a block
codesearchnet
def __init__(self, script=None):
    """Create an instance.

    Args:
        script (bytes): (Optional)
    """
    # Parameter list: 0x07 (string), 0x10 (array) — NEP-5 contract signature.
    params = bytearray(b'\x07\x10')
    super(NEP5Token, self).__init__(script=script, param_list=params)
Create an instance. Args: script (bytes): (Optional)
juraj-google-style
def set_vector_catch(self, flags):
    """Sets vector catch bits of the processor.

    The CPU will jump to a vector if the given vector catch is active, and
    will enter a debug state; the CPU must be explicitly restarted.

    Args:
        flags: the vector catch bits to set.

    Returns:
        ``None``

    Raises:
        JLinkException: on error (negative DLL return code).
    """
    status = self._dll.JLINKARM_WriteVectorCatch(flags)
    if status < 0:
        raise errors.JLinkException(status)
    return None
Sets vector catch bits of the processor. The CPU will jump to a vector if the given vector catch is active, and will enter a debug state. This has the effect of halting the CPU as well, meaning the CPU must be explicitly restarted. Args: self (JLink): the ``JLink`` instance Returns: ``None`` Raises: JLinkException: on error.
codesearchnet
def get_time(self, force_uptime=False):
    """Get the current UTC time or uptime.

    Returns UTC time if available, otherwise uptime. With
    ``force_uptime=True`` always returns uptime.

    Args:
        force_uptime (bool): Always return uptime, defaults to False.

    Returns:
        int: The current uptime or encoded utc time (bit 31 set when UTC).
    """
    if force_uptime:
        return self.uptime
    encoded = self.uptime + self.time_offset
    # Bit 31 flags the value as UTC-encoded rather than raw uptime.
    return encoded | (1 << 31) if self.is_utc else encoded
Get the current UTC time or uptime. By default, this method will return UTC time if possible and fall back to uptime if not. If you specify, force_uptime=True, it will always return uptime even if utc time is available. Args: force_uptime (bool): Always return uptime, defaults to False. Returns: int: The current uptime or encoded utc time.
juraj-google-style
def Matches(self, file_entry):
    """Compares the file entry against the filter collection.

    Args:
        file_entry (dfvfs.FileEntry): file entry to compare.

    Returns:
        bool: True if the file entry matches one of the filters. If no
            filters are provided or applicable the result will be True.
    """
    if not self._filters:
        return True

    # The original expression "True in results or False not in results"
    # reduces to any(results): the second clause is only true when every
    # filter matched, which the first clause already covers.
    return any(
        file_entry_filter.Matches(file_entry)
        for file_entry_filter in self._filters)
Compares the file entry against the filter collection. Args: file_entry (dfvfs.FileEntry): file entry to compare. Returns: bool: True if the file entry matches one of the filters. If no filters are provided or applicable the result will be True.
juraj-google-style
def write_file(self, filename, distance=6, velocity=8, charge=3):
    """Writes LammpsData to file.

    Args:
        filename (str): Filename.
        distance (int): No. of significant figures for box settings and
            atomic coordinates. Default to 6.
        velocity (int): No. of significant figures for velocities.
            Default to 8.
        charge (int): No. of significant figures for charges. Default to 3.
    """
    body = self.get_string(distance=distance, velocity=velocity, charge=charge)
    with open(filename, "w") as handle:
        handle.write(body)
Writes LammpsData to file. Args: filename (str): Filename. distance (int): No. of significant figures to output for box settings (bounds and tilt) and atomic coordinates. Default to 6. velocity (int): No. of significant figures to output for velocities. Default to 8. charge (int): No. of significant figures to output for charges. Default to 3.
juraj-google-style
def fork(self, command: Command) -> Tuple[('SelectedMailbox', Iterable[Response])]:
    """Compares the state of the current object to that of the last fork,
    returning the untagged responses that reflect any changes. A new copy
    of the object is also returned, ready for the next command.

    Args:
        command: The command that was finished.
    """
    # Snapshot the current state; it becomes the baseline for the copy.
    frozen = _Frozen(self)
    cls = type(self)
    copy = cls(self._guid, self._readonly, self._permanent_flags,
               self._session_flags, self._selected_set, self._lookup,
               _mod_sequence=self._mod_sequence, _prev=frozen,
               _messages=self._messages)
    if (self._prev is not None):
        # UID commands change how message changes are reported in the diff.
        with_uid: bool = getattr(command, 'uid', False)
        untagged = self._compare(self._prev, frozen, with_uid)
    else:
        # First fork: nothing to diff against yet.
        untagged = []
    return (copy, untagged)
Compares the state of the current object to that of the last fork, returning the untagged responses that reflect any changes. A new copy of the object is also returned, ready for the next command. Args: command: The command that was finished.
codesearchnet
def __init__(self, actions=None):
    """Create a InstructionClearAction with the optional parameters below.

    Args:
        actions (:class:`~.actions.ListOfActions`): Actions associated with
            OFPIT_CLEAR_ACTIONS.
    """
    super().__init__(InstructionType.OFPIT_CLEAR_ACTIONS)
    # Falsy (None or empty) actions default to a fresh empty list.
    self.actions = [] if not actions else actions
Create a InstructionClearAction with the optional parameters below. Args: actions (:class:`~.actions.ListOfActions`): Actions associated with OFPIT_CLEAR_ACTIONS.
juraj-google-style
def __setKeySwitchGuardTime(self, iKeySwitchGuardTime):
    """Set the key switch guard time on the device.

    Args:
        iKeySwitchGuardTime (int): key switch guard time.

    Returns:
        True: successful to set key switch guard time.
        False: fail to set key switch guard time.
        None: implicitly, when the command raises (error is only logged).
    """
    # NOTE: Python 2 syntax (print statement, "except Exception, e").
    print '%s call setKeySwitchGuardTime' % self.port
    print iKeySwitchGuardTime
    try:
        cmd = 'keysequence guardtime %s' % str(iKeySwitchGuardTime)
        if self.__sendCommand(cmd)[0] == 'Done':
            # Give the device a moment to apply the new guard time.
            time.sleep(1)
            return True
        else:
            return False
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("setKeySwitchGuardTime() Error; " + str(e))
set the Key switch guard time Args: iKeySwitchGuardTime: key switch guard time Returns: True: successful to set key switch guard time False: fail to set key switch guard time
juraj-google-style
def add_path(self, path, path_filter=None):
    """Add all files from the given path to the object.

    Walks ``path`` recursively and stores each file's content
    base64-encoded in ``self.files``, keyed by the path relative to
    ``path``.

    Args:
        path (str): valid, existing directory.
        path_filter (callable): optional predicate taking the absolute
            filename; only files for which it returns a truthy value are
            added.
    """
    for root, _, files in os.walk(path):
        for filename in files:
            full_path_and_filename = os.path.join(root, filename)
            if path_filter is None or path_filter(full_path_and_filename):
                # os.path.relpath fixes two defects of the previous
                # str.replace approach: replace() stripped *every*
                # occurrence of the prefix, and it hard-coded '/' as the
                # path separator.
                relative_path_and_filename = os.path.relpath(
                    full_path_and_filename, path)
                with open(full_path_and_filename, 'rb') as handle:
                    self.files[relative_path_and_filename] = b64encode(
                        handle.read()).decode('utf-8')
Adding all files from given path to the object. Args: path (str): valid, existing directory
codesearchnet
def __init__(self, host, port, rhash):
    """Initialize the Class properties.

    Args:
        host (string): The Redis host.
        port (string): The Redis port.
        rhash (string): The rhash value.
    """
    self.r = redis.StrictRedis(host=host, port=port)
    self.hash = rhash
Initialize the Class properties. Args: host (string): The Redis host. port (string): The Redis port. rhash (string): The rhash value.
juraj-google-style
def to_numpy_array(self):
    """Convert a TensorHandle object to a feedable numpy value.

    Returns:
        A numpy array of a custom struct type that can be used as a feed
        value to run().
    """
    handle = self._get_resource_handle()
    return encode_resource_handle(handle)
Convert a TensorHandle object to a feedable numpy value. Returns: A numpy array of a custom struct type that can be used as a feed value to run().
github-repos
def load_file(self, path, objtype=None, encoding='utf-8'):
    """Load and deserialize the file specified by ``path``.

    Tries the cache first; on a miss, loads from disk and attempts JSON
    then YAML deserialization unless ``objtype`` is ``string_types``.

    Args:
        path (string): full or relative path to the file to be loaded.
        objtype (object): expected type of the deserialized contents;
            used for type checking the result.
        encoding (string): the file contents text encoding.

    Returns:
        object: the deserialized file contents (string or dict).

    Raises:
        ConfigurationError: on load, encode or type-check failure.
    """
    path = self.abspath(path)
    debug(('file path is %s' % path))
    if (path in self._cache):
        # Cache hit: return the previously deserialized contents.
        return self._cache[path]
    try:
        debug(('cache miss, attempting to load file from disk: %s' % path))
        contents = parsed_data = self.get_contents(path)
        if encoding:
            # NOTE(review): this encodes contents to bytes and binds the
            # result to parsed_data, while the deserializers below still
            # parse the *unencoded* ``contents`` — confirm this is the
            # intended validation-only use of encode().
            parsed_data = contents.encode(encoding)
    except ConfigurationError as exc:
        debug(exc)
        raise
    except UnicodeEncodeError:
        raise ConfigurationError('unable to encode file contents')
    if (objtype is not string_types):
        # Try JSON first, then YAML; the first truthy parse wins.
        for deserializer in (self._load_json, self._load_yaml):
            parsed_data = deserializer(contents)
            if parsed_data:
                break
    if (objtype and (not isinstance(parsed_data, objtype))):
        debug(('specified file %s is not of type %s' % (path, objtype)))
        raise ConfigurationError('invalid file serialization type for contents')
    self._cache[path] = parsed_data
    return parsed_data
Load the file specified by path This method will first try to load the file contents from cache and if there is a cache miss, it will load the contents from disk Args: path (string): The full or relative path to the file to be loaded encoding (string): The file contents text encoding objtype (object): The object type of the file contents. This is used to type check the deserialized content against the contents loaded from disk. Ignore serializing if objtype is string_types Returns: object: The deserialized file contents which could be either a string object or a dict object Raises: ConfigurationError:
codesearchnet
def clinvar_export(store, institute_id, case_name, variant_id):
    """Gather the required data for creating the clinvar submission form.

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str): Institute ID
        case_name(str): case ID
        variant_id(str): variant._id

    Returns:
        dict: all the required data (case and variant level) to pre-fill
        fields in the clinvar submission form.
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    # Pinned/suspect variants fall back to their raw id when not in store.
    suspects = case_obj.get('suspects', [])
    pinned = [store.variant(suspect_id) or suspect_id for suspect_id in suspects]
    variant_obj = store.variant(variant_id)
    return dict(
        today=str(date.today()),
        institute=institute_obj,
        case=case_obj,
        variant=variant_obj,
        pinned_vars=pinned,
    )
Gather the required data for creating the clinvar submission form Args: store(scout.adapter.MongoAdapter) institute_id(str): Institute ID case_name(str): case ID variant_id(str): variant._id Returns: a dictionary with all the required data (case and variant level) to pre-fill in fields in the clinvar submission form
juraj-google-style
def List(self, request, global_params=None):
    """List all repositories for a given `BitbucketServerConfig`.

    This API is experimental.

    Args:
        request: (CloudbuildProjectsLocationsBitbucketServerConfigsReposListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
        (ListBitbucketServerRepositoriesResponse) The response message.
    """
    method_config = self.GetMethodConfig('List')
    return self._RunMethod(method_config, request, global_params=global_params)
List all repositories for a given `BitbucketServerConfig`. This API is experimental. Args: request: (CloudbuildProjectsLocationsBitbucketServerConfigsReposListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListBitbucketServerRepositoriesResponse) The response message.
github-repos
def _MakePackagePages(self, package, showprivate=False, nested=False, showinh=False):
    """An internal helper to generate all of the pages for a given package.

    Args:
        package (module): The top-level package to document.
        showprivate (bool): Whether or not to display private members.
        nested (bool): For internal use ONLY (recursive calls).
        showinh (bool): Whether to show inherited members.

    Returns:
        str: The file names ready to be appended to a top-level toctree.
    """
    def checkNoNested(mod):
        # True when ``mod`` declares __all__ and exports no sub-modules,
        # i.e. it is a leaf module rather than a nested package.
        try:
            all = mod.__all__
        except AttributeError:
            return False
        mems = inspect.getmembers(mod, inspect.ismodule)
        mems = [m for m in mems if (m[0] in mod.__all__)]
        if (len(mems) > 0):
            return False
        return True
    mods = inspect.getmembers(package, inspect.ismodule)
    # Partition members: plain modules, private modules, nested packages.
    (nmods, pvt, npkgs) = ([], [], [])
    for mod in mods:
        if checkNoNested(mod[1]):
            if (mod[0][0] == '_'):
                pvt.append(mod)
            else:
                nmods.append(mod)
        else:
            npkgs.append(mod)
    if showprivate:
        nmods += pvt
    files = []
    ignore = []
    for pkg in npkgs:
        # Recreate the output directory for each nested package.
        pt = ('%s/%s/%s' % (self.path, package.__name__.replace('.', '/'), pkg[1].__name__.split('.')[(- 1)]))
        if os.path.exists(pt):
            shutil.rmtree(pt)
        os.makedirs(pt)
        # Members documented via the nested package are excluded later.
        ignore += inspect.getmembers(pkg[1])
        f = self._MakePackagePages(pkg[1], showprivate=showprivate, nested=True, showinh=showinh)
        files.append(f.split((package.__name__.replace('.', '/') + '/'))[1])
    if nested:
        # Nested packages get their own index.rst with a toctree.
        try:
            name = package.__displayname__
        except AttributeError:
            name = package.__name__
        index = ('\n%s\n%s\n\n.. toctree::\n :maxdepth: 5\n\n ' % (name, ('*' * len(name))))
        index += '\n '.join(files)
        index += ('\n ' + self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh))
        findex = ('content/%s/index.rst' % package.__name__.replace('.', '/'))
        with open(findex, 'w') as f:
            if package.__doc__:
                f.write(package.__doc__)
            f.write(index)
        return ('\n ' + findex)
    names = ('\n %s/%s/' % (self.path, package.__name__.replace('.', '/')))
    nmods = [m for m in nmods if (m not in ignore)]
    return names.join((self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh).split('\n ') + files))
An internal helper to generate all of the pages for a given package Args: package (module): The top-level package to document showprivate (bool): A flag for whether or not to display private members nested (bool): For internal use ONLY Returns: str: The file names ready to be appended to a top-level toctree
codesearchnet
def get_file_systems(self):
    """Creates a map of mounted filesystems on the machine.

    Prefers /proc/mounts when readable; otherwise falls back to psutil.

    Returns:
        dict: st_dev -> {'device', 'mount_point', 'fs_type'}, or None if
        psutil is needed but unavailable.
    """
    result = {}
    if os.access('/proc/mounts', os.R_OK):
        file = open('/proc/mounts')
        for line in file:
            try:
                # /proc/mounts format: device mount_point fs_type options ...
                mount = line.split()
                device = mount[0]
                mount_point = mount[1]
                fs_type = mount[2]
            except (IndexError, ValueError):
                continue
            if (fs_type not in self.filesystems):
                self.log.debug(('Ignoring %s since it is of type %s ' + ' which is not in the list of filesystems.'), mount_point, fs_type)
                continue
            if self.exclude_reg.search(mount_point):
                self.log.debug(('Ignoring %s since it is in the ' + 'exclude_filter list.'), mount_point)
                continue
            # Only real devices (path-like or tmpfs) mounted at absolute paths.
            if ((('/' in device) or (device == 'tmpfs')) and mount_point.startswith('/')):
                try:
                    stat = os.stat(mount_point)
                except OSError:
                    self.log.debug('Path %s is not mounted - skipping.', mount_point)
                    continue
                # De-duplicate bind mounts sharing the same st_dev.
                if (stat.st_dev in result):
                    continue
                result[stat.st_dev] = {'device': os.path.realpath(device), 'mount_point': mount_point, 'fs_type': fs_type}
        file.close()
    else:
        if (not psutil):
            self.log.error('Unable to import psutil')
            return None
        partitions = psutil.disk_partitions(False)
        for partition in partitions:
            # NOTE: keys here are sequential ints, not st_dev values.
            result[len(result)] = {'device': os.path.realpath(partition.device), 'mount_point': partition.mountpoint, 'fs_type': partition.fstype}
            pass
    return result
Creates a map of mounted filesystems on the machine. iostat(1): Each sector has size of 512 bytes. Returns: st_dev -> FileSystem(device, mount_point)
codesearchnet
def OnCreateAccount(self, account):
    """Save a KeyPair in encrypted form into the database.

    Args:
        account (KeyPair):
    """
    # Uncompressed public key point: 0x04 prefix followed by 64 bytes (X||Y).
    pubkey = account.PublicKey.encode_point(False)
    pubkeyunhex = binascii.unhexlify(pubkey)
    # Strip the 0x04 prefix byte.
    pub = pubkeyunhex[1:65]
    priv = bytearray(account.PrivateKey)
    # Plaintext blob is public key material concatenated with private key.
    decrypted = (pub + priv)
    encrypted_pk = self.EncryptPrivateKey(bytes(decrypted))
    (db_account, created) = Account.get_or_create(PrivateKeyEncrypted=encrypted_pk, PublicKeyHash=account.PublicKeyHash.ToBytes())
    db_account.save()
    self.__dbaccount = db_account
Save a KeyPair in encrypted form into the database. Args: account (KeyPair):
codesearchnet
def update_status(self, progress):
    """Waits for a signal emitted from a thread and updates the gui.

    Args:
        progress: progress percentage to display.

    Returns:
        None
    """
    # Throttle GUI refreshes to at most once per update_interval seconds.
    update_interval = 0.2
    now = datetime.datetime.now()
    if not self._last_progress_update is None and now-self._last_progress_update < datetime.timedelta(seconds=update_interval):
        return
    self._last_progress_update = now
    self.progressBar.setValue(progress)
    script = self.current_script
    if progress:
        remaining_time = str(datetime.timedelta(seconds=script.remaining_time.seconds))
        self.lbl_time_estimate.setText('time remaining: {:s}'.format(remaining_time))
    # NOTE(review): comparison chaining makes this evaluate as
    # ``(script is not tab_text) and (tab_text in ['scripts', 'instruments'])``
    # — i.e. it compares the script object with the tab label string,
    # which is always "is not". Likely intended to test the current tab
    # name only; confirm before changing.
    if script is not str(self.tabWidget.tabText(self.tabWidget.currentIndex())).lower() in ['scripts', 'instruments']:
        self.plot_script(script)
waits for a signal emitted from a thread and updates the gui Args: progress: Returns:
juraj-google-style
def end_offsets(self, partitions):
    """Get the last offset for the given partitions.

    The last offset of a partition is the offset of the upcoming message,
    i.e. the offset of the last available message + 1. Does not change the
    current consumer position. May block if the partition does not exist.

    Arguments:
        partitions (list): List of TopicPartition instances.

    Returns:
        ``{TopicPartition: int}``: The end offsets for the given partitions.

    Raises:
        UnsupportedVersionError: broker does not support the lookup.
        KafkaTimeoutError: fetch failed in request_timeout_ms.
    """
    timeout_ms = self.config['request_timeout_ms']
    return self._fetcher.end_offsets(partitions, timeout_ms)
Get the last offset for the given partitions. The last offset of a partition is the offset of the upcoming message, i.e. the offset of the last available message + 1. This method does not change the current consumer position of the partitions. Note: This method may block indefinitely if the partition does not exist. Arguments: partitions (list): List of TopicPartition instances to fetch offsets for. Returns: ``{TopicPartition: int}``: The end offsets for the given partitions. Raises: UnsupportedVersionError: If the broker does not support looking up the offsets by timestamp. KafkaTimeoutError: If fetch failed in request_timeout_ms
codesearchnet
def unique_fn_name(scope, name):
    """Returns a unique name to use for a control flow function.

    Args:
        scope: A name scope string.
        name: An identifier for this function (e.g. "true", "body").

    Returns:
        A string, the name to use for the function.
    """
    raw = '%s%s_%s' % (scope, name, ops.uid())
    # Slashes are not valid in function names.
    return raw.replace('/', '_')
Returns a unique name to use for a control flow function. Args: scope: A name scope string. name: An identifier for this function (e.g. "true", "body"). Returns: A string, the name to use for the function.
github-repos
def get_history_by_tail_number(self, tail_number, page=1, limit=100):
    """Fetch the history of a particular aircraft by its tail number.

    Args:
        tail_number (str): The tail number, e.g. VT-ANL.
        page (int): Optional page number (plan-dependent).
        limit (int): Optional limit on number of records returned.

    Returns:
        A list of dicts with the data; one dict per flightradar24 row.
    """
    token = str(self.AUTH_TOKEN)
    request_url = REG_BASE.format(tail_number, token, page, limit)
    return self._fr24.get_data(request_url, True)
Fetch the history of a particular aircraft by its tail number. This method can be used to get the history of a particular aircraft by its tail number. It checks the user authentication and returns the data accordingly. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_history_by_tail_number('VT-ANL') f.get_history_by_tail_number('VT-ANL',page=1,limit=10)
codesearchnet
def __rmul__(self, other): return self * other
Returns the product of `self` and `other`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the product of `self` and `other`.
github-repos
def parse_datetime(value):
    """Attempts to parse `value` into an instance of ``datetime.datetime``.

    If `value` is falsy (e.g. ``None``), this function returns ``None``.
    ``datetime.datetime`` values pass through unchanged.

    Args:
        value: A timestamp. This can be a string or datetime.datetime value.
    """
    if not value:
        return None
    if isinstance(value, datetime.datetime):
        return value
    return dateutil.parser.parse(value)
Attempts to parse `value` into an instance of ``datetime.datetime``. If `value` is ``None``, this function will return ``None``. Args: value: A timestamp. This can be a string or datetime.datetime value.
codesearchnet
def _WritesString(self, content): content_bytes = codecs.encode(content, 'utf-8') self._sample_file.write(content_bytes)
Writes a string to the sample file. Args: content (str): content to write to the sample file.
juraj-google-style
def _ReadString(self, file_object, file_offset, data_type_map, description):
    """Reads a string.

    Args:
        file_object (FileIO): file-like object.
        file_offset (int): offset of the data relative from the start of
            the file-like object.
        data_type_map (dtfabric.DataTypeMap): data type map of the string.
        description (str): description of the string.

    Returns:
        object: structure values object.

    Raises:
        FileFormatError: if the string cannot be read.
        ValueError: if file-like object or date type map are invalid.
    """
    # Size of one string element (e.g. 1 byte for UTF-8, 2 for UTF-16).
    element_data_size = data_type_map._element_data_type_definition.GetByteSize()
    elements_terminator = data_type_map._data_type_definition.elements_terminator
    byte_stream = []
    element_data = file_object.read(element_data_size)
    byte_stream.append(element_data)
    # Read elements until the terminator, or EOF (read() returns b'').
    while (element_data and (element_data != elements_terminator)):
        element_data = file_object.read(element_data_size)
        byte_stream.append(element_data)
    byte_stream = b''.join(byte_stream)
    return self._ReadStructureFromByteStream(byte_stream, file_offset, data_type_map, description)
Reads a string. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_type_map (dtfabric.DataTypeMap): data type map of the string. description (str): description of the string. Returns: object: structure values object. Raises: FileFormatError: if the string cannot be read. ValueError: if file-like object or date type map are invalid.
codesearchnet
def get_nested_group_users(self, groupname):
    """Retrieves a list of all users that directly or indirectly belong to
    the given groupname.

    Args:
        groupname: The group name.

    Returns:
        list: A list of strings of user names, or None on request failure.
    """
    params = {"groupname": groupname, "start-index": 0, "max-results": 99999}
    response = self._get(self.rest_url + "/group/user/nested", params=params)
    if not response.ok:
        return None
    return [member['name'] for member in response.json()['users']]
Retrieves a list of all users that directly or indirectly belong to the given groupname. Args: groupname: The group name. Returns: list: A list of strings of user names.
juraj-google-style
def __contains__(self, item):
    """Check if item is in the space. item can be either a cells or space.

    Args:
        item: a cells or space (or its name as a string) to check.

    Returns:
        True if item is a direct child of the space, False otherwise.
    """
    if isinstance(item, str):
        # Strings are looked up by name in the space's namespace.
        return item in self._impl.namespace
    elif isinstance(item, Cells):
        return item._impl in self._impl.cells.values()
    elif isinstance(item, StaticSpace):
        return item._impl in self._impl.spaces.values()
    else:
        # Any other type is never contained.
        return False
Check if item is in the space. item can be either a cells or space. Args: item: a cells or space to check. Returns: True if item is a direct child of the space, False otherwise.
juraj-google-style
def make_predict_function(self):
    """Creates a function that executes one step of inference.

    This method can be overridden to support custom inference logic. It is
    called by `Model.predict` and `Model.predict_on_batch`, controls
    `tf.function` / `tf.distribute.Strategy` settings, and delegates the
    actual evaluation logic to `Model.predict_step`. The result is cached
    until `Model.compile` clears it.

    Returns:
        Function accepting a `tf.data.Iterator` and returning the outputs
        of the `Model`.
    """
    if self.predict_function is not None:
        # Reuse the cached function; the cache is cleared by compile().
        return self.predict_function

    def step_function(model, iterator):
        """Runs a single inference step on one batch from the iterator."""

        def run_step(data):
            outputs = model.predict_step(data)
            # Ensure the step counter increments only after the outputs
            # have been computed.
            with ops.control_dependencies(_minimum_control_deps(outputs)):
                model._predict_counter.assign_add(1)
            return outputs
        data = next(iterator)
        outputs = model.distribute_strategy.run(run_step, args=(data,))
        # Concatenate per-replica results into a single structure.
        outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='concat')
        return outputs
    if self._steps_per_execution is None or self._steps_per_execution.numpy().item() == 1:

        def predict_function(iterator):
            """Runs one inference step per call."""
            return step_function(self, iterator)
    else:

        def predict_function(iterator):
            """Runs multiple inference steps per call, concatenating outputs."""
            outputs = step_function(self, iterator)
            for _ in math_ops.range(self._steps_per_execution - 1):
                # Allow concatenated outputs to grow along the batch
                # dimension across loop iterations.
                directives.set_loop_options(shape_invariants=[(t, tf_utils.get_tensor_spec(t, dynamic_batch=True).shape) for t in nest.flatten(outputs)])
                step_outputs = step_function(self, iterator)
                outputs = nest.map_structure(lambda t1, t2: concat([t1, t2]), outputs, step_outputs)
            return outputs
    if not self.run_eagerly:
        # Compile to a tf.function unless running eagerly was requested.
        predict_function = def_function.function(predict_function, experimental_relax_shapes=True)
    self.predict_function = predict_function
    return self.predict_function
Creates a function that executes one step of inference. This method can be overridden to support custom inference logic. This method is called by `Model.predict` and `Model.predict_on_batch`. Typically, this method directly controls `tf.function` and `tf.distribute.Strategy` settings, and delegates the actual evaluation logic to `Model.predict_step`. This function is cached the first time `Model.predict` or `Model.predict_on_batch` is called. The cache is cleared whenever `Model.compile` is called. Returns: Function. The function created by this method should accept a `tf.data.Iterator`, and return the outputs of the `Model`.
github-repos
def ParseFromHumanReadable(self, string):
    """Parse a human readable string of a byte size (e.g. "1.5 GiB").

    Args:
        string: The string to parse.

    Raises:
        DecodeError: If the string can not be parsed.
    """
    if not string:
        return None
    match = self.REGEX.match(string.strip().lower())
    if not match:
        raise DecodeError("Unknown specification for ByteSize %s" % string)
    # Group 2 is the unit suffix; DIVIDERS maps it to a byte multiplier.
    multiplier = self.DIVIDERS.get(match.group(2))
    if not multiplier:
        raise DecodeError("Invalid multiplier %s" % match.group(2))
    value = match.group(1)
    # Accept fractional sizes; result is truncated to an int of bytes.
    if "." in value:
        value = float(value)
    else:
        value = int(value)
    self._value = int(value * multiplier)
Parse a human readable string of a byte string. Args: string: The string to parse. Raises: DecodeError: If the string can not be parsed.
juraj-google-style
def CreatePrecisionHelper(cls, precision):
    """Creates a precision helper.

    Args:
        precision (str): precision of the date and time value, which
            should be one of the PRECISION_VALUES in definitions.

    Returns:
        class: date time precision helper class.

    Raises:
        ValueError: if the precision value is unsupported.
    """
    helper_class = cls._PRECISION_CLASSES.get(precision, None)
    if not helper_class:
        raise ValueError('Unsupported precision: {0!s}'.format(precision))
    return helper_class
Creates a precision helper. Args: precision (str): precision of the date and time value, which should be one of the PRECISION_VALUES in definitions. Returns: class: date time precision helper class. Raises: ValueError: if the precision value is unsupported.
juraj-google-style
def getOutlet(self):
    """Gets the outlet latitude and longitude.

    Returns:
        latitude(float): Latitude of grid cell center.
        longitude(float): Longitude of grid cell center.
    """
    # Card values are 1-based; convert to 0-based pixel indices.
    out_row = int(self.getCard(name='OUTROW').value) - 1
    out_col = int(self.getCard(name='OUTCOL').value) - 1
    return self.getGrid().pixel2lonlat(out_col, out_row)
Gets the outlet latitude and longitude. Returns: latitude(float): Latitude of grid cell center. longitude(float): Longitude of grid cell center.
codesearchnet
def add_team_member(self, account_id=None, email_address=None):
    """Add or invite a user to your Team.

    Args:
        account_id (str): id of the account to invite; prevails when both
            account_id and email_address are provided.
        email_address (str): email address of the account to invite.

    Returns:
        A Team object.
    """
    return self._add_remove_team_member(
        self.TEAM_ADD_MEMBER_URL, email_address, account_id)
Add or invite a user to your Team Args: account_id (str): The id of the account of the user to invite to your team. email_address (str): The email address of the account to invite to your team. The account id prevails if both account_id and email_address are provided. Returns: A Team object
codesearchnet
def convert_alg_to_int(alg):
    """Converts algorithm to an integer.

    Args:
        alg: one of: integer, Algorithm, Tensor, string. Allowed strings
            are "philox" and "threefry".

    Returns:
        An integer, unless the input is a Tensor in which case a Tensor is
        returned.
    """
    if isinstance(alg, int):
        return alg
    if isinstance(alg, Algorithm):
        return alg.value
    if isinstance(alg, tensor.Tensor):
        return alg
    if isinstance(alg, str):
        # Normalize: case-insensitive, ignoring '-' and '_'.
        normalized = alg.strip().lower().replace('-', '').replace('_', '')
        if normalized == 'philox':
            return Algorithm.PHILOX.value
        if normalized == 'threefry':
            return Algorithm.THREEFRY.value
        if normalized == 'autoselect':
            return Algorithm.AUTO_SELECT.value
        raise ValueError(unsupported_alg_error_msg(alg))
    raise TypeError(f"Can't convert argument `alg` (of value {alg} and type {type(alg)}) to int.")
Converts algorithm to an integer. Args: alg: can be one of these types: integer, Algorithm, Tensor, string. Allowed strings are "philox" and "threefry". Returns: An integer, unless the input is a Tensor in which case a Tensor is returned.
github-repos
def _add_qasm_measure(self, qubit, cmembit, cregbit=None):
    """Apply a measure instruction to a qubit.

    Args:
        qubit (int): qubit is the qubit measured.
        cmembit (int): is the classical memory bit to store outcome in.
        cregbit (int, optional): is the classical register bit to store outcome in.
    """
    outcome, probability = self._get_measure_outcome(qubit)
    # Store the measured bit into classical memory at position cmembit.
    membit = 1 << cmembit
    self._classical_memory = (self._classical_memory & (~membit)) | (int(outcome) << cmembit)
    if cregbit is not None:
        regbit = 1 << cregbit
        self._classical_register = \
            (self._classical_register & (~regbit)) | (int(outcome) << cregbit)
    # Collapse the state: project onto the measured outcome and
    # renormalize by 1/sqrt(p(outcome)).
    if outcome == '0':
        update_diag = [[1 / np.sqrt(probability), 0], [0, 0]]
    else:
        update_diag = [[0, 0], [0, 1 / np.sqrt(probability)]]
    self._add_unitary_single(update_diag, qubit)
Apply a measure instruction to a qubit. Args: qubit (int): qubit is the qubit measured. cmembit (int): is the classical memory bit to store outcome in. cregbit (int, optional): is the classical register bit to store outcome in.
juraj-google-style
def __get_object__(binding):
    """Method takes a binding extracts value and returns rdflib entity

    Args:
        binding: binding row
    """
    if isinstance(binding, rdflib.term.Node):
        # Already an rdflib node; return unchanged.
        return binding
    elif isinstance(binding, collections.Iterable):
        # NOTE(review): collections.Iterable moved to collections.abc in
        # Python 3.3 and was removed in 3.10 — confirm supported versions.
        for key, row in binding.items():
            if isinstance(row, (rdflib.URIRef, rdflib.Literal)):
                return row
            elif isinstance(row, dict):
                # SPARQL JSON result style: {'type': ..., 'value': ...}.
                if row.get('type').startswith('uri'):
                    return rdflib.URIRef(row.get('value'))
                return rdflib.Literal(row.get('value'))
            elif isinstance(row, tuple):
                # NOTE(review): debug print left in place; tuples fall
                # through without returning — confirm intent.
                print(row)
            elif isinstance(row, str):
                # Skip typed "literal..." entries and language-tag keys.
                if row.startswith("literal") or "xml:lang" in key:
                    continue
                return rdflib.Literal(row)
    # Implicitly returns None when nothing matches.
Method takes a binding extracts value and returns rdflib entity Args: binding: binding row
juraj-google-style
def deserialize(config, custom_objects=None):
    """Returns a Keras layer object via its configuration.

    Args:
        config: A python dict containing a serialized layer configuration.
        custom_objects: Optional dictionary mapping names (strings) to
            custom objects (classes and functions) to be considered
            during deserialization.

    Returns:
        A Keras layer instance.
    """
    layer = serialization_lib.deserialize_keras_object(config, custom_objects=custom_objects)
    if isinstance(layer, Layer):
        return layer
    raise ValueError(f'`keras.layers.deserialize` was passed a `config` object that is not a `keras.layers.Layer`. Received: {config}')
Returns a Keras layer object via its configuration. Args: config: A python dict containing a serialized layer configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras layer instance.
github-repos
def arctan(x):
    """Trigonometric inverse tangent, element-wise.

    Args:
        x: Input tensor.

    Returns:
        Tensor of the inverse tangent of each element in `x`, in the
        interval `[-pi/2, pi/2]`.
    """
    if not any_symbolic_tensors((x,)):
        return backend.numpy.arctan(x)
    return Arctan().symbolic_call(x)
Trigonometric inverse tangent, element-wise. Args: x: Input tensor. Returns: Tensor of the inverse tangent of each element in `x`, in the interval `[-pi/2, pi/2]`. Example: >>> x = keras.ops.convert_to_tensor([0, 1]) >>> keras.ops.arctan(x) array([0., 0.7853982], dtype=float32)
github-repos
def solveAsync(self, callback):
    """Solve the current model asynchronously.

    Args:
        callback: Callback to be executed when the solver is done.
    """
    def worker():
        # Hold the lock only for the solve; it is released before the
        # callback runs, and before re-raising on failure.
        self._lock.acquire()
        try:
            self._impl.solve()
        finally:
            self._lock.release()
        callback.run()
    Thread(target=worker).start()
Solve the current model asynchronously. Args: callback: Callback to be executed when the solver is done.
codesearchnet
def _copy_deploy_scripts_for_hosts(self, domains):
    """Copy the deploy scripts for all the domains into the prefix scripts dir

    Args:
        domains(dict): spec with the domains info as when loaded from the
            initfile

    Returns:
        dict: the domains spec, with each host's metadata updated to
            reference the copied scripts.
    """
    with LogTask('Copying any deploy scripts'):
        # NOTE: dict.iteritems() is Python 2 only.
        for (host_name, host_spec) in domains.iteritems():
            host_metadata = host_spec.get('metadata', {})
            deploy_scripts = self._get_scripts(host_metadata)
            new_scripts = self._copy_delpoy_scripts(deploy_scripts)
            self._set_scripts(host_metadata=host_metadata, scripts=new_scripts)
        return domains
Copy the deploy scripts for all the domains into the prefix scripts dir Args: domains(dict): spec with the domains info as when loaded from the initfile Returns: dict: the domains spec, with each host's metadata updated to reference the copied scripts
codesearchnet
def Convert(self, metadata, grr_message, token=None):
    """Converts GrrMessage into a set of RDFValues.

    Args:
        metadata: ExportedMetadata to be used for conversion.
        grr_message: GrrMessage to be converted.
        token: Security token.

    Returns:
        List or generator with resulting RDFValues.
    """
    batch = [(metadata, grr_message)]
    return self.BatchConvert(batch, token=token)
Converts GrrMessage into a set of RDFValues. Args: metadata: ExportedMetadata to be used for conversion. grr_message: GrrMessage to be converted. token: Security token. Returns: List or generator with resulting RDFValues.
codesearchnet
def ParseRow(self, parser_mediator, row_offset, row):
    """Parses a line of the log file and produces events.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        row_offset (int): line number of the row.
        row (dict[str, str]): fields of a single row, as specified in
            COLUMNS.
    """
    timestamp = self._ParseTimestamp(parser_mediator, row)
    if timestamp is None:
        # Row without a parsable timestamp yields no event.
        return
    event_data = TrendMicroUrlEventData()
    event_data.offset = row_offset
    # Integer fields: parse base 10, fall back to None on bad input.
    for field in (
        'credibility_rating', 'credibility_score', 'policy_identifier',
        'threshold', 'block_mode'):
        try:
            value = int(row[field], 10)
        except (ValueError, TypeError):
            value = None
        setattr(event_data, field, value)
    # String fields are copied verbatim.
    for field in ('url', 'group_name', 'group_code', 'application_name', 'ip'):
        setattr(event_data, field, row[field])
    event = time_events.DateTimeValuesEvent(
        timestamp, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a line of the log file and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row_offset (int): line number of the row. row (dict[str, str]): fields of a single row, as specified in COLUMNS.
juraj-google-style
def console_set_char_foreground(
    con: tcod.console.Console, x: int, y: int, col: Tuple[int, int, int]
) -> None:
    """Change the foreground color of x,y to col.

    Args:
        con (Console): Any Console instance.
        x (int): Character x position from the left.
        y (int): Character y position from the top.
        col (Union[Tuple[int, int, int], Sequence[int]]): An (r, g, b)
            sequence or Color instance.

    .. deprecated:: 8.4
        Array access performs significantly faster than using this
        function. See :any:`Console.fg`.
    """
    console_ptr = _console(con)
    lib.TCOD_console_set_char_foreground(console_ptr, x, y, col)
Change the foreground color of x,y to col. Args: con (Console): Any Console instance. x (int): Character x position from the left. y (int): Character y position from the top. col (Union[Tuple[int, int, int], Sequence[int]]): An (r, g, b) sequence or Color instance. .. deprecated:: 8.4 Array access performs significantly faster than using this function. See :any:`Console.fg`.
juraj-google-style
def is_function_or_method(obj):
    """Check if an object is a function or method.

    Args:
        obj: The Python object in question.

    Returns:
        True if the object is a function or method.
    """
    # Short-circuit on the common cases before the Cython check.
    if inspect.isfunction(obj) or inspect.ismethod(obj):
        return True
    return is_cython(obj)
Check if an object is a function or method. Args: obj: The Python object in question. Returns: True if the object is an function or method.
codesearchnet
def position(x=None, y=None):
    """Returns the current xy coordinates of the mouse cursor as a
    two-integer tuple.

    Args:
        x (int, None, optional): overrides the x in the return value.
        y (int, None, optional): overrides the y in the return value.

    Returns:
        (x, y) tuple of the current xy coordinates of the mouse cursor.
    """
    cur_x, cur_y = platformModule._position()
    cur_x = int(cur_x) if x is None else int(x)
    cur_y = int(cur_y) if y is None else int(y)
    return Point(cur_x, cur_y)
Returns the current xy coordinates of the mouse cursor as a two-integer tuple. Args: x (int, None, optional) - If not None, this argument overrides the x in the return value. y (int, None, optional) - If not None, this argument overrides the y in the return value. Returns: (x, y) tuple of the current xy coordinates of the mouse cursor.
codesearchnet
def delete(self, request):
    """Deletes given object from bucket.

    Args:
        request: (DeleteRequest) input message

    Returns:
        (void) Void, otherwise will raise if an error occurs
    """
    bucket_name = request.bucket
    key = request.object
    try:
        self.client.delete_object(Bucket=bucket_name, Key=key)
    except Exception as e:
        raise messages.S3ClientError(str(e), get_http_error_code(e))
Deletes given object from bucket Args: request: (DeleteRequest) input message Returns: (void) Void, otherwise will raise if an error occurs
github-repos
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):
    """Extracts metadata from a file entry.

    Args:
        mediator (ParserMediator): mediates the interactions between
            parsers and other components, such as storage and abort
            signals.
        file_entry (dfvfs.FileEntry): file entry to extract metadata from.
        data_stream (dfvfs.DataStream): data stream or None if the file
            entry has no data stream.
    """
    # Root entries generally carry no useful metadata, except for the
    # file system types listed in _TYPES_WITH_ROOT_METADATA.
    if (file_entry.IsRoot() and (file_entry.type_indicator not in self._TYPES_WITH_ROOT_METADATA)):
        return
    # Only the default (primary) data stream carries file entry metadata.
    if (data_stream and (not data_stream.IsDefault())):
        return
    display_name = mediator.GetDisplayName()
    logger.debug('[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format(display_name))
    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
    if self._processing_profiler:
        self._processing_profiler.StartTiming('extracting')
    self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
    if self._processing_profiler:
        self._processing_profiler.StopTiming('extracting')
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
Extracts metadata from a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry to extract metadata from. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream.
codesearchnet
def _CreateLineString(self, parent, coordinate_list): if (not coordinate_list): return None linestring = ET.SubElement(parent, 'LineString') tessellate = ET.SubElement(linestring, 'tessellate') tessellate.text = '1' if (len(coordinate_list[0]) == 3): altitude_mode = ET.SubElement(linestring, 'altitudeMode') altitude_mode.text = 'absolute' coordinates = ET.SubElement(linestring, 'coordinates') if (len(coordinate_list[0]) == 3): coordinate_str_list = [('%f,%f,%f' % t) for t in coordinate_list] else: coordinate_str_list = [('%f,%f' % t) for t in coordinate_list] coordinates.text = ' '.join(coordinate_str_list) return linestring
Create a KML LineString element. The points of the string are given in coordinate_list. Every element of coordinate_list should be one of a tuple (longitude, latitude) or a tuple (longitude, latitude, altitude). Args: parent: The parent ElementTree.Element instance. coordinate_list: The list of coordinates. Returns: The LineString ElementTree.Element instance or None if coordinate_list is empty.
codesearchnet
def repertoire(self, direction, mechanism, purview):
    """Return the cause or effect repertoire based on a direction.

    Args:
        direction: The temporal direction, specifying whether the cause
            or effect repertoire is computed.
        mechanism (tuple[int]): The mechanism node indices.
        purview (tuple[int]): The purview node indices.

    Returns:
        The repertoire of the mechanism over the purview, computed by the
        underlying system for the given direction.

    Raises:
        ValueError: If the purview or mechanism is invalid for the given
            direction in this transition.
    """
    system = self.system[direction]
    node_labels = system.node_labels
    if not set(purview).issubset(self.purview_indices(direction)):
        raise ValueError('{} is not a {} purview in {}'.format(
            fmt.fmt_mechanism(purview, node_labels), direction, self))
    # Fixed typo in the error message below ('is no a' -> 'is not a').
    if not set(mechanism).issubset(self.mechanism_indices(direction)):
        raise ValueError('{} is not a {} mechanism in {}'.format(
            fmt.fmt_mechanism(mechanism, node_labels), direction, self))
    return system.repertoire(direction, mechanism, purview)
Return the cause or effect repertoire based on a direction. Args: direction (str): The temporal direction, specifying whether the cause or the effect repertoire should be returned.
juraj-google-style
def _ScanFileSystemForWindowsDirectory(self, path_resolver): result = False for windows_path in self._WINDOWS_DIRECTORIES: windows_path_spec = path_resolver.ResolvePath(windows_path) result = windows_path_spec is not None if result: self._windows_directory = windows_path break return result
Scans a file system for a known Windows directory. Args: path_resolver (WindowsPathResolver): Windows path resolver. Returns: bool: True if a known Windows directory was found.
juraj-google-style
def _overrides(subcls, supercls, attr):
  """Check whether subcls overrides or newly defines the given attribute.

  Args:
    subcls: A potential subclass.
    supercls: A potential superclass.
    attr: An attribute name.

  Returns:
    True if subcls is a subclass of supercls and overrides or newly defines
    the attribute somewhere strictly below supercls in the MRO. False
    otherwise.
  """
  if subcls and supercls and (supercls in subcls.mro):
    subcls = _base(subcls)
    supercls = _base(supercls)
    # Walk the MRO from subcls down to (but excluding) supercls; any
    # definition of attr found in between counts as an override.
    for cls in subcls.mro:
      if cls == supercls:
        break
      # Force lazily-loaded class members to materialize before the lookup.
      if isinstance(cls, mixin.LazyMembers):
        cls.load_lazy_attribute(attr)
      if isinstance(cls, abstract.SimpleValue) and attr in cls.members and cls.members[attr].bindings:
        return True
  return False
Check whether subcls overrides or newly defines the given attribute. Args: subcls: A potential subclass. supercls: A potential superclass. attr: An attribute name. Returns: True if subcls is a subclass of supercls and overrides or newly defines the attribute. False otherwise.
github-repos
def delete_additional_charge(self, recurring_billing_id):
    """Remove an extra (one-time) charge from a recurring bill.

    Args:
        recurring_billing_id: Identifier of the additional charge to remove.

    Returns:
        The API response returned by the underlying DELETE request.
    """
    endpoint = '{}recurringBillItems/{}'.format(self.url, recurring_billing_id)
    return self.client._delete(endpoint, headers=self.get_headers())
Remove an extra charge from an invoice. Args: recurring_billing_id: Identifier of the additional charge. Returns: The API response of the DELETE request.
juraj-google-style
def FillDeviceCapabilities(device, descriptor):
  """Fill out device capabilities.

  Queries the HID preparsed data for the open device and copies the
  relevant capability fields into descriptor.

  Args:
    device: A handle to the open device.
    descriptor: DeviceDescriptor to populate with the capabilities.

  Returns:
    None

  Raises:
    WindowsError: when unable to obtain capabilities.
  """
  preparsed_data = PHIDP_PREPARSED_DATA(0)
  ret = hid.HidD_GetPreparsedData(device, ctypes.byref(preparsed_data))
  if not ret:
    raise ctypes.WinError()
  try:
    caps = HidCapabilities()
    ret = hid.HidP_GetCaps(preparsed_data, ctypes.byref(caps))
    if ret != HIDP_STATUS_SUCCESS:
      raise ctypes.WinError()
    descriptor.usage = caps.Usage
    descriptor.usage_page = caps.UsagePage
    descriptor.internal_max_in_report_len = caps.InputReportByteLength
    descriptor.internal_max_out_report_len = caps.OutputReportByteLength
  finally:
    # Always release the buffer allocated by HidD_GetPreparsedData.
    hid.HidD_FreePreparsedData(preparsed_data)
Fill out device capabilities. Fills the HidCapabilitites of the device into descriptor. Args: device: A handle to the open device descriptor: DeviceDescriptor to populate with the capabilities Returns: none Raises: WindowsError when unable to obtain capabilitites.
juraj-google-style
def max_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):
    """Max Pooling layer for 1D inputs (deprecated functional endpoint).

    Args:
      inputs: The tensor over which to pool. Must have rank 3.
      pool_size: An integer or tuple/list of a single integer, representing
        the size of the pooling window.
      strides: An integer or tuple/list of a single integer, specifying the
        strides of the pooling operation.
      padding: A string. The padding method, either 'valid' or 'same'.
        Case-insensitive.
      data_format: A string, one of `channels_last` (default) or
        `channels_first`.
      name: A string, the name of the layer.

    Returns:
      The output tensor, of rank 3.
    """
    # This endpoint only forwards to the Keras layer; warn on every call.
    warnings.warn('`tf.layers.max_pooling1d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.MaxPooling1D` instead.')
    layer = MaxPooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)
    return layer.apply(inputs)
Max Pooling layer for 1D inputs. Args: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled.
github-repos
def estCumPos(pos, chrom, offset=20000000):
    """Compute genome-wide cumulative positions from per-chromosome positions.

    Also returns the starting cumulative position of each chromosome.

    Args:
        pos: array of base-pair positions on the chromosome.
        chrom: array of chromosome identifiers, aligned with pos.
        offset: gap inserted between chromosomes (default 20000000 bp).

    Returns:
        cum_pos: array of cumulative positions, aligned with pos.
        chrom_pos: array of starting cumulative positions per chromosome.
    """
    chrom_values = SP.unique(chrom)
    chrom_pos = SP.zeros_like(chrom_values)
    cum_pos = SP.zeros_like(pos)
    running_offset = 0
    for index, chrom_value in enumerate(chrom_values):
        chrom_pos[index] = running_offset
        on_chrom = (chrom == chrom_value)
        cum_pos[on_chrom] = running_offset + pos[on_chrom]
        # Advance past this chromosome plus the inter-chromosome gap.
        running_offset += pos[on_chrom].max() + offset
    return cum_pos, chrom_pos
compute the cumulative position of each variant given the position and the chromosome Also return the starting cumulativeposition of each chromosome Args: pos: scipy.array of basepair positions (on the chromosome) chrom: scipy.array of chromosomes offset: offset between chromosomes for cumulative position (default 20000000 bp) Returns: cum_pos: scipy.array of cumulative positions chrom_pos: scipy.array of starting cumulative positions for each chromosme
codesearchnet
def compute_ld(cur_geno, other_genotypes, r2=False):
    """Compute LD between a marker and a list of markers.

    Args:
        cur_geno (Genotypes): The genotypes of the marker.
        other_genotypes (list): A list of Genotypes objects.
        r2 (bool): If True, return r**2 instead of r.

    Returns:
        pandas.Series: The r (or r**2) values between cur_geno and each of
        other_genotypes, indexed by variant name.

    Note:
        The genotypes are automatically normalized using (x - mean) / std.
    """
    norm_cur = normalize_genotypes(cur_geno)
    norm_others = np.stack(
        tuple(normalize_genotypes(g) for g in other_genotypes),
        axis=1,
    )
    assert norm_cur.shape[0] == norm_others.shape[0]
    # Per-pair count of samples with non-missing genotypes on both markers.
    n = (
        ~np.isnan(norm_cur.reshape(norm_cur.shape[0], 1))
        * ~np.isnan(norm_others)
    ).sum(axis=0)
    # Pearson correlation via the dot product of normalized genotypes,
    # with missing values zeroed out (i.e. treated as the mean).
    r = pd.Series(
        np.dot(
            np.nan_to_num(norm_cur),
            np.nan_to_num(norm_others) / n
        ),
        index=[g.variant.name for g in other_genotypes],
        name="r2" if r2 else "r",
    )
    # Clamp rounding drift back into the valid correlation range.
    r.loc[r > 1] = 1
    r.loc[r < -1] = -1
    if r2:
        return r ** 2
    else:
        return r
Compute LD between a marker and a list of markers. Args: cur_geno (Genotypes): The genotypes of the marker. other_genotypes (list): A list of genotypes. Returns: numpy.array: An array containing the r or r**2 values between cur_geno and other_genotypes. Note: The genotypes will automatically be normalized using (x - mean) / std.
juraj-google-style
def represent_as_string(iterable):
    """Represent a number, given as a digit container, as a string.

    (8, 6, 8, '.', 0, 15) -> "868.0F"

    Args:
        iterable: Number represented as an iterable container of digits.

    Returns:
        Number represented as a string of digits.
    """
    # Structural characters pass through unchanged; digits are converted.
    passthrough = {'.', '[', ']'}
    pieces = []
    for digit in iterable:
        if digit in passthrough:
            pieces.append(digit)
        else:
            pieces.append(int_to_str_digit(digit))
    return ''.join(pieces)
Represent a number in the form of a string. (8, 6, 8, '.', 0, 15) -> "868.0F" Args: iterable - Number represented as an iterable container of digits. Returns: Number represented as a string of digits. >>> represent_as_string((8, 6, 8, '.', 0, 15)) '868.0F'
juraj-google-style
def load(self, context):
  """Returns the debugger plugin, if possible.

  Args:
    context: The TBContext flags including `add_arguments`.

  Returns:
    A DebuggerPlugin instance or None if it couldn't be loaded.
  """
  # Neither of the two debugger ports was requested: nothing to load.
  if (not ((context.flags.debugger_data_server_grpc_port > 0) or (context.flags.debugger_port > 0))):
    return None
  flags = context.flags
  try:
    # Only verify that TensorFlow is importable; the module is not used here.
    import tensorflow
  except ImportError:
    raise ImportError('To use the debugger plugin, you need to have TensorFlow installed:\n  pip install tensorflow')
  try:
    from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib
    from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib
  except ImportError as e:
    (e_type, e_value, e_traceback) = sys.exc_info()
    message = (e.msg if hasattr(e, 'msg') else e.message)
    # Re-raise with a more actionable hint when gRPC is the missing piece.
    if ('grpc' in message):
      e_value = ImportError((message + '\n\nTo use the debugger plugin, you need to have gRPC installed:\n  pip install grpcio'))
    six.reraise(e_type, e_value, e_traceback)
  # The interactive plugin takes precedence when both ports are given.
  if (flags.debugger_port > 0):
    interactive_plugin = interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context)
    logger.info('Starting Interactive Debugger Plugin at gRPC port %d', flags.debugger_data_server_grpc_port)
    interactive_plugin.listen(flags.debugger_port)
    return interactive_plugin
  elif (flags.debugger_data_server_grpc_port > 0):
    noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context)
    logger.info('Starting Non-interactive Debugger Plugin at gRPC port %d', flags.debugger_data_server_grpc_port)
    noninteractive_plugin.listen(flags.debugger_data_server_grpc_port)
    return noninteractive_plugin
  # One of the two ports must have been positive, so this is unreachable.
  raise AssertionError()
Returns the debugger plugin, if possible. Args: context: The TBContext flags including `add_arguments`. Returns: A DebuggerPlugin instance or None if it couldn't be loaded.
codesearchnet
def create_binary_descriptor(streamer):
    """Create a packed binary descriptor of a DataStreamer object.

    Args:
        streamer (DataStreamer): The streamer to create a packed
            descriptor for.

    Returns:
        bytes: A packed 14-byte streamer descriptor.
    """
    # Trigger byte: bit 0 marks automatic streamers; the high bit plus the
    # other streamer's index marks "triggered by another streamer".
    if streamer.automatic:
        trigger = 1
    elif streamer.with_other is not None:
        trigger = (1 << 7) | streamer.with_other
    else:
        trigger = 0
    return struct.pack(
        "<8sHBBBx",
        streamer.dest.encode(),
        streamer.selector.encode(),
        trigger,
        streamer.KnownFormats[streamer.format],
        streamer.KnownTypes[streamer.report_type])
Create a packed binary descriptor of a DataStreamer object. Args: streamer (DataStreamer): The streamer to create a packed descriptor for Returns: bytes: A packed 14-byte streamer descriptor.
juraj-google-style
def _should_elide_opcode(op_items: list[tuple[int, Opcode]], i: int, python_version: tuple[int, int]):
    """Returns True if the opcode at index `i` should be elided.

    In 3.11, a JUMP_BACKWARD immediately followed by END_ASYNC_FOR does
    not contribute to type checking and causes issues in the block graph.

    Args:
        op_items: List of (offset, opcode) tuples.
        i: Index of the opcode to check for elision.
        python_version: Python version tuple.
    """
    if python_version != (3, 11):
        return False
    opcode = op_items[i][1]
    if not isinstance(opcode, JUMP_BACKWARD):
        return False
    return i + 1 < len(op_items) and isinstance(op_items[i + 1][1], END_ASYNC_FOR)
Returns `True` if the opcode on index `i` should be elided. Opcodes should be elided if they don't contribute to type checking and cause issues in the block graph. Args: op_items: List of (offset, opcode) tuples. i: Index of opcode to check for elision. python_version: Python version tuple.
github-repos
def get_group(self, name, user_name=None):
    """Get the owner of a group and the resources it is attached to.

    Args:
        name (string): Name of the group to query.
        user_name (optional[string]): Pass None unless membership of the
            given user should also be determined.

    Returns:
        (dict): Keys include 'owner', 'name', 'resources'.

    Raises:
        requests.HTTPError on failure.
    """
    backing_service = self.service
    return backing_service.get_group(
        name, user_name, self.url_prefix, self.auth,
        self.session, self.session_send_opts)
Get owner of group and the resources it's attached to. Args: name (string): Name of group to query. user_name (optional[string]): Supply None if not interested in determining if user is a member of the given group. Returns: (dict): Keys include 'owner', 'name', 'resources'. Raises: requests.HTTPError on failure.
codesearchnet
def identical_dataset_and_algorithm_tuner(self, additional_parents=None):
    """Create a warm-start HyperparameterTuner from this tuner.

    Copies the request fields from ``self`` into a new
    ``HyperparameterTuner`` whose warm start configuration uses the
    "IdenticalDataAndAlgorithm" type, with parents being ``self`` plus the
    provided ``additional_parents``.

    Args:
        additional_parents (set{str}): Additional parent tuning job names
            used, together with self, to warm start the new tuner.

    Returns:
        sagemaker.tuner.HyperparameterTuner: a tuner that can launch an
        identical dataset and algorithm tuning job.
    """
    return self._create_warm_start_tuner(additional_parents=additional_parents, warm_start_type=WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM)
Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new instance of ``HyperparameterTuner``. Followed by addition of warm start configuration with the type as "IdenticalDataAndAlgorithm" and parents as the union of provided list of ``additional_parents`` and the ``self`` Args: additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting the identical dataset and algorithm tuner. Returns: sagemaker.tuner.HyperparameterTuner: HyperparameterTuner instance which can be used to launch identical dataset and algorithm tuning job. Examples: >>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1") >>> identical_dataset_algo_tuner = parent_tuner.identical_dataset_and_algorithm_tuner( >>> additional_parents={"parent-job-2"}) Later On: >>> identical_dataset_algo_tuner.fit(inputs={})
codesearchnet
def logloss(y, p):
    """Bounded log loss error.

    Predictions are clipped away from 0 and 1 so the log loss stays
    finite.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        bounded log loss error
    """
    # Clip on a copy: the original implementation mutated the caller's
    # prediction array in place as a side effect.
    p = p.copy()
    p[p < EPS] = EPS
    p[p > 1 - EPS] = 1 - EPS
    return log_loss(y, p)
Bounded log loss error. Args: y (numpy.array): target p (numpy.array): prediction Returns: bounded log loss error
juraj-google-style
def _get_augmented_label_matrix(self, L, higher_order=False):
    """Return an augmented indicator version of the label matrix L.

    Each column of the result indicates whether a certain source (or,
    when higher_order=True, a clique of sources) voted in a certain
    pattern.

    Args:
        L: An [n, m] scipy.sparse label matrix with values in
            {0, 1, ..., k}.
        higher_order: Whether to append indicator columns for the cliques
            of the clique tree self.c_tree.
    """
    self.c_data = {}
    # Record, per source, its column range and the maximal cliques it
    # belongs to.
    for i in range(self.m):
        self.c_data[i] = {
            "start_index": i * self.k,
            "end_index": (i + 1) * self.k,
            "max_cliques": set(
                [
                    j
                    for j in self.c_tree.nodes()
                    if i in self.c_tree.node[j]["members"]
                ]
            ),
        }
    L_ind = self._create_L_ind(L)
    if higher_order:
        L_aug = np.copy(L_ind)
        # Nodes are unary cliques; edges are binary cliques.
        for item in chain(self.c_tree.nodes(), self.c_tree.edges()):
            if isinstance(item, int):
                C = self.c_tree.node[item]
                C_type = "node"
            elif isinstance(item, tuple):
                C = self.c_tree[item[0]][item[1]]
                C_type = "edge"
            else:
                raise ValueError(item)
            members = list(C["members"])
            nc = len(members)
            # A single-member clique reuses the unary indicator columns.
            if nc == 1:
                C["start_index"] = members[0] * self.k
                C["end_index"] = (members[0] + 1) * self.k
            else:
                # One indicator column per joint value assignment of the
                # clique's members.
                L_C = np.ones((self.n, self.k ** nc))
                for i, vals in enumerate(product(range(self.k), repeat=nc)):
                    for j, v in enumerate(vals):
                        L_C[:, i] *= L_ind[:, members[j] * self.k + v]
                if L_aug is not None:
                    C["start_index"] = L_aug.shape[1]
                    C["end_index"] = L_aug.shape[1] + L_C.shape[1]
                    L_aug = np.hstack([L_aug, L_C])
                else:
                    C["start_index"] = 0
                    C["end_index"] = L_C.shape[1]
                    L_aug = L_C
            id = tuple(members) if len(members) > 1 else members[0]
            self.c_data[id] = {
                "start_index": C["start_index"],
                "end_index": C["end_index"],
                "max_cliques": set([item]) if C_type == "node" else set(item),
            }
        return L_aug
    else:
        return L_ind
Returns an augmented version of L where each column is an indicator for whether a certain source or clique of sources voted in a certain pattern. Args: L: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}
juraj-google-style
def auto_model(layout, scan_length=None, one_vs_rest=False):
    """Create a simple default model for each task in a BIDSLayout.

    Optionally contrasts each trial type against all other trial types at
    the run level, then propagates the contrasts up through the session,
    subject and dataset levels via pass-through contrasts.

    Args:
        layout (BIDSLayout): A BIDSLayout instance.
        scan_length (int): Scan length for loading event variables when it
            cannot be read from the nifti. Primarily for testing.
        one_vs_rest (bool): If True, autogenerate contrasts of each trial
            type against every other trial type.

    Returns:
        list: model dictionaries, one per task.
    """
    base_name = split(layout.root)[(- 1)]
    tasks = layout.entities['task'].unique()
    task_models = []
    for task_name in tasks:
        model = OrderedDict()
        model['Name'] = '_'.join([base_name, task_name])
        model['Description'] = ('Autogenerated model for the %s task from %s' % (task_name, base_name))
        model['Input'] = {'Task': task_name}
        steps = []
        transformations = OrderedDict(Name='Factor', Input=['trial_type'])
        run = OrderedDict(Level='Run', Name='Run', Transformations=[transformations])
        # Collect all trial types observed across the task's run nodes.
        run_nodes = load_variables(layout, task=task_name, levels=['run'], scan_length=scan_length)
        evs = []
        for n in run_nodes.nodes:
            evs.extend(n.variables['trial_type'].values.values)
        trial_types = np.unique(evs)
        trial_type_factors = [('trial_type.' + tt) for tt in trial_types]
        run['Transformations'].append(OrderedDict(Name='Convolve', Input=trial_type_factors))
        run_model = OrderedDict(X=trial_type_factors)
        run['Model'] = run_model
        if one_vs_rest:
            contrasts = []
            for (i, tt) in enumerate(trial_types):
                cdict = OrderedDict()
                if (len(trial_types) > 1):
                    cdict['Name'] = (('run_' + tt) + '_vs_others')
                else:
                    cdict['Name'] = ('run_' + tt)
                cdict['ConditionList'] = trial_type_factors
                # Weight the target condition 1 and the rest -1/(n-1) so
                # the weights sum to zero.
                weights = np.ones(len(trial_types))
                try:
                    weights[(trial_types != tt)] = ((- 1.0) / (len(trial_types) - 1))
                except ZeroDivisionError:
                    pass
                cdict['Weights'] = list(weights)
                cdict['Type'] = 't'
                contrasts.append(cdict)
            run['Contrasts'] = contrasts
        steps.append(run)
        if one_vs_rest:
            # Propagate the run-level contrasts through whichever higher
            # levels actually exist in the dataset.
            sessions = layout.get_sessions()
            if (len(sessions) > 1):
                contrast_names = [cc['Name'] for cc in steps[(- 1)]['Contrasts']]
                steps.append(_make_passthrough_contrast('Session', contrast_names))
            subjects = layout.get_subjects()
            if (len(subjects) > 1):
                contrast_names = [cc['Name'] for cc in steps[(- 1)]['Contrasts']]
                steps.append(_make_passthrough_contrast('Subject', contrast_names))
            contrast_names = [cc['Name'] for cc in steps[(- 1)]['Contrasts']]
            steps.append(_make_passthrough_contrast('Dataset', contrast_names))
        model['Steps'] = steps
        task_models.append(model)
    return task_models
Create a simple default model for each of the tasks in a BIDSLayout. Contrasts each trial type against all other trial types and trial types at the run level and then uses t-tests at each other level present to aggregate these results up. Args: layout (BIDSLayout) A BIDSLayout instance scan_length (Int) Scan length for loading event varibles in cases where the scan length can not be read from the nifti. Primarily for testing. one_vs_rest (Bool) Set to True if you would like to autogenerate contrasts of each trial type against everyother trialtype. Returns: models (list) list of model dictionaries for each task
codesearchnet
def remove(self, *dic):
    """Remove a calendar config.

    Args:
        *dic (dict): dictionary with format {'Day': 12, 'Hour': 34}.
            Available keys are Month, Day, Weekday, Hour, Minute. Note
            the uppercase. Use gen() or genMix() to generate complex
            config dictionaries.
    """
    dicList = list(flatten(dic))
    for d in dicList:
        # Rebuild the dict as a DictSingle of (key, IntegerSingle) pairs
        # so it matches the stored representation before removal.
        di = []
        for k in d:
            di.append(Pair(k, IntegerSingle(d[k])))
        dictSingle = DictSingle(di)
        self._remove([dictSingle], self.l)
remove a calendar config. Args: *dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Avaliable keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionary.
codesearchnet
def lazy_property(fn):
    """Decorator making a property lazy-evaluated while preserving docstrings.

    The wrapped getter runs at most once per instance; its result is
    cached on the instance under a private attribute.

    Args:
        fn (function): the property getter to wrap.

    Returns:
        property: a caching version of the property.
    """
    cache_name = '_lazy_' + fn.__name__

    @property
    @wraps(fn)
    def _lazy_property(self):
        # Compute and cache on first access only.
        if not hasattr(self, cache_name):
            setattr(self, cache_name, fn(self))
        return getattr(self, cache_name)

    return _lazy_property
Decorator that makes a property lazy-evaluated whilst preserving docstrings. Args: fn (function): the property in question Returns: evaluated version of the property.
codesearchnet
def __init__(self, fields):
    """Construct a ConstructResult that maps field names to expressions.

    Args:
        fields: dict, variable name string -> Expression. Variable names
            must satisfy the rules in validate_safe_string().

    Returns:
        new ConstructResult object
    """
    # Normalize all keys to unicode strings before storing.
    self.fields = {
        ensure_unicode_string(key): value
        for key, value in six.iteritems(fields)
    }
    super(ConstructResult, self).__init__(self.fields)
    self.validate()
Construct a ConstructResult object that maps the given field names to their expressions. Args: fields: dict, variable name string -> Expression see rules for variable names in validate_safe_string(). Returns: new ConstructResult object
juraj-google-style
def make_fixed_temp_multi_apec(kTs, name_template='apec%d', norm=None):
    """Create a model summing multiple APEC components at fixed temperatures.

    Each component's *kT* parameter is set and then frozen, so on return
    only the component amplitudes remain free parameters.

    Args:
        kTs: An iterable of temperatures for the components, in keV.
        name_template: Template for component names; string-formatted with
            the 0-based component number as an argument.
        norm: An initial normalization used for every component, or None
            to use the Sherpa default.

    Returns:
        A tuple ``(total_model, sub_models)`` where *total_model* is a
        Sherpa model representing the sum of the APEC components and
        *sub_models* is a list of the individual models.
    """
    total_model = None
    sub_models = []
    for (i, kT) in enumerate(kTs):
        component = ui.xsapec((name_template % i))
        component.kT = kT
        # Freeze kT so only the normalization stays free.
        ui.freeze(component.kT)
        if (norm is not None):
            component.norm = norm
        sub_models.append(component)
        if (total_model is None):
            total_model = component
        else:
            total_model = (total_model + component)
    return (total_model, sub_models)
Create a model summing multiple APEC components at fixed temperatures. *kTs* An iterable of temperatures for the components, in keV. *name_template* = 'apec%d' A template to use for the names of each component; it is string-formatted with the 0-based component number as an argument. *norm* = None An initial normalization to be used for every component, or None to use the Sherpa default. Returns: A tuple ``(total_model, sub_models)``, where *total_model* is a Sherpa model representing the sum of the APEC components and *sub_models* is a list of the individual models. This function creates a vector of APEC model components and sums them. Their *kT* parameters are set and then frozen (using :func:`sherpa.astro.ui.freeze`), so that upon exit from this function, the amplitude of each component is the only free parameter.
codesearchnet
def total_seconds(td):
    """Pure-Python equivalent of :meth:`datetime.timedelta.total_seconds`.

    Provided for Python versions older than 2.7.

    Args:
        td (datetime.timedelta): The timedelta to convert to seconds.

    Returns:
        float: total number of seconds.

    >>> td = timedelta(days=4, seconds=33)
    >>> total_seconds(td)
    345633.0
    """
    micros_per_second = 1000000.0
    whole_seconds = td.seconds + td.days * 86400
    total_micros = td.microseconds + whole_seconds * micros_per_second
    return total_micros / micros_per_second
For those with older versions of Python, a pure-Python implementation of Python 2.7's :meth:`~datetime.timedelta.total_seconds`. Args: td (datetime.timedelta): The timedelta to convert to seconds. Returns: float: total number of seconds >>> td = timedelta(days=4, seconds=33) >>> total_seconds(td) 345633.0
juraj-google-style
def is_workdir(cls, path):
    """Check if the given path is a workdir.

    Args:
        path(str): Path to check.

    Returns:
        bool: True if the given path is a workdir.
    """
    workdir = cls(path=path)
    try:
        workdir.load()
    except MalformedWorkdir:
        return False
    return True
Check if the given path is a workdir Args: path(str): Path to check Return: bool: True if the given path is a workdir
codesearchnet
def logical_xor(x1, x2):
    """Compute the truth value of `x1 XOR x2`, element-wise.

    Args:
        x1: First input tensor.
        x2: Second input tensor.

    Returns:
        Output boolean tensor.
    """
    # Defer to the symbolic op when either input is a symbolic tensor.
    if any_symbolic_tensors((x1, x2)):
        return LogicalXor().symbolic_call(x1, x2)
    return backend.numpy.logical_xor(x1, x2)
Compute the truth value of `x1 XOR x2`, element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output boolean tensor.
github-repos
def ParseApplicationUsageRow(
    self, parser_mediator, query, row, **unused_kwargs):
  """Parses an application usage row.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
  """
  query_hash = hash(query)
  application_name = self._GetRowValue(query_hash, row, 'event')
  usage = 'Application {0:s}'.format(application_name)
  event_data = MacOSApplicationUsageEventData()
  event_data.application = self._GetRowValue(query_hash, row, 'app_path')
  event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')
  event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')
  event_data.count = self._GetRowValue(query_hash, row, 'number_times')
  event_data.query = query
  # The 'last_time' column holds a POSIX timestamp.
  timestamp = self._GetRowValue(query_hash, row, 'last_time')
  date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(date_time, usage)
  parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an application usage row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def register_entry(self, navbar_kwargs):
    """Register a navbar entry with the copilot.

    Walks the entry path, creating any missing intermediate entries; only
    the final path element receives the endpoint.

    Args:
        navbar_kwargs (dict): Arguments passed to the
            :class:`NavbarEntry` instance. Must contain a 'path' key whose
            value is a single name or an iterable of names.
    """
    path = navbar_kwargs.pop('path')
    # Normalize a single name (including a string) to a one-element path.
    if not hasattr(path, '__iter__') or isinstance(path, basestring):
        path = [path]
    entry_group = self.navbar_entries
    for name, is_last in iter_islast(path):
        kwargs = deepcopy(navbar_kwargs)
        kwargs['name'] = name
        for existing_entry in entry_group:
            if existing_entry.name == name:
                entry = existing_entry
                # An existing entry only gains the endpoint when it is the
                # final element of the path.
                if is_last:
                    entry.endpoint = kwargs['endpoint']
                break
        else:
            # Intermediate entries have no endpoint of their own.
            if not is_last:
                kwargs['endpoint'] = None
            entry = NavbarEntry(**kwargs)
            entry_group.add(entry)
        entry_group = entry.children
Register a navbar entry with the copilot. Args: navbar_kwargs (dict): Arguments passed to the :class:`NavbarEntry` instance.
juraj-google-style
def AddContract(self, contract):
    """Add a contract to the database.

    Args:
        contract(neo.SmartContract.Contract): a Contract instance.
    """
    super(UserWallet, self).AddContract(contract)
    try:
        # Replace any previously stored contract with the same script hash.
        db_contract = Contract.get(ScriptHash=contract.ScriptHash.ToBytes())
        db_contract.delete_instance()
    except Exception as e:
        logger.debug('contract does not exist yet')
    sh = bytes(contract.ScriptHash.ToArray())
    (address, created) = Address.get_or_create(ScriptHash=sh)
    # A contract-backed address is fully owned, not watch-only.
    address.IsWatchOnly = False
    address.save()
    db_contract = Contract.create(RawData=contract.ToArray(), ScriptHash=contract.ScriptHash.ToBytes(), PublicKeyHash=contract.PublicKeyHash.ToBytes(), Address=address, Account=self.__dbaccount)
    logger.debug(('Creating db contract %s ' % db_contract))
    db_contract.save()
Add a contract to the database. Args: contract(neo.SmartContract.Contract): a Contract instance.
codesearchnet
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, current_tokens: torch.LongTensor, beam_group_idx: int) -> torch.FloatTensor:
    """Apply the diversity penalty for diverse (grouped) beam search.

    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head.
        current_tokens (`torch.LongTensor` of shape `(batch_size)`):
            Tokens selected by the other beam groups in the current
            generation step.
        beam_group_idx (`int`):
            The index of the beam group currently being processed.

    Returns:
        `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`:
        The processed prediction scores.
    """
    batch_size = current_tokens.shape[0]
    group_start_idx = beam_group_idx * self._num_sub_beams
    group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams)
    group_size = group_end_idx - group_start_idx
    vocab_size = scores.shape[-1]
    # The first group has no previous groups to diversify against.
    if group_start_idx == 0:
        return scores
    scores_processed = scores.clone()
    for batch_idx in range(batch_size):
        # Penalize tokens already emitted by earlier groups for this batch
        # item, proportionally to how often each token was used.
        previous_group_tokens = current_tokens[batch_idx * self._num_beams:batch_idx * self._num_beams + group_start_idx]
        token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device)
        scores_processed[batch_idx * group_size:(batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency
    return scores_processed
Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search current_tokens (`torch.LongTensor` of shape `(batch_size)`): Indices of input sequence tokens in the vocabulary, corresponding to the tokens selected by the other beam groups in the current generation step. beam_group_idx (`int`): The index of the beam group currently being processed. Return: `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
github-repos
def inspect_config(self, id):
    """Retrieve config metadata.

    Args:
        id (string): Full ID of the config to inspect.

    Returns:
        (dict): A dictionary of metadata.

    Raises:
        :py:class:`docker.errors.NotFound`
            if no config with that ID exists.
    """
    endpoint = self._url('/configs/{0}', id)
    response = self._get(endpoint)
    return self._result(response, True)
Retrieve config metadata Args: id (string): Full ID of the config to inspect Returns (dict): A dictionary of metadata Raises: :py:class:`docker.errors.NotFound` if no config with that ID exists
codesearchnet
def assert_no_garbage_created(f: _F) -> _F:
  """Test method decorator to assert that no garbage has been created.

  Note that this decorator sets DEBUG_SAVEALL, which in some Python
  interpreters cannot be un-set (i.e. will disable garbage collection for
  any other unit tests in the same file/shard).

  Args:
    f: The function to decorate.

  Returns:
    The decorated function.
  """

  def decorator(self: 'TensorFlowTestCase', **kwargs):
    # Disable automatic collection so only the explicit collect() calls
    # below populate gc.garbage.
    gc.disable()
    previous_debug_flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
    gc.collect()
    previous_garbage = len(gc.garbage)
    result = f(self, **kwargs)
    gc.collect()
    new_garbage = len(gc.garbage)
    if new_garbage > previous_garbage:
      # Discount known cycles originating from the ast module.
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        if getattr(obj, '__module__', '') == 'ast':
          new_garbage -= 3
    if new_garbage > previous_garbage:
      logging.error("The decorated test created work for Python's garbage collector, likely due to a reference cycle. New objects in cycle(s):")
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        try:
          logging.error('Object %d of %d', i, len(gc.garbage) - previous_garbage)

          def _safe_object_str(obj) -> str:
            return '<%s %d>' % (obj.__class__.__name__, id(obj))
          logging.error(' Object type: %s', _safe_object_str(obj))
          logging.error(' Referrer types: %s', ', '.join([_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
          logging.error(' Referent types: %s', ', '.join([_safe_object_str(ref) for ref in gc.get_referents(obj)]))
          logging.error(' Object attribute names: %s', dir(obj))
          logging.error(' Object __str__:')
          logging.error(obj)
          logging.error(' Object __repr__:')
          logging.error(repr(obj))
        except Exception:
          # Printing diagnostics must never break the test itself.
          logging.error('(Exception while printing object)')
    if new_garbage > previous_garbage:
      # Try to surface at least one concrete reference cycle.
      for i in range(previous_garbage, new_garbage):
        if _find_reference_cycle(gc.garbage, i):
          break
    self.assertEqual(previous_garbage, new_garbage)
    gc.set_debug(previous_debug_flags)
    gc.enable()
    return result
  return decorator
Test method decorator to assert that no garbage has been created. Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters cannot be un-set (i.e. will disable garbage collection for any other unit tests in the same file/shard). Args: f: The function to decorate. Returns: The decorated function.
github-repos
def _get_full_name(self, node): curr = node items = [] while not isinstance(curr, ast.Name): if not isinstance(curr, ast.Attribute): return None items.append(curr.attr) curr = curr.value items.append(curr.id) return '.'.join(reversed(items))
Traverse an Attribute node to generate a full name, e.g., "tf.foo.bar". This is the inverse of `full_name_node`. Args: node: A Node of type Attribute. Returns: a '.'-delimited full-name or None if node was not Attribute or Name. i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
github-repos
def domain_tag(self, domains):
    """Get the date range when a domain was on the OpenDNS block list.

    Args:
        domains: an enumerable of domain-name strings.

    Returns:
        An enumerable of strings with period, category, and url.
    """
    url_template = u'domains/{0}/latest_tags'
    return self._multi_get('opendns-domain_tag', url_template, domains)
Get the date range when a domain was part of the OpenDNS block list. Args: domains: an enumerable of strings domain names Returns: An enumerable of strings with period, category, and url
codesearchnet
def _TravelTimes(self,triplist,index=0): def DistanceInTravelTime(dep_secs, arr_secs): t_dist = arr_secs-dep_secs if t_dist<0: t_dist = self._DUMMY_SEPARATOR return t_dist if not triplist: return [] if 0 < index < len(triplist): trip = triplist[index] else: trip = triplist[0] t_dists2 = [DistanceInTravelTime(stop[3],tail[2]) for (stop,tail) in itertools.izip(trip.GetTimeStops(),trip.GetTimeStops()[1:])] return t_dists2
Calculate distances and plot stops. Uses a timetable to approximate distances between stations Args: # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] # (Optional) Index of the trip in triplist preferred for timetable calculation index: 3 Returns: # One integer for each pair of stations # indicating the approximate distance [0,33,140, ... ,X]
juraj-google-style
def get_function_name(function: Any) -> str: return str(function.__qualname__.split('.')[0])
Get the original name of a function, removing module paths. Args: * function: function instance Returns: * Actual name of function
github-repos
def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:
  """Build and run the RunInference pipeline, blocking until it finishes.

  Args:
    argv: Command line arguments defined for this example.
    save_main_session: Used for internal testing.
    test_pipeline: Used for internal testing.

  Returns:
    PipelineResult: the result of the completed pipeline run.
  """
  known_args, pipeline_args = parse_known_args(argv)
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
  # Keyed handler so each prediction stays paired with its input key.
  model_loader = KeyedModelHandler(TFModelHandlerNumpy(model_uri=known_args.model_path, model_type=ModelType.SAVED_WEIGHTS, create_model_fn=get_model))
  pipeline = test_pipeline
  if not test_pipeline:
    pipeline = beam.Pipeline(options=pipeline_options)
  label_pixel_tuple = pipeline | 'ReadFromInput' >> beam.io.ReadFromText(known_args.input) | 'PreProcessInputs' >> beam.Map(process_input)
  predictions = label_pixel_tuple | 'RunInference' >> RunInference(model_loader) | 'PostProcessOutputs' >> beam.ParDo(PostProcessor())
  # shard_name_template='' forces a single, unsharded output file.
  _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)
  result = pipeline.run()
  result.wait_until_finish()
  return result
Args: argv: Command line arguments defined for this example. save_main_session: Used for internal testing. test_pipeline: Used for internal testing.
github-repos
def get_numeric_value(event_tags, logger=None):
    """A smart getter of the numeric metric value from the event tags.

    Args:
        event_tags: A dictionary of event tags.
        logger: Optional logger.

    Returns:
        float: the numeric metric value when the provided value is a
        properly formatted string, an integer, or a float/double.
        None when the value is None, a boolean, inf/-inf/nan, an
        improperly formatted string (e.g. '1,234'), or anything else
        that cannot be cast to a float.
    """
    logger_message_debug = None
    numeric_metric_value = None
    if (event_tags is None):
        logger_message_debug = 'Event tags is undefined.'
    elif (not isinstance(event_tags, dict)):
        logger_message_debug = 'Event tags is not a dictionary.'
    elif (NUMERIC_METRIC_TYPE not in event_tags):
        logger_message_debug = 'The numeric metric key is not in event tags.'
    else:
        numeric_metric_value = event_tags[NUMERIC_METRIC_TYPE]
        try:
            if isinstance(numeric_metric_value, (numbers.Integral, float, str)):
                cast_numeric_metric_value = float(numeric_metric_value)
                # Reject NaN and infinities even though float() accepts them.
                if ((not isinstance(cast_numeric_metric_value, float)) or math.isnan(cast_numeric_metric_value) or math.isinf(cast_numeric_metric_value)):
                    logger_message_debug = 'Provided numeric value {} is in an invalid format.'.format(numeric_metric_value)
                    numeric_metric_value = None
                elif isinstance(numeric_metric_value, bool):
                    logger_message_debug = 'Provided numeric value is a boolean, which is an invalid format.'
                    numeric_metric_value = None
                else:
                    numeric_metric_value = cast_numeric_metric_value
            else:
                logger_message_debug = 'Numeric metric value is not in integer, float, or string form.'
                numeric_metric_value = None
        except ValueError:
            logger_message_debug = 'Value error while casting numeric metric value to a float.'
            numeric_metric_value = None
    if (logger and logger_message_debug):
        logger.log(enums.LogLevels.DEBUG, logger_message_debug)
    if (numeric_metric_value is not None):
        if logger:
            logger.log(enums.LogLevels.INFO, 'The numeric metric value {} will be sent to results.'.format(numeric_metric_value))
    elif logger:
        logger.log(enums.LogLevels.WARNING, 'The provided numeric metric value {} is in an invalid format and will not be sent to results.'.format(numeric_metric_value))
    return numeric_metric_value
A smart getter of the numeric value from the event tags. Args: event_tags: A dictionary of event tags. logger: Optional logger. Returns: A float numeric metric value is returned when the provided numeric metric value is in the following format: - A string (properly formatted, e.g., no commas) - An integer - A float or double None is returned when the provided numeric metric values is in the following format: - None - A boolean - inf, -inf, nan - A string not properly formatted (e.g., '1,234') - Any values that cannot be cast to a float (e.g., an array or dictionary)
codesearchnet
def speech_speaker(self):
    """Retrieve the speaker of the associated audio or video fragment.

    The value is inherited from ancestor elements when none is set
    locally, so always use this method rather than reading the attribute
    directly.

    Returns:
        str, or None if no speaker is found anywhere up the tree.
    """
    if self.speaker:
        return self.speaker
    if self.parent:
        return self.parent.speech_speaker()
    return None
Retrieves the speaker of the audio or video file associated with the element. The source is inherited from ancestor elements if none is specified. For this reason, always use this method rather than access the ``src`` attribute directly. Returns: str or None if not found
codesearchnet
def hdg60(msg): d = hex2bin(data(msg)) if (d[0] == '0'): return None sign = int(d[1]) value = bin2int(d[2:12]) if sign: value = (value - 1024) hdg = ((value * 90) / 512.0) if (hdg < 0): hdg = (360 + hdg) return round(hdg, 3)
Megnetic heading of aircraft Args: msg (String): 28 bytes hexadecimal message (BDS60) string Returns: float: heading in degrees to megnetic north (from 0 to 360)
codesearchnet
def load(filename): if (not os.path.exists(filename)): LOG.error("load object - File '%s' does not exist.", filename) return None obj = None with open(filename, 'rb') as obj_file: obj = dill.load(obj_file) return obj
Load a pickled obj from the filesystem. You better know what you expect from the given pickle, because we don't check it. Args: filename (str): The filename we load the object from. Returns: The object we were able to unpickle, else None.
codesearchnet
def mount_share(share_path):
    """Mount a file share under /Volumes.

    Args:
        share_path: URL string, including any auth info, of the share.

    Returns:
        str: The mount point path reported by NetFS.

    Raises:
        Exception: If NetFS returns a non-zero mount result.
    """
    share_url = CFURLCreateWithString(None, share_path, None)
    # Suppress the authentication UI; allow mounting beneath existing mounts.
    open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI}
    mount_options = {NetFS.kNetFSAllowSubMountsKey: True}
    result, output = NetFS.NetFSMountURLSync(
        share_url, None, None, None, open_options, mount_options, None)
    if result != 0:
        raise Exception('Error mounting url "%s": %s' % (share_path, output))
    # output is a sequence whose first entry is the mount point.
    return str(output[0])
Mounts a share at /Volumes Args: share_path: String URL with all auth info to connect to file share. Returns: The mount point or raises an error.
juraj-google-style
def atmospheric_station_pressure(self, value=999999):
    """Corresponds to IDD Field `atmospheric_station_pressure`.

    Args:
        value (int): Station pressure in Pa; must satisfy
            31000 < value < 120000. The sentinel 999999 marks a missing
            value and bypasses the range check (it is also the default,
            which previously raised against its own validation). If
            `value` is None it is neither checked nor stored.

    Raises:
        ValueError: If `value` cannot be cast to int, or is out of range
            and not the missing sentinel.
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} need to be of type int '
                             'for field `atmospheric_station_pressure`'.format(value))
        # 999999 is the documented "missing" marker; it must be storable
        # without tripping the physical range check below.
        if value != 999999:
            if value <= 31000:
                raise ValueError('value need to be greater 31000 '
                                 'for field `atmospheric_station_pressure`')
            if value >= 120000:
                raise ValueError('value need to be smaller 120000 '
                                 'for field `atmospheric_station_pressure`')
        self._atmospheric_station_pressure = value
Corresponds to IDD Field `atmospheric_station_pressure` Args: value (int): value for IDD Field `atmospheric_station_pressure` Unit: Pa value > 31000 value < 120000 Missing value: 999999 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def _parse_cli_args(argv): parser = argparse.ArgumentParser(description='Mobly Suite Executable.') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-c', '--config', type=str, metavar='<PATH>', help='Path to the test configuration file.') group.add_argument('-l', '--list_tests', action='store_true', help='Print the names of the tests defined in a script without executing them.') parser.add_argument('--tests', '--test_case', nargs='+', type=str, metavar='[ClassA[_test_suffix][.test_a] ClassB[_test_suffix][.test_b] ...]', help='A list of test classes and optional tests to execute. Note: test_suffix based names are only supported when running by suite class') parser.add_argument('-tb', '--test_bed', nargs='+', type=str, metavar='[<TEST BED NAME1> <TEST BED NAME2> ...]', help='Specify which test beds to run tests on.') parser.add_argument('-v', '--verbose', action='store_true', help='Set console logger level to DEBUG') if not argv: argv = sys.argv[1:] return parser.parse_known_args(argv)[0]
Parses cli args that are consumed by Mobly. Args: argv: A list that is then parsed as cli args. If None, defaults to cli input. Returns: Namespace containing the parsed args.
github-repos
def get_nonmonotonic_neurites(neuron, tol=1e-6):
    """Collect the neurites of *neuron* that fail the monotonicity test.

    Args:
        neuron (Neuron): neuron whose neurites are checked.
        tol (float): tolerance forwarded to is_monotonic.

    Returns:
        list: neurites for which is_monotonic(neurite, tol) is False.
    """
    failing = []
    for neurite in neuron.neurites:
        if not is_monotonic(neurite, tol):
            failing.append(neurite)
    return failing
Get neurites that are not monotonic Args: neurite(Neurite): neurite to operate on tol(float): the tolerance or the ratio Returns: list of neurites that do not satisfy monotonicity test
juraj-google-style
def init_state(self, node):
    """Build the initial in/out state slot for *node*.

    One state slot is created per node in the graph; subclasses overload
    this to control what that slot is initialized to.

    Args:
        node: Node the state slot is being created for.

    Raises:
        NotImplementedError: always, in this abstract base implementation.
    """
    raise NotImplementedError('Subclasses must implement this.')
State initialization function. Optional to overload. An in/out state slot will be created for each node in the graph. Subclasses must overload this to control what that is initialized to. Args: node: Node
github-repos
def register_agent(self, host, sweep_id=None, project_name=None):
    """Register a new agent with the backend via the CreateAgent mutation.

    Args:
        host (str): hostname of the machine the agent runs on.
        sweep_id (str): id of the sweep the agent will serve.
        project_name (str): project containing the sweep; defaults to the
            project from settings when omitted.

    Returns:
        dict: the ``agent`` payload of the createAgent response (holds ``id``).

    Raises:
        UsageError: when the server answers with an HTTP 400 whose body
            carries an error message (surfaced instead of retrying).
    """
    # GraphQL mutation creating the agent record server-side.
    mutation = gql('\n mutation CreateAgent(\n $host: String!\n $projectName: String!,\n $entityName: String!,\n $sweep: String!\n ) {\n createAgent(input: {\n host: $host,\n projectName: $projectName,\n entityName: $entityName,\n sweep: $sweep,\n }) {\n agent {\n id\n }\n }\n }\n ')
    if (project_name is None):
        project_name = self.settings('project')

    def no_retry_400(e):
        # Retry policy: keep retrying anything except an HTTP 400, which is
        # a client error — extract its message and raise it to the caller.
        if (not isinstance(e, requests.HTTPError)):
            return True
        if (e.response.status_code != 400):
            return True
        body = json.loads(e.response.content)
        raise UsageError(body['errors'][0]['message'])
    response = self.gql(mutation, variable_values={'host': host, 'entityName': self.settings('entity'), 'projectName': project_name, 'sweep': sweep_id}, check_retry_fn=no_retry_400)
    return response['createAgent']['agent']
Register a new agent Args: host (str): hostname persistent (bool): long running or oneoff sweep (str): sweep id project_name: (str): model that contains sweep
codesearchnet
def _make_tags_vector(self, tags, bucket_length=None) -> np.ndarray: bucket_length = (bucket_length or len(tags)) answer = np.zeros(shape=(bucket_length,), dtype=np.int32) for (i, tag) in enumerate(tags): answer[i] = self.tags.tok2idx(tag) return answer
Transforms a sentence of tags to a Numpy array, which will be the network target.

Args:
    tags: input sentence of tags
    bucket_length: the width of the bucket

Returns:
    A 1d array of length bucket_length; answer[i] contains the index of the i-th tag in the input sentence (positions past the sentence length are zero padding).
codesearchnet
def byte_adaptor(fbuffer):
    """Provide py3 compatibility by converting a byte-based file stream
    into a string-based file stream.

    Args:
        fbuffer: file-like object containing bytes.

    Returns:
        A StringIO wrapping the decoded contents on Python 3; the
        original buffer, untouched, on Python 2.
    """
    if not six.PY3:
        return fbuffer
    # latin-1 maps every byte value to a code point, so decoding never fails.
    text = fbuffer.read().decode('latin-1')
    return six.StringIO(text)
provides py3 compatibility by converting byte based file stream to string based file stream Arguments: fbuffer: file like objects containing bytes Returns: string buffer
codesearchnet
def accepts_scalar_input(func):
    """Decorator for class methods that expect a list argument, letting the
    caller pass a single scalar instead.

    A scalar is wrapped into a one-element list before calling *func*, and
    the single result is unwrapped on return. Iterable inputs pass through
    unchanged.

    Args:
        func: the wrapped method; must treat its second argument as a list.

    Returns:
        func: the wrapping method `wrp_asi` (with *func*'s signature
        preserved via preserve_sig).
    """
    @ignores_exc_tb(outer_wrapper=False)
    def wrp_asi(self, input_, *args, **kwargs):
        # Vector input: forward as-is.
        if util_iter.isiterable(input_):
            return func(self, input_, *args, **kwargs)
        else:
            # Scalar input: wrap, call, unwrap. If func returns None the
            # implicit None is propagated (no [0] indexing on None).
            ret = func(self, [input_], *args, **kwargs)
            if (ret is not None):
                return ret[0]
    wrp_asi = preserve_sig(wrp_asi, func)
    return wrp_asi
DEPRICATE in favor of accepts_scalar_input2 only accepts one input as vector accepts_scalar_input is a decorator which expects to be used on class methods. It lets the user pass either a vector or a scalar to a function, as long as the function treats everything like a vector. Input and output is sanitized to the user expected format on return. Args: func (func): Returns: func: wrp_asi CommandLine: python -m utool.util_decor --test-accepts_scalar_input Example: >>> # ENABLE_DOCTEST >>> from utool.util_decor import * # NOQA >>> @accepts_scalar_input ... def foobar(self, list_): ... return [x + 1 for x in list_] >>> self = None # dummy self because this decorator is for classes >>> assert 2 == foobar(self, 1) >>> assert [2, 3] == foobar(self, [1, 2])
codesearchnet
def ExamineEvent(self, mediator, event):
    """Tag *event* with the session it belongs to.

    A new session starts whenever the time since the previous event
    exceeds the configured maximum pause.

    Args:
        mediator (AnalysisMediator): used to emit the produced event tag.
        event (EventObject): event to examine.
    """
    deadline = event.timestamp + self._maximum_pause_microseconds
    if self._session_end_timestamp is None:
        # First event ever seen: open session 0.
        self._session_end_timestamp = deadline
        self._events_per_session.append(0)
    if event.timestamp > self._session_end_timestamp:
        # Pause exceeded: close the current session and open a new one.
        self._session_counter += 1
        self._events_per_session.append(0)
        self._session_end_timestamp = deadline
    self._events_per_session[-1] += 1

    label = 'session_{0:d}'.format(self._session_counter)
    tag = self._CreateEventTag(event, self._EVENT_TAG_COMMENT, [label])
    mediator.ProduceEventTag(tag)
    self._number_of_event_tags += 1
Analyzes an EventObject and tags it as part of a session. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
juraj-google-style
def findSequenceOnDisk(cls, pattern, strictPadding=False):
    """Search the filesystem for a sequence matching *pattern*.

    The padding characters in *pattern* filter the frame numbers of files
    on disk when `strictPadding` is True.

    Args:
        pattern (str): the sequence pattern being searched for.
        strictPadding (bool): if True, ignore files whose padding length
            differs from the pattern's.

    Returns:
        The matching sequence (or *pattern*'s own sequence when it names a
        single concrete file).

    Raises:
        FileSeqException: if no sequence on disk matches the pattern.
    """
    seq = cls(pattern)

    if ((seq.frameRange() == '') and (seq.padding() == '')):
        # Pattern names a single concrete file, not a sequence.
        if os.path.isfile(pattern):
            return seq

    # Glob with a wildcard in place of the frame number.
    patt = seq.format('{dirname}{basename}*{extension}')

    ext = seq.extension()
    basename = seq.basename()
    pad = seq.padding()

    globbed = iglob(patt)
    if (pad and strictPadding):
        # Keep only files whose frame field has the expected zero-fill width,
        # and normalize the padding token for later use.
        globbed = cls._filterByPaddingNum(globbed, seq.zfill())
        pad = cls.conformPadding(pad)

    matches = cls.yield_sequences_in_list(globbed)
    for match in matches:
        # The glob may pick up sibling sequences; require exact basename
        # and extension.
        if ((match.basename() == basename) and (match.extension() == ext)):
            if (pad and strictPadding):
                match.setPadding(pad)
            return match

    msg = 'no sequence found on disk matching {0}'
    raise FileSeqException(msg.format(pattern))
Search for a specific sequence on disk. The padding characters used in the `pattern` are used to filter the frame values of the files on disk (if `strictPadding` is True). Examples: Find sequence matching basename and extension, and a wildcard for any frame. returns bar.1.exr bar.10.exr, bar.100.exr, bar.1000.exr, inclusive >>> findSequenceOnDisk("seq/bar@@@@.exr") Find exactly 4-padded sequence, i.e. seq/bar1-100#.exr returns only frames bar1000.exr through bar9999.exr >>> findSequenceOnDisk("seq/bar#.exr", strictPadding=True) Args: pattern (str): the sequence pattern being searched for strictPadding (bool): if True, ignore files with padding length different from `pattern` Returns: str: Raises: :class:`.FileSeqException`: if no sequence is found on disk
codesearchnet
def __init__(self,
             queues=queues_config.WORKER_LIST,
             threadpool_prefix="grr_threadpool",
             threadpool_size=None,
             token=None):
    """Constructor.

    Args:
        queues: The queues we use to fetch new messages from.
        threadpool_prefix: A name for the thread pool used by this worker.
        threadpool_size: The number of workers to start in this thread pool;
            defaults to the Threadpool.size config value when None.
        token: The ACL token to use for the worker (required).

    Raises:
        RuntimeError: If the token is not provided.
    """
    logging.info("started worker with queues: %s", str(queues))
    self.queues = queues

    # Short-lived cache of flows already queued, to avoid re-queueing.
    self.queued_flows = utils.TimeBasedCache(max_size=10, max_age=60)

    if token is None:
        raise RuntimeError("A valid ACLToken is required.")

    if threadpool_size is None:
        threadpool_size = config.CONFIG["Threadpool.size"]

    # The pool is started immediately; callers get a running worker.
    self.thread_pool = threadpool.ThreadPool.Factory(
        threadpool_prefix, min_threads=2, max_threads=threadpool_size)
    self.thread_pool.Start()
    self.token = token
    self.last_active = 0
    self.last_mh_lease_attempt = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)

    # Well-known flows are loaded once up front.
    self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(token=token)
Constructor. Args: queues: The queues we use to fetch new messages from. threadpool_prefix: A name for the thread pool used by this worker. threadpool_size: The number of workers to start in this thread pool. token: The token to use for the worker. Raises: RuntimeError: If the token is not provided.
juraj-google-style