code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _to_enos_networks(networks):
    """Transform the networks returned by deploy5k.

    Args:
        networks (list): pairs of (roles, network) as returned by
            :py:func:`enoslib.infra.provider.Provider.init`.

    Returns:
        list: the networks converted with ``to_enos``.
    """
    enos_nets = [net.to_enos(net_roles) for (net_roles, net) in networks]
    logger.debug(enos_nets)
    return enos_nets
Transform the networks returned by deploy5k. Args: networks (dict): networks returned by :py:func:`enoslib.infra.provider.Provider.init`
codesearchnet
def from_dir(dirpath: Path, feat_type: str) -> None:
    """Performs feature extraction from the WAV files in a directory.

    Args:
        dirpath: A `Path` to the directory where the WAV files reside.
        feat_type: The type of features that are being used
            ("fbank", "fbank_and_pitch", "pitch" or "mfcc13_d").

    Raises:
        PersephoneException: If an empty WAV file is found or the feature
            type is unknown.
    """
    logger.info('Extracting features from directory {}'.format(dirpath))
    dirname = str(dirpath)

    def all_wavs_processed() -> bool:
        """True if all wavs in the directory have a corresponding numpy
        feature file; False otherwise."""
        for fn in os.listdir(dirname):
            (prefix, ext) = os.path.splitext(fn)
            if (ext == '.wav'):
                if (not os.path.exists(os.path.join(dirname, ('%s.%s.npy' % (prefix, feat_type))))):
                    return False
        return True

    if all_wavs_processed():
        # Nothing to do; avoids redundant (expensive) feature extraction.
        logger.info('All WAV files already preprocessed')
        return
    # Pitch features are extracted for the whole directory in a single
    # Kaldi invocation rather than per file.
    if ((feat_type == 'pitch') or (feat_type == 'fbank_and_pitch')):
        kaldi_pitch(dirname, dirname)
    for filename in os.listdir(dirname):
        logger.info('Preparing %s features for %s', feat_type, filename)
        path = os.path.join(dirname, filename)
        if path.endswith('.wav'):
            if empty_wav(path):
                raise PersephoneException("Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.".format(path))
            if (feat_type == 'fbank'):
                fbank(path)
            elif (feat_type == 'fbank_and_pitch'):
                fbank(path)
                prefix = os.path.splitext(filename)[0]
                combine_fbank_and_pitch(dirname, prefix)
            elif (feat_type == 'pitch'):
                # Pitch was already extracted directory-wide above.
                pass
            elif (feat_type == 'mfcc13_d'):
                mfcc(path)
            else:
                logger.warning('Feature type not found: %s', feat_type)
                raise PersephoneException(('Feature type not found: %s' % feat_type))
Performs feature extraction from the WAV files in a directory. Args: dirpath: A `Path` to the directory where the WAV files reside. feat_type: The type of features that are being used.
codesearchnet
def submit(self, command, blocksize, tasks_per_node, job_name='parsl.auto'):
    """Submit the command for execution, most often to start a pilot.

    Args:
        command (str): The bash command string to be executed.
        blocksize (int): Blocksize to be requested.
        tasks_per_node (int): Command invocations to be launched per node.
        job_name (str): Human friendly name to be assigned to the job
            request.

    Returns:
        str: A job identifier (here, the created instance's name).
    """
    # Wrap the raw command with the launcher; the final argument of 1 is
    # the node count per block.
    wrapped_cmd = self.launcher(command, tasks_per_node, 1)
    (instance, name) = self.create_instance(command=wrapped_cmd)
    self.provisioned_blocks += 1
    # Track the new job, mapping the provider status through
    # translate_table into Parsl's status vocabulary.
    self.resources[name] = {'job_id': name, 'status': translate_table[instance['status']]}
    return name
The submit method takes the command string to be executed upon instantiation of a resource most often to start a pilot. Args : - command (str) : The bash command string to be executed. - blocksize (int) : Blocksize to be requested - tasks_per_node (int) : command invocations to be launched per node KWargs: - job_name (str) : Human friendly name to be assigned to the job request Returns: - A job identifier, this could be an integer, string etc Raises: - ExecutionProviderException or its subclasses
codesearchnet
def format_date(self, dl_string):
    """Formats various date formats to dd.MM.

    Examples:
        - January 15th --> 15.01.
        - 15.01.2017   --> 15.01.
        - 15th of January --> 15.01.
        - 15.1.        --> 15.01.

    Args:
        dl_string: a string to be formatted.

    Returns:
        Date string in format dd.MM. (or "None.None." when neither day
        nor month could be parsed).
    """
    # Fast path: the helper already produced a dd.MM. style date.
    thedate = get_simple_date(dl_string)
    if ((thedate != 'Failed') and thedate):
        return thedate
    # Fall back to extracting the day and month separately.
    day = get_day_of_month(dl_string)
    month = get_month(dl_string)
    return (((day + '.') + month) + '.')
Formats various date formats to dd.MM. Examples - January 15th --> 15.01. - 15.01.2017 --> 15.01. - 15th of January --> 15.01. - 15.1. --> 15.01. Keyword arguments: dl_string -- a string to be formatted Returns: Date string in format dd.MM. or "None.None"
codesearchnet
def suggest_charges(self, tolerance=0.1):
    """Suggest possible charges for defects to compute, based on proximity
    of known transition levels to the VBM and CBM.

    Args:
        tolerance (float): tolerance with respect to the VBM and CBM
            within which new charges continue to be suggested.

    Returns:
        dict: mapping of defect type to a list of suggested charges.
    """
    recommendations = {}
    for def_type in self.defect_types:
        # Candidate charges: one below the minimum to one above the
        # maximum stable charge (np.arange end is exclusive, hence +2).
        test_charges = np.arange((np.min(self.stable_charges[def_type]) - 1), (np.max(self.stable_charges[def_type]) + 2))
        # Never re-suggest charges that are already computed.
        test_charges = [charge for charge in test_charges if (charge not in self.finished_charges[def_type])]
        if len(self.transition_level_map[def_type].keys()):
            # Transition level close to the VBM: only lower charges are
            # physically interesting.
            min_tl = min(self.transition_level_map[def_type].keys())
            if (min_tl < tolerance):
                max_charge = max(self.transition_level_map[def_type][min_tl])
                test_charges = [charge for charge in test_charges if (charge < max_charge)]
            # Transition level close to the CBM: only higher charges.
            max_tl = max(self.transition_level_map[def_type].keys())
            if (max_tl > (self.band_gap - tolerance)):
                min_charge = min(self.transition_level_map[def_type][max_tl])
                test_charges = [charge for charge in test_charges if (charge > min_charge)]
        else:
            # No known transition levels: just exclude stable charges.
            test_charges = [charge for charge in test_charges if (charge not in self.stable_charges[def_type])]
        recommendations[def_type] = test_charges
    return recommendations
Suggest possible charges for defects to compute, based on proximity of known transitions from entries to VBM and CBM Args: tolerance (float): tolerance with respect to the VBM and CBM within which to continue to compute new charges
codesearchnet
def __init__(self, xml=None, resort=True):
    """Constructor.

    Args:
        xml (str/file, default None): XML to be parsed. May be a
            file-like object.
        resort (bool, default True): Sort the output alphabetically?
    """
    self.leader = None
    self.oai_marc = False
    self.controlfields = OrderedDict()
    self.datafields = OrderedDict()
    # Characters considered valid MARC indicator values.
    self.valid_i_chars = set(list(" 0123456789*"))
    # Either a sorting function or the identity, depending on `resort`.
    self.resorted = tools.resorted if resort else lambda x: x
    if hasattr(xml, "read"):
        # File-like object: consume its content.
        xml = xml.read()
    if xml is not None:
        self._original_xml = xml
        self._parse_string(xml)
Constructor. Args: xml (str/file, default None): XML to be parsed. May be file-like object. resort (bool, default True): Sort the output alphabetically?
juraj-google-style
def run(in_file_nose, out_dir_unitth):
    """Convert nose-style test reports to UnitTH-style test reports by
    splitting modules into separate XML files.

    Args:
        in_file_nose (:obj:`str`): path to nose-style test report
        out_dir_unitth (:obj:`str`): directory to save UnitTH-style test
            reports
    """
    suites = Converter.read_nose(in_file_nose)
    Converter.write_unitth(suites, out_dir_unitth)
Convert nose-style test reports to UnitTH-style test reports by splitting modules into separate XML files Args: in_file_nose (:obj:`str`): path to nose-style test report out_dir_unitth (:obj:`str`): path to save UnitTH-style test reports
juraj-google-style
def start(args_string):
    """Launch and display a TensorBoard instance as if at the command line.

    Args:
        args_string: Command-line arguments to TensorBoard, to be
            interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0".
            Shell metacharacters are not supported.

    Raises:
        TypeError: If `manager.start` returns an unknown result type.
    """
    context = _get_context()
    try:
        import IPython
        import IPython.display
    except ImportError:
        # Outside of IPython; fall back to plain print output.
        IPython = None
    if (context == _CONTEXT_NONE):
        handle = None
        print('Launching TensorBoard...')
    else:
        # Rich display handle so the message can be updated in place.
        handle = IPython.display.display(IPython.display.Pretty('Launching TensorBoard...'), display_id=True)

    def print_or_update(message):
        # Update the rich handle when available, otherwise print.
        if (handle is None):
            print(message)
        else:
            handle.update(IPython.display.Pretty(message))

    parsed_args = shlex.split(args_string, comments=True, posix=True)
    start_result = manager.start(parsed_args)
    if isinstance(start_result, manager.StartLaunched):
        _display(port=start_result.info.port, print_message=False, display_handle=handle)
    elif isinstance(start_result, manager.StartReused):
        template = "Reusing TensorBoard on port {port} (pid {pid}), started {delta} ago. (Use '!kill {pid}' to kill it.)"
        message = template.format(port=start_result.info.port, pid=start_result.info.pid, delta=_time_delta_from_info(start_result.info))
        print_or_update(message)
        _display(port=start_result.info.port, print_message=False, display_handle=None)
    elif isinstance(start_result, manager.StartFailed):

        def format_stream(name, value):
            # Render a captured stream for the error message; value may be
            # '', None (unreadable) or actual content.
            if (value == ''):
                return ''
            elif (value is None):
                return ('\n<could not read %s>' % name)
            else:
                return ('\nContents of %s:\n%s' % (name, value.strip()))

        message = ('ERROR: Failed to launch TensorBoard (exited with %d).%s%s' % (start_result.exit_code, format_stream('stderr', start_result.stderr), format_stream('stdout', start_result.stdout)))
        print_or_update(message)
    elif isinstance(start_result, manager.StartTimedOut):
        message = ('ERROR: Timed out waiting for TensorBoard to start. It may still be running as pid %d.' % start_result.pid)
        print_or_update(message)
    else:
        raise TypeError(('Unexpected result from `manager.start`: %r.\nThis is a TensorBoard bug; please report it.' % start_result))
Launch and display a TensorBoard instance as if at the command line. Args: args_string: Command-line arguments to TensorBoard, to be interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0". Shell metacharacters are not supported: e.g., "--logdir 2>&1" will point the logdir at the literal directory named "2>&1".
codesearchnet
def _get_short_value(cls, value, type_): if type_ == 'CLASS': return value.__name__ return None
Calculates the short value for an item. Args: value: The value of the item that needs to be shortened. type_(string): The type of the value. Returns: The unqualified name of a class if type_ is 'CLASS'. None otherwise.
github-repos
def compare_profiles(profile1, profile2):
    """Given two profiles, determine the ratio of similarity, i.e. the
    position-wise agreement between the strings.

    Args:
        profile1 (str): profile string
        profile2 (str): profile string

    Returns:
        float: the ratio of similarity (0-1)
    """
    total = len(profile1)
    arr_a = np.array(list(profile1))
    arr_b = np.array(list(profile2))
    # Element-wise equality, then count the matching positions.
    match_count = np.sum(arr_a == arr_b)
    return match_count / total
Given two profiles, determine the ratio of similarity, i.e. the hamming distance between the strings. Args: profile1/2 (str): profile string Returns: similarity_ratio (float): the ratio of similarity (0-1)
codesearchnet
def _orthogonal_kernel(self, ksize, cin, cout):
    """Construct orthogonal kernel for convolution.

    Args:
        ksize: Kernel size.
        cin: Number of input channels.
        cout: Number of output channels.

    Returns:
        An [ksize, ksize, cin, cout] orthogonal kernel.

    Raises:
        ValueError: If cin > cout.
    """
    if cin > cout:
        raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')
    # First `cin` rows of a cout x cout orthogonal matrix.
    orth = self._orthogonal_matrix(cout)[0:cin, :]
    if ksize == 1:
        # 1x1 kernel: just add the two spatial dimensions.
        return array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0)
    # Build up the ksize x ksize kernel by repeatedly convolving
    # block-orthogonal 2-tap factors.
    p = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout))
    for _ in range(ksize - 2):
        temp = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout))
        p = self._matrix_conv(p, temp)
    # Project each spatial block down to cin rows.
    for i in range(ksize):
        for j in range(ksize):
            p[i, j] = math_ops.matmul(orth, p[i, j])
    return self._dict_to_tensor(p, ksize, ksize)
Construct orthogonal kernel for convolution. Args: ksize: Kernel size. cin: Number of input channels. cout: Number of output channels. Returns: An [ksize, ksize, cin, cout] orthogonal kernel. Raises: ValueError: If cin > cout.
github-repos
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=True):
    """Builds the logistic regression model by calling the helper functions.

    Args:
        X_train: training set, numpy array of shape (dim, m_train).
        Y_train: training labels, numpy array of shape (1, m_train).
        X_test: test set, numpy array of shape (dim, m_test).
        Y_test: test labels, numpy array of shape (1, m_test).
        num_iterations: number of optimization iterations.
        learning_rate: learning rate used in the optimizer's update rule.
        print_cost: if True, print the cost every 100 iterations.

    Returns:
        dict: costs, predictions, learned parameters and hyperparameters.
    """
    print('X_train shape:', X_train.shape)
    # Parameters start at zero, sized to the feature dimension.
    (w, b) = initialize_with_zeros(X_train.shape[0])
    print('w shape:', w.shape)
    (parameters, grads, costs) = LR_train(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    w = parameters['w']
    b = parameters['b']
    print('w shape params:', w.shape)
    Y_prediction_test = LR_predict(w, b, X_test)
    Y_prediction_train = LR_predict(w, b, X_train)
    # Accuracy = 100% minus mean absolute prediction error (labels are 0/1).
    print('train accuracy: {} %'.format((100 - (np.mean(np.abs((Y_prediction_train - Y_train))) * 100))))
    print('test accuracy: {} %'.format((100 - (np.mean(np.abs((Y_prediction_test - Y_test))) * 100))))
    d = {'costs': costs, 'Y_prediction_test': Y_prediction_test, 'Y_prediction_train': Y_prediction_train, 'w': w, 'b': b, 'learning_rate': learning_rate, 'num_iterations': num_iterations}
    return d
Builds the logistic regression model by calling the function implemented above Arguments: X_train training set represented by a numpy array of shape (dim, m_train) Y_train training labels represented by a numpy array (vector) of shape (1, m_train) X_test test set represented by a numpy array of shape (dim, m_test) Y_test test labels represented by a numpy array (vector) of shape (1, m_test) num_iterations hyperparameter representing the number of iterations to optimize the parameters learning_rate hyperparameter representing the learning rate used in the update rule of optimize() print_cost Set to true to print the cost every 100 iterations Returns: d -- dictionary containing information about the model.
codesearchnet
def read(self, path):
    """Read EPW weather data from path.

    Args:
        path (str): path to read weather data from
    """
    with open(path, "r") as f:
        for line in f:
            line = line.strip()
            # EPW header records start with an all-caps name followed by a
            # comma (e.g. "LOCATION,").
            match_obj_name = re.search(r"^([A-Z][A-Z/ \d]+),", line)
            if match_obj_name is not None:
                internal_name = match_obj_name.group(1)
                if internal_name in self._data:
                    self._data[internal_name] = self._create_datadict(
                        internal_name)
                    # Remainder of the line (after "NAME,") holds the values.
                    data_line = line[len(internal_name) + 1:]
                    vals = data_line.strip().split(',')
                    self._data[internal_name].read(vals)
            else:
                # Non-header lines are weather data records.
                wd = WeatherData()
                wd.read(line.strip().split(','))
                self.add_weatherdata(wd)
Read EPW weather data from path. Args: path (str): path to read weather data from
juraj-google-style
def verbose_ping(dest_addr: str, count: int = 4, *args, **kwargs):
    """Send pings to destination address with the given timeout and display
    the result.

    Args:
        dest_addr: The destination address. Ex. "192.168.1.1"/"example.com"
        count: How many pings should be sent. Default is 4, same as
            Windows CMD.
        *args and **kwargs: All the other arguments available in ping()
            except `seq`.

    Returns:
        None. Formatted ping results are printed.
    """
    timeout = kwargs.get("timeout")
    src = kwargs.get("src")
    # Default display unit is milliseconds; setdefault so ping() sees it too.
    unit = kwargs.setdefault("unit", "ms")
    for i in range(count):
        output_text = "ping '{}'".format(dest_addr)
        output_text += " from '{}'".format(src) if src else ""
        output_text += " ... "
        print(output_text, end="")
        delay = ping(dest_addr, seq=i, *args, **kwargs)
        if delay is None:
            # No reply within the timeout (if any was configured).
            print("Timeout > {}s".format(timeout) if timeout else "Timeout")
        else:
            print("{value}{unit}".format(value=int(delay), unit=unit))
Send pings to destination address with the given timeout and display the result. Args: dest_addr: The destination address. Ex. "192.168.1.1"/"example.com" count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4) *args and **kwargs: And all the other arguments available in ping() except `seq`. Returns: Formatted ping results printed.
juraj-google-style
def Parse(self, conditions, host_data):
    """Runs methods that evaluate whether collected host_data has an issue.

    Args:
        conditions: A list of conditions to determine which Methods to
            trigger.
        host_data: A map of artifacts and rdf data.

    Returns:
        A CheckResult populated with Anomalies if an issue exists.
    """
    result = CheckResult(check_id=self.check_id)
    # Only run the methods selected by the given conditions.
    methods = self.SelectChecks(conditions)
    result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods])
    return result
Runs methods that evaluate whether collected host_data has an issue. Args: conditions: A list of conditions to determine which Methods to trigger. host_data: A map of artifacts and rdf data. Returns: A CheckResult populated with Anomalies if an issue exists.
juraj-google-style
def _GetBcastSubshape(subscripts):
    """Returns a tuple denoting the slice mapping to ellipsis.

    For a given subscript, returns a tuple (start, end) denoting the start
    axis index and the (negative) end axis index respectively. For any
    input Tensor `x` described by the subscript, `x[start:end]` would be
    the slice represented by the ellipsis. E.g. for `ab...cd` returns
    `(1, -2)`. If ellipsis is not present in `subscripts`, returns (0, 0).

    Args:
        subscripts: A string denoting the einsum subscript.
    """
    start = subscripts.find(ellipsis)
    if start == -1:
        return (0, 0)
    # Number of subscript labels after the ellipsis; a trailing ellipsis
    # maps to end=None so the slice runs to the end of the shape.
    tail = len(subscripts) - (start + len(ellipsis))
    return (start, -tail if tail > 0 else None)
Returns a tuple denoting the slice mapping to ellipsis. For a given subscript, returns a tuple (start, end) denoting the start axis index and the (negative) end axis index respectively. For any input Tensor `x` described by the subscript, `x[start:end]` would be the slice represented by the ellipsis. E.g. For `ab...cd` returns `[1, -2]`. If ellipsis is not present in `subscripts`, returns `(0, 0)`. Args: subscripts: A string denoting the einsum subscript.
github-repos
def get_op(self, id: str, **kwargs: str) -> dict:
    """Queries the ESI by looking up an operation id.

    Endpoints are cached, so calls to this method for the same op and args
    will return the data from the cache instead of making the API call.

    Args:
        id: operation id
        kwargs: data to populate the endpoint's URL variables

    Returns:
        ESI data
    """
    # Resolve the operation id to its concrete URL path, then fetch.
    path = self._get_path_for_op_id(id)
    return self.get_path(path, kwargs)
Queries the ESI by looking up an operation id. Endpoints are cached, so calls to this method for the same op and args will return the data from the cache instead of making the API call. Args: id: operation id kwargs: data to populate the endpoint's URL variables Returns: ESI data
juraj-google-style
def json_get_fields(recipe, path=None):
    """Recursively finds fields in script JSON and returns them as a list.

    A field has the format:
        { "field": { "name": "???", "kind": "???", "default": ???,
                     "description": "???" } }

    Args:
        recipe: (dict) A dictionary representation of the JSON script.
        path: (list) Stack that keeps track of recursion depth. Not used
            externally.

    Returns:
        fields: (list or dict) At the top level (path empty), a list of
        field dicts sorted by their 'order' key; during recursion, a dict
        keyed by field name.
    """
    # Fix: the original used the mutable default `path=[]`; a None
    # sentinel avoids the shared-mutable-default pitfall. Copy so the
    # caller's list is never mutated.
    path = [] if path is None else path[:]
    fields = {}
    if isinstance(recipe, dict):
        if 'field' in recipe:
            # A field definition node: record it keyed by name (dedupes
            # repeated references to the same field).
            fields[recipe['field']['name']] = recipe['field']
        else:
            for key, value in recipe.items():
                fields.update(json_get_fields(value, path + [key]))
    elif isinstance(recipe, (list, tuple)):
        for index, value in enumerate(recipe):
            fields.update(json_get_fields(value, path + [index]))
    if path == []:
        # Top of the recursion: return fields as a stable, ordered list.
        return sorted(fields.values(), key=lambda f: f.get('order', 0))
    return fields
Recursively finds fields in script JSON and returns them as a list. Field has format: { "field":{ "name":"???", "kind":"???", "default":???, "description":"???" }} Args: recipe: (dict) A dictionary representation of the JSON script. path: (list) Stack that keeps track of recursion depth. Not used externally. Returns: fields: (list or dictionary) A list or dictionary representing each field recipe found in the JSON.
github-repos
def attach(self, droplet_id, region):
    """Attach a Volume to a Droplet.

    Args:
        droplet_id: int - droplet id
        region: string - slug identifier for the region

    Returns:
        The API response from the volume "attach" action.
    """
    return self.get_data(
        "volumes/%s/actions/" % self.id,
        type=POST,
        params={"type": "attach", "droplet_id": droplet_id, "region": region}
    )
Attach a Volume to a Droplet. Args: droplet_id: int - droplet id region: string - slug identifier for the region
juraj-google-style
def md5sum(string):
    """Generate the md5 checksum for a string.

    Args:
        string (str): The string to be checksummed.

    Returns:
        str: The hex checksum.
    """
    digest = hashlib.md5(string.encode('utf-8'))
    return digest.hexdigest()
Generate the md5 checksum for a string Args: string (Str): The string to be checksummed. Returns: (Str): The hex checksum.
codesearchnet
def camel_to_snake(name):
    """Converts CamelCase to snake_case.

    Args:
        name (string): The name to convert from CamelCase to snake_case.

    Returns:
        string: Converted string.
    """
    # First pass: split an uppercase-then-lowercase run off its prefix
    # (handles acronyms, e.g. "HTTPResponse" -> "HTTP_Response").
    partial = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    # Second pass: split a lowercase/digit followed by an uppercase letter.
    snake = re.sub("([a-z0-9])([A-Z])", r"\1_\2", partial)
    return snake.lower()
Converts CamelCase to snake_case. Args: name (string): The name to convert from CamelCase to snake_case. Returns: string: Converted string.
juraj-google-style
def _prepare_socket_file(self, socket_path, default_prefix):
    """Prepare the socket file for raylet and plasma.

    1. Make the directory if the directory does not exist.
    2. If the socket file exists, raise an exception.

    Args:
        socket_path (string): the socket file to prepare; may be None.
        default_prefix (string): prefix for a generated temp socket file
            when no path is given.

    Returns:
        string: the prepared socket file path.
    """
    if socket_path is not None:
        if os.path.exists(socket_path):
            raise Exception("Socket file {} exists!".format(socket_path))
        socket_dir = os.path.dirname(socket_path)
        try_to_create_directory(socket_dir)
        return socket_path
    # No path supplied: allocate a unique file under the sockets dir.
    return self._make_inc_temp(
        prefix=default_prefix, directory_name=self._sockets_dir)
Prepare the socket file for raylet and plasma. This method helps to prepare a socket file. 1. Make the directory if the directory does not exist. 2. If the socket file exists, raise exception. Args: socket_path (string): the socket file to prepare.
juraj-google-style
def _CompletionsFromArgs(fn_args): completions = [] for arg in fn_args: arg = arg.replace('_', '-') completions.append(f'--{arg}') return completions
Takes a list of fn args and returns a list of the fn's completion strings. Args: fn_args: A list of the args accepted by a function. Returns: A list of possible completion strings for that function.
github-repos
def _split_generators(self, dl_manager):
    """Return the test split of Cifar10.

    Args:
        dl_manager: download manager object.

    Returns:
        list: a single-element list holding the test SplitGenerator.
    """
    path = dl_manager.download_and_extract(_DOWNLOAD_URL)
    return [tfds.core.SplitGenerator(name=tfds.Split.TEST, num_shards=1, gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})]
Return the test split of Cifar10. Args: dl_manager: download manager object. Returns: test split.
codesearchnet
def get_or_create_direct_channel(cls, initiator_key, receiver_key):
    """Creates (or reuses) a direct messaging channel between two users.

    Args:
        initiator_key: key of the user who wants to make first contact.
        receiver_key: key of the other party.

    Returns:
        tuple: (Channel, receiver_name)
    """
    # The channel code name is the concatenation of both user keys; look
    # for it in either order since either user may have created it.
    existing = cls.objects.OR().filter(code_name=('%s_%s' % (initiator_key, receiver_key))).filter(code_name=('%s_%s' % (receiver_key, initiator_key)))
    receiver_name = UserModel.objects.get(receiver_key).full_name
    if existing:
        channel = existing[0]
    else:
        channel_name = ('%s_%s' % (initiator_key, receiver_key))
        # typ=10 marks a direct channel (vs. group/public channels).
        channel = cls(is_direct=True, code_name=channel_name, typ=10).blocking_save()
    # Subscribe both parties; each subscription is named after the OTHER
    # user so the channel displays the counterpart's name.
    with BlockSave(Subscriber):
        Subscriber.objects.get_or_create(channel=channel, user_id=initiator_key, name=receiver_name)
        Subscriber.objects.get_or_create(channel=channel, user_id=receiver_key, name=UserModel.objects.get(initiator_key).full_name)
    return (channel, receiver_name)
Creates a direct messaging channel between two user Args: initiator: User, who want's to make first contact receiver: User, other party Returns: (Channel, receiver_name)
codesearchnet
def to_json_string(self, use_diff: bool=True, ignore_metadata: bool=False) -> str:
    """Serializes this instance to a JSON string.

    Args:
        use_diff (`bool`, *optional*, defaults to `True`): If set to
            `True`, only the difference between the config instance and
            the default config is serialized to JSON string.
        ignore_metadata (`bool`, *optional*, defaults to `False`):
            Whether to ignore the metadata fields present in the instance.

    Returns:
        `str`: String containing all the attributes that make up this
        configuration instance in JSON format.
    """
    if use_diff is True:
        # Only attributes that differ from the defaults.
        config_dict = self.to_diff_dict()
    else:
        config_dict = self.to_dict()
    if ignore_metadata:
        for metadata_field in METADATA_FIELDS:
            config_dict.pop(metadata_field, None)

    def convert_keys_to_string(obj):
        # JSON object keys must be strings; coerce recursively.
        if isinstance(obj, dict):
            return {str(key): convert_keys_to_string(value) for key, value in obj.items()}
        elif isinstance(obj, list):
            return [convert_keys_to_string(item) for item in obj]
        else:
            return obj

    def convert_dataclass_to_dict(obj):
        # Dataclass values are not JSON-serializable as-is; expand them
        # via their own to_dict().
        if isinstance(obj, dict):
            return {key: convert_dataclass_to_dict(value) for key, value in obj.items()}
        elif is_dataclass(obj):
            return obj.to_dict()
        else:
            return obj

    config_dict = convert_keys_to_string(config_dict)
    config_dict = convert_dataclass_to_dict(config_dict)
    return json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
Serializes this instance to a JSON string. Args: use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `GenerationConfig()` is serialized to JSON string. ignore_metadata (`bool`, *optional*, defaults to `False`): Whether to ignore the metadata fields present in the instance Returns: `str`: String containing all the attributes that make up this configuration instance in JSON format.
github-repos
def hertz_to_octave(freq: Union[float, np.ndarray], tuning: Optional[float]=0.0, bins_per_octave: Optional[int]=12):
    """Convert frequency from hertz to fractional octave numbers.

    Adapted from *librosa*.

    Args:
        freq (`float` or `np.ndarray`): The frequency, or multiple
            frequencies, in hertz (Hz).
        tuning (`float`, defaults to `0.`): Tuning deviation from the
            Stuttgart pitch (A440) in (fractional) bins per octave.
        bins_per_octave (`int`, defaults to `12`): Number of bins per
            octave.

    Returns:
        `float` or `np.ndarray`: The frequencies on the octave scale.
    """
    # Tuning shifts the A440 reference by `tuning` fractional bins.
    reference_pitch = 440.0 * 2.0 ** (tuning / bins_per_octave)
    # Octave 0 corresponds to the reference pitch divided by 16
    # (i.e. four octaves below A440).
    return np.log2(freq / (reference_pitch / 16))
Convert frequency from hertz to fractional octave numbers. Adapted from *librosa*. Args: freq (`float` or `np.ndarray`): The frequency, or multiple frequencies, in hertz (Hz). tuning (`float`, defaults to `0.`): Tuning deviation from the Stuttgart pitch (A440) in (fractional) bins per octave. bins_per_octave (`int`, defaults to `12`): Number of bins per octave. Returns: `float` or `np.ndarray`: The frequencies on the octave scale.
github-repos
def verify_calling_thread(self, should_be_emulation, message=None):
    """Verify if the calling thread is or is not the emulation thread.

    Args:
        should_be_emulation (bool): True if this call should be taking
            place on the emulation thread, False if it must not take
            place on the emulation thread.
        message (str): Optional message to include when raising the
            exception. Otherwise a generic message is used.

    Raises:
        InternalError: When called from the wrong thread.
    """
    # Fast path: the caller is on the expected side; nothing to do.
    if (should_be_emulation == self._on_emulation_thread()):
        return
    if (message is None):
        message = 'Operation performed on invalid thread'
    raise InternalError(message)
Verify if the calling thread is or is not the emulation thread. This method can be called to make sure that an action is being taken in the appropriate context such as not blocking the event loop thread or modifying an emulate state outside of the event loop thread. If the verification fails an InternalError exception is raised, allowing this method to be used to protect other methods from being called in a context that could deadlock or cause race conditions. Args: should_be_emulation (bool): True if this call should be taking place on the emulation, thread, False if it must not take place on the emulation thread. message (str): Optional message to include when raising the exception. Otherwise a generic message is used. Raises: InternalError: When called from the wrong thread.
codesearchnet
def setReplicationPolicy(self, pid, policy, serialVersion, vendorSpecific=None):
    """Set the replication policy for an object.

    See Also: setReplicationPolicyResponse()

    Args:
        pid: object identifier.
        policy: replication policy to apply.
        serialVersion: serial version of the system metadata.
        vendorSpecific: optional vendor-specific request parameters.

    Returns:
        bool: True if the call succeeded.
    """
    response = self.setReplicationPolicyResponse(
        pid, policy, serialVersion, vendorSpecific
    )
    return self._read_boolean_response(response)
See Also: setReplicationPolicyResponse() Args: pid: policy: serialVersion: vendorSpecific: Returns:
juraj-google-style
def rate_to_mcs(rate, bw=20, long_gi=True):
    """Convert bit rate to MCS index.

    Args:
        rate (float): bit rate in Mbps
        bw (int): bandwidth, 20, 40, 80, ...
        long_gi (bool): True if long GI is used.

    Returns:
        mcs (int): MCS index

    >>> rate_to_mcs(120, bw=40, long_gi=False)
    5
    """
    if (bw not in [20, 40, 80, 160]):
        raise Exception(('Unknown bandwidth: %d MHz' % bw))
    # Column index into MCS_TABLE rows: bw 20->0, 40->2, 80->4, 160->6
    # for long GI; short GI uses the adjacent (next) column.
    idx = int(((math.log((bw / 10), 2) - 1) * 2))
    if (not long_gi):
        idx += 1
    for (mcs, rates) in MCS_TABLE.items():
        # Float-tolerant comparison of the tabulated rate.
        if (abs((rates[idx] - rate)) < 0.001):
            return mcs
    # Fall back to legacy 802.11a rates (index doubles as the MCS value).
    for (idx, r) in enumerate(DOT11A_RATES):
        if (abs((r - rate)) < 0.001):
            return idx
    raise Exception(('MCS not found: rate=%f, bw=%d, long_gi=%s' % (rate, bw, long_gi)))
Convert bit rate to MCS index. Args: rate (float): bit rate in Mbps bw (int): bandwidth, 20, 40, 80, ... long_gi (bool): True if long GI is used. Returns: mcs (int): MCS index >>> rate_to_mcs(120, bw=40, long_gi=False) 5
codesearchnet
def _local_var_name(splittable_dimensions, assignment): assignment_string = [] for splittable in sorted(splittable_dimensions): if (splittable in assignment): assignment_string.append('{}:{}'.format(splittable, assignment[splittable])) else: assignment_string.append('{}'.format(splittable)) return (('y_(' + ','.join(assignment_string)) + ')')
Name for a local variable. Args: splittable_dimensions: frozenset of names of splittable dimensions. assignment: dict from names of splittable dimensions to names of mesh dimensions. Returns: A string, the variable name.
codesearchnet
def MergeAttributeContainers(self, callback=None, maximum_number_of_containers=0):
    """Reads attribute containers from a task storage file into the writer.

    Args:
        callback (function[StorageWriter, AttributeContainer]): function
            to call after each attribute container is deserialized.
        maximum_number_of_containers (Optional[int]): maximum number of
            containers to merge, where 0 represents no limit.

    Returns:
        bool: True if the entire task storage file has been merged.

    Raises:
        ValueError: if the maximum number of containers is a negative
            value.
        OSError: if the task storage file cannot be deleted.
    """
    if (maximum_number_of_containers < 0):
        raise ValueError('Invalid maximum number of containers')
    if (not self._cursor):
        # First call: open the file and enumerate container types to merge.
        self._Open()
        self._ReadStorageMetadata()
        self._container_types = self._GetContainerTypes()
    number_of_containers = 0
    # Iterate until both the current cursor and the remaining container
    # types are exhausted.
    while (self._active_cursor or self._container_types):
        if (not self._active_cursor):
            self._PrepareForNextContainerType()
        if (maximum_number_of_containers == 0):
            rows = self._active_cursor.fetchall()
        else:
            # Only fetch as many rows as the remaining budget allows.
            number_of_rows = (maximum_number_of_containers - number_of_containers)
            rows = self._active_cursor.fetchmany(size=number_of_rows)
        if (not rows):
            # Current container type finished; move on to the next.
            self._active_cursor = None
            continue
        for row in rows:
            identifier = identifiers.SQLTableIdentifier(self._active_container_type, row[0])
            if (self._compression_format == definitions.COMPRESSION_FORMAT_ZLIB):
                serialized_data = zlib.decompress(row[1])
            else:
                serialized_data = row[1]
            attribute_container = self._DeserializeAttributeContainer(self._active_container_type, serialized_data)
            attribute_container.SetIdentifier(identifier)
            if (self._active_container_type == self._CONTAINER_TYPE_EVENT_TAG):
                # Re-link the event tag to its event and drop the
                # serialization-only row identifier attribute.
                event_identifier = identifiers.SQLTableIdentifier(self._CONTAINER_TYPE_EVENT, attribute_container.event_row_identifier)
                attribute_container.SetEventIdentifier(event_identifier)
                del attribute_container.event_row_identifier
            if callback:
                callback(self._storage_writer, attribute_container)
            self._add_active_container_method(attribute_container)
            number_of_containers += 1
            if ((maximum_number_of_containers != 0) and (number_of_containers >= maximum_number_of_containers)):
                # Budget exhausted before the file was fully merged.
                return False
    # Everything merged: close and remove the task storage file.
    self._Close()
    os.remove(self._path)
    return True
Reads attribute containers from a task storage file into the writer. Args: callback (function[StorageWriter, AttributeContainer]): function to call after each attribute container is deserialized. maximum_number_of_containers (Optional[int]): maximum number of containers to merge, where 0 represent no limit. Returns: bool: True if the entire task storage file has been merged. Raises: RuntimeError: if the add method for the active attribute container type is missing. OSError: if the task storage file cannot be deleted. ValueError: if the maximum number of containers is a negative value.
codesearchnet
def update_submit_s3_uri(estimator, job_name):
    """Update the S3 URI of the framework source directory in given
    estimator.

    Args:
        estimator (sagemaker.estimator.Framework): The Framework estimator
            to update.
        job_name (str): The new job name included in the submit S3 URI.
    """
    if estimator.uploaded_code is None:
        return
    # Replace the path segment just before 'source/sourcedir.tar.gz'
    # (the old job name) with the new job name.
    pattern = r'(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)'
    submit_uri = estimator.uploaded_code.s3_prefix
    submit_uri = re.sub(pattern, job_name, submit_uri)
    script_name = estimator.uploaded_code.script_name
    estimator.uploaded_code = fw_utils.UploadedCode(submit_uri, script_name)
Updated the S3 URI of the framework source directory in given estimator. Args: estimator (sagemaker.estimator.Framework): The Framework estimator to update. job_name (str): The new job name included in the submit S3 URI Returns: str: The updated S3 URI of framework source directory
juraj-google-style
def plot_carriers(self, temp=300):
    """Plot the carrier concentration as a function of Fermi level.

    Args:
        temp: the temperature

    Returns:
        a matplotlib object
    """
    import matplotlib.pyplot as plt
    # Carrier concentration divided by cell volume; 1e-24 converts the
    # volume so the result is in cm^-3.
    plt.semilogy(self._bz.mu_steps, abs((self._bz._carrier_conc[temp] / (self._bz.vol * 1e-24))), linewidth=3.0, color='r')
    self._plot_bg_limits()
    self._plot_doping(temp)
    # Show from 0.5 eV below the VBM to 0.5 eV above the CBM.
    plt.xlim((- 0.5), (self._bz.gap + 0.5))
    plt.ylim(100000000000000.0, 1e+22)
    plt.ylabel('carrier concentration (cm-3)', fontsize=30.0)
    plt.xlabel('E-E$_f$ (eV)', fontsize=30)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    return plt
Plot the carrier concentration in function of Fermi level Args: temp: the temperature Returns: a matplotlib object
codesearchnet
def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]:
    """Generate inputs for ONNX Runtime using the reference model inputs.

    Override this to run inference with seq2seq models which have the
    encoder and decoder exported as separate ONNX files.

    Args:
        reference_model_inputs (`Mapping[str, Tensor]`): Reference inputs
            for the model.

    Returns:
        `Mapping[str, Tensor]`: The mapping holding the kwargs to provide
        to the model's forward function.
    """
    # Default implementation: pass the reference inputs through unchanged.
    return reference_model_inputs
Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq models which have the encoder and decoder exported as separate ONNX files. Args: reference_model_inputs ([`Mapping[str, Tensor]`): Reference inputs for the model. Returns: `Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function
github-repos
def check_output_despite_error(args):
    """Get output of args from command line, even if there are errors.

    Args:
        args: a list of command line args.

    Returns:
        output as string.
    """
    try:
        result = subprocess.check_output(args, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        # A failing command may still have produced output; return it.
        result = exc.output
    return result.strip()
Get output of args from command line, even if there are errors. Args: args: a list of command line args. Returns: output as string.
github-repos
def __new__(cls, *args, **kwargs):
    """Creates a new object instance and adds the private finalizer
    attributes to it.

    Arguments:
        *args, **kwargs: ignored

    Returns:
        new object instance
    """
    instance = super(_AutoFinalizedObjectBase, cls).__new__(cls)
    # Guards against the finalizer running more than once.
    instance._finalize_called = False
    return instance
Creates a new object instance and adds the private finalizer attributes to it. Returns: new object instance Arguments: * *args, **kwargs -- ignored
juraj-google-style
def from_b58check(private_key):
    """Decodes a Base58Check encoded private-key.

    Args:
        private_key (str): A Base58Check encoded private key.

    Returns:
        PrivateKey: A PrivateKey object
    """
    b58dec = base58.b58decode_check(private_key)
    # Leading byte is the WIF network version; only mainnet/testnet are
    # accepted.
    version = b58dec[0]
    assert version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION]
    # Remaining bytes are the big-endian key material.
    return PrivateKey(int.from_bytes(b58dec[1:], 'big'))
Decodes a Base58Check encoded private-key. Args: private_key (str): A Base58Check encoded private key. Returns: PrivateKey: A PrivateKey object
juraj-google-style
def _add_exac(self, variant_obj, info_dict):
    """Add the ExAC frequency to the variant.

    Args:
        variant_obj (puzzle.models.Variant)
        info_dict (dict): An INFO dictionary
    """
    exac = None
    # Different annotators use different INFO keys for the ExAC frequency;
    # the last populated key wins.
    exac_keys = ['ExAC', 'EXAC', 'ExACAF', 'EXACAF']
    for key in exac_keys:
        if info_dict.get(key):
            exac = float(info_dict[key])
    if not exac:
        # Fall back to the per-transcript annotation ("KEY:VALUE" format).
        for transcript in variant_obj.transcripts:
            exac_raw = transcript.ExAC_MAF
            if exac_raw:
                exac = float(exac_raw.split(':')[-1])
    if exac:
        variant_obj.add_frequency('ExAC', exac)
Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary
juraj-google-style
def create_nic(access_token, subscription_id, resource_group, nic_name, public_ip_id, subnet_id, location, nsg_id=None):
    """Create a network interface with an associated public ip address.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        nic_name (str): Name of the new NIC.
        public_ip_id (str): Public IP address resource id.
        subnet_id (str): Subnet resource id.
        location (str): Azure data center location. E.g. westus.
        nsg_id (str): Optional Network Security Group resource id.

    Returns:
        HTTP response. NIC JSON body.
    """
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Network/networkInterfaces/', nic_name,
                        '?api-version=', NETWORK_API])
    nic_body = {'location': location}
    # Single dynamic private-IP configuration bound to the given public IP
    # and subnet.
    ipconfig = {'name': 'ipconfig1'}
    ipc_properties = {'privateIPAllocationMethod': 'Dynamic'}
    ipc_properties['publicIPAddress'] = {'id': public_ip_id}
    ipc_properties['subnet'] = {'id': subnet_id}
    ipconfig['properties'] = ipc_properties
    properties = {'ipConfigurations': [ipconfig]}
    if (nsg_id is not None):
        properties['networkSecurityGroup'] = {'id': nsg_id}
    nic_body['properties'] = properties
    body = json.dumps(nic_body)
    return do_put(endpoint, body, access_token)
Create a network interface with an associated public ip address. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. nic_name (str): Name of the new NIC. public_ip_id (str): Public IP address resource id. subnetid (str): Subnet resource id. location (str): Azure data center location. E.g. westus. nsg_id (str): Optional Network Secruity Group resource id. Returns: HTTP response. NIC JSON body.
codesearchnet
def value_from_message(self, message):
    """Convert DateTimeMessage to a datetime.

    Args:
        message: A DateTimeMessage instance.

    Returns:
        A datetime instance; naive UTC when no time zone offset is set,
        otherwise timezone-aware.
    """
    message = super(DateTimeField, self).value_from_message(message)
    if (message.time_zone_offset is None):
        # No offset recorded: interpret milliseconds as naive UTC.
        return datetime.datetime.utcfromtimestamp((message.milliseconds / 1000.0))
    # Remove the offset (in minutes, hence * 60000 ms) so the timestamp is
    # epoch-based, then attach the offset as a tzinfo.
    milliseconds = (message.milliseconds - (60000 * message.time_zone_offset))
    timezone = util.TimeZoneOffset(message.time_zone_offset)
    return datetime.datetime.fromtimestamp((milliseconds / 1000.0), tz=timezone)
Convert DateTimeMessage to a datetime. Args: A DateTimeMessage instance. Returns: A datetime instance.
codesearchnet
def wrap_http_for_jwt_access(credentials, http):
    """Prepares an HTTP object's request method for JWT access.

    Wraps HTTP requests with logic to catch auth failures (typically
    identified via a 401 status code). In the event of failure, tries to
    refresh the token used and then retry the original request.

    Args:
        credentials: _JWTAccessCredentials, the credentials used to
            identify a service account that uses JWT access tokens.
        http: httplib2.Http, an http object to be used to make auth
            requests.
    """
    orig_request_method = http.request
    wrap_http_for_auth(credentials, http)
    # wrap_http_for_auth replaced http.request; keep both the wrapped and
    # the original version so we can pick per request below.
    authenticated_request_method = http.request

    def new_request(uri, method='GET', body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
        if ('aud' in credentials._kwargs):
            # A fixed audience was configured: use the standard
            # access-token flow, refreshing preemptively if needed.
            if ((credentials.access_token is None) or credentials.access_token_expired):
                credentials.refresh(None)
            return request(authenticated_request_method, uri, method, body, headers, redirections, connection_type)
        else:
            # No fixed audience: mint a per-request JWT whose audience is
            # the request URI without its query string.
            headers = _initialize_headers(headers)
            _apply_user_agent(headers, credentials.user_agent)
            uri_root = uri.split('?', 1)[0]
            (token, unused_expiry) = credentials._create_token({'aud': uri_root})
            headers['Authorization'] = ('Bearer ' + token)
            return request(orig_request_method, uri, method, body, clean_headers(headers), redirections, connection_type)

    http.request = new_request
    # Expose the credentials on the request method (oauth2client
    # convention).
    http.request.credentials = credentials
Prepares an HTTP object's request method for JWT access. Wraps HTTP requests with logic to catch auth failures (typically identified via a 401 status code). In the event of failure, tries to refresh the token used and then retry the original request. Args: credentials: _JWTAccessCredentials, the credentials used to identify a service account that uses JWT access tokens. http: httplib2.Http, an http object to be used to make auth requests.
codesearchnet
def token_validate_with_login(self, **kwargs):
    """Authenticate a user with a TMDb username and password.

    The user must have a verified email address and be registered on TMDb.

    Args:
        request_token: The token you generated for the user to approve.
        username: The user's username on TMDb.
        password: The user's password on TMDb.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_path('token_validate_with_login')
    response = self._GET(path, kwargs)
    # Mirror the response fields onto this object's attributes.
    self._set_attrs_to_values(response)
    return response
Authenticate a user with a TMDb username and password. The user must have a verified email address and be registered on TMDb. Args: request_token: The token you generated for the user to approve. username: The user's username on TMDb. password: The user's password on TMDb. Returns: A dict respresentation of the JSON returned from the API.
codesearchnet
def zip_file(self, app_path, app_name, tmp_path):
    """Zip the App and store it with the .tcx extension.

    Args:
        app_path (str): The path of the current project.
        app_name (str): The name of the App.
        tmp_path (str): The temp output path for the zip.
    """
    base_name = os.path.join(app_path, self.args.outdir, app_name)
    tcx_name = '{}.tcx'.format(base_name)
    # make_archive always appends .zip; rename the result to .tcx afterwards.
    archive_name = shutil.make_archive(base_name, 'zip', tmp_path, app_name)
    shutil.move(archive_name, tcx_name)
    self._app_packages.append(tcx_name)
    self.package_data['package'].append({'action': 'App Package:', 'output': tcx_name})
Zip the App with tcex extension. Args: app_path (str): The path of the current project. app_name (str): The name of the App. tmp_path (str): The temp output path for the zip.
juraj-google-style
def gzip_uncompress(data, truncated=False):
    """Uncompress gzip data.

    Args:
        data (bytes): The gzip data.
        truncated (bool): If True, the decompressor is not flushed.

    Returns:
        bytes: The inflated data.

    Raises:
        zlib.error: If the data is not valid gzip data.
    """
    engine = SimpleGzipDecompressor()
    result = engine.decompress(data)
    if truncated:
        return result
    # Flush to pick up any data buffered inside the decompressor.
    return result + engine.flush()
Uncompress gzip data. Args: data (bytes): The gzip data. truncated (bool): If True, the decompressor is not flushed. This is a convenience function. Returns: bytes: The inflated data. Raises: zlib.error
juraj-google-style
def _update_from_file(self, filename):
    """Update the existing configuration with values from a YAML file.

    Loads the file and merges its values into ``self._config`` via
    ``self._update_dict``.

    Args:
        filename (str): The path and name of the configuration file.

    Raises:
        ConfigLoadError: If the file does not exist or is a directory.
    """
    if os.path.exists(filename):
        try:
            with open(filename, 'r') as config_file:
                yaml_dict = yaml.safe_load(config_file.read())
                # An empty YAML file loads as None; nothing to merge then.
                if yaml_dict is not None:
                    self._update_dict(self._config, yaml_dict)
        except IsADirectoryError:
            raise ConfigLoadError(
                'The specified configuration file is a directory not a file')
    else:
        raise ConfigLoadError('The config file {} does not exist'.format(filename))
Helper method to update an existing configuration with the values from a file. Loads a configuration file and replaces all values in the existing configuration dictionary with the values from the file. Args: filename (str): The path and name to the configuration file.
juraj-google-style
def update_institute(self, internal_id, sanger_recipient=None, coverage_cutoff=None, frequency_cutoff=None, display_name=None, remove_sanger=None, phenotype_groups=None, group_abbreviations=None, add_groups=None):
    """Update the information for an institute.

    Args:
        internal_id(str): The internal institute id
        sanger_recipient(str): Email address to add for sanger orders
        coverage_cutoff(int): Update coverage cutoff
        frequency_cutoff(float): New frequency cutoff
        display_name(str): New display name
        remove_sanger(str): Email address of sanger recipient to be removed
        phenotype_groups(iterable(str)): New phenotype groups (HPO terms)
        group_abbreviations(iterable(str)): Abbreviations matching phenotype_groups
        add_groups(bool): If True, add to existing groups; if False, replace them

    Returns:
        updated_institute(dict)

    Raises:
        IntegrityError: If the institute, user or an HPO term does not exist.
    """
    add_groups = (add_groups or False)
    institute_obj = self.institute(internal_id)
    if (not institute_obj):
        raise IntegrityError('Institute {} does not exist in database'.format(internal_id))
    updates = {}
    updated_institute = institute_obj
    if sanger_recipient:
        user_obj = self.user(sanger_recipient)
        if (not user_obj):
            raise IntegrityError('user {} does not exist in database'.format(sanger_recipient))
        LOG.info('Updating sanger recipients for institute: {0} with {1}'.format(internal_id, sanger_recipient))
        # BUGFIX: previously pushed `remove_sanger` instead of the new recipient.
        updates['$push'] = {'sanger_recipients': sanger_recipient}
    if remove_sanger:
        LOG.info('Removing sanger recipient {0} from institute: {1}'.format(remove_sanger, internal_id))
        updates['$pull'] = {'sanger_recipients': remove_sanger}
    # BUGFIX: use setdefault so later '$set' keys do not clobber earlier ones
    # (previously passing e.g. both coverage_cutoff and display_name dropped
    # the coverage_cutoff update).
    if coverage_cutoff:
        LOG.info('Updating coverage cutoff for institute: {0} to {1}'.format(internal_id, coverage_cutoff))
        updates.setdefault('$set', {})['coverage_cutoff'] = coverage_cutoff
    if frequency_cutoff:
        LOG.info('Updating frequency cutoff for institute: {0} to {1}'.format(internal_id, frequency_cutoff))
        updates.setdefault('$set', {})['frequency_cutoff'] = frequency_cutoff
    if display_name:
        LOG.info('Updating display name for institute: {0} to {1}'.format(internal_id, display_name))
        updates.setdefault('$set', {})['display_name'] = display_name
    if phenotype_groups:
        if group_abbreviations:
            group_abbreviations = list(group_abbreviations)
        existing_groups = {}
        if add_groups:
            existing_groups = institute_obj.get('phenotype_groups', PHENOTYPE_GROUPS)
        for (i, hpo_term) in enumerate(phenotype_groups):
            hpo_obj = self.hpo_term(hpo_term)
            if (not hpo_obj):
                raise IntegrityError('Term {} does not exist'.format(hpo_term))
            description = hpo_obj['description']
            abbreviation = None
            if group_abbreviations:
                abbreviation = group_abbreviations[i]
            existing_groups[hpo_term] = {'name': description, 'abbr': abbreviation}
        updates.setdefault('$set', {})['phenotype_groups'] = existing_groups
    if updates:
        updates.setdefault('$set', {})['updated_at'] = datetime.now()
        updated_institute = self.institute_collection.find_one_and_update({'_id': internal_id}, updates, return_document=pymongo.ReturnDocument.AFTER)
        LOG.info('Institute updated')
    return updated_institute
Update the information for an institute Args: internal_id(str): The internal institute id sanger_recipient(str): Email adress to add for sanger order coverage_cutoff(int): Update coverage cutoff frequency_cutoff(float): New frequency cutoff display_name(str): New display name remove_sanger(str): Email adress for sanger user to be removed phenotype_groups(iterable(str)): New phenotype groups group_abbreviations(iterable(str)) add_groups: If groups should be added. If False replace groups Returns: updated_institute(dict)
codesearchnet
def show(self, app_path, browser=None, new='tab'):
    """Opens an app in a browser window or tab.

    Useful for testing or running Bokeh server applications on a local
    machine; should not be called for an actual deployment.

    Args:
        app_path (str): the app path to open; the part of the URL after the
            hostname:port, with leading slash.
        browser (str, optional): browser to show with (default: None)
        new (str, optional): 'tab' opens a new tab, 'window' a new window
            (default: 'tab')

    Returns:
        None
    """
    if (not app_path.startswith('/')):
        raise ValueError('app_path must start with a /')
    address_string = 'localhost'
    if ((self.address is not None) and (self.address != '')):
        address_string = self.address
    # NOTE(review): the URL literal was corrupted in this copy of the file;
    # reconstructed as http://<address>:<port><app_path> -- confirm against
    # the upstream bokeh source.
    url = 'http://{}:{}{}'.format(address_string, self.port, app_path)
    from bokeh.util.browser import view
    view(url, browser=browser, new=new)
Opens an app in a browser window or tab.

This method is useful for testing or running Bokeh server applications on a
local machine but should not be called when running Bokeh server for an
actual deployment.

Args:
    app_path (str) : the app path to open
        The part of the URL after the hostname:port, with leading slash.

    browser (str, optional) : browser to show with (default: None)
        For systems that support it, the **browser** argument allows
        specifying which browser to display in, e.g. "safari", "firefox",
        "opera", "windows-default" (see the ``webbrowser`` module
        documentation in the standard lib for more details).

    new (str, optional) : window or tab (default: "tab")
        If ``new`` is 'tab', then opens a new tab.
        If ``new`` is 'window', then opens a new window.

Returns:
    None
codesearchnet
def log_request(
    self, request: str, trim_log_values: bool = False, **kwargs: Any
) -> None:
    """Log a JSON-RPC request.

    Args:
        request: The JSON-RPC request string.
        trim_log_values: Log an abbreviated version of the request.
    """
    # Delegate to the shared logging helper at "info" level.
    return log_(
        request, request_log, "info", trim=trim_log_values, **kwargs
    )
Log a request. Args: request: The JSON-RPC request string. trim_log_values: Log an abbreviated version of the request.
juraj-google-style
def left_shift(x, y):
    """Shift the bits of an integer to the left.

    Bits are shifted to the left by appending `y` 0s at the right of `x`,
    i.e. the result equals multiplying `x` by `2**y`.

    Args:
        x: Input integer tensor.
        y: Input integer tensor.

    Returns:
        Result tensor.
    """
    if any_symbolic_tensors((x, y)):
        op = LeftShift()
        return op.symbolic_call(x, y)
    return backend.numpy.left_shift(x, y)
Shift the bits of an integer to the left. Bits are shifted to the left by appending `y` 0s at the right of `x`. Since the internal representation of numbers is in binary format, this operation is equivalent to multiplying `x` by `2**y`. Args: x: Input integer tensor. y: Input integer tensor. Returns: Result tensor.
github-repos
def run_from_ufos(self, ufos, output=(), **kwargs):
    """Run the toolchain from UFO sources.

    Args:
        ufos: List of UFO sources, as either paths (or a glob) or opened
            defcon.Font objects.
        output: List of output formats to generate ('ufo', 'otf', 'ttf').
        kwargs: Arguments passed along to save_otfs.

    Raises:
        FontmakeError: If `ufos` is neither a path/glob nor a list.
    """
    if (set(output) == {'ufo'}):
        return
    ufo_paths = []
    if isinstance(ufos, basestring):
        ufo_paths = glob.glob(ufos)
        ufos = [Font(x) for x in ufo_paths]
    elif isinstance(ufos, list):
        ufos = [(Font(x) if isinstance(x, basestring) else x) for x in ufos]
        ufo_paths = [x.path for x in ufos]
    else:
        raise FontmakeError('UFOs parameter is neither a defcon.Font object, a path or a glob, nor a list of any of these.', ufos)
    need_reload = False
    if ('otf' in output):
        self.build_otfs(ufos, **kwargs)
        need_reload = True
    if ('ttf' in output):
        # Building OTFs may have mutated the in-memory fonts; reload from disk.
        if need_reload:
            ufos = [Font(path) for path in ufo_paths]
        self.build_ttfs(ufos, **kwargs)
        need_reload = True
Run toolchain from UFO sources. Args: ufos: List of UFO sources, as either paths or opened objects. output: List of output formats to generate. kwargs: Arguments passed along to save_otfs.
codesearchnet
def StopTaskStorage(self, abort=False):
    """Removes the temporary paths for the task storage.

    The results of tasks will be lost on abort.

    Args:
        abort (bool): True to indicate the stop is issued on abort.

    Raises:
        IOError: if the storage type is not supported.
        OSError: if the storage type is not supported.
    """
    if (self._storage_type != definitions.STORAGE_TYPE_SESSION):
        raise IOError('Unsupported storage type.')

    # On abort remove directories with their contents; otherwise they are
    # expected to be empty and a plain rmdir suffices.
    remove_directory = shutil.rmtree if abort else os.rmdir
    for path in (self._merge_task_storage_path,
                 self._processed_task_storage_path,
                 self._task_storage_path):
        if os.path.isdir(path):
            remove_directory(path)

    self._merge_task_storage_path = None
    self._processed_task_storage_path = None
    self._task_storage_path = None
Removes the temporary path for the task storage. The results of tasks will be lost on abort. Args: abort (bool): True to indicate the stop is issued on abort. Raises: IOError: if the storage type is not supported. OSError: if the storage type is not supported.
codesearchnet
def extract_cookies(self, response, request, referrer_host=None):
    """Wrapped ``extract_cookies``.

    Args:
        response: An instance of :class:`.http.request.Response`.
        request: An instance of :class:`.http.request.Request`.
        referrer_host (str): A hostname or IP address of the referrer URL.
    """
    wrapped_response = HTTPResponseInfoWrapper(response)
    wrapped_request = convert_http_request(request, referrer_host)
    self._cookie_jar.extract_cookies(wrapped_response, wrapped_request)
Wrapped ``extract_cookies``.

Args:
    response: An instance of :class:`.http.request.Response`.
    request: An instance of :class:`.http.request.Request`.
    referrer_host (str): A hostname or IP address of the referrer URL.
juraj-google-style
def with_division(self, division):
    """Add a division segment.

    Args:
        division (str): Official name of an electoral division.

    Returns:
        IdBuilder

    Raises:
        ValueError: If the division is not valid for this builder.
    """
    # A missing division is stored as the empty string.
    normalized = slugify('' if division is None else division)
    self._validate_division(normalized)
    self.division = normalized
    return self
Add a division segment Args: division (str): Official name of an electoral division. Returns: IdBuilder Raises: ValueError
juraj-google-style
def select_savename(self, original_filename):
    """Select a name to save a file.

    Args:
        original_filename: Used in the dialog to display the current file
            path and name.

    Returns:
        Normalized path for the selected file name or None if no name was
        selected.
    """
    if self.edit_filetypes is None:
        self.edit_filetypes = get_edit_filetypes()
    if self.edit_filters is None:
        self.edit_filters = get_edit_filters()
    # KDE's native dialog (outside Anaconda) misbehaves with filters, so
    # they are disabled there.
    if is_kde_desktop() and not is_anaconda():
        filters = ''
        selectedfilter = ''
    else:
        filters = self.edit_filters
        selectedfilter = get_filter(self.edit_filetypes, osp.splitext(original_filename)[1])
    # Temporarily stop redirecting stdio while the modal dialog is open.
    self.redirect_stdio.emit(False)
    filename, _selfilter = getsavefilename(self, _("Save file"), original_filename, filters=filters, selectedfilter=selectedfilter, options=QFileDialog.HideNameFilterDetails)
    self.redirect_stdio.emit(True)
    if filename:
        return osp.normpath(filename)
    return None
Select a name to save a file. Args: original_filename: Used in the dialog to display the current file path and name. Returns: Normalized path for the selected file name or None if no name was selected.
juraj-google-style
def get_maskformer_resize_output_image_size(image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int]=None, size_divisor: int=0, default_to_square: bool=True, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:
    """Computes the output size given the desired size.

    Args:
        image (`np.ndarray`):
            The input image.
        size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`):
            The size of the output image.
        max_size (`int`, *optional*):
            The maximum size of the output image.
        size_divisor (`int`, *optional*, defaults to 0):
            If `size_divisor` is given, the output image size will be
            divisible by the number.
        default_to_square (`bool`, *optional*, defaults to `True`):
            Whether to default to square if no size is provided.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If unset, will
            use the inferred format from the input.

    Returns:
        `Tuple[int, int]`: The output size.
    """
    output_size = get_resize_output_image_size(input_image=image, size=size, default_to_square=default_to_square, max_size=max_size, input_data_format=input_data_format)
    if size_divisor > 0:
        # Round each dimension up to the nearest multiple of size_divisor.
        output_size = tuple(int(math.ceil(dim / size_divisor) * size_divisor) for dim in output_size)
    return output_size
Computes the output size given the desired size. Args: image (`np.ndarray`): The input image. size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`): The size of the output image. max_size (`int`, *optional*): The maximum size of the output image. size_divisor (`int`, *optional*, defaults to 0): If `size_divisor` is given, the output image size will be divisible by the number. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `Tuple[int, int]`: The output size.
github-repos
def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs):
    """Convenience method to plot data with trend lines based on polynomial fit.

    Args:
        x: Sequence of x data.
        y: Sequence of y data.
        deg (int): Degree of polynomial. Defaults to 1.
        xlabel (str): Label for x-axis.
        ylabel (str): Label for y-axis.
        \\*\\*kwargs: Keyword args passed to pretty_plot.

    Returns:
        matplotlib.pyplot object.
    """
    plt = pretty_plot(**kwargs)
    coefficients = np.polyfit(x, y, deg)
    # Sample the fitted polynomial on a dense grid spanning the data range.
    sample_x = np.linspace(min(x), max(x), 200)
    plt.plot(sample_x, np.polyval(coefficients, sample_x), 'k--', x, y, 'o')
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    return plt
Convenience method to plot data with trend lines based on polynomial fit. Args: x: Sequence of x data. y: Sequence of y data. deg (int): Degree of polynomial. Defaults to 1. xlabel (str): Label for x-axis. ylabel (str): Label for y-axis. \\*\\*kwargs: Keyword args passed to pretty_plot. Returns: matplotlib.pyplot object.
codesearchnet
def __new__(cls, obj=None, prop=None, func=None):
    """Constructor of SinonBase.

    Creates the true base object, registers it in the class-level ``_queue``
    and returns a weakref proxy to it.

    Args:
        obj: None / function / instance method / module / class.
            Inspected target.
        prop: None / string.
            Inspected target when obj contains callable things.
        func: function / instance method.
            ONLY used by stub; it will replace the original target.

    Returns:
        weakref.proxy of the new base object.
    """
    new = super(SinonBase, cls).__new__(cls)
    # __init__ is invoked explicitly because a proxy is returned instead of
    # the instance itself.
    if func:
        new.__init__(obj, prop, func)
    else:
        new.__init__(obj, prop)
    # Keep a strong reference in _queue so the proxied object stays alive.
    cls._queue.append(new)
    return weakref.proxy(new)
Constructor of SinonBase

It creates the true base object but returns a weakref proxy to it, and
stores the true base in _queue.

Args:
    obj: None / function / instance method / module / class
        Inspected target
    prop: None / string
        Inspected target when obj contains callable things
    func: function / instance method
        ONLY used by stub, it will replace the original target
Return:
    weakref proxy
juraj-google-style
def GetOutputDir(self, base_dir, config_filename):
    """Add the repack config filename onto the base output directory.

    This allows us to repack lots of different configs to the same installer
    name and still be able to distinguish them.

    Args:
        base_dir: output directory string
        config_filename: the secondary config filename string

    Returns:
        String to be used as output directory for this repack.
    """
    # Use the config's basename, stripped of its .yaml suffix, as the subdir.
    config_name = os.path.basename(config_filename).replace(".yaml", "")
    return os.path.join(base_dir, config_name)
Add the repack config filename onto the base output directory. This allows us to repack lots of different configs to the same installer name and still be able to distinguish them. Args: base_dir: output directory string config_filename: the secondary config filename string Returns: String to be used as output directory for this repack.
juraj-google-style
def __init__(self, resolver_context):
    """Initializes a file system object.

    Args:
        resolver_context (Context): a resolver context.
    """
    super(TSKPartitionFileSystem, self).__init__(resolver_context)
    # Both are populated lazily when the file system is opened.
    self._file_object = None
    self._tsk_volume = None
Initializes a file system object. Args: resolver_context (Context): a resolver context.
juraj-google-style
def _attend_over_memory(self, memory):
    """Perform multiheaded attention over `memory`.

    Args:
        memory: Current relational memory.

    Returns:
        The attended-over memory.
    """
    attention_mlp = basic.BatchApply(mlp.MLP(([self._mem_size] * self._attention_mlp_layers)))
    for _ in range(self._num_blocks):
        attended_memory = self._multihead_attention(memory)
        # Residual connection followed by layer norm, then MLP with another
        # residual connection and layer norm (transformer-style block).
        memory = basic.BatchApply(layer_norm.LayerNorm())((memory + attended_memory))
        memory = basic.BatchApply(layer_norm.LayerNorm())((attention_mlp(memory) + memory))
    return memory
Perform multiheaded attention over `memory`. Args: memory: Current relational memory. Returns: The attended-over memory.
codesearchnet
def read_local_config(cfg):
    """Parses the local config file for override values.

    Args:
        cfg (str): path to the local config file.

    Returns:
        dict object of values contained in the local config file, or an
        empty dict if the file is missing or unreadable.
    """
    try:
        if not os.path.exists(cfg):
            logger.warning(
                ('%s: local config file (%s) not found, cannot be read' %
                 (inspect.stack()[0][3], str(cfg))))
        else:
            return import_file_object(cfg)
    except IOError as e:
        logger.warning(
            ('import_file_object: %s error opening %s' % (str(e), str(cfg))))
    return {}
Parses the local config file for override values

Args:
    cfg (str): path to the local config file

Returns:
    dict object of values contained in the local config file
codesearchnet
def add_dos(self, label, dos):
    """Adds a dos for plotting.

    Args:
        label: label for the DOS. Must be unique.
        dos: Dos object
    """
    if self.zero_at_efermi:
        # Shift energies so the Fermi level sits at zero.
        energies = dos.energies - dos.efermi
    else:
        energies = dos.energies
    if self.sigma:
        densities = dos.get_smeared_densities(self.sigma)
    else:
        densities = dos.densities
    self._doses[label] = {'energies': energies,
                          'densities': densities,
                          'efermi': dos.efermi}
Adds a dos for plotting. Args: label: label for the DOS. Must be unique. dos: Dos object
juraj-google-style
def setitem(self, axis, key, value):
    """Set the column (axis=0) or row (axis=1) defined by `key` to `value`.

    Args:
        axis: 0 to set a column, 1 to set a row.
        key: The column/row label to set.
        value: The value to set it to (scalar or list-like).

    Returns:
        A new QueryCompiler.
    """
    def setitem(df, internal_indices=[]):
        # NOTE: mutable default is safe here; internal_indices is only read,
        # never mutated.
        def _setitem():
            if len(internal_indices) == 1:
                if axis == 0:
                    df[df.columns[internal_indices[0]]] = value
                else:
                    df.iloc[internal_indices[0]] = value
            else:
                if axis == 0:
                    df[df.columns[internal_indices]] = value
                else:
                    df.iloc[internal_indices] = value

        try:
            _setitem()
        except ValueError:
            # Copy the DataFrame to avoid a SettingWithCopy-style failure
            # and retry the assignment on the copy.
            df = df.copy()
            _setitem()
        return df

    # Translate the label into positional indices along the given axis.
    if axis == 0:
        numeric_indices = list(self.columns.get_indexer_for([key]))
    else:
        numeric_indices = list(self.index.get_indexer_for([key]))
    prepared_func = self._prepare_method(setitem)
    # List-like values must be applied along the full axis so each partition
    # receives its aligned slice of the value.
    if is_list_like(value):
        new_data = self.data.apply_func_to_select_indices_along_full_axis(
            axis, prepared_func, numeric_indices, keep_remaining=True
        )
    else:
        new_data = self.data.apply_func_to_select_indices(
            axis, prepared_func, numeric_indices, keep_remaining=True
        )
    return self.__constructor__(new_data, self.index, self.columns)
Set the column defined by `key` to the `value` provided. Args: key: The column name to set. value: The value to set the column to. Returns: A new QueryCompiler
juraj-google-style
def json_dict(json_data):
    """Given a dictionary or JSON string, return a dictionary.

    Args:
        json_data(dict, str): Input JSON object.

    Returns:
        A Python dictionary with the contents of the JSON object.

    Raises:
        TypeError: If the input object is not a dictionary or string.
    """
    if isinstance(json_data, dict):
        return json_data
    if isinstance(json_data, basestring):
        # Preserve key order of the incoming JSON document.
        return json.loads(json_data, object_hook=OrderedDict)
    raise TypeError(
        "'json_data' must be a dictionary or valid JSON string; "
        "received: {!r}".format(json_data))
Given a dictionary or JSON string; return a dictionary. Args: json_data(dict, str): Input JSON object. Returns: A Python dictionary with the contents of the JSON object. Raises: TypeError: If the input object is not a dictionary or string.
codesearchnet
def get_version_from_cache_dir(src_file):
    """Creates a version for a project out of the hash.

    The hash is taken from the directory of the source file.

    Args:
        src_file: The source file of the project using this function.

    Returns:
        The first 7 characters of the hash as a string, the entire hash if
        it has 7 or fewer characters, or None if the path is incorrect.
    """
    if src_file is None:
        return None

    tmp_dir = local.path(str(CFG["tmp_dir"]))
    if not tmp_dir.exists():
        return None

    cache_file = tmp_dir / src_file
    dir_hash = get_hash_of_dirs(cache_file)
    if dir_hash is None:
        return None
    hash_str = str(dir_hash)
    # Truncate long hashes to a short, git-style 7 character prefix.
    return hash_str if len(hash_str) <= 7 else hash_str[:7]
Creates a version for a project out of the hash.

The hash is taken from the directory of the source file.

Args:
    src_file: The source file of the project using this function.

Returns:
    Either the first 7 characters of the hash as a string, the entire hash
    as a string if it consists of 7 or fewer characters, or None if the
    path is incorrect.
juraj-google-style
def filter_by_pattern(self, pattern):
    """Filter the Data Collection based on a list of booleans.

    Args:
        pattern: A list of True/False values. Typically, this is a list
            with a length matching the length of the Data Collection's
            values, but it can also be a pattern to be repeated over the
            Data Collection.

    Returns:
        A new Data Collection with filtered data.
    """
    _filt_values, _filt_datetimes = self._filter_by_pattern(pattern)
    if self._enumeration is None:
        self._get_mutable_enumeration()
    # Look up the mutable collection class matching this collection's type.
    col_obj = self._enumeration['mutable'][self._collection_type]
    collection = col_obj(self.header.duplicate(), _filt_values, _filt_datetimes)
    collection._validated_a_period = self._validated_a_period
    return collection
Filter the Data Collection based on a list of booleans. Args: pattern: A list of True/False values. Typically, this is a list with a length matching the length of the Data Collections values but it can also be a pattern to be repeated over the Data Collection. Return: A new Data Collection with filtered data
juraj-google-style
def __init__(self, filename=None):
    """Init the pool from a json file.

    Args:
        filename (str, optional): if the filename is provided, proxies will
            be loaded from it.
    """
    self.idx = {'http': 0, 'https': 0}
    # NOTE(review): the URL literals below appear truncated in this copy of
    # the file (likely corrupted during extraction) -- restore the original
    # probe URLs from the upstream source.
    self.test_url = {
        'http': 'http:
        'https': 'https:
    }
    self.proxies = {'http': {}, 'https': {}}
    self.addr_list = {'http': [], 'https': []}
    # Weight decay/growth factors used when a proxy fails/succeeds.
    self.dec_ratio = 0.9
    self.inc_ratio = 1 / self.dec_ratio
    # Proxies whose weight falls below this threshold are dropped.
    self.weight_thr = 0.2
    self.logger = logging.getLogger(__name__)
    if filename is not None:
        self.load(filename)
Init the pool from a json file. Args: filename (str, optional): if the filename is provided, proxies will be load from it.
juraj-google-style
def on_the_air(self, **kwargs):
    """Get the list of TV shows that are currently on the air.

    This query looks for any TV show that has an episode with an air date
    in the next 7 days.

    Args:
        page: (optional) Minimum 1, maximum 1000.
        language: (optional) ISO 639 code.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_path('on_the_air')
    payload = self._GET(endpoint, kwargs)
    self._set_attrs_to_values(payload)
    return payload
Get the list of TV shows that are currently on the air.

This query looks for any TV show that has an episode with an air date in
the next 7 days.

Args:
    page: (optional) Minimum 1, maximum 1000.
    language: (optional) ISO 639 code.

Returns:
    A dict representation of the JSON returned from the API.
codesearchnet
def Transfer(self, wallet, from_addr, to_addr, amount, tx_attributes=None):
    """Transfer a specified amount of the NEP5Token to another address.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        from_addr (str): public address of the account to transfer the
            given amount from.
        to_addr (str): public address of the account to transfer the given
            amount to.
        amount (int): quantity to send.
        tx_attributes (list): a list of TransactionAttribute objects.

    Returns:
        tuple:
            InvocationTransaction: the transaction.
            int: the transaction fee.
            list: the neo VM evaluation stack results.
    """
    if (not tx_attributes):
        tx_attributes = []
    sb = ScriptBuilder()
    # Build the NEP-5 'transfer' invocation script.
    sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'transfer', [PromptUtils.parse_param(from_addr, wallet), PromptUtils.parse_param(to_addr, wallet), PromptUtils.parse_param(amount)])
    # Dry-run the script to obtain the transaction, its fee and VM results.
    (tx, fee, results, num_ops, engine_success) = test_invoke(sb.ToArray(), wallet, [], from_addr=from_addr, invoke_attrs=tx_attributes)
    return (tx, fee, results)
Transfer a specified amount of the NEP5Token to another address.

Args:
    wallet (neo.Wallets.Wallet): a wallet instance.
    from_addr (str): public address of the account to transfer the given amount from.
    to_addr (str): public address of the account to transfer the given amount to.
    amount (int): quantity to send.
    tx_attributes (list): a list of TransactionAttribute objects.

Returns:
    tuple:
        InvocationTransaction: the transaction.
        int: the transaction fee.
        list: the neo VM evaluation stack results.
codesearchnet
def axis_shape_dims_for_broadcast_in_dim(axis, input_shape, insert_dims):
    """Turn the `axis` argument to the arguments needed by `broadcast_in_dim`.

    Args:
        axis: single int or a tuple of ints for the axis argument; the list
            of dimensions to reduce or insert.
        input_shape: the shape of the input as a tuple of ints.
        insert_dims: `False` turns dimensions in `axis` to 1s (reduction with
            `keep_dims=True`); `True` inserts 1s according to `axis`
            (`expand_dims`).

    Returns:
        A tuple of three lists: the canonical (sorted, non-negative) axis
        list, the output shape with 1s at the axis positions, and the
        broadcast dimensions (all dimensions not in `axis`).
    """
    if axis is None:
        raise ValueError('Received `None` value for `axis`')
    axes = (axis,) if isinstance(axis, int) else axis
    if len(set(axes)) != len(axes):
        raise ValueError(f'Repeated axis in `axis`: {axes}')

    # When inserting, the result has extra dimensions for each axis entry.
    result_dims = len(input_shape) + (len(axes) if insert_dims else 0)

    canonical_axis = []
    for a in axes:
        if not -result_dims <= a < result_dims:
            raise ValueError(f'In `axis`, axis {a} is out of bounds for array of dimension {result_dims}')
        canonical_axis.append(a + result_dims if a < 0 else a)
    # Distinct negative/positive values may alias after normalization.
    if len(set(canonical_axis)) != len(canonical_axis):
        raise ValueError(f'Repeated axis in `axis`: {canonical_axis}')
    canonical_axis = sorted(canonical_axis)

    output_shape = list(input_shape)
    for i in canonical_axis:
        if insert_dims:
            output_shape.insert(i, 1)
        else:
            output_shape[i] = 1

    axis_set = set(canonical_axis)
    broadcast_dims = [d for d in range(result_dims) if d not in axis_set]
    return (canonical_axis, output_shape, broadcast_dims)
Turn the `axis` argument to the arguments needed by `broadcast_in_dim`. Args: axis: single int or a tuple of ints for the axis argument. The list of dimensions to reduce or insert. input_shape: the shape of the input as a tuple ints. insert_dims: `False` turns dimensions in `axis` to 1s (use case: reduction along `axis` with `keep_dims=True`). `True`, inserts 1s according to `axis` (use case: `expand_dims`). Returns: A tuple of three lists - The canonical value for `axis`: always a list, negative values have been resolved and values are sorted in ascending order. - The output shape: `input_shape` with 1s at the indices in `axis`, for use as the `shape` argument of `broadcast_in_dim`. - The broadcast dimensions: list of dimensions not in `axis`, for use as the `broadcast_dimensions` argument of `broadcast_in_dim`.
github-repos
def remove(self, key):
    """Remove the key from the index.

    Args:
        key (hashable): The unique identifier of a set.

    Raises:
        ValueError: If the given key does not exist.
    """
    if self.prepickle:
        key = pickle.dumps(key)
    if (key not in self.keys):
        raise ValueError('The given key does not exist')
    # Remove the key from every hashtable bucket it was inserted into.
    for (H, hashtable) in zip(self.keys[key], self.hashtables):
        hashtable.remove_val(H, key)
        # Drop buckets that became empty.
        if (not hashtable.get(H)):
            hashtable.remove(H)
    self.keys.remove(key)
Remove the key from the index. Args: key (hashable): The unique identifier of a set.
codesearchnet
def set_y_grid_info(self, y_low, y_high, num_y, yscale, yval_name):
    """Set the grid values for y.

    Args:
        y_low/y_high (float): Lowest/highest value for the axis.
        num_y (int): Number of points on the axis.
        yscale (str): Scale of the axis; 'log' or 'lin'.
        yval_name (str): Name representing the axis. See GenerateContainer
            documentation for naming options.
    """
    # Delegate to the shared axis-grid helper for the 'y' axis.
    self._set_grid_info('y', y_low, y_high, num_y, yscale, yval_name)
Set the grid values for y. Create information for the grid of y values. Args: num_y (int): Number of points on axis. y_low/y_high (float): Lowest/highest value for the axis. yscale (str): Scale of the axis. Choices are 'log' or 'lin'. yval_name (str): Name representing the axis. See GenerateContainer documentation for options for the name.
juraj-google-style
def setKeySequenceCounter(self, iKeySequenceValue):
    """Set the key sequence counter corresponding to the Thread Network master key.

    Args:
        iKeySequenceValue: key sequence value

    Returns:
        True: successful to set the key sequence
        False: fail to set the key sequence
    """
    print '%s call setKeySequenceCounter' % self.port
    print iKeySequenceValue
    try:
        cmd = WPANCTL_CMD + 'setprop Network:KeyIndex %s' % str(iKeySequenceValue)
        if self.__sendCommand(cmd)[0] != 'Fail':
            # Give the stack a moment to apply the new key index.
            time.sleep(1)
            return True
        else:
            return False
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger('setKeySequenceCounter() Error: ' + str(e))
set the Key sequence counter corresponding to Thread Network master key Args: iKeySequenceValue: key sequence value Returns: True: successful to set the key sequence False: fail to set the key sequence
juraj-google-style
def convert_persistent_value(self, shift, instruction):
    """Return converted `PersistentValueInstruction`.

    Args:
        shift(int): Offset time.
        instruction (PersistentValueInstruction): persistent value instruction.

    Returns:
        dict: Dictionary of required parameters.
    """
    return self._qobj_model(
        name='pv',
        t0=shift + instruction.start_time,
        ch=instruction.channels[0].name,
        val=instruction.command.value)
Return converted `PersistentValueInstruction`. Args: shift(int): Offset time. instruction (PersistentValueInstruction): persistent value instruction. Returns: dict: Dictionary of required parameters.
juraj-google-style
def forward(self, input):
    """Project per-expert inputs with per-expert weight matrices.

    Args:
        input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`):
            The input to the layer.

    Returns:
        `torch.FloatTensor` of shape `(B, n_models, output_dim)`.
    """
    # Batched per-expert matmul: (e, o, i) x (b, e, i) -> (b, e, o).
    projected = torch.einsum('eoi,bei->beo', self.weight, input)
    if self.bias is not None:
        # Bias is not supported by this layer.
        raise RuntimeError()
    return projected
Args: input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`): The input to the layer.
github-repos
def Svn(url, fname, to=None):
    """Checkout the SVN repo.

    Args:
        url (str): The SVN SOURCE repo.
        fname (str): The name of the repo on disk.
        to (str): The name of the TARGET folder on disk.
            Defaults to ``CFG["tmp_dir"]``.
    """
    if to is None:
        to = str(CFG["tmp_dir"])
    src_dir = local.path(to) / fname
    # If the cached checkout is still valid, just copy it into place.
    if not source_required(src_dir):
        Copy(src_dir, ".")
        return
    from benchbuild.utils.cmd import svn
    svn("co", url, src_dir)
    # Record the new checkout's hash so future calls can reuse the cache.
    update_hash(src_dir)
    Copy(src_dir, ".")
Checkout the SVN repo.

Args:
    url (str): The SVN SOURCE repo.
    fname (str): The name of the repo on disk.
    to (str): The name of the TARGET folder on disk.
        Defaults to ``CFG["tmp_dir"]``
juraj-google-style
async def _get_popular_people_page(self, page=1):
    """Get a specific page of popular person data.

    Arguments:
        page (:py:class:`int`, optional): The page to get.

    Returns:
        :py:class:`dict`: The page data.
    """
    url = self.url_builder('person/popular', url_params=OrderedDict(page=page))
    return await self.get_data(url)
Get a specific page of popular person data. Arguments: page (:py:class:`int`, optional): The page to get. Returns: :py:class:`dict`: The page data.
codesearchnet
def getConfig(self, section=None):
    """Return a dictionary which contains the current config.

    Section names containing a '/' are treated as '<parent>/<child>' and
    nested under their parent key. If ``section`` is set, only that
    section's config is returned.

    Args:
        section (str): (Optional) Section name.

    Returns:
        dict: Representation of the current config.
    """
    if section is not None:
        return dict(self.config.items(section))

    data = {}
    for s in self.config.sections():
        if '/' in s:
            parent, _s = s.split('/')
            # BUGFIX: create the parent bucket on first use instead of
            # assuming it already exists (previously raised KeyError when a
            # '<parent>/<child>' section preceded its '<parent>' section).
            data.setdefault(parent, {})[_s] = dict(self.config.items(s))
        else:
            data[s] = dict(self.config.items(s))
    return data
Returns a dictionary which contains the current config. If a section is
set, only that section's config is returned.

Args:
    section (str): (Optional) Section name.

Returns:
    dict: Representation of the current config
codesearchnet
def __init__(self, bucket, prefix, sagemaker_session):
    """Create an S3DataSource instance.

    Downloads the S3 prefix into a local temporary directory and wraps it
    in a LocalFileDataSource.

    Args:
        bucket (str): S3 bucket name.
        prefix (str): S3 prefix path to the data.
        sagemaker_session (:class:`sagemaker.session.Session`): a
            sagemaker_session with the desired settings to talk to S3.
    """
    root_dir = sagemaker.utils.get_config_value('local.container_root', sagemaker_session.config)
    if root_dir:
        root_dir = os.path.abspath(root_dir)
    working_dir = tempfile.mkdtemp(dir=root_dir)
    # On macOS /var is a symlink to /private/var; Docker requires the real
    # path when mounting volumes.
    if root_dir is None and platform.system() == 'Darwin':
        working_dir = '/private{}'.format(working_dir)
    sagemaker.utils.download_folder(bucket, prefix, working_dir, sagemaker_session)
    self.files = LocalFileDataSource(working_dir)
Create an S3DataSource instance Args: bucket (str): S3 bucket name prefix (str): S3 prefix path to the data sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker_session with the desired settings to talk to S3
juraj-google-style
def _unshard_from_sc_to_cpu(stacked_table: tensor.Tensor, from_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]) -> Sequence[tensor.Tensor]:
    """Undo the sharding of the feature tables from a SparseCore stacked table.

    Args:
        stacked_table: The value of a SparseCore stacked and sharded table.
        from_shard_layouts: The target layouts for the target hardware.

    Returns:
        The unsharded feature tables.
    """
    logging.vlog(1, 'To unshuffle_from_sc_to_cpu on stacked_table.shape: %s', stacked_table[0].shape)
    ret_tensors = []
    for layout in from_shard_layouts:
        # NOTE(review): the call below appears truncated in this copy of the
        # file (missing tokens after `size_in_shard=...` including the closing
        # parenthesis) -- restore from the upstream source.
        padded_table = tpu_embedding_v3_utils.unshuffle_from_sc_to_cpu(stacked_table[0], num_sparse_cores=layout.num_sparse_cores, offset_in_shard=layout.sparse_core_shard_row_offset, size_in_shard=layout.unsharded_padded_shape[0]
        # Strip the padding rows/cols added for SparseCore alignment.
        orig_table = tpu_embedding_v3_utils.remove_padding_from_sc(padded_table, layout.unsharded_shape)
        logging.vlog(1, 'orig_tensors.shape[%s]: %s', layout.table_name, orig_table.shape)
        ret_tensors.append(orig_table)
    return ret_tensors
Undo the sharding of the feature tables from the SparseCore stacked table.

Args:
    stacked_table: The value of a SparseCore stacked and sharded table.
    from_shard_layouts: The target layouts for the target hardware.

Returns:
    The unsharded feature tables.
github-repos
def get_policies_from_git(self):
    """Retrieve policies from the Git repo.

    Clones the configured repository into a temporary folder, reads all
    top-level ``*.json`` files as GLOBAL policies and the ``roles`` tree as
    per-account, per-role policies.

    Returns:
        :obj:`dict` of `str`: `dict` containing all the roles and policies.
    """
    fldr = mkdtemp()
    try:
        # NOTE(review): the URL literal below appears truncated in this copy
        # of the file -- restore the repository URL from the upstream source.
        url = 'https:
        policies = {'GLOBAL': {}}
        if self.dbconfig.get('git_no_ssl_verify', self.ns, False):
            os.environ['GIT_SSL_NO_VERIFY'] = '1'
        repo = Repo.clone_from(url, fldr)
        for obj in repo.head.commit.tree:
            (name, ext) = os.path.splitext(obj.name)
            if (ext == '.json'):
                policies['GLOBAL'][name] = obj.data_stream.read()
            if ((name == 'roles') and (obj.type == 'tree')):
                for account in [x for x in obj.trees]:
                    for role in [x for x in account.trees]:
                        role_policies = {policy.name.replace('.json', ''): policy.data_stream.read() for policy in role.blobs if policy.name.endswith('.json')}
                        if (account.name in policies):
                            if (role.name in policies[account.name]):
                                # NOTE(review): `dict += dict` raises TypeError;
                                # this branch looks like it intends an update()
                                # merge -- confirm against upstream.
                                policies[account.name][role.name] += role_policies
                            else:
                                policies[account.name][role.name] = role_policies
                        else:
                            policies[account.name] = {role.name: role_policies}
        return policies
    finally:
        # Always clean up the temporary clone.
        if (os.path.exists(fldr) and os.path.isdir(fldr)):
            shutil.rmtree(fldr)
Retrieve policies from the Git repo. Returns a dictionary containing all the roles and policies Returns: :obj:`dict` of `str`: `dict`
codesearchnet
def compute_digest_response(self, realm, user_name, method, uri, nonce, cnonce, qop, nc, environ):
    """Compute an RFC 2617 digest hash.

    Calculation of the A1 (HA1) part is delegated to the domain controller's
    `digest_auth_user()`.

    Args:
        realm (str):
        user_name (str):
        method (str): WebDAV request method.
        uri (str):
        nonce (str): server generated nonce value.
        cnonce (str): client generated cnonce value.
        qop (str): quality of protection.
        nc (str): nonce counter incremented by the client.
        environ: WSGI environ passed through to the domain controller.

    Returns:
        MD5 hash string, or False if the user is rejected by the domain
        controller.
    """
    def _h(data):
        return md5(compat.to_bytes(data)).hexdigest()

    def _kd(secret, data):
        return _h(secret + ':' + data)

    ha1 = self.domain_controller.digest_auth_user(realm, user_name, environ)
    if not ha1:
        return False
    ha2 = _h(method + ':' + uri)
    if qop:
        # RFC 2617 with qop: KD(HA1, nonce:nc:cnonce:qop:HA2)
        return _kd(ha1, ':'.join((nonce, nc, cnonce, qop, ha2)))
    # Legacy RFC 2069 form: KD(HA1, nonce:HA2)
    return _kd(ha1, nonce + ':' + ha2)
Computes digest hash. Calculation of the A1 (HA1) part is delegated to the dc interface method `digest_auth_user()`. Args: realm (str): user_name (str): method (str): WebDAV Request Method uri (str): nonce (str): server generated nonce value cnonce (str): client generated cnonce value qop (str): quality of protection nc (str) (number), nonce counter incremented by client Returns: MD5 hash string or False if user rejected by domain controller
codesearchnet
def _UpdateUserGroups(self, user, groups):
    """Update group membership for a Linux user.

    Args:
        user: string, the name of the Linux user account.
        groups: list, the group names to add the user as a member.

    Returns:
        bool, True if user update succeeded.
    """
    group_list = ','.join(groups)
    self.logger.debug('Updating user %s with groups %s.', user, group_list)
    command = self.usermod_cmd.format(user=user, groups=group_list)
    try:
        subprocess.check_call(command.split(' '))
    except subprocess.CalledProcessError as e:
        self.logger.warning('Could not update user %s. %s.', user, str(e))
        return False
    self.logger.debug('Updated user account %s.', user)
    return True
Update group membership for a Linux user. Args: user: string, the name of the Linux user account. groups: list, the group names to add the user as a member. Returns: bool, True if user update succeeded.
juraj-google-style
def listen_error_messages_raylet(worker, task_error_queue, threads_stopped):
    """Listen to error messages in the background on the driver.

    This runs in a separate thread on the driver and pushes (error, time)
    tuples to the output queue.

    Args:
        worker: The worker class that this thread belongs to.
        task_error_queue (queue.Queue): A queue used to communicate with the
            thread that prints the errors found by this thread.
        threads_stopped (threading.Event): A threading event used to signal
            to the thread that it should exit.
    """
    worker.error_message_pubsub_client = worker.redis_client.pubsub(ignore_subscribe_messages=True)
    error_pubsub_channel = str(ray.gcs_utils.TablePubsub.ERROR_INFO).encode('ascii')
    worker.error_message_pubsub_client.subscribe(error_pubsub_channel)
    try:
        # First drain any errors that were published before we subscribed.
        error_messages = global_state.error_messages(worker.task_driver_id)
        for error_message in error_messages:
            logger.error(error_message)
        while True:
            if threads_stopped.is_set():
                return
            msg = worker.error_message_pubsub_client.get_message()
            if (msg is None):
                # No message yet; back off briefly while staying responsive
                # to the stop event.
                threads_stopped.wait(timeout=0.01)
                continue
            gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(msg['data'], 0)
            assert (gcs_entry.EntriesLength() == 1)
            error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(gcs_entry.Entries(0), 0)
            driver_id = error_data.DriverId()
            # Ignore errors belonging to other drivers (nil == broadcast).
            if (driver_id not in [worker.task_driver_id.binary(), DriverID.nil().binary()]):
                continue
            error_message = ray.utils.decode(error_data.ErrorMessage())
            if (ray.utils.decode(error_data.Type()) == ray_constants.TASK_PUSH_ERROR):
                # Task push errors are routed to the printer thread.
                task_error_queue.put((error_message, time.time()))
            else:
                logger.error(error_message)
    finally:
        worker.error_message_pubsub_client.close()
Listen to error messages in the background on the driver. This runs in a separate thread on the driver and pushes (error, time) tuples to the output queue. Args: worker: The worker class that this thread belongs to. task_error_queue (queue.Queue): A queue used to communicate with the thread that prints the errors found by this thread. threads_stopped (threading.Event): A threading event used to signal to the thread that it should exit.
codesearchnet
def metadata(self, path):
    """Fetch metadata of a file on the FileSystem.

    This operation returns metadata as stored in the underlying FileSystem.
    It should not need to read file data to obtain this value. For web based
    file systems, this method should also incur as few as possible requests.

    Args:
        path: string path of a file.

    Returns:
        :class:`~apache_beam.io.filesystem.FileMetadata`.

    Raises:
        ``BeamIOError``: if path isn't a file or doesn't exist.
    """
    raise NotImplementedError
Fetch metadata of a file on the :class:`~apache_beam.io.filesystem.FileSystem`. This operation returns metadata as stored in the underlying FileSystem. It should not need to read file data to obtain this value. For web based file systems, this method should also incur as few as possible requests. Args: path: string path of a file. Returns: :class:`~apache_beam.io.filesystem.FileMetadata`. Raises: ``BeamIOError``: if path isn't a file or doesn't exist.
github-repos
def newick(self):
    """Newick string conversion starting at this ``Node`` object.

    Builds the string bottom-up with a post-order traversal, composing
    each internal node's representation from its children's.

    Returns:
        ``str``: Newick string conversion starting at this ``Node`` object.
    """
    node_to_str = dict()  # maps each processed node to its Newick substring
    for node in self.traverse_postorder():
        if node.is_leaf():
            if (node.label is None):
                node_to_str[node] = ''
            else:
                node_to_str[node] = str(node.label)
        else:
            out = ['(']
            for c in node.children:
                out.append(node_to_str[c])
                if (c.edge_length is not None):
                    # Render integral edge lengths without a trailing '.0'.
                    if isinstance(c.edge_length, int):
                        l_str = str(c.edge_length)
                    elif (isinstance(c.edge_length, float) and c.edge_length.is_integer()):
                        l_str = str(int(c.edge_length))
                    else:
                        l_str = str(c.edge_length)
                    out.append((':%s' % l_str))
                out.append(',')
                # The child's substring is folded into the parent; free it.
                del node_to_str[c]
            out.pop()  # drop the trailing comma after the last child
            out.append(')')
            if (node.label is not None):
                out.append(str(node.label))
            node_to_str[node] = ''.join(out)
    return node_to_str[self]
Newick string conversion starting at this ``Node`` object Returns: ``str``: Newick string conversion starting at this ``Node`` object
codesearchnet
def _prefix_from_ip_int(self, ip_int): prefixlen = self._max_prefixlen while prefixlen: if (ip_int & 1): break ip_int >>= 1 prefixlen -= 1 if (ip_int == ((1 << prefixlen) - 1)): return prefixlen else: raise NetmaskValueError('Bit pattern does not match /1*0*/')
Return prefix length from a bitwise netmask. Args: ip_int: An integer, the netmask in expanded bitwise format. Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask.
codesearchnet
def download(url, output_file=None, open_file=True, allow_overwrite=False):
    """Download a file from URL.

    Args:
        url (str): URL.
        output_file (str, optional): If given, the downloaded file is
            written to the given path; otherwise it goes into the data
            cache directory under the URL's basename.
        open_file (bool): If True, it returns an opened file stream of the
            downloaded file.
        allow_overwrite (bool): If True, it overwrites an existing file.

    Returns:
        Returns file object if open_file is True, otherwise None.
    """
    filename = url.split('/')[(- 1)]
    if (output_file is None):
        cache = os.path.join(get_data_home(), filename)
    else:
        cache = output_file
    if (os.path.exists(cache) and (not allow_overwrite)):
        logger.info('> {} already exists.'.format(cache))
        logger.info('> If you have any issue when using this file, ')
        logger.info('> manually remove the file and try download again.')
    else:
        r = request.urlopen(url)
        try:
            # Content-Length may be absent (e.g. chunked responses); fall
            # back to 0 so the progress bar is simply indeterminate.
            # (The original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            try:
                if six.PY2:
                    content_length = int(r.info().dict['content-length'])
                else:
                    content_length = int(r.info()['Content-Length'])
            except Exception:
                content_length = 0
            unit = 1000000
            # Accumulate chunks in a list and join once: repeated
            # `bytes += chunk` is quadratic for large downloads.
            chunks = []
            with tqdm(total=content_length, desc=filename, unit='B', unit_scale=True, unit_divisor=1024) as t:
                while True:
                    data = r.read(unit)
                    t.update(len(data))
                    if not data:
                        break
                    chunks.append(data)
        finally:
            # Close the response even if reading fails (original leaked it).
            r.close()
        with open(cache, 'wb') as f:
            f.write(b''.join(chunks))
    if (not open_file):
        return
    return open(cache, 'rb')
Download a file from URL. Args: url (str): URL. output_file (str, optional): If given, the downloaded file is written to the given path. open_file (bool): If True, it returns an opened file stream of the downloaded file. allow_overwrite (bool): If True, it overwrites an existing file. Returns: Returns file object if open_file is True, otherwise None.
codesearchnet
async def inspect(self, task_id: str) -> Mapping[str, Any]:
    """Return info about a task.

    Args:
        task_id: ID of the task to inspect.

    Returns:
        The JSON-decoded task description returned by the Docker API.
    """
    url = "tasks/{task_id}".format(task_id=task_id)
    return await self.docker._query_json(url, method="GET")
Return information about a task.

Args:
    task_id: The ID of the task to inspect.
juraj-google-style
def _dump_data(ground_truth_detections, images_folder_path, output_folder_path):
    """Dumps images & data from ground-truth objects into output_folder_path.

    The following are created in output_folder_path:
        images/: sub-folder for allowlisted validation images.
        ground_truth.pb: a binary proto file containing all ground-truth
            object-sets.

    Args:
        ground_truth_detections: A dict mapping image id to ground truth
            data. Output of _get_ground_truth_detections.
        images_folder_path: Validation images folder.
        output_folder_path: Folder to output files to.
    """
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)
    output_images_folder = os.path.join(output_folder_path, 'images')
    if not os.path.exists(output_images_folder):
        os.makedirs(output_images_folder)
    output_proto_file = os.path.join(output_folder_path, 'ground_truth.pb')
    ground_truth_data = evaluation_stages_pb2.ObjectDetectionGroundTruth()
    for image_dict in ground_truth_detections.values():
        # One DetectionResult per image, carrying all its ground-truth boxes.
        detection_result = ground_truth_data.detection_results.add()
        detection_result.image_id = image_dict['id']
        detection_result.image_name = image_dict['file_name']
        for detection_dict in image_dict['detections']:
            object_instance = detection_result.objects.add()
            # bbox is ordered [top, left, bottom, right] in normalized coords.
            object_instance.bounding_box.normalized_top = detection_dict['bbox'][0]
            object_instance.bounding_box.normalized_left = detection_dict['bbox'][1]
            object_instance.bounding_box.normalized_bottom = detection_dict['bbox'][2]
            object_instance.bounding_box.normalized_right = detection_dict['bbox'][3]
            object_instance.class_id = detection_dict['category_id']
        # copy2 preserves file metadata alongside the proto output.
        shutil.copy2(os.path.join(images_folder_path, image_dict['file_name']), output_images_folder)
    with open(output_proto_file, 'wb') as proto_file:
        proto_file.write(ground_truth_data.SerializeToString())
Dumps images & data from ground-truth objects into output_folder_path. The following are created in output_folder_path: images/: sub-folder for allowlisted validation images. ground_truth.pb: A binary proto file containing all ground-truth object-sets. Args: ground_truth_detections: A dict mapping image id to ground truth data. Output of _get_ground_truth_detections. images_folder_path: Validation images folder output_folder_path: folder to output files to.
github-repos
def finalize_options(self):
    """Finalize the command's options.

    Records the absolute directory containing this setup file in
    ``self.cwd`` and the path of its ``tests`` sub-directory in
    ``self.test_dir``.

    Args:
        self (CoverageCommand): the ``CoverageCommand`` instance.

    Returns:
        ``None``
    """
    here = os.path.dirname(__file__)
    self.cwd = os.path.abspath(here)
    self.test_dir = os.path.join(self.cwd, 'tests')
Finalizes the command's options. Args: self (CoverageCommand): the ``CoverageCommand`` instance Returns: ``None``
codesearchnet
class MedianTracker(BaseTracker):
    """Tracks the median of a stream of values using a quantile tracker.

    This wrapper class encapsulates a `QuantileTracker` configured
    specifically for the 0.5 quantile (median).
    """

    def __init__(self, quantile_tracker: Optional[QuantileTracker]=None):
        """Initializes the tracker.

        Args:
            quantile_tracker: An optional `QuantileTracker` instance. If not
                provided, a `BufferedSlidingQuantileTracker` with the default
                window size and q=0.5 is created.

        Raises:
            AssertionError: If the provided quantile_tracker is not
                initialized with q=0.5.
        """
        self._quantile_tracker = quantile_tracker or BufferedSlidingQuantileTracker(DEFAULT_WINDOW_SIZE, 0.5)
        assert self._quantile_tracker._q == 0.5, 'quantile_tracker must be initialized with q = 0.5'

    def push(self, x):
        """Adds a new value ``x`` to the tracked stream."""
        self._quantile_tracker.push(x)

    def get(self):
        """Returns the current median estimate."""
        return self._quantile_tracker.get()
Tracks the median of a stream of values using a quantile tracker. This wrapper class encapsulates a `QuantileTracker` configured specifically for the 0.5 quantile (median). Args: quantile_tracker: An optional `QuantileTracker` instance. If not provided, a `BufferedSlidingQuantileTracker` with a default window size 1000 and q=0.5 is created. Raises: AssertionError: If the provided quantile_tracker is not initialized with q=0.5.
github-repos
def get_indent(code: str) -> str:
    """Find the indentation of the first non-empty line in a code sample.

    Args:
        code (`str`): The code to inspect.

    Returns:
        `str`: The leading whitespace of the first line containing a
        non-whitespace character, or an empty string if there is none.
    """
    for line in code.split('\n'):
        # The original only skipped zero-length lines, then crashed with an
        # AttributeError (re.search returning None) when the first
        # non-empty line was all whitespace; skipping such lines fixes it.
        match = re.search(r'^(\s*)\S', line)
        if match is not None:
            return match.groups()[0]
    return ''
Find the indentation of the first non-empty line in a code sample.

Args:
    code (`str`): The code to inspect.

Returns:
    `str`: The leading whitespace of the first non-empty line (an empty
    string if there is none).
github-repos
def get_weights(model_hparams, vocab_size, hidden_dim=None): if (hidden_dim is None): hidden_dim = model_hparams.hidden_size num_shards = model_hparams.symbol_modality_num_shards shards = [] for i in range(num_shards): shard_size = ((vocab_size var_name = ('weights_%d' % i) shards.append(tf.get_variable(var_name, [shard_size, hidden_dim], initializer=tf.random_normal_initializer(0.0, (hidden_dim ** (- 0.5))))) if (num_shards == 1): ret = shards[0] else: ret = tf.concat(shards, 0) if (not tf.executing_eagerly()): ret = common_layers.convert_gradient_to_tensor(ret) return ret
Create or get concatenated embedding or softmax variable.

Args:
  model_hparams: HParams, model hyperparameters.
  vocab_size: int, vocabulary size.
  hidden_dim: dim of the variable. Defaults to model_hparams' hidden_size.

Returns:
  A single Tensor: the num_shards weight shards concatenated along axis 0
  (or the lone shard variable when num_shards == 1).
codesearchnet
def _GetMountpoints(only_physical=True):
    """Fetches a set of mountpoints.

    Args:
        only_physical: If True, list only mountpoints for physical devices
            (e.g. hard disks). If False, mountpoints for things such as
            memory partitions or `/dev/shm` are returned as well.

    Returns:
        A set of mountpoint paths.
    """
    include_virtual = not only_physical
    return {
        partition.mountpoint
        for partition in psutil.disk_partitions(all=include_virtual)
    }
Fetches a list of mountpoints. Args: only_physical: Determines whether only mountpoints for physical devices (e.g. hard disks) should be listed. If false, mountpoints for things such as memory partitions or `/dev/shm` will be returned as well. Returns: A set of mountpoints.
codesearchnet
def download_archive(self, id_or_uri, file_path):
    """Download the archived Golden Image capture logs for the given ID.

    Args:
        id_or_uri: ID or URI of the Golden Image.
        file_path (str): File name to save the archive as.

    Returns:
        bool: Success.
    """
    resource_id = extract_id_from_uri(id_or_uri)
    archive_uri = '{}/archive/{}'.format(self.URI, resource_id)
    return self._client.download(archive_uri, file_path)
Download the archived capture logs of the Golden Image identified by the
given attribute ID.

Args:
    id_or_uri: ID or URI of the Golden Image.
    file_path (str): File name to save the archive as.

Returns:
    bool: Success.
codesearchnet
def zopen(filename, *args, **kwargs):
    """Open a file, transparently handling bzip2 and gzip compression.

    Wraps the bz2, gzip and standard Python open functions, dispatching
    on the filename extension.

    Args:
        filename (str/Path): filename or pathlib.Path.
        *args: Standard args for Python open(..), e.g. 'r' for read,
            'w' for write.
        **kwargs: Standard kwargs for Python open(..).

    Returns:
        File-like object. Supports the `with` context.
    """
    if Path is not None and isinstance(filename, Path):
        filename = str(filename)
    ext = os.path.splitext(filename)[1].upper()
    if ext == ".BZ2":
        if PY_VERSION[0] >= 3:
            return bz2.open(filename, *args, **kwargs)
        # Python 2's BZ2File rejects text mode; strip any 't' flag.
        args = list(args)
        if args:
            args[0] = args[0].replace("t", "")
        if "mode" in kwargs:
            kwargs["mode"] = kwargs["mode"].replace("t", "")
        return bz2.BZ2File(filename, *args, **kwargs)
    if ext in (".GZ", ".Z"):
        return gzip.open(filename, *args, **kwargs)
    return io.open(filename, *args, **kwargs)
This function wraps around the bz2, gzip and standard python's open function to deal intelligently with bzipped, gzipped or standard text files. Args: filename (str/Path): filename or pathlib.Path. \*args: Standard args for python open(..). E.g., 'r' for read, 'w' for write. \*\*kwargs: Standard kwargs for python open(..). Returns: File-like object. Supports with context.
juraj-google-style
def get_requires(self, build_requires=False, private_build_requires=False):
    """Get the requirements of the variant.

    Args:
        build_requires (bool): If True, include build requirements.
        private_build_requires (bool): If True, include private build
            requirements.

    Returns:
        List of `Requirement` objects. A new list is always returned, so
        callers may mutate it without affecting the variant's attributes.
    """
    # Copy up front: the original returned self.requires itself when no
    # extra categories were requested, so callers mutating the result
    # corrupted the variant's own requirement list.
    requires = list(self.requires or [])
    if build_requires:
        requires += self.build_requires or []
    if private_build_requires:
        requires += self.private_build_requires or []
    return requires
Get the requirements of the variant. Args: build_requires (bool): If True, include build requirements. private_build_requires (bool): If True, include private build requirements. Returns: List of `Requirement` objects.
codesearchnet
def AddArguments(cls, argument_group):
    """Adds command line arguments the helper supports to an argument group.

    This function takes an argument parser or an argument group object and
    adds to it all the command line arguments this helper supports.

    Args:
        argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
            argparse group.
    """
    # Whether to append to an existing database instead of overwriting it.
    argument_group.add_argument(
        '--append', dest='append', action='store_true', default=False,
        required=cls._DEFAULT_APPEND, help=(
            'Defines whether the intention is to append to an already '
            'existing database or overwrite it. Defaults to overwrite.'))
    argument_group.add_argument(
        '--evidence', dest='evidence', type=str,
        default=cls._DEFAULT_EVIDENCE, action='store', required=False,
        help='Set the evidence field to a specific value, defaults to empty.')
    # Comma-separated field names to index, defaulting to the class constant.
    argument_group.add_argument(
        '--fields', dest='fields', type=str, action='store',
        default=cls._DEFAULT_FIELDS, help=(
            'Defines which fields should be indexed in the database.'))
    argument_group.add_argument(
        '--additional_fields', dest='additional_fields', type=str,
        action='store', default='', help=(
            'Defines extra fields to be included in the output, in addition to'
            ' the default fields, which are {0:s}.'.format(
                cls._DEFAULT_FIELDS)))
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
juraj-google-style