code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def get_inputs_outputs(signature_def):
    """Extract input and output tensor names from a SignatureDef.

    Args:
        signature_def: SignatureDef in the meta_graph_def for conversion;
            its `inputs` and `outputs` map keys to TensorInfo objects.

    Returns:
        Tuple (inputs, outputs): lists of tensor names gathered from the
        signature's input and output TensorInfo maps.
    """
    def _names(tensor_info_map):
        # Collect the .name field of every TensorInfo in the map.
        return [tensor_info_map[key].name for key in tensor_info_map]

    return _names(signature_def.inputs), _names(signature_def.outputs)
Get inputs and outputs from SignatureDef. Args: signature_def: SignatureDef in the meta_graph_def for conversion. Returns: The inputs and outputs in the graph for conversion.
github-repos
def murmur2(key):
    """Pure-python MurmurHash2 implementation.

    Based on the Java client; see org.apache.kafka.common.utils.Utils.murmur2.

    Args:
        key: if not a bytes type, encoded using default encoding

    Returns:
        int: 32-bit MurmurHash2 of the key bytes.
    """
    if isinstance(key, bytearray) or (six.PY3 and isinstance(key, bytes)):
        data = key
    else:
        data = bytearray(str(key).encode())

    length = len(data)
    seed = 0x9747b28c
    # 'm' and 'r' are mixing constants generated offline.
    m = 0x5bd1e995
    r = 24

    # Initialize the hash to a 'random' value.
    h = seed ^ length
    # Number of complete 4-byte chunks.  The loop body reads
    # data[i4] .. data[i4 + 3], so iterating over `length` itself (as the
    # truncated copy of this code did) would index past the end of the
    # buffer for any input of 2 or more bytes.
    length4 = length // 4

    for i in range(length4):
        i4 = i * 4
        k = ((data[i4 + 0] & 0xff) +
             ((data[i4 + 1] & 0xff) << 8) +
             ((data[i4 + 2] & 0xff) << 16) +
             ((data[i4 + 3] & 0xff) << 24))
        k &= 0xffffffff
        k *= m
        k &= 0xffffffff
        k ^= (k % 0x100000000) >> r  # unsigned k >>> r
        k &= 0xffffffff
        k *= m
        k &= 0xffffffff

        h *= m
        h &= 0xffffffff
        h ^= k
        h &= 0xffffffff

    # Handle the last few bytes of the input array.
    extra_bytes = length % 4
    if extra_bytes >= 3:
        h ^= (data[(length & ~3) + 2] & 0xff) << 16
        h &= 0xffffffff
    if extra_bytes >= 2:
        h ^= (data[(length & ~3) + 1] & 0xff) << 8
        h &= 0xffffffff
    if extra_bytes >= 1:
        h ^= (data[length & ~3] & 0xff)
        h &= 0xffffffff

    h *= m
    h &= 0xffffffff
    h ^= (h % 0x100000000) >> 13  # unsigned h >>> 13
    h &= 0xffffffff
    h *= m
    h &= 0xffffffff
    h ^= (h % 0x100000000) >> 15  # unsigned h >>> 15
    h &= 0xffffffff

    return h
Pure-python Murmur2 implementation. Based on java client, see org.apache.kafka.common.utils.Utils.murmur2 Args: key: if not a bytes type, encoded using default encoding Returns: MurmurHash2 of key bytearray
juraj-google-style
def coords2px(y, x):
    """Render bounding-box corner coordinates as a binary pixel mask.

    Args:
        y: indexable of 4 values where (y[0], y[1]) and (y[2], y[3]) are
            opposite corners of a bounding box (row, col ordering).
        x: image array whose first two dimensions give the mask shape.

    Returns:
        np.ndarray of shape x.shape[:2] with 1s at the four corner pixels.
    """
    row_idx = np.rint([y[0], y[0], y[2], y[2]]).astype(int)
    col_idx = np.rint([y[1], y[3], y[1], y[3]]).astype(int)
    n_rows, n_cols = x.shape[:2]
    mask = np.zeros((n_rows, n_cols))
    mask[row_idx, col_idx] = 1
    return mask
Transforming coordinates to pixels. Arguments: y : np array vector in which (y[0], y[1]) and (y[2], y[3]) are the corners of a bounding box. x : image an image Returns: Y : image of shape x.shape
juraj-google-style
def __init__(self, spin_mode="polarized", smearing="fermi_dirac:0.1 eV", algorithm=None, nband=None, fband=None, charge=0.0, comment=None):
    """Build an Electrons object describing electronic-structure settings.

    Args:
        spin_mode: Spin mode specification; coerced via SpinMode.as_spinmode.
        smearing: Smearing specification; coerced via Smearing.as_smearing.
        algorithm: Optional algorithm specification (stored as-is).
        nband: Optional number of bands.
        fband: Optional fband value.
        charge: Total charge of the system. Default is 0.
        comment: String comment for Electrons.
    """
    super().__init__()
    # Normalize string/shorthand specifications into dedicated objects.
    self.spin_mode = SpinMode.as_spinmode(spin_mode)
    self.smearing = Smearing.as_smearing(smearing)
    # Remaining options are stored verbatim.
    self.algorithm = algorithm
    self.nband = nband
    self.fband = fband
    self.charge = charge
    self.comment = comment
Constructor for Electrons object. Args: comment: String comment for Electrons charge: Total charge of the system. Default is 0.
juraj-google-style
def append(self, future):
    """Append a future to the tail of this doubly-linked list.

    Args:
        future (PlasmaObjectFuture): The future to link in; it removes
            itself from the list via its done-callback.
    """
    tail = self.tail
    future.prev = tail
    if tail is None:
        # Empty list: the new node becomes both head and tail.
        assert self.head is None
        self.head = future
    else:
        tail.next = future
    self.tail = future
    # Unlink the node as soon as the future completes.
    future.add_done_callback(self.remove)
Append an object to the linked list. Args: future (PlasmaObjectFuture): A PlasmaObjectFuture instance.
codesearchnet
def __init__(self, graph, control_inputs) -> None:
    """Create a new `_ControlDependenciesController`.

    Context manager backing `with tf.control_dependencies()` blocks.

    Args:
        graph: The graph this controller manages.
        control_inputs: Ops to add as control inputs, or None to start a
            fresh, empty set of control dependencies (which also clears
            the current control flow context).
    """
    self._graph = graph
    # None means "start a new empty stack" rather than extend the current one.
    self._new_stack = control_inputs is None
    self._control_inputs_val = [] if self._new_stack else control_inputs
    self._seen_nodes = set()
    self._old_stack = None
    self._old_control_flow_context = None
Create a new `_ControlDependenciesController`. A `_ControlDependenciesController` is the context manager for `with tf.control_dependencies()` blocks. These normally nest, as described in the documentation for `control_dependencies()`. The `control_inputs` argument list control dependencies that must be added to the current set of control dependencies. Because of uniquification the set can be empty even if the caller passed a list of ops. The special value `None` indicates that we want to start a new empty set of control dependencies instead of extending the current set. In that case we also clear the current control flow context, which is an additional mechanism to add control dependencies. Args: graph: The graph that this controller is managing. control_inputs: List of ops to use as control inputs in addition to the current control dependencies. None to indicate that the dependencies should be cleared.
github-repos
def direct_normal_radiation(self, value=9999.0):
    """Corresponds to IDD Field `direct_normal_radiation`.

    Args:
        value (float): value for IDD Field `direct_normal_radiation`,
            in Wh/m2, value >= 0.0. Missing value: 9999.0. If `value` is
            None it is stored unchecked as a missing value.

    Raises:
        ValueError: if `value` cannot be coerced to float or is negative.
    """
    if value is None:
        # None is a missing value and bypasses validation.
        self._direct_normal_radiation = None
        return
    try:
        value = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `direct_normal_radiation`'.format(value))
    if value < 0.0:
        raise ValueError('value need to be greater or equal 0.0 '
                         'for field `direct_normal_radiation`')
    self._direct_normal_radiation = value
Corresponds to IDD Field `direct_normal_radiation` Args: value (float): value for IDD Field `direct_normal_radiation` Unit: Wh/m2 value >= 0.0 Missing value: 9999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def ParseOptions(cls, options, analysis_plugin):
    """Parses and validates the sessionize analysis plugin options.

    Args:
        options (argparse.Namespace): parser options.
        analysis_plugin (SessionizeAnalysisPlugin): plugin to configure.

    Raises:
        BadConfigObject: when the plugin is of the wrong type.
        BadConfigOption: when the maximum pause value is not positive.
    """
    if not isinstance(analysis_plugin, sessionize.SessionizeAnalysisPlugin):
        raise errors.BadConfigObject(
            'Analysis plugin is not an instance of SessionizeAnalysisPlugin')

    pause_value = cls._ParseNumericOption(
        options, 'sessionize_maximumpause', default_value=10)
    if pause_value <= 0:
        raise errors.BadConfigOption(
            'Maximum pause value {0:d} is not supported. '
            'Value must be greater than 0.'.format(pause_value))

    analysis_plugin.SetMaximumPause(pause_value)
Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (OutputModule): analysis_plugin to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when a configuration parameter fails validation.
juraj-google-style
def _WriteFile(output_path, name, content):
    """Write given content to a file in a given directory.

    Args:
        output_path: The directory to store the file in.
        name: The name of the file to store the content in.
        content: The bytes to write to the file.

    Returns:
        The full path to the written file.
    """
    target = os.path.join(output_path, name)
    with open(target, 'wb') as stream:
        stream.write(content)
    return target
Write given content to a file in a given directory. Args: output_path: The directory to store the file in. name: The name of the file to store the content in. content: The content to write to the file. Returns: The full path to the written file.
juraj-google-style
def get_absolute_name(package, relative_name):
    """Join a package name and a (possibly relative) dotted name.

    The first leading dot anchors the name at `package` itself; each
    additional leading dot climbs one package level,
    e.g. foo.bar.baz + ..hello.world -> foo.bar.hello.world.

    Args:
        package: A dotted name, e.g. foo.bar.baz (may be empty).
        relative_name: A dotted name with possibly some leading dots, e.g. ..x.y

    Returns:
        The resolved absolute dotted name, or `relative_name` unchanged when
        it has more leading dots than there are package levels.
    """
    parts = package.split('.') if package else []
    stripped = relative_name.lstrip('.')
    dot_count = len(relative_name) - len(stripped)
    if dot_count > len(parts):
        # Too many leading dots: cannot climb above the root package.
        return relative_name
    kept = parts[:len(parts) + 1 - dot_count]
    if stripped:
        kept = kept + [stripped]
    return '.'.join(kept)
Joins a package name and a relative name. Args: package: A dotted name, e.g. foo.bar.baz relative_name: A dotted name with possibly some leading dots, e.g. ..x.y Returns: The relative name appended to the parent's package, after going up one level for each leading dot beyond the first. e.g. foo.bar.baz + ..hello.world -> foo.bar.hello.world The unchanged relative_name if it has too many leading dots.
juraj-google-style
def how_vulnerable(chain, blackbox_mapping, sanitiser_nodes, potential_sanitiser, blackbox_assignments, interactive, vuln_deets):
    """Check a source-to-sink chain against sanitisers and the blackbox mapping.

    Walks the chain in order; the first sanitiser hit wins, blackbox nodes are
    classified via the mapping (optionally asking the user), and
    `potential_sanitiser` only makes the result a low-confidence SANITISED.

    Args:
        chain(list(Node)): A path of nodes between source and sink.
        blackbox_mapping(dict): Map of blackbox functions to whether they
            propagate taint ('propagates' / 'does_not_propagate' lists).
        sanitiser_nodes(set): Nodes that are sanitisers for the sink.
        potential_sanitiser(Node): An if/elif node that can potentially cause
            sanitisation (p-uses are not yet tracked, hence "potentially").
        blackbox_assignments(set[AssignmentNode]): Blackbox assignments,
            including the ReturnNode's of BBorBInode's.
        interactive(bool): Whether to ask the user about unmapped blackbox
            functions.
        vuln_deets(dict): Vulnerability details; mutated in place with
            'sanitiser'/'confident'/'unknown_assignment' keys.

    Returns:
        Tuple (VulnerabilityType, interactive): the classification and the
        possibly-updated interactive flag.
    """
    for (i, current_node) in enumerate(chain):
        if (current_node in sanitiser_nodes):
            # A known sanitiser on the path: definitely sanitised.
            vuln_deets['sanitiser'] = current_node
            vuln_deets['confident'] = True
            return (VulnerabilityType.SANITISED, interactive)
        if isinstance(current_node, BBorBInode):
            if (current_node.func_name in blackbox_mapping['propagates']):
                # Known to propagate taint: keep scanning the chain.
                continue
            elif (current_node.func_name in blackbox_mapping['does_not_propagate']):
                # Known to stop taint: not a vulnerability.
                return (VulnerabilityType.FALSE, interactive)
            elif interactive:
                # Unknown blackbox function: ask the user and record the answer
                # in the mapping for future runs.
                user_says = input('Is the return value of {} with tainted argument "{}" vulnerable? ([Y]es/[N]o/[S]top asking)'.format(current_node.label, chain[(i - 1)].left_hand_side)).lower()
                if user_says.startswith('s'):
                    # Stop asking for the rest of this run.
                    interactive = False
                    vuln_deets['unknown_assignment'] = current_node
                    return (VulnerabilityType.UNKNOWN, interactive)
                if user_says.startswith('n'):
                    blackbox_mapping['does_not_propagate'].append(current_node.func_name)
                    return (VulnerabilityType.FALSE, interactive)
                # Any other answer is treated as "yes, it propagates".
                blackbox_mapping['propagates'].append(current_node.func_name)
            else:
                # Non-interactive and unmapped: classification is unknown.
                vuln_deets['unknown_assignment'] = current_node
                return (VulnerabilityType.UNKNOWN, interactive)
    if potential_sanitiser:
        # Only a potential sanitiser was seen: low-confidence sanitised.
        vuln_deets['sanitiser'] = potential_sanitiser
        vuln_deets['confident'] = False
        return (VulnerabilityType.SANITISED, interactive)
    return (VulnerabilityType.TRUE, interactive)
Iterates through the chain of nodes and checks the blackbox nodes against the blackbox mapping and sanitiser dictionary. Note: potential_sanitiser is the only hack here, it is because we do not take p-use's into account yet. e.g. we can only say potentially instead of definitely sanitised in the path_traversal_sanitised_2.py test. Args: chain(list(Node)): A path of nodes between source and sink. blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint. sanitiser_nodes(set): A set of nodes that are sanitisers for the sink. potential_sanitiser(Node): An if or elif node that can potentially cause sanitisation. blackbox_assignments(set[AssignmentNode]): set of blackbox assignments, includes the ReturnNode's of BBorBInode's. interactive(bool): determines if we ask the user about blackbox functions not in the mapping file. vuln_deets(dict): vulnerability details. Returns: A VulnerabilityType depending on how vulnerable the chain is.
codesearchnet
def resume_training(self, train_data, model_path, valid_data=None):
    """Resume training of a classifier from a saved checkpoint.

    Reloads the appropriate state_dicts via the checkpointer, then
    continues training from the restored state.

    Args:
        train_data: (X, Y) tuple of Tensors, a Dataset, or a DataLoader
            for the train split.
        model_path: path to the saved checkpoint to resume from.
        valid_data: optional (X, Y) tuple, Dataset, or DataLoader for the
            dev split.
    """
    checkpoint_state = self.checkpointer.restore(model_path)
    loss_function = self._get_loss_fn()
    self.train()
    self._train_model(train_data=train_data, loss_fn=loss_function,
                      valid_data=valid_data, restore_state=checkpoint_state)
Resumes training of a classifier by reloading the appropriate state_dicts for each model Args: train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the train split model_path: the path to the saved checkpoint for resuming training valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the dev split
juraj-google-style
def capture_image(self, device_label):
    """Request a smartcam image capture.

    Args:
        device_label (str): device label of the camera.

    Raises:
        RequestError: if the HTTP request fails.
    """
    headers = {
        'Content-Type': 'application/json',
        'Cookie': 'vid={}'.format(self._vid),
    }
    response = None
    try:
        response = requests.post(
            urls.imagecapture(self._giid, device_label), headers=headers)
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
Capture smartcam image Args: device_label (str): device label of camera
codesearchnet
def spawn_reader_writer(get_data_fn, put_data_fn):
    """Start a daemon thread that pumps data from a source into a sink.

    The thread writes every value it reads, then exits after forwarding
    the first falsey value.

    Args:
        get_data_fn: Data-reading function, called repeatedly; a falsey
            return terminates the thread (after being forwarded).
        put_data_fn: Data-writing function, called with each value read.

    Returns:
        threading.Thread: the started daemon thread.
    """
    def _pump():
        while True:
            item = get_data_fn()
            put_data_fn(item)
            if not item:
                return

    worker = threading.Thread(target=_pump)
    worker.daemon = True
    worker.start()
    return worker
Spawn a thread that reads from a data source and writes to a sink. The thread will terminate if it receives a Falsey value from the source. Args: get_data_fn: Data-reading function. Called repeatedly until it returns False-y to indicate that the thread should terminate. put_data_fn: Data-writing function. Returns: threading.Thread
juraj-google-style
def wmo(self, value=None):
    """Corresponds to IDD Field `wmo`.

    Usually a 6 digit field. Used as alpha in EnergyPlus.

    Args:
        value (str): value for IDD Field `wmo`. If `value` is None it is
            stored unchecked and treated as a missing value.

    Raises:
        ValueError: if `value` cannot be coerced to str or contains a comma.
    """
    if value is None:
        # None is a missing value and bypasses validation.
        self._wmo = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str '
                         'for field `wmo`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `wmo`')
    self._wmo = value
Corresponds to IDD Field `wmo` usually a 6 digit field. Used as alpha in EnergyPlus. Args: value (str): value for IDD Field `wmo` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def match_shortname(self, name, filled_args=None):
    """Expand a unique parameter-name prefix into the full parameter name.

    Args:
        name (str): A prefix for a parameter name.
        filled_args (list): Positional arguments already filled; the same
            number of leading parameters is excluded from matching.

    Returns:
        str: The full matching parameter name.

    Raises:
        ArgumentError: if no parameter matches the prefix, or if the
            prefix is ambiguous (matches more than one parameter).
    """
    skip = len(filled_args) if filled_args is not None else 0
    candidates = [arg for arg in self.arg_names[skip:] if arg.startswith(name)]

    if not candidates:
        raise ArgumentError('Could not convert short-name full parameter name, none could be found', short_name=name, parameters=self.arg_names)
    if len(candidates) > 1:
        raise ArgumentError('Short-name is ambiguous, could match multiple keyword parameters', short_name=name, possible_matches=candidates)
    return candidates[0]
Try to convert a prefix into a parameter name. If the result could be ambiguous or there is no matching parameter, throw an ArgumentError Args: name (str): A prefix for a parameter name filled_args (list): A list of filled positional arguments that will be removed from consideration. Returns: str: The full matching parameter name
codesearchnet
def emboss_pepstats_on_fasta(infile, outfile='', outdir='', outext='.pepstats', force_rerun=False):
    """Run EMBOSS pepstats on a FASTA file.

    Args:
        infile: Path to FASTA file.
        outfile: Name of output file without extension.
        outdir: Path to output directory.
        outext: Extension of results file, default is ".pepstats".
        force_rerun: Flag to rerun pepstats even if the output exists.

    Returns:
        str: Path to output file.
    """
    outfile = ssbio.utils.outfile_maker(inname=infile, outname=outfile,
                                        outdir=outdir, outext=outext)
    # Quote both paths so spaces survive the shell invocation.
    command = 'pepstats -sequence="{}" -outfile="{}"'.format(infile, outfile)
    ssbio.utils.command_runner(command, force_rerun_flag=force_rerun,
                               outfile_checker=outfile, silent=True)
    return outfile
Run EMBOSS pepstats on a FASTA file. Args: infile: Path to FASTA file outfile: Name of output file without extension outdir: Path to output directory outext: Extension of results file, default is ".pepstats" force_rerun: Flag to rerun pepstats Returns: str: Path to output file.
codesearchnet
def sym_has(self, path: Union[utils.KeyPath, str, int]) -> bool:
    """Returns True if a path exists in the sub-tree.

    Args:
        path: A KeyPath object or equivalence (str or int).

    Returns:
        True if the path exists in current sub-tree, otherwise False.
    """
    key_path = utils.KeyPath.from_value(path)
    return key_path.exists(self)
Returns True if a path exists in the sub-tree. Args: path: A KeyPath object or equivalence. Returns: True if the path exists in current sub-tree, otherwise False.
github-repos
def preprocess_dataset(ingested_dataset_path: str, preprocessed_dataset_path: str, base_artifact_path: str, gcp_project_id: str, region: str, dataflow_staging_root: str, beam_runner: str):
    """Preprocess the ingested raw dataset and write the result in Avro format.

    Args:
        ingested_dataset_path (str): Path to the ingested dataset.
        preprocessed_dataset_path (str): Path where the location of the
            preprocessed dataset will be recorded.
        base_artifact_path (str): Base directory for this component's artifacts.
        gcp_project_id (str): Google Cloud project to deploy the pipeline to.
        region (str): Region in which to deploy the pipeline.
        dataflow_staging_root (str): Staging directory for the Dataflow runner.
        beam_runner (str): Beam runner: DataflowRunner or DirectRunner.
    """
    # Unique, timestamped target directory for this run's output.
    timestamp = time.time()
    target_path = f'{base_artifact_path}/preprocessing/preprocessed_dataset_{timestamp}'

    # Record the target path so downstream components can locate the output.
    Path(preprocessed_dataset_path).parent.mkdir(parents=True, exist_ok=True)
    with open(preprocessed_dataset_path, 'w') as f:
        f.write(target_path)

    pipeline_options = PipelineOptions(
        runner=beam_runner,
        project=gcp_project_id,
        job_name=f'preprocessing-{int(time.time())}',
        temp_location=dataflow_staging_root,
        region=region,
        requirements_file='/requirements.txt',
        save_main_session=True)

    avro_schema = {
        'namespace': 'preprocessing.example',
        'type': 'record',
        'name': 'Sample',
        'fields': [
            {'name': 'id', 'type': 'int'},
            {'name': 'caption', 'type': 'string'},
            {'name': 'image', 'type': 'bytes'},
        ],
    }

    with beam.Pipeline(options=pipeline_options) as pipeline:
        (
            pipeline
            | 'Read input jsonlines file' >> beam.io.ReadFromText(ingested_dataset_path)
            | 'Load json' >> beam.Map(json.loads)
            | 'Filter licenses' >> beam.Filter(valid_license)
            | 'Download image from URL' >> beam.FlatMap(download_image_from_url)
            | 'Resize image' >> beam.Map(resize_image, size=IMAGE_SIZE)
            | 'Clean Text' >> beam.Map(clean_text)
            | 'Serialize Example' >> beam.Map(serialize_example)
            | 'Write to Avro files' >> beam.io.WriteToAvro(
                file_path_prefix=target_path,
                schema=avro_schema,
                file_name_suffix='.avro')
        )
Preprocess the ingested raw dataset and write the result to avro format. Args: ingested_dataset_path (str): Path to the ingested dataset preprocessed_dataset_path (str): Path to where the preprocessed dataset will be saved base_artifact_path (str): path to the base directory of where artifacts can be stored for this component. gcp_project_id (str): ID for the google cloud project to deploy the pipeline to. region (str): Region in which to deploy the pipeline. dataflow_staging_root (str): Path to staging directory for the dataflow runner. beam_runner (str): Beam runner: DataflowRunner or DirectRunner.
github-repos
def valid_scrabble_word(word):
    """Check whether `word` could be played with a full bag of Scrabble tiles.

    '?' characters are skipped; letters beyond the real tile supply fall
    back to the two blank ('_') tiles.

    Returns:
        bool: True if the word is playable, False otherwise.
    """
    bag = {'a': 9, 'b': 2, 'c': 2, 'd': 4, 'e': 12, 'f': 2, 'g': 3,
           'h': 2, 'i': 9, 'j': 1, 'k': 1, 'l': 4, 'm': 2, 'n': 6,
           'o': 8, 'p': 2, 'q': 1, 'r': 6, 's': 4, 't': 6, 'u': 4,
           'v': 2, 'w': 2, 'x': 1, 'y': 2, 'z': 1, '_': 2}
    for char in word:
        if char == '?':
            continue
        if char not in bag:
            # Not a tile at all (digit, punctuation, uppercase, ...).
            return False
        bag[char] -= 1
        if bag[char] < 0:
            # Out of real tiles for this letter; consume a blank instead.
            bag['_'] -= 1
            if bag['_'] < 0:
                return False
    return True
Checks if the input word could be played with a full bag of tiles. Returns: True or false
codesearchnet
def assignment_propagation(node):
    """Perform assignment propagation.

    Assignment propagation is not a compiler optimization as much as a
    readability optimization: if a variable name is used only once, it gets
    renamed when possible, e.g. `y = x; z = y` becomes `z = x`.

    Args:
        node: The AST to optimize.

    Returns:
        The optimized AST (modified in place and returned).
    """
    n_reads = read_counts(node)

    to_remove = []
    for succ in gast.walk(node):
        # Only consider single-target name-to-name assignments (z = y).
        if (isinstance(succ, gast.Assign) and isinstance(succ.value, gast.Name) and (len(succ.targets) == 1) and isinstance(succ.targets[0], gast.Name)):
            rhs_name = succ.value.id
            # Find the reaching definitions of the RHS name at this node.
            rhs_defs = [def_[1] for def_ in anno.getanno(succ, 'definitions_in') if (def_[0] == rhs_name)]
            # Propagate only when the defining assignment is unique, is itself
            # a name-to-name assignment (y = x), and is read exactly once.
            if ((len(rhs_defs) == 1) and isinstance(rhs_defs[0], gast.Assign) and (n_reads[rhs_defs[0]] == 1) and isinstance(rhs_defs[0].value, gast.Name) and isinstance(rhs_defs[0].targets[0], gast.Name)):
                to_remove.append(rhs_defs[0])
                succ.value = rhs_defs[0].value
    transformers.Remove(to_remove).visit(node)
    # Annotations are stale after the rewrite; drop them.
    anno.clearanno(node)
    return node
Perform assignment propagation. Assignment propagation is not a compiler optimization as much as a readability optimization. If a variable name is used only once, it gets renamed when possible e.g. `y = x; z = y` will become `z = x`. Args: node: The AST to optimize. Returns: The optimized AST.
codesearchnet
def run_inference(self, batch: Sequence[torch.Tensor], model: torch.nn.Module, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
    """Run inference on a batch of Tensors and return PredictionResults.

    The inference function stacks the list of Tensors in a vectorized
    format to optimize the inference call.

    Args:
        batch: A sequence of batchable Tensors (stacked to dimensions
            (batch_size, n_features, ...) before the model's forward()).
        model: A PyTorch model.
        inference_args: Non-batchable arguments required as inputs to the
            model's forward() function; passed through unbatched.

    Returns:
        An Iterable of type PredictionResult.
    """
    if not inference_args:
        inference_args = {}
    # Identify the model by whichever path was configured, preferring
    # the TorchScript path when present.
    model_id = self._torch_script_model_path or self._state_dict_path
    return self._inference_fn(batch, model, self._device, inference_args, model_id)
Runs inferences on a batch of Tensors and returns an Iterable of Tensor Predictions. This method stacks the list of Tensors in a vectorized format to optimize the inference call. Args: batch: A sequence of Tensors. These Tensors should be batchable, as this method will call `torch.stack()` and pass in batched Tensors with dimensions (batch_size, n_features, etc.) into the model's forward() function. model: A PyTorch model. inference_args: Non-batchable arguments required as inputs to the model's forward() function. Unlike Tensors in `batch`, these parameters will not be dynamically batched Returns: An Iterable of type PredictionResult.
github-repos
def with_rank_at_least(self, rank):
    """Returns a shape based on `self` with at least the given rank.

    Args:
        rank: An integer lower bound on the rank.

    Returns:
        This shape, when its rank is unknown or already >= `rank`.

    Raises:
        ValueError: If `self` represents a shape with rank below `rank`.
    """
    known_rank = self.rank
    if known_rank is not None and known_rank < rank:
        raise ValueError('Shape %s must have rank at least %d' % (self, rank))
    return self
Returns a shape based on `self` with at least the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at least the given rank. Raises: ValueError: If `self` does not represent a shape with at least the given `rank`.
github-repos
def vector(p1, p2):
    """Compute the 3D vector p1 - p2.

    Args:
        p1, p2: indexable objects whose COLS.XYZ slice yields the 3D
            cartesian coordinates (indices 0, 1, 2).

    Returns:
        np.ndarray: 3-vector from p1 - p2.
    """
    return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ])
compute vector between two 3D points Args: p1, p2: indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates. Returns: 3-vector from p1 - p2
codesearchnet
def extractTimes(self, inp):
    """Extract time-related information from an input string.

    Ignores any information related to the specific date, focusing on the
    time-of-day.

    Args:
        inp (str): Input string to be parsed.

    Returns:
        A list of datetime objects containing the extracted times from the
        input snippet, or an empty list if none found. Entries may be None
        for matches that could not be converted.
    """
    def handleMatch(time):
        # NOTE(review): group numbers below refer to self._timeRegex, which
        # is defined elsewhere — confirm group layout against that regex.
        relative = False
        if not time:
            return None
        elif time.group(1) == 'morning':
            h = 8
            m = 0
        elif time.group(1) == 'afternoon':
            h = 12
            m = 0
        elif time.group(1) == 'evening':
            h = 19
            m = 0
        elif time.group(4) and time.group(5):
            # Relative offset, e.g. "in three hours [and twenty minutes]".
            h, m = 0, 0

            converter = NumberService()
            try:
                diff = converter.parse(time.group(4))
            except:
                return None

            if time.group(5) == 'hours':
                h += diff
            else:
                m += diff

            # Optional second component ("and N hours/minutes").
            if time.group(6):
                converter = NumberService()
                try:
                    diff = converter.parse(time.group(7))
                except:
                    return None

                if time.group(8) == 'hours':
                    h += diff
                else:
                    m += diff
            relative = True
        else:
            # Absolute clock time, e.g. "7:30 pm".
            t = time.group(2)
            h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])

            try:
                if time.group(3) == 'pm':
                    h += 12
            except IndexError:
                pass

        if relative:
            return self.now + datetime.timedelta(hours=h, minutes=m)
        else:
            return datetime.datetime(
                self.now.year, self.now.month, self.now.day, h, m
            )

    inp = self._preprocess(inp)
    return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
Extracts time-related information from an input string. Ignores any information related to the specific date, focusing on the time-of-day. Args: inp (str): Input string to be parsed. Returns: A list of datetime objects containing the extracted times from the input snippet, or an empty list if none found.
juraj-google-style
def __init__(self, shape, min_value, max_value, alpha=0.0, beta=0.0, scope='beta', summary_labels=()):
    """Beta distribution over bounded continuous actions.

    Args:
        shape: Action shape.
        min_value: Minimum value of continuous actions (may be None).
        max_value: Maximum value of continuous actions; must exceed
            min_value when min_value is given.
        alpha: Optional distribution bias for the alpha value.
        beta: Optional distribution bias for the beta value.
        scope: Scope name for the distribution.
        summary_labels: Labels of summaries to record.
    """
    assert min_value is None or max_value > min_value
    self.shape = shape
    self.min_value = min_value
    self.max_value = max_value
    # One linear layer per distribution parameter, sized to the flattened
    # action space, with the provided biases as initial offsets.
    action_size = util.prod(self.shape)
    self.alpha = Linear(size=action_size, bias=alpha, scope='alpha', summary_labels=summary_labels)
    self.beta = Linear(size=action_size, bias=beta, scope='beta', summary_labels=summary_labels)
    super(Beta, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)
Beta distribution. Args: shape: Action shape. min_value: Minimum value of continuous actions. max_value: Maximum value of continuous actions. alpha: Optional distribution bias for the alpha value. beta: Optional distribution bias for the beta value.
juraj-google-style
def ch_start_time(self, *channels: List[Channel]) -> int:
    """Return minimum start time for supplied channels.

    Args:
        *channels: Supplied channels.

    Returns:
        int: earliest start time among the channels.
    """
    # Delegate to the underlying timeslot collection.
    timeslots = self.timeslots
    return timeslots.ch_start_time(*channels)
Return minimum start time for supplied channels. Args: *channels: Supplied channels
codesearchnet
def _get_addresses(tx):
    """Extract the from, to, and piece addresses of a SPOOL transaction.

    Args:
        tx (dict): Transaction payload with 'vins' and 'vouts' entries, as
            returned by :meth:`transactions.Transactions.get()`.

    Returns:
        Tuple([str]): Sender, receiver, and piece addresses.

    Raises:
        InvalidTransactionError: if the inputs come from more than one address.
    """
    input_addresses = {vin['address'] for vin in tx['vins']}
    if len(input_addresses) != 1:
        raise InvalidTransactionError('Transaction should have inputs from only one address {}'.format(input_addresses))

    # Order outputs by index and drop the last one; the piece address is the
    # first remaining output and the receiver is the last remaining output.
    ordered_vouts = sorted(tx['vouts'], key=lambda d: d['n'])[:-1]
    piece_address = ordered_vouts[0]['address']
    to_address = ordered_vouts[-1]['address']
    return (input_addresses.pop(), to_address, piece_address)
Checks for the from, to, and piece address of a SPOOL transaction. Args: tx (dict): Transaction payload, as returned by :meth:`transactions.Transactions.get()`. .. note:: Formats as returned by JSON-RPC API ``decoderawtransaction`` have yet to be supported. Returns: Tuple([str]): Sender, receiver, and piece addresses.
codesearchnet
def decode_spans(start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray) -> Tuple:
    """Generate answer-span probabilities from QA model start/end scores.

    Takes the output of any `ModelForQuestionAnswering`, scores every
    (start, end) pair, filters out impossible spans (end before start, or
    longer than `max_answer_len`), and returns the top-k spans.

    Args:
        start (`np.ndarray`): Individual start probabilities for each token.
        end (`np.ndarray`): Individual end probabilities for each token.
        topk (`int`): How many answer spans to extract.
        max_answer_len (`int`): Maximum allowed span length.
        undesired_tokens (`np.ndarray`): Mask of tokens that may be part of
            the answer (nonzero entries are the allowed positions).

    Returns:
        Tuple (starts, ends, scores) of aligned arrays for the kept spans.
    """
    # Ensure a leading batch axis on both score vectors.
    if start.ndim == 1:
        start = start[None]
    if end.ndim == 1:
        end = end[None]
    # Outer product: score of every (start, end) pair.
    outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
    # Zero out spans with end < start (triu) or length > max_answer_len (tril).
    candidates = np.tril(np.triu(outer), max_answer_len - 1)
    scores_flat = candidates.flatten()
    if topk == 1:
        idx_sort = [np.argmax(scores_flat)]
    elif len(scores_flat) < topk:
        idx_sort = np.argsort(-scores_flat)
    else:
        # argpartition selects the top-k cheaply; then sort just those k.
        idx = np.argpartition(-scores_flat, topk)[0:topk]
        idx_sort = idx[np.argsort(-scores_flat[idx])]
    # Recover (start, end) coordinates, dropping the batch axis.
    starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:]
    # Keep only spans whose endpoints fall on allowed token positions.
    desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero())
    starts = starts[desired_spans]
    ends = ends[desired_spans]
    scores = candidates[0, starts, ends]
    return (starts, ends, scores)
Take the output of any `ModelForQuestionAnswering` and will generate probabilities for each span to be the actual answer. In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or answer end position being before the starting position. The method supports output the k-best answer through the topk argument. Args: start (`np.ndarray`): Individual start probabilities for each token. end (`np.ndarray`): Individual end probabilities for each token. topk (`int`): Indicates how many possible answer span(s) to extract from the model output. max_answer_len (`int`): Maximum size of the answer to extract from the model's output. undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer
github-repos
def tas50(msg):
    """Decode aircraft true airspeed from a BDS 5,0 message.

    Args:
        msg (String): 28 bytes hexadecimal message (BDS50) string.

    Returns:
        int: true airspeed in knots, or None when the field is unavailable.
    """
    d = hex2bin(data(msg))
    # Bit at index 45 gates the TAS field — presumably the status bit per
    # the BDS 5,0 layout; confirm against the Mode S spec.
    if d[45] == '0':
        return None
    # 10-bit field, LSB = 2 knots.
    tas = bin2int(d[46:56]) * 2
    return tas
Aircraft true airspeed, BDS 5,0 message Args: msg (String): 28 bytes hexadecimal message (BDS50) string Returns: int: true airspeed in knots
juraj-google-style
def fetch_mim_files(api_key, mim2genes=False, mimtitles=False, morbidmap=False, genemap2=False):
    """Fetch the necessary OMIM files using an API key.

    Args:
        api_key (str): API key necessary to fetch mim data.
        mim2genes (bool): whether to fetch the mim2gene file.
        mimtitles (bool): whether to fetch the mimTitles file.
        morbidmap (bool): whether to fetch the morbidmap file.
        genemap2 (bool): whether to fetch the genemap2 file.

    Returns:
        mim_files(dict): A dictionary with the necessary files.
    """
    LOG.info("Fetching OMIM files from https://omim.org/")
    # NOTE(review): the URL literals were truncated in this copy (everything
    # from '//' onward was lost, leaving unterminated strings). These are the
    # standard OMIM download endpoints — confirm against the OMIM downloads
    # page for the current account.
    mim2genes_url = 'https://omim.org/static/omim/data/mim2gene.txt'
    mimtitles_url = 'https://data.omim.org/downloads/{0}/mimTitles.txt'.format(api_key)
    morbidmap_url = 'https://data.omim.org/downloads/{0}/morbidmap.txt'.format(api_key)
    genemap2_url = 'https://data.omim.org/downloads/{0}/genemap2.txt'.format(api_key)

    mim_files = {}
    mim_urls = {}

    # Only the explicitly requested files are downloaded.
    if mim2genes is True:
        mim_urls['mim2genes'] = mim2genes_url
    if mimtitles is True:
        mim_urls['mimtitles'] = mimtitles_url
    if morbidmap is True:
        mim_urls['morbidmap'] = morbidmap_url
    if genemap2 is True:
        mim_urls['genemap2'] = genemap2_url

    for file_name in mim_urls:
        url = mim_urls[file_name]
        mim_files[file_name] = fetch_resource(url)

    return mim_files
Fetch the necessary mim files using a api key Args: api_key(str): A api key necessary to fetch mim data Returns: mim_files(dict): A dictionary with the neccesary files
juraj-google-style
def get_varname_from_locals(val, locals_, default='varname-not-found', strict=False, cmpfunc_=operator.is_):
    """Find the name in `locals_` whose value matches `val`.

    Comparison is by identity by default, so this only works for objects,
    not primitives. If several names match, the last one wins.

    Args:
        val: some value to look up.
        locals_ (dict): local dictionary to search.
        default (str): returned when no name matches.
        strict (bool): re-raise instead of returning `default` on no match.
        cmpfunc_: binary comparison function, default `operator.is_`.

    Returns:
        str: the name bound to `val` (if it exists), else `default`.
    """
    if val is None or isinstance(val, (int, float, bool)):
        # Primitives are interned/shared, so an identity lookup is meaningless.
        return default
    try:
        for count, val_ in enumerate(six.itervalues(locals_)):
            if cmpfunc_(val, val_):
                # No break: intentionally keeps the *last* matching index.
                index_ = count
        # If nothing matched, `index_` is unbound and this line raises
        # NameError, which is caught below to yield the default.
        varname = six.text_type(list(locals_.keys())[index_])
    except NameError:
        varname = default
        if strict:
            raise
    return varname
Finds the string name which has where locals_[name] is val Check the varname is in the parent namespace This will only work with objects not primatives Args: val (): some value locals_ (dict): local dictionary to search default (str): strict (bool): Returns: str: the varname which is Val (if it exists)
juraj-google-style
def ensure_crossplat_path(path, winroot='C:'):
    r"""Normalize a path to forward slashes for cross-platform use.

    Args:
        path (str): path possibly containing backslashes.
        winroot (str): drive root whose trailing slash must be restored.

    Returns:
        str: crossplat_path

    Example:
        >>> ensure_crossplat_path(r'C:\somedir')
        'C:/somedir'
    """
    normalized = path.replace('\\', '/')
    # A bare drive root needs its trailing slash back.
    return normalized + '/' if normalized == winroot else normalized
r""" ensure_crossplat_path Args: path (str): Returns: str: crossplat_path Example(DOCTEST): >>> # ENABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = r'C:\somedir' >>> cplat_path = ensure_crossplat_path(path) >>> result = cplat_path >>> print(result) C:/somedir
juraj-google-style
def clone_with_copy(src_path, dest_path):
    """Clone a directory tree by copying it.

    Args:
        src_path: The directory to be copied.
        dest_path: The location to copy the directory to.
    """
    log.info('Cloning directory tree %s to %s', src_path, dest_path)
    shutil.copytree(src_path, dest_path)
Clone a directory try by copying it. Args: src_path: The directory to be copied. dest_path: The location to copy the directory to.
codesearchnet
def _get_version(self, root):
    """Return the parsed version of the root element passed in.

    Args:
        root (etree.Element): document root.

    Returns:
        distutils.StrictVersion

    Raises:
        UnknownVersionError: if no version information is found on the
            root element.
    """
    version = self.get_version(root)
    if not version:
        raise UnknownVersionError('Unable to determine the version of the input document. No version information found on the root element.')
    return StrictVersion(version)
Return the version of the root element passed in. Args: root (etree.Element) Returns: distutils.StrictVersion Raises: UnknownVersionError
codesearchnet
def class_logit(layer, label):
    """Objective maximizing a class logit — like channel, but for softmax layers.

    Args:
        layer: A layer name string.
        label: Either a string (referring to a label in model.labels) or an
            int label position.

    Returns:
        Objective maximizing a logit.
    """
    def inner(T):
        # Resolve a string label to its index in the model's label list.
        class_n = label if isinstance(label, int) else T("labels").index(label)
        return tf.reduce_sum(T(layer)[:, class_n])

    return inner
Like channel, but for softmax layers. Args: layer: A layer name string. label: Either a string (refering to a label in model.labels) or an int label position. Returns: Objective maximizing a logit.
juraj-google-style
def version():
    """Return the version number tuple parsed from ``dot -V``.

    Returns:
        Two or three ``int`` version ``tuple``.

    Raises:
        graphviz.ExecutableNotFound: If the Graphviz executable is not found.
        subprocess.CalledProcessError: If the exit status is non-zero.
        RuntimeError: If the output cannot be parsed into a version number.
    """
    # `dot -V` prints its banner to stderr; fold it into stdout.
    out, _ = run(['dot', '-V'], check=True,
                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    banner = out.decode('ascii')
    match = re.search('graphviz version (\\d+\\.\\d+(?:\\.\\d+)?) ', banner)
    if match is None:
        raise RuntimeError
    return tuple(int(part) for part in match.group(1).split('.'))
Return the version number tuple from the ``stderr`` output of ``dot -V``. Returns: Two or three ``int`` version ``tuple``. Raises: graphviz.ExecutableNotFound: If the Graphviz executable is not found. subprocess.CalledProcessError: If the exit status is non-zero. RuntimeError: If the output cannot be parsed into a version number.
codesearchnet
def U(data, bits=None, endian=None, target=None):
    """Unpack an unsigned pointer-sized integer for a given target.

    Dispatches to the module-level U<bits> helper (e.g. U32/U64) chosen by
    the resolved word size.

    Args:
        data(bytes): The data to unpack.
        bits: Override the default word size; if None, uses the target's.
        endian: Override the default byte order; if None, uses the target's.
        target: Target to take defaults from; if None, the global target.

    Returns:
        int: The pointer value.
    """
    return globals()[('U%d' % _get_bits(bits, target))](data, endian=endian, target=target)
Unpack an unsigned pointer for a given target. Args: data(bytes): The data to unpack. bits(:class:`pwnypack.target.Target.Bits`): Override the default word size. If ``None`` it will look at the word size of ``target``. endian(:class:`~pwnypack.target.Target.Endian`): Override the default byte order. If ``None``, it will look at the byte order of the ``target`` argument. target(:class:`~pwnypack.target.Target`): Override the default byte order. If ``None``, it will look at the byte order of the global :data:`~pwnypack.target.target`. Returns: int: The pointer value.
codesearchnet
def logical_name(self):
    """The logical name of the seat.

    This is an identifier to group sets of devices within the compositor.

    Returns:
        str: The logical name of this seat.
    """
    # libinput returns a C char*; copy the bytes out and decode to str.
    pchar = self._libinput.libinput_seat_get_logical_name(self._handle)
    return string_at(pchar).decode()
The logical name of the seat. This is an identifier to group sets of devices within the compositor. Returns: str: The logical name of this seat.
codesearchnet
def _ip_string_from_prefix(self, prefixlen=None):
    """Turn a prefix length into a dotted decimal netmask string.

    Args:
        prefixlen: An integer, the netmask prefix length, or None to use
            this object's own prefix length.

    Returns:
        A string, the dotted decimal netmask string.
    """
    # Use `is None` rather than truthiness: a prefix length of 0 is a
    # valid argument (the 0.0.0.0 mask) and must not silently fall back
    # to self._prefixlen.
    if prefixlen is None:
        prefixlen = self._prefixlen
    return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
Turn a prefix length into a dotted decimal string. Args: prefixlen: An integer, the netmask prefix length. Returns: A string, the dotted decimal netmask string.
juraj-google-style
def fill(self, name_or_slot, value):
    """Fills an output slot required by this Pipeline.

    Args:
        name_or_slot: The name of the slot (a string) or Slot record to fill.
        value: The serializable value to assign to this slot.

    Raises:
        UnexpectedPipelineError: if the output name is invalid.
        SlotNotDeclaredError: if trying to output to a slot that was not
            declared ahead of time.
    """
    # Accept either a slot name (looked up on self.outputs) or a Slot object.
    if isinstance(name_or_slot, basestring):
        slot = getattr(self.outputs, name_or_slot)
    elif isinstance(name_or_slot, Slot):
        slot = name_or_slot
    else:
        raise UnexpectedPipelineError(
            'Could not fill invalid output name: %r' % name_or_slot)

    # Slots created inside the Pipeline context have no backing record yet.
    if not slot._exists:
        raise SlotNotDeclaredError(
            'Cannot fill output with name "%s" that was just '
            'declared within the Pipeline context.' % slot.name)

    self._context.fill_slot(self._pipeline_key, slot, value)
Fills an output slot required by this Pipeline. Args: name_or_slot: The name of the slot (a string) or Slot record to fill. value: The serializable value to assign to this slot. Raises: UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError if trying to output to a slot that was not declared ahead of time.
juraj-google-style
def validate_detector(self, detector):
    """Validate a detector via the API.

    Posts the detector to the validation endpoint; `raise_for_status`
    raises on a 400 Bad Request HTTP error if the detector is invalid,
    otherwise nothing is returned or thrown.

    Args:
        detector (object): the detector model object, serialized as JSON.
    """
    url = self._u(self._DETECTOR_ENDPOINT_SUFFIX, 'validate')
    response = self._post(url, data=detector)
    response.raise_for_status()
Validate a detector. Validates the given detector; throws a 400 Bad Request HTTP error if the detector is invalid; otherwise doesn't return or throw anything. Args: detector (object): the detector model object. Will be serialized as JSON.
juraj-google-style
def GetRawKeyFunction():
    """Returns a function that reads one keypress from stdin with no echo.

    Returns:
        A function that reads one keypress from stdin with no echo, or a
        function that always returns None if stdin does not support it.
    """
    for get_raw_key_function in (_GetRawKeyFunctionPosix, _GetRawKeyFunctionWindows):
        try:
            return get_raw_key_function()
        except Exception:  # pylint: disable=broad-except
            # This platform's implementation is unavailable; try the next.
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed here.
            pass
    return lambda: None
Returns a function that reads one keypress from stdin with no echo. Returns: A function that reads one keypress from stdin with no echo or a function that always returns None if stdin does not support it.
github-repos
def get_mapping(version=1, exported_at=None, app_name=None):
    """Return Heroku Connect mapping for the entire project.

    Args:
        version (int): Version of the Heroku Connect mapping, default: ``1``.
            (The version does not need to be incremented; website exports
            always use ``1``.)
        exported_at (datetime.datetime): Time the export was created,
            default is ``now()``.
        app_name (str): Name of the Heroku application associated with the
            Heroku Connect add-on; defaults to ``HEROKU_CONNECT_APP_NAME``.

    Returns:
        dict: Heroku Connect mapping.
    """
    if exported_at is None:
        exported_at = timezone.now()
    connection = {
        'organization_id': settings.HEROKU_CONNECT_ORGANIZATION_ID,
        'app_name': app_name or settings.HEROKU_CONNECT_APP_NAME,
        'exported_at': exported_at.isoformat(),
    }
    mappings = [
        model.get_heroku_connect_mapping()
        for model in get_heroku_connect_models()
    ]
    return {'version': version, 'connection': connection, 'mappings': mappings}
Return Heroku Connect mapping for the entire project. Args: version (int): Version of the Heroku Connect mapping, default: ``1``. exported_at (datetime.datetime): Time the export was created, default is ``now()``. app_name (str): Name of Heroku application associated with Heroku Connect the add-on. Returns: dict: Heroku Connect mapping. Note: The version does not need to be incremented. Exports from the Heroku Connect website will always have the version number ``1``.
codesearchnet
def rmtree(self, exclude_wildcard=""):
    """Remove all files and directories in the working directory.

    Args:
        exclude_wildcard: Optional string with regular expressions separated
            by `|`; files matching one of them are preserved, e.g.
            exclude_wildcard="*.nc|*.txt" keeps all "nc" and "txt" files.
    """
    if not exclude_wildcard:
        # Nothing to keep: drop the whole tree.
        shutil.rmtree(self.workdir)
        return
    keep = WildCard(exclude_wildcard)
    for dirpath, dirnames, filenames in os.walk(self.workdir):
        for fname in filenames:
            if not keep.match(fname):
                os.remove(os.path.join(dirpath, fname))
Remove all files and directories in the working directory Args: exclude_wildcard: Optional string with regular expressions separated by `|`. Files matching one of the regular expressions will be preserved. example: exclude_wildard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"].
juraj-google-style
def get_min_eig_vec_proxy(self, use_tf_eig=False):
    """Compute (an estimate of) the minimum eigenvector of the dual matrix M.

    Args:
        use_tf_eig: If True, use TF's eigen decomposition (exact when the
            smoothing placeholder is ~0, smoothed otherwise); otherwise run
            an iterative minimum-eigenvector routine converted via autograph.

    Returns:
        Tensor estimating the eigenvector of the minimum absolute eigenvalue.
    """
    if use_tf_eig:
        # Fall back to the exact eig path when smoothing is effectively off.
        return tf.cond((self.smooth_placeholder < 1e-08), self.tf_min_eig_vec, self.tf_smooth_eig_vec)
    min_eigen_tf = autograph.to_graph(utils.minimum_eigen_vector)

    def _vector_prod_fn(x):
        # Matrix-vector product with the PSD dual matrix.
        return self.dual_object.get_psd_product(x)
    estimated_eigen_vector = min_eigen_tf(x=self.eig_init_vec_placeholder, num_steps=self.eig_num_iter_placeholder, learning_rate=self.params['eig_learning_rate'], vector_prod_fn=_vector_prod_fn)
    return estimated_eigen_vector
Computes the min eigen value and corresponding vector of matrix M. Args: use_tf_eig: Whether to use tf's default full eigen decomposition Returns: eig_vec: Minimum absolute eigen value eig_val: Corresponding eigen vector
codesearchnet
def get_execution_info(self, driver_id, function_descriptor):
    """Get the FunctionExecutionInfo of a remote function.

    Args:
        driver_id: ID of the driver that the function belongs to.
        function_descriptor: The FunctionDescriptor of the function to get.

    Returns:
        A FunctionExecutionInfo object.

    Raises:
        KeyError: if the function cannot be found after loading/waiting.
    """
    if self._worker.load_code_from_local:
        # Locally loaded code is not tied to a particular driver.
        driver_id = ray.DriverID.nil()
        if not function_descriptor.is_actor_method():
            self._load_function_from_local(driver_id, function_descriptor)
    else:
        # Wait for the remote function to be exported by its driver.
        with profiling.profile("wait_for_function"):
            self._wait_for_function(function_descriptor, driver_id)
    try:
        function_id = function_descriptor.function_id
        info = self._function_execution_info[driver_id][function_id]
    except KeyError as e:
        message = ("Error occurs in get_execution_info: "
                   "driver_id: %s, function_descriptor: %s. Message: %s"
                   % (driver_id, function_descriptor, e))
        raise KeyError(message)
    return info
Get the FunctionExecutionInfo of a remote function. Args: driver_id: ID of the driver that the function belongs to. function_descriptor: The FunctionDescriptor of the function to get. Returns: A FunctionExecutionInfo object.
juraj-google-style
def response(self, in_thread: Optional[bool]=None) -> 'Message':
    """Create a response message for this message's channel.

    By default the response follows where the incoming message was posted
    (thread or channel).

    Args:
        in_thread (bool): Force the response into a thread (True); None
            keeps the incoming message's threading behaviour.

    Returns:
        a new :class:`slack.event.Message`
    """
    data = {'channel': self['channel']}
    if in_thread:
        # 'message' is present for wrapped events (e.g. message_changed).
        if ('message' in self):
            data['thread_ts'] = (self['message'].get('thread_ts') or self['message']['ts'])
        else:
            data['thread_ts'] = (self.get('thread_ts') or self['ts'])
    elif (in_thread is None):
        # Follow the original threading, if any.
        if (('message' in self) and ('thread_ts' in self['message'])):
            data['thread_ts'] = self['message']['thread_ts']
        elif ('thread_ts' in self):
            data['thread_ts'] = self['thread_ts']
    return Message(data)
Create a response message. Depending on the incoming message the response can be in a thread. By default the response follow where the incoming message was posted. Args: in_thread (boolean): Overwrite the `threading` behaviour Returns: a new :class:`slack.event.Message`
codesearchnet
def requirements(requirements_file):
    """Return packages mentioned in the given requirements file.

    Args:
        requirements_file (str): path to the requirements file to be parsed.

    Returns:
        list: 3rd-party package dependency strings contained in the file.
    """
    return [
        str(pkg.req) for pkg in parse_requirements(
            requirements_file, session=pip_download.PipSession())
        if pkg.req is not None]
Return packages mentioned in the given file. Args: requirements_file (str): path to the requirements file to be parsed. Returns: (list): 3rd-party package dependencies contained in the file.
juraj-google-style
def velocity(msg):
    """Calculate speed, track/heading and vertical rate from an ADS-B message.

    Handles both surface (TC 5-8) and airborne (TC 19) messages.

    Args:
        msg (string): 28 bytes hexadecimal message string.

    Returns:
        (int, float, int, string): speed (kt), ground track or heading
        (degree), rate of climb/descend (ft/min), and speed type
        ('GS' for ground speed, 'AS' for airspeed).

    Raises:
        RuntimeError: if the typecode is not in 5-8 or 19.
    """
    tc = typecode(msg)  # hoisted: original decoded the typecode twice
    if 5 <= tc <= 8:
        return surface_velocity(msg)
    if tc == 19:
        return airborne_velocity(msg)
    # Fixed spelling in the error message ("inconsistant" -> "inconsistent").
    raise RuntimeError("incorrect or inconsistent message types, expecting 4<TC<9 or TC=19")
Calculate the speed, heading, and vertical rate (handles both airborne or surface message) Args: msg (string): 28 bytes hexadecimal message string Returns: (int, float, int, string): speed (kt), ground track or heading (degree), rate of climb/descend (ft/min), and speed type ('GS' for ground speed, 'AS' for airspeed)
juraj-google-style
def run_resume_status(self, entity, project_name, name):
    """Check if a run exists and get resume information.

    Args:
        entity (str): The entity the project belongs to.
        project_name (str): The project containing the run.
        name (str): The name of the run to look up.

    Returns:
        The run ("bucket") data dict, or None when the run does not exist.
    """
    # NOTE(review): the GraphQL query text appears to have been stripped
    # from this copy of the source; gql() normally receives a query string.
    query = gql()
    response = self.gql(query, variable_values={
        'entity': entity,
        'project': project_name,
        'name': name,
    })
    if 'model' not in response or 'bucket' not in response['model']:
        return None
    project = response['model']
    # Persist the resolved project/entity as defaults for later calls.
    self.set_setting('project', project_name)
    if 'entity' in project:
        self.set_setting('entity', project['entity']['name'])
    return project['bucket']
Check if a run exists and get resume information.

Args:
    entity (str): The entity (user or team) the project belongs to.
    project_name (str): The project containing the run.
    name (str): The name of the run to look up.
juraj-google-style
def to_text(self):
    """Render this Text MessageElement as plain text.

    Returns:
        str: numbered plain-text lines, one per item, or None when there
        are no items to render.
    """
    if self.items is None:
        return None
    rendered = [
        ' %s. %s\n' % (position, entry.to_text())
        for position, entry in enumerate(self.items, start=1)
    ]
    return ''.join(rendered)
Render a Text MessageElement as plain text.

Args:
    None

Returns:
    str: the plain-text representation of the Text MessageElement, or
    None when there are no items.

Raises:
    Errors are propagated
juraj-google-style
def get_hash(self):
    """Generate and return the dict index key ("hash") of this queue item.

    Cookies are deliberately excluded so the same request is not crawled
    repeatedly under different session keys. The key is not actually
    hashed; the concatenated string is unique enough and avoids hash
    collision management.

    Returns:
        str: The key of the given queue item.
    """
    if self.__index_hash:
        return self.__index_hash
    key = self.request.method
    key += URLHelper.get_protocol(self.request.url)
    key += URLHelper.get_subdomain(self.request.url)
    key += URLHelper.get_hostname(self.request.url)
    key += URLHelper.get_tld(self.request.url)
    key += URLHelper.get_path(self.request.url)
    key += str(URLHelper.get_ordered_params(self.request.url))
    if (self.request.data is not None):
        # Only the data keys are significant; values may vary per session.
        key += str(self.request.data.keys())
    self.__index_hash = key
    return self.__index_hash
Generate and return the dict index hash of the given queue item. Note: Cookies should not be included in the hash calculation because otherwise requests are crawled multiple times with e.g. different session keys, causing infinite crawling recursion. Note: At this moment the keys do not actually get hashed since it works perfectly without and since hashing the keys requires us to built hash collision management. Returns: str: The hash of the given queue item.
codesearchnet
def _logmessage_transform(cls, s, by=2): if len(s) >= by: return s[by:].strip('\n') return s.strip('\n')
Preprocess/cleanup a bzr log message before parsing Args: s (str): log message string by (int): cutoff threshold for log message length Returns: str: preprocessed log message string
juraj-google-style
def get_session(self, app_path, session_id):
    """Get an active session by application path and session ID.

    Args:
        app_path (str): The configured application path for the application
            to return a session for.
        session_id (str): The session ID of the session to retrieve.

    Returns:
        ServerSession

    Raises:
        ValueError: when no application is configured at ``app_path``.
    """
    try:
        application = self._applications[app_path]
    except KeyError:
        raise ValueError(('Application %s does not exist on this server' % app_path))
    return application.get_session(session_id)
Get an active a session by name application path and session ID. Args: app_path (str) : The configured application path for the application to return a session for. session_id (str) : The session ID of the session to retrieve. Returns: ServerSession
codesearchnet
def forward(self, probabilities, temperature=1.0, eps=0.0001):
    """Compute the (activated) log-binomial distribution for probabilities.

    Args:
        probabilities (torch.Tensor): per-class probabilities; a 3-D input
            is expanded to (batch, 1, height, width).
        temperature: Temperature of the distribution (scalar or
            broadcastable tensor).
        eps (float): Small clamp bound for numerical stability.

    Returns:
        torch.Tensor: activation of logbinomial(p; t) along dim 1.
    """
    if probabilities.ndim == 3:
        probabilities = probabilities.unsqueeze(1)
    # Clamp both p and 1-p away from zero so the logarithms stay finite.
    one_minus_probabilities = torch.clamp(1 - probabilities, eps, 1)
    probabilities = torch.clamp(probabilities, eps, 1)
    # log C(k-1, i) + i*log(p) + (k-1-i)*log(1-p) for all class indices i.
    y = log_binom(self.k_minus_1, self.k_idx) + self.k_idx * torch.log(probabilities) + (self.k_minus_1 - self.k_idx) * torch.log(one_minus_probabilities)
    return self.act(y / temperature, dim=1)
Compute the log binomial distribution for probabilities. Args: probabilities (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Tensor containing probabilities of each class. temperature (`float` or `torch.Tensor` of shape `(batch_size, num_channels, height, width)`, *optional*, defaults to 1): Temperature of distribution. eps (`float`, *optional*, defaults to 1e-4): Small number for numerical stability. Returns: `torch.Tensor` of shape `(batch_size, num_channels, height, width)`: Log binomial distribution logbinomial(p;t).
github-repos
def create(rpc_layer, address):
    """Create a TF RPC server at the given address.

    Args:
        rpc_layer: Communication layer between client and server; only
            "grpc" is supported at the moment.
        address: Address where the RPC server is hosted.

    Returns:
        An instance of the GRPC server.

    Raises:
        ValueError: if ``rpc_layer`` is anything other than "grpc".
    """
    if rpc_layer == 'grpc':
        return GrpcServer(address=address)
    raise ValueError('Only GRPC backend is supported at the moment.')
Create TF RPC server at given address. Args: rpc_layer: Communication layer between client and server. Only "grpc" rpc layer is supported at the moment. address: Address where RPC server is hosted. Returns: An instance of `tf.distribute.experimental.rpc.Server` class. Raises: A ValueError if rpc_layer other than "grpc" is used. Only GRPC is supported at the moment. Example usage: >>> import portpicker >>> @tf.function(input_signature=[ ... tf.TensorSpec([], tf.int32), ... tf.TensorSpec([], tf.int32)]) ... def remote_fn(a, b): ... return tf.add(a, b) >>> port = portpicker.pick_unused_port() >>> address = "localhost:{}".format(port) >>> server = tf.distribute.experimental.rpc.Server.create("grpc", address) >>> server.register("addition", remote_fn) >>> server.start()
github-repos
def region_code_for_number(numobj):
    """Return the region a phone number is from, for region-level geocoding.

    Only guarantees correct results for valid, full numbers (not
    short-codes or invalid numbers).

    Arguments:
        numobj -- The phone number object whose origin we want to know.

    Returns the region the phone number is from, or None if no region
    matches this calling code.
    """
    candidate_regions = COUNTRY_CODE_TO_REGION_CODE.get(numobj.country_code, None)
    if candidate_regions is None:
        return None
    if len(candidate_regions) == 1:
        return candidate_regions[0]
    # Several regions share this calling code; disambiguate by number.
    return _region_code_for_number_from_list(numobj, candidate_regions)
Returns the region where a phone number is from. This could be used for geocoding at the region level. Only guarantees correct results for valid, full numbers (not short-codes, or invalid numbers). Arguments: numobj -- The phone number object whose origin we want to know Returns the region where the phone number is from, or None if no region matches this calling code.
juraj-google-style
def sort_segment_points(Aps, Bps):
    """Merge two line segments' points into one continuous ordered path.

    Args:
        Aps: Array of tracktotrip.Point
        Bps: Array of tracktotrip.Point

    Returns:
        Array with points ordered
    """
    mid = []
    j = 0
    mid.append(Aps[0])
    for i in range((len(Aps) - 1)):
        dist = distance_tt_point(Aps[i], Aps[(i + 1)])
        for m in range(j, len(Bps)):
            distm = distance_tt_point(Aps[i], Bps[m])
            # Insert a B point when it is closer than the next A point and
            # roughly aligned with the direction of the A segment.
            if (dist > distm):
                direction = dot(normalize(line(Aps[i].gen2arr(), Aps[(i + 1)].gen2arr())), normalize(Bps[m].gen2arr()))
                if (direction > 0):
                    j = (m + 1)
                    mid.append(Bps[m])
                    break
        mid.append(Aps[(i + 1)])
    # Append any B points remaining after the last one inserted.
    for m in range(j, len(Bps)):
        mid.append(Bps[m])
    return mid
Takes two line segments and sorts all their points, so that they form a continuous path Args: Aps: Array of tracktotrip.Point Bps: Array of tracktotrip.Point Returns: Array with points ordered
codesearchnet
def end_statement(self, stmt):
    """Mark the end of a statement.

    Args:
        stmt: Hashable key identifying the statement; must match a key
            previously passed to ``begin_statement``.
    """
    self.active_stmts.remove(stmt)
Marks the end of a statement. Args: stmt: Hashable, a key by which the statement can be identified in the CFG's stmt_prev and stmt_next attributes; must match a key previously passed to begin_statement.
github-repos
def _get_all_groups():
    """Return an iterable of ADSI objects for all local groups.

    Returns:
        iter: Objects for all groups on the machine.
    """
    with salt.utils.winapi.Com():
        nt = win32com.client.Dispatch('AdsNameSpaces')
        # NOTE(review): the binding string was truncated in this copy of the
        # source ('WinNT:'); 'WinNT://.' binds to the local machine via the
        # ADSI WinNT provider -- confirm against upstream.
        results = nt.GetObject('', 'WinNT://.')
        results.Filter = ['group']
        return results
A helper function that gets a list of group objects for all groups on the machine Returns: iter: A list of objects for all groups on the machine
codesearchnet
def cancelOrder(self, order: Order) -> Trade:
    """Cancel the order and return the Trade it belongs to.

    Args:
        order: The order to be canceled.
    """
    self.client.cancelOrder(order.orderId)
    now = datetime.datetime.now(datetime.timezone.utc)
    key = self.wrapper.orderKey(order.clientId, order.orderId, order.permId)
    trade = self.wrapper.trades.get(key)
    if trade:
        if (not trade.isDone()):
            status = trade.orderStatus.status
            # An order that was never transmitted, or is inactive, can be
            # cancelled immediately; anything else goes to PendingCancel.
            if (((status == OrderStatus.PendingSubmit) and (not order.transmit)) or (status == OrderStatus.Inactive)):
                newStatus = OrderStatus.Cancelled
            else:
                newStatus = OrderStatus.PendingCancel
            logEntry = TradeLogEntry(now, newStatus, '')
            trade.log.append(logEntry)
            trade.orderStatus.status = newStatus
            self._logger.info(f'cancelOrder: {trade}')
            # Notify trade-level and client-level listeners, in that order.
            trade.cancelEvent.emit(trade)
            trade.statusEvent.emit(trade)
            self.cancelOrderEvent.emit(trade)
            self.orderStatusEvent.emit(trade)
            if (newStatus == OrderStatus.Cancelled):
                trade.cancelledEvent.emit(trade)
    else:
        self._logger.error(f'cancelOrder: Unknown orderId {order.orderId}')
    return trade
Cancel the order and return the Trade it belongs to. Args: order: The order to be canceled.
codesearchnet
def getDocumentIDs(aleph_search_result, number_of_docs=(- 1)):
    """Get DocumentIDs, usable as parameters for other functions.

    Args:
        aleph_search_result (dict): returned from :func:`searchInAleph`.
        number_of_docs (int, optional): how many :class:`DocumentID` from
            the set to return. Default -1 for all of them.

    Returns:
        list: :class:`DocumentID` named tuples for the given search result.

    Raises:
        AlephException: If Aleph returns an unknown format of data.
    """
    downer = Downloader()
    if ('set_number' not in aleph_search_result):
        return []
    # Aleph expects the set number zero-padded to six digits.
    set_number = str(aleph_search_result['set_number'])
    if (len(set_number) < 6):
        set_number = (((6 - len(set_number)) * '0') + set_number)
    if (number_of_docs <= 0):
        number_of_docs = aleph_search_result['no_entries']
    set_data = downer.download((ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(SET_NUMBER=set_number, NUMBER_OF_DOCS=number_of_docs)))
    dom = dhtmlparser.parseString(set_data)
    set_data = dom.find('ill-get-set')
    if (len(set_data) <= 0):
        raise AlephException("Aleph didn't returned set data.")
    ids = []
    for library in set_data:
        documents = _alephResultToDict(library)
        if ('error' in documents):
            raise AlephException(('getDocumentIDs: ' + documents['error']))
        # 'doc-number' may hold one value or a list of values per library.
        if isinstance(documents['doc-number'], list):
            ids.extend(map((lambda x: DocumentID(x, documents['set-library'], aleph_search_result['base'])), set(documents['doc-number'])))
        else:
            ids.append(DocumentID(documents['doc-number'], documents['set-library'], aleph_search_result['base']))
    return ids
Get IDs, which can be used as parameters for other functions. Args: aleph_search_result (dict): returned from :func:`searchInAleph` number_of_docs (int, optional): how many :class:`DocumentID` from set given by `aleph_search_result` should be returned. Default -1 for all of them. Returns: list: :class:`DocumentID` named tuples to given `aleph_search_result`. Raises: AlephException: If Aleph returns unknown format of data. Note: Returned :class:`DocumentID` can be used as parameters to :func:`downloadMARCXML`.
codesearchnet
def configure_collective_ops(self, collective_leader='', scoped_allocator_enabled_ops=('CollectiveReduce',), use_nccl_communication=False, device_filters=None):
    """Configure collective ops; must happen before context creation.

    Args:
        collective_leader: device string of the collective group leader,
            e.g. "/job:worker/replica:0/task:0"; empty string means local
            execution of collective ops.
        scoped_allocator_enabled_ops: op names for scoped allocator to run
            with.
        use_nccl_communication: whether to use nccl communication for
            collective ops.
        device_filters: device strings limiting what each task can see.

    Raises:
        ValueError: if collective ops were already configured differently.
        RuntimeError: if called after the context handle was created.
    """
    if self._collective_leader is not None:
        # Reconfiguring is only a no-op when identical to the existing config.
        if self._collective_leader != collective_leader or self._collective_scoped_allocator_enabled_ops != scoped_allocator_enabled_ops or self._collective_use_nccl_communication != use_nccl_communication or (self._collective_device_filters != device_filters):
            raise ValueError('Collective ops are already configured.')
        else:
            return
    if self._context_handle is not None:
        raise RuntimeError('Collective ops must be configured at program startup')
    self._collective_leader = collective_leader
    self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops
    self._collective_use_nccl_communication = use_nccl_communication
    self._collective_device_filters = device_filters
Configure collective ops. Collective group leader is necessary for collective ops to run, other configurations are mainly for the purpose of performance. Args: collective_leader: a device string for collective leader, e.g. "/job:worker/replica:0/task:0"; empty string means local execution of collective ops. scoped_allocator_enabled_ops: a tuple or a list of op names for scoped allocator to run with. use_nccl_communication: whether to use nccl communication for collective ops. device_filters: a tuple or a list of device strings. If set, corresponding task can only see the devices filtered by these device filters. Raises: RuntimeError: if this method is not called at program startup.
github-repos
def as_json(self, entity_url, context=None):
    """Return the Fedora Object at *entity_url* serialized as JSON-LD.

    Args:
        entity_url (str): Fedora Commons URL of the Entity.
        context: Optional JSON-LD context; default is None.

    Returns:
        str: JSON-LD of the Fedora Object.

    Raises:
        ValueError: when the entity URL cannot be opened.
    """
    try:
        # Use the response as a context manager so the connection is
        # closed (the original leaked the urlopen response).
        with urllib.request.urlopen(entity_url):
            pass
    except urllib.error.HTTPError as err:
        # Chain the original HTTP error for debuggability.
        raise ValueError('Cannot open {}'.format(entity_url)) from err
    entity_graph = self.read(entity_url)
    entity_json = json.loads(entity_graph.serialize(format='json-ld', context=context).decode())
    return json.dumps(entity_json)
Method takes a entity uri and attempts to return the Fedora Object as a JSON-LD. Args: entity_url(str): Fedora Commons URL of Entity context(None): Returns JSON-LD with Context, default is None Returns: str: JSON-LD of Fedora Object
codesearchnet
def save_to_well_known_file(credentials, well_known_file=None):
    """Save the provided GoogleCredentials to the well known file.

    Args:
        credentials: the credentials to be saved; an instance of
            GoogleCredentials.
        well_known_file: target file path; defaults to the standard
            well-known location (parameter intended for testing only).

    Raises:
        OSError: if the target's config directory does not exist.
    """
    target_path = well_known_file if well_known_file is not None else _get_well_known_file()
    config_dir = os.path.dirname(target_path)
    if not os.path.isdir(config_dir):
        raise OSError(
            'Config directory does not exist: {0}'.format(config_dir))
    _save_private_file(target_path, credentials.serialization_data)
Save the provided GoogleCredentials to the well known file. Args: credentials: the credentials to be saved to the well known file; it should be an instance of GoogleCredentials well_known_file: the name of the file where the credentials are to be saved; this parameter is supposed to be used for testing only
juraj-google-style
def _parse_doc(doc):
    """Parse a docstring into headline, description and argument help map.

    Args:
        doc: docstring.

    Returns:
        dict with keys ``headline``, ``description`` and ``args`` (a
        mapping from argument name to its help text).
    """
    lines = doc.split('\n')
    # Everything before the first section keyword is the free-form part.
    descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))
    if (len(descriptions) < 3):
        description = lines[0]
    else:
        description = '{0}\n\n{1}'.format(lines[0], textwrap.dedent('\n'.join(descriptions[2:])))
    # Lines between the Args keyword and the next section keyword.
    args = list(itertools.takewhile(_checker(_KEYWORDS_OTHERS), itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))
    argmap = {}
    if (len(args) > 1):
        for pair in args[1:]:
            # Each entry is "name: help"; keep colons inside the help text.
            kv = [v.strip() for v in pair.split(':')]
            if (len(kv) >= 2):
                argmap[kv[0]] = ':'.join(kv[1:])
    return dict(headline=descriptions[0], description=description, args=argmap)
Parse a docstring. Parse a docstring and extract three components; headline, description, and map of arguments to help texts. Args: doc: docstring. Returns: a dictionary.
codesearchnet
def get_path_list(self, type_str=None):
    """Get labels of the nodes leading up to this node from the root.

    Args:
        type_str: SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only
            include information from nodes of that type.

    Returns:
        list of str: The labels of the nodes leading up to this node from
        the root.
    """
    labels = [
        node.label_str
        for node in self.parent_gen
        if type_str is None or node.type_str == type_str
    ]
    labels.reverse()
    return labels
Get list of the labels of the nodes leading up to this node from the root. Args: type_str: SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include information from nodes of that type. Returns: list of str: The labels of the nodes leading up to this node from the root.
juraj-google-style
def send_html(self, html, body=None, msgtype='m.text'):
    """Send an html formatted message to this room.

    Args:
        html (str): The html formatted message to be sent.
        body (str): The unformatted body of the message to be sent.
        msgtype (str): Matrix message type, defaults to 'm.text'.
    """
    content = self.get_html_content(html, body, msgtype)
    return self.client.api.send_message_event(self.room_id, 'm.room.message', content)
Send an html formatted message. Args: html (str): The html formatted message to be sent. body (str): The unformatted body of the message to be sent.
codesearchnet
def parse_genes(gene_lines):
    """Parse a stream of gene-panel lines and return unique gene dicts.

    NOTE(review): the '#' string literals below were truncated in this copy
    of the source (comment-stripping artifact); they have been restored as
    ``'#'`` / ``'##'`` -- confirm against upstream.

    Args:
        gene_lines (iterable(str)): Stream with genes.

    Returns:
        genes (list(dict)): Dictionaries with relevant gene info,
        deduplicated on identifier.

    Raises:
        SyntaxError: If a line cannot be parsed.
    """
    genes = []
    header = []
    hgnc_identifiers = set()
    delimiter = '\t'
    delimiters = ['\t', ' ', ';']
    for i, line in enumerate(gene_lines):
        line = line.rstrip()
        if not len(line) > 0:
            continue
        # '#' marks a header line; '##' lines are metadata and are skipped.
        if line.startswith('#'):
            if not line.startswith('##'):
                # Pick the delimiter that splits the header into most columns.
                line_length = 0
                delimiter = None
                for alt in delimiters:
                    head_line = line.split(alt)
                    if len(head_line) > line_length:
                        line_length = len(head_line)
                        delimiter = alt
                header = [word.lower() for word in line[1:].split(delimiter)]
        else:
            if i == 0:
                # No '#' header: infer delimiter and header from first line.
                line_length = 0
                for alt in delimiters:
                    head_line = line.split(alt)
                    if len(head_line) > line_length:
                        line_length = len(head_line)
                        delimiter = alt
                if ('hgnc' in line or 'HGNC' in line):
                    header = [word.lower() for word in line.split(delimiter)]
                    continue
                if line.split(delimiter)[0].isdigit():
                    header = ['hgnc_id']
                else:
                    header = ['hgnc_symbol']
            splitted_line = line.split(delimiter)
            gene_info = dict(zip(header, splitted_line))
            # Skip rows where every column is empty.
            info_found = False
            for key in gene_info:
                if gene_info[key]:
                    info_found = True
                    break
            if not info_found:
                continue
            try:
                gene = parse_gene(gene_info)
            except Exception as e:
                LOG.warning(e)
                raise SyntaxError("Line {0} is malformed".format(i + 1))
            identifier = gene.pop('identifier')
            if identifier not in hgnc_identifiers:
                hgnc_identifiers.add(identifier)
                genes.append(gene)
    return genes
Parse a file with genes and return the hgnc ids Args: gene_lines(iterable(str)): Stream with genes Returns: genes(list(dict)): Dictionaries with relevant gene info
juraj-google-style
def __init__(self, location=None, parent=None, **kwargs):
    """Initializes a location-based path specification.

    Args:
        location (Optional[str]): location.
        parent (Optional[PathSpec]): parent path specification.

    Raises:
        ValueError: when location is not set.
    """
    if not location:
        raise ValueError('Missing location value.')
    super(LocationPathSpec, self).__init__(parent=parent, **kwargs)
    self.location = location
Initializes a path specification. Args: location (Optional[str]): location. parent (Optional[PathSpec]): parent path specification. Raises: ValueError: when location is not set.
juraj-google-style
def get_effect_class(self, class_name, package_name=None) -> Type[Effect]:
    """Get an effect class from the effect registry.

    Args:
        class_name (str): The exact class name of the effect.

    Keyword Args:
        package_name (str): Optional python path to the effect package,
            useful to avoid class name collisions.

    Returns:
        Effect class
    """
    qualified_name = "{}.{}".format(package_name, class_name) if package_name else class_name
    return effects.find_effect_class(qualified_name)
Get an effect class from the effect registry. Args: class_name (str): The exact class name of the effect Keyword Args: package_name (str): The python path to the effect package the effect name is located. This is optional and can be used to avoid issue with class name collisions. Returns: Effect class
juraj-google-style
def wtime_to_minutes(time_string):
    """Convert a wallclock time string to minutes, padded by one minute.

    Seconds are dropped and one extra minute is always added.

    Args:
        time_string: Wallclock time in HH:MM:SS format.

    Returns:
        (int) minutes.
    """
    hours_part, minutes_part, _seconds_part = time_string.split(':')
    return 60 * int(hours_part) + int(minutes_part) + 1
Convert a standard wallclock time string to minutes, padded by one minute
(seconds are dropped and one minute is always added).

Args:
    - time_string: wallclock time in HH:MM:SS format.

Returns:
    (int) minutes
juraj-google-style
def simplify_U(theta, phi, lam):
    """Return the gate u1, u2, or u3 implementing U with the fewest pulses.

    The returned gate implements U exactly, not up to a global phase.

    Args:
        theta, phi, lam: input Euler rotation angles for a general U gate.

    Returns:
        Gate: one of IdGate, U1Gate, U2Gate, U3Gate.
    """
    gate = U3Gate(theta, phi, lam)
    # theta == 0 (mod 2pi): U3 collapses to a phase gate u1.
    if abs(gate.params[0] % (2.0 * math.pi)) < _CUTOFF_PRECISION:
        gate = U1Gate(gate.params[0] + gate.params[1] + gate.params[2])
    if isinstance(gate, U3Gate):
        # theta == +/- pi/2 (mod 2pi): U3 collapses to a u2.
        if abs((gate.params[0] - math.pi / 2) % (2.0 * math.pi)) < _CUTOFF_PRECISION:
            gate = U2Gate(gate.params[1], gate.params[2] + (gate.params[0] - math.pi / 2))
        if abs((gate.params[0] + math.pi / 2) % (2.0 * math.pi)) < _CUTOFF_PRECISION:
            gate = U2Gate(gate.params[1] + math.pi, gate.params[2] - math.pi + (gate.params[0] + math.pi / 2))
    # u1 with zero phase (mod 4pi) is the identity.
    if isinstance(gate, U1Gate) and abs(gate.params[0] % (4.0 * math.pi)) < _CUTOFF_PRECISION:
        gate = IdGate()
    return gate
Return the gate u1, u2, or u3 implementing U with the fewest pulses. The returned gate implements U exactly, not up to a global phase. Args: theta, phi, lam: input Euler rotation angles for a general U gate Returns: Gate: one of IdGate, U1Gate, U2Gate, U3Gate.
juraj-google-style
def _get_user_command_string(self): sdk_version = int(self._device.build_info['build_version_sdk']) if sdk_version < 24: return '' return f'--user {self.user_id}'
Gets the appropriate command argument for specifying device user ID. By default, this client operates within the current user. We don't add the `--user {ID}` argument when Android's SDK is below 24, where multi-user support is not well implemented. Returns: A string of the command argument section to be formatted into adb commands.
github-repos
def _create_validation_schema(schema_cls):
    """Create a patched Schema instance for validating models.

    Patches ``_deserialize`` of every ModelTypeValidator field so that
    ``load`` performs type checks (via the field's ``check_type``) instead
    of full deserialization.

    Args:
        schema_cls: the Schema class to instantiate and patch.

    Returns:
        BaseSchema: an instance of ``schema_cls`` with patched fields.
    """
    validation_schema = schema_cls()
    for (_, field) in validation_schema.fields.items():
        if isinstance(field, ModelTypeValidator):
            validate_function = field.__class__.check_type
            # Rebind so load() validates the type rather than deserializing.
            field._deserialize = MethodType(validate_function, field)
    return validation_schema
Create a patched Schema for validating models. Model validation is not part of Marshmallow. Schemas have a ``validate`` method but this delegates execution on ``load`` and discards the result. Similarly, ``load`` will call ``_deserialize`` on every field in the schema. This function patches the ``_deserialize`` instance method of each field to make it call a custom defined method ``check_type`` provided by Qiskit in the different fields at ``qiskit.validation.fields``. Returns: BaseSchema: a copy of the original Schema, overriding the ``_deserialize()`` call of its fields.
codesearchnet
def _colourise(text: str, colour: str) -> str:
    """Colour text, when colour output is enabled.

    Args:
        text: Text to colourise.
        colour: Colour to display text in.

    Returns:
        Colourised text, if possible; otherwise the original text.
    """
    if not COLOUR:
        return text
    return style(text, fg=colour, bold=True)
Colour text, if possible. Args: text: Text to colourise colour: Colour to display text in Returns: Colourised text, if possible
codesearchnet
def delete_token(self, token_name, project_name, dataset_name):
    """Delete the given token via the resources client.

    Args:
        token_name (str): Token name.
        project_name (str): Project name.
        dataset_name (str): Dataset name the project is based on.

    Returns:
        bool: True if the token was deleted, False if not deleted.
    """
    return self.resources.delete_token(token_name, project_name, dataset_name)
Delete a token with the given parameters.

Arguments:
    token_name (str): Token name
    project_name (str): Project name
    dataset_name (str): Dataset name the project is based on

Returns:
    bool: True if the token was deleted, False if not deleted.
juraj-google-style
def _ParseAbstractInteger(text, is_long=False):
    """Parse an integer without checking size/signedness.

    Args:
        text: The text to parse (base inferred from prefix, e.g. ``0x``).
        is_long: True if the value should be returned as a long integer.
            NOTE(review): ``long`` only exists on Python 2; on Python 3 this
            branch would raise NameError -- confirm intended runtimes.

    Returns:
        The integer value.

    Raises:
        ValueError: iff the text is not a valid integer.
    """
    try:
        if is_long:
            return long(text, 0)
        else:
            return int(text, 0)
    except ValueError:
        raise ValueError('Couldn\'t parse integer: %s' % text)
Parses an integer without checking size/signedness. Args: text: The text to parse. is_long: True if the value should be returned as a long integer. Returns: The integer value. Raises: ValueError: Thrown Iff the text is not a valid integer.
juraj-google-style
def eq_or_parent(self, other):
    """Check whether ``other`` is an ancestor of, or equal to, ``self``.

    Returns:
        (bool) True iff one path's parts are a prefix of the other's,
        else False.
    """
    mine = self.parts
    theirs = other.parts
    shared = min(len(mine), len(theirs))
    return mine[:shared] == theirs[:shared]
Check whether ``other`` is an ancestor. Returns: (bool) True IFF ``other`` is an ancestor or equal to ``self``, else False.
codesearchnet
def fetch(self, rebuild=False, cache=True):
    """Fetch the table, preferring the cached copy unless rebuilding.

    Args:
        rebuild (bool): Rebuild the table and ignore cache. Default: False.
        cache (bool): Cache the finished table for faster future loading.
            Default: True.
    """
    if not rebuild:
        try:
            return self.read_cache()
        except FileNotFoundError:
            pass
    return self._process_table(cache)
Fetches the table and applies all post processors. Args: rebuild (bool): Rebuild the table and ignore cache. Default: False cache (bool): Cache the finished table for faster future loading. Default: True
juraj-google-style
def _VerifyRecord(self, pls_record):
    """Verifies a PLS Recall record.

    Args:
        pls_record (pls_recall_record): a PLS Recall record to verify.

    Returns:
        bool: True if this is a valid PLS Recall record, False otherwise.
    """
    # Reject timestamps more than ~6 years in the future.
    future_timestamp = (
        timelib.Timestamp.GetNow() + self._SIX_YEARS_IN_MICRO_SECONDS)
    if pls_record.last_written_time > future_timestamp:
        return False
    # The query must start with a known PL/SQL keyword.
    first_word, _, _ = pls_record.query.partition(' ')
    if first_word.lower() not in self._PLS_KEYWORD:
        return False
    return True
Verifies a PLS Recall record. Args: pls_record (pls_recall_record): a PLS Recall record to verify. Returns: bool: True if this is a valid PLS Recall record, False otherwise.
juraj-google-style
def max(x, axis=None, keepdims=False):
    """Maximum value in a tensor (thin wrapper over ``reduce_max``).

    Args:
        x: A tensor or variable.
        axis: An integer, the axis to find maximum values.
        keepdims: A boolean, whether to keep the dimensions or not. If
            `keepdims` is `False`, the rank of the tensor is reduced by 1.
            If `keepdims` is `True`, the reduced dimension is retained with
            length 1.

    Returns:
        A tensor with maximum values of `x`.
    """
    return math_ops.reduce_max(x, axis, keepdims)
Maximum value in a tensor. Args: x: A tensor or variable. axis: An integer, the axis to find maximum values. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with maximum values of `x`.
github-repos
def bofh_excuse(how_many=1):
    """Generate random BOFH themed technical excuses!

    Args:
        how_many: Number of excuses to generate. (Default: 1)

    Returns:
        A list of BOFH excuses.
    """
    excuse_source = os.path.join(os.path.dirname(__file__), 'bofh_excuses.json')
    with open(excuse_source, 'r') as handle:
        excuse_dict = json.load(handle)
    count = int(how_many)
    return [generate_random_string(excuse_dict) for _ in range(count)]
Generate random BOFH themed technical excuses! Args: how_many: Number of excuses to generate. (Default: 1) Returns: A list of BOFH excuses.
juraj-google-style
def _ParseItem(self, parser_mediator, olecf_item):
    """Parses an OLECF item and its sub items, producing timestamp events.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        olecf_item (pyolecf.item): OLECF item.

    Returns:
        bool: True if an event was produced by this item or any sub item.
    """
    result = False
    event_data = OLECFItemEventData()
    event_data.name = olecf_item.name
    event_data.offset = 0
    event_data.size = olecf_item.size
    creation_time, modification_time = self._GetTimestamps(olecf_item)
    if creation_time:
        date_time = dfdatetime_filetime.Filetime(timestamp=creation_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
        result = True
    if modification_time:
        date_time = dfdatetime_filetime.Filetime(timestamp=modification_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
        result = True
    # Recurse into sub items; any produced event makes the result True.
    for sub_item in olecf_item.sub_items:
        if self._ParseItem(parser_mediator, sub_item):
            result = True
    return result
Parses an OLECF item. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. olecf_item (pyolecf.item): OLECF item. Returns: bool: True if an event was produced.
juraj-google-style
def threat(self, name, **kwargs):
    """Add Threat data to this Batch object.

    Args:
        name (str): The name for this Group.
        date_added (str, kwargs): The date timestamp the Indicator was
            created.
        xid (str, kwargs): The external id for this Group.

    Returns:
        obj: An instance of Threat.
    """
    group_obj = Threat(name, **kwargs)
    return self._group(group_obj)
Add Threat data to Batch object Args: name (str): The name for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Threat.
juraj-google-style
def encode(self, obj):
    """Converts a Python object (numpy array or str) to a Weld object.

    Args:
        obj: numpy ndarray (1-D or 2-D of the dtypes below) or str.

    Returns:
        Weld formatted object.

    Raises:
        Exception: if the object type cannot be encoded.
    """
    # Pick the marshalling routine matching the array's rank and dtype;
    # the final 2-D fallback treats anything else as a char array-of-arrays.
    if isinstance(obj, np.ndarray):
        if obj.ndim == 1 and obj.dtype == 'int16':
            numpy_to_weld = self.utils.numpy_to_weld_int16_arr
        elif obj.ndim == 1 and obj.dtype == 'int32':
            numpy_to_weld = self.utils.numpy_to_weld_int_arr
        elif obj.ndim == 1 and obj.dtype == 'int64':
            numpy_to_weld = self.utils.numpy_to_weld_long_arr
        elif obj.ndim == 1 and obj.dtype == 'float32':
            numpy_to_weld = self.utils.numpy_to_weld_float_arr
        elif obj.ndim == 1 and obj.dtype == 'float64':
            numpy_to_weld = self.utils.numpy_to_weld_double_arr
        elif obj.ndim == 2 and obj.dtype == 'int16':
            numpy_to_weld = self.utils.numpy_to_weld_int16_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'int32':
            numpy_to_weld = self.utils.numpy_to_weld_int_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'int64':
            numpy_to_weld = self.utils.numpy_to_weld_long_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'float32':
            numpy_to_weld = self.utils.numpy_to_weld_float_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'float64':
            numpy_to_weld = self.utils.numpy_to_weld_double_arr_arr
        elif obj.ndim == 2 and obj.dtype == 'bool':
            numpy_to_weld = self.utils.numpy_to_weld_bool_arr_arr
        elif obj.ndim == 1 and obj.dtype == 'bool':
            numpy_to_weld = self.utils.numpy_to_weld_bool_arr
        else:
            numpy_to_weld = self.utils.numpy_to_weld_char_arr_arr
    elif isinstance(obj, str):
        numpy_to_weld = self.utils.numpy_to_weld_char_arr
    else:
        raise Exception("Unable to encode; invalid object type")
    # Configure the ctypes signature before invoking the C routine.
    numpy_to_weld.restype = self.py_to_weld_type(obj).ctype_class
    numpy_to_weld.argtypes = [py_object]
    weld_vec = numpy_to_weld(obj)
    return weld_vec
Converts Python object to Weld object. Args: obj: Python object that needs to be converted to Weld format Returns: Weld formatted object
juraj-google-style
def broadcast(self, gossip_message, message_type, exclude=None):
    """Broadcast a gossip message to all connected peers.

    Peers listed in *exclude*, or whose connection handshake has not
    completed, are skipped.

    Args:
        gossip_message: The message to be broadcast.
        message_type: Type of the message.
        exclude: A list of connection_ids that should be excluded from this
            broadcast.
    """
    with self._lock:
        if exclude is None:
            exclude = []
        # Iterate over a copy since sends may mutate the peer set.
        for connection_id in self._peers.copy():
            if connection_id not in exclude and \
                    self._network.is_connection_handshake_complete(
                        connection_id):
                self.send(
                    message_type,
                    gossip_message.SerializeToString(),
                    connection_id,
                    one_way=True)
Broadcast gossip messages. Broadcast the message to all peers unless they are in the excluded list. Args: gossip_message: The message to be broadcast. message_type: Type of the message. exclude: A list of connection_ids that should be excluded from this broadcast.
juraj-google-style
def make_sharded_variable_creator(strategy: distribute_lib.Strategy) -> Callable[..., Any]:
    """Create a variable creator which shards across all the tpu devices.

    Args:
        strategy: a TPUStrategy object.

    Returns:
        The sharded variable creator.
    """
    tpu_devices = strategy.extended._tpu_devices

    def _create_sharded_variable(next_creator, *args, **kwargs):
        """Create a TPUEmbeddingShardedVariable with one shard per device."""
        kwargs['skip_mirrored_creator'] = True
        shard_dim = 0
        num_replicas, num_cores_per_replica = tpu_devices.shape
        is_ckpt_init_value = is_checkpoint_initial_value(kwargs['initial_value'])
        arg_spec = tf_inspect.getfullargspec(kwargs['initial_value'])
        if is_ckpt_init_value and 'shard_info' not in arg_spec.args and ('shard_info' not in arg_spec.kwonlyargs):
            raise ValueError('When a sharded variable is initialized from a checkpoint, shard_info must be in arguments of the init function.')
        name, shape, dtype, unwrapped_initial_value, restore_uid = extract_variable_info(kwargs)
        shape = ops.tensor_shape.TensorShape(shape)
        num_devices = num_replicas * num_cores_per_replica
        if shape[shard_dim] % num_devices != 0:
            raise ValueError('Only evenly sharding across devices is currently supported. Got shape {} and {} devices'.format(shape, num_devices))
        partition_shape = shape.as_list()
        # NOTE(review): the division below was truncated in this copy of the
        # source; even sharding implies dividing by the device count.
        partition_shape[shard_dim] = partition_shape[shard_dim] // num_devices
        unwrapped_arg_spec = tf_inspect.getargspec(unwrapped_initial_value)
        sharding_aware = 'shard_info' in unwrapped_arg_spec.args
        variables = []
        partition_offset = [0] * len(shape)
        for replica_id in range(num_replicas):
            for logic_core_id in range(num_cores_per_replica):
                with ops.device(tpu_devices[replica_id][logic_core_id]):
                    kwargs['name'] = f'{name}/{replica_id}'
                    kwargs['shape'] = partition_shape
                    if sharding_aware:
                        # Hand the shard's shape/offset to the initializer.
                        shard_info = base.ShardInfo(tensor_shape.as_shape(partition_shape), copy.deepcopy(partition_offset))
                        kwargs['initial_value'] = functools.partial(kwargs['initial_value'], shard_info=shard_info)
                        partition_offset[shard_dim] += partition_shape[shard_dim]
                    else:
                        kwargs['initial_value'] = functools.partial(unwrapped_initial_value, shape=partition_shape, dtype=dtype)
                    variables.append(next_creator(*args, **kwargs))
        result = TPUEmbeddingShardedVariable(strategy, variables, tf_variables.VariableAggregation.NONE, None)
        if restore_uid is not None:
            result._maybe_initialize_trackable()
            result._update_uid = restore_uid
        return result
    return _create_sharded_variable
Create a variable creator which shards across all the tpu device. Args: strategy: a TPUStrategy object. Returns: The sharded variable creator.
github-repos
def _wrap_callback_errors(callback, message):
    """Wraps a user callback so the message is nacked if it raises.

    Args:
        callback (Callable[None, Message]): The user callback.
        message (~Message): The Pub/Sub message.
    """
    try:
        callback(message)
    except Exception:
        # Deliberately broad: a user error must not kill the consumer, and
        # the message must be nacked so it gets redelivered.
        _LOGGER.exception(
            "Top-level exception occurred in callback while processing a "
            "message"
        )
        message.nack()
Wraps a user callback so that if an exception occurs the message is nacked. Args: callback (Callable[None, Message]): The user callback. message (~Message): The Pub/Sub message.
juraj-google-style
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
    """Define an L1L2 regularizer.

    Args:
      weight_l1: scale the L1 loss by this factor.
      weight_l2: scale the L2 loss by this factor.
      scope: Optional scope for name_scope.

    Returns:
      a regularizer function.
    """
    def regularizer(tensor):
        with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):
            base_dtype = tensor.dtype.base_dtype
            # Cast the scalar weights to the tensor's dtype up front.
            l1_factor = tf.convert_to_tensor(
                weight_l1, dtype=base_dtype, name='weight_l1')
            l2_factor = tf.convert_to_tensor(
                weight_l2, dtype=base_dtype, name='weight_l2')
            # weight_l1 * sum(|t|)  and  weight_l2 * (sum(t^2) / 2)
            l1_term = tf.multiply(
                l1_factor, tf.reduce_sum(tf.abs(tensor)), name='value_l1')
            l2_term = tf.multiply(
                l2_factor, tf.nn.l2_loss(tensor), name='value_l2')
            return tf.add(l1_term, l2_term, name='value')
    return regularizer
Define an L1L2 regularizer. Args: weight_l1: scale the L1 loss by this factor. weight_l2: scale the L2 loss by this factor. scope: Optional scope for name_scope. Returns: a regularizer function.
codesearchnet
def deserialize(config, custom_objects=None):
    """Deserializes a serialized `DTypePolicy` instance.

    Args:
        config: `DTypePolicy` configuration.
        custom_objects: Optional dictionary mapping names (strings) to custom
            objects (classes and functions) to be considered during
            deserialization.

    Returns:
        A Keras `DTypePolicy` instance.
    """
    # Imported lazily to avoid a circular import at module load time.
    from keras.src.saving import serialization_lib

    return serialization_lib.deserialize_keras_object(
        config,
        module_objects=ALL_OBJECTS_DICT,
        custom_objects=custom_objects,
    )
Deserializes a serialized `DTypePolicy` instance. Args: config: `DTypePolicy` configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras `DTypePolicy` instance.
github-repos
def make_collective(self, num_processes, gpu_per_process):
    """Returns collectives and other info to be used in tests.

    Args:
      num_processes: an integer indicating the number of processes that
        participate in the collective.
      gpu_per_process: number of GPUs (0 if no GPUs) used by each process.

    Returns:
      A tuple of (collective, devices, pid) where collective is an instance
      of `CollectiveAllReduce`, devices are a list of local devices (str)
      attached to the current process, and pid is the id of this process among
      all participant processes.
    """
    resolver = cluster_resolver_lib.TFConfigClusterResolver()
    task_id = resolver.task_id
    if gpu_per_process > 0:
        devices = [
            '/job:worker/replica:0/task:%d/device:GPU:%d' % (task_id, i)
            for i in range(gpu_per_process)
        ]
    else:
        # CPU-only: a single device per process.
        devices = ['/job:worker/replica:0/task:%d/device:CPU:0' % task_id]
    # Group size counts every device across every participating process.
    group_size = num_processes * len(devices)
    collective = cross_device_ops_lib.CollectiveAllReduce(
        devices=devices,
        group_size=group_size,
        options=collective_util.Options())
    return (collective, devices, task_id)
Returns collectives and other info to be used in tests. Args: num_processes: an integer indicating the number of processes that participate in the collective. gpu_per_process: number of GPUs (0 if no GPUs) used by each process. Returns: A tuple of (collective, devices, pid) where collective is an instance of `CollectiveAllReduce`, devices are a list of local devices (str) attached to the current process, and pid is the id of this process among all participant processes.
github-repos
def process_file(self, path):
    """Process a file applying normalizations.

    Get a file as input and generate a new file with the result of
    applying normalizations to every single line in the original file.
    The extension for the new file will be the one defined in
    BATCH_EXTENSION.

    Args:
        path: Path to the file.
    """
    if self._config.verbose:
        self._logger.info('Processing file "%s"', path)

    output_path = '%s%s' % (path, BATCH_EXTENSION)
    with open(output_path, 'w') as out_file:
        for line in lines_generator(path):
            # Round-trip through bytes to force a utf-8 decoded string
            # before normalizing.
            normalized = self._cucco.normalize(line.encode().decode('utf-8'))
            out_file.write('%s\n' % normalized)

    self._logger.debug('Created file "%s"', output_path)
Process a file applying normalizations. Get a file as input and generate a new file with the result of applying normalizations to every single line in the original file. The extension for the new file will be the one defined in BATCH_EXTENSION. Args: path: Path to the file.
codesearchnet
def _merge_bee(self, bee):
    """Shift one random dimension of a bee toward another random employer.

    Args:
        bee (EmployerBee): supplied bee to merge

    Returns:
        tuple: (score of new position, values of new position, fitness
        function return value of new position)
    """
    dim = randint(0, len(self._value_ranges) - 1)
    # Draw a partner employer distinct from the supplied bee.
    partner = randint(0, self._num_employers - 1)
    while self._employers[partner].id == bee.id:
        partner = randint(0, self._num_employers - 1)
    candidate = deepcopy(bee)
    candidate.values[dim] = self.__onlooker.calculate_positions(
        candidate.values[dim],
        self._employers[partner].values[dim],
        self._value_ranges[dim]
    )
    score = candidate.get_score(self._fitness_fxn(
        candidate.values, **self._args
    ))
    return (score, candidate.values, candidate.error)
Shifts a random value for a supplied bee in accordance with another random bee's value Args: bee (EmployerBee): supplied bee to merge Returns: tuple: (score of new position, values of new position, fitness function return value of new position)
juraj-google-style
def getModPath(self, *paths):
    """Construct a path relative to this module's working directory.

    Args:
        *paths: A list of path strings

    Notes:
        This creates the module specific directory if it does not exist.

    Returns:
        (str): The full path (or None if no cortex dir is configured).
    """
    # getModDir() ensures the module directory exists before joining.
    return s_common.genpath(self.getModDir(), *paths)
Construct a path relative to this module's working directory. Args: *paths: A list of path strings Notes: This creates the module specific directory if it does not exist. Returns: (str): The full path (or None if no cortex dir is configured).
codesearchnet
def _get_full_signature_list(self):
    """Gets list of SignatureDefs in the model.

    Returns:
      A dictionary of SignatureDef details, keyed on the SignatureDef method
      name, where each value holds a dictionary of inputs and outputs.
    """
    signature_defs = self._interpreter.GetSignatureDefs()
    return signature_defs
Gets list of SignatureDefs in the model. Example, ``` signatures = interpreter._get_full_signature_list() print(signatures) # { # 'add': {'inputs': {'x': 1, 'y': 0}, 'outputs': {'output_0': 4}} # } Then using the names in the signature list you can get a callable from get_signature_runner(). ``` Returns: A list of SignatureDef details in a dictionary structure. It is keyed on the SignatureDef method name, and the value holds dictionary of inputs and outputs.
github-repos
def CopyFrom(self, other_msg):
    """Copies the content of the specified message into the current message.

    The method clears the current message and then merges the specified
    message using MergeFrom.

    Args:
        other_msg: Message to copy into the current one.
    """
    # Copying a message onto itself would clear it first and lose the data.
    if self is not other_msg:
        self.Clear()
        self.MergeFrom(other_msg)
Copies the content of the specified message into the current message. The method clears the current message and then merges the specified message using MergeFrom. Args: other_msg: Message to copy into the current one.
codesearchnet
def _preprocess_sqlite_index(asql_query, library, backend, connection):
    """Creates materialized view for each indexed partition found in the query.

    Args:
        asql_query (str): asql query
        library (ambry.Library):
        backend (SQLiteBackend):
        connection (apsw.Connection):

    Returns:
        str: converted asql if it contains index query. If not, returns
        asql_query as is.
    """
    # Anything other than an INDEX statement passes through untouched.
    if not asql_query.strip().lower().startswith('index'):
        return asql_query

    logger.debug(
        '_preprocess_index: create index query found.\n asql query: {}'
        .format(asql_query))
    index = parse_index(asql_query)
    partition = library.partition(index.source)
    # Materialize the partition so the index has a concrete table to act on.
    table = backend.install(connection, partition, materialize=True)
    index_name = '{}_{}_ind'.format(partition.vid, '_'.join(index.columns))
    new_query = 'CREATE INDEX IF NOT EXISTS {index} ON {table} ({columns});'.format(
        index=index_name, table=table, columns=','.join(index.columns))
    logger.debug(
        '_preprocess_index: preprocess finished.\n asql query: {}\n new query: {}'
        .format(asql_query, new_query))
    return new_query
Creates materialized view for each indexed partition found in the query. Args: asql_query (str): asql query library (ambry.Library): backend (SQLiteBackend): connection (apsw.Connection): Returns: str: converted asql if it contains index query. If not, returns asql_query as is.
juraj-google-style
def load(self):
    """Load the climate data as a map.

    Returns:
        dict: {data: masked 3D numpy array containing climate data per month
            (first axis),
            lat_idx: function converting a latitude to the (fractional) row
            index in the map,
            lon_idx: function converting a longitude to the (fractional)
            column index in the map}
    """
    from scipy.io import netcdf_file
    from scipy import interpolate
    import numpy as np

    f = netcdf_file(self.input_file)
    out = dict()
    lats = f.variables['lat'][:].copy()
    lons = f.variables['lon'][:].copy()
    # NOTE(review): the np.roll shift was truncated in the original source;
    # len(lons) // 2 on the longitude axis converts a 0..360 grid to
    # -180..180 — confirm against the original upstream.
    out['data'] = np.roll(
        f.variables[self.variable_name][:, :, :].copy(),
        shift=len(lons) // 2, axis=2)
    lons = np.roll(lons, shift=len(lons) // 2)
    lons[lons > 180] -= 360
    # Mask fill values (large negative sentinels) in the data cube.
    out['data'] = np.ma.array(out['data'])
    out['data'][out['data'] < -1000000.0] = np.ma.masked
    # Interpolators mapping coordinates to fractional array indices.
    out['lat_idx'] = interpolate.interp1d(x=lats, y=np.arange(len(lats)))
    out['lon_idx'] = interpolate.interp1d(x=lons, y=np.arange(len(lons)))
    f.close()
    return out
Load the climate data as a map Returns: dict: {data: masked 3D numpy array containing climate data per month (first axis), lat_idx: function converting a latitude to the (fractional) row index in the map, lon_idx: function converting a longitude to the (fractional) column index in the map}
codesearchnet