code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def help(self, print_output=True):
    """Call the ``help`` RPC, which lists the RPC calls available.

    Normally used in an interactive console where the output should be
    printed instead of returned (otherwise escaped newlines make the
    text hard to read).

    Args:
        print_output: A bool for whether the output should be printed.

    Returns:
        The help text as a str when ``print_output`` is False, else None.
    """
    output = self._rpc('help')
    if not print_output:
        return output
    print(output)
Calls the help RPC, which returns the list of RPC calls available. This RPC should normally be used in an interactive console environment where the output should be printed instead of returned. Otherwise, newlines will be escaped, which will make the output difficult to read. Args: print_output: A bool for whether the output should be printed. Returns: A str containing the help output if print_output is False; otherwise None.
codesearchnet
def surface_velocity(msg):
    """Decode surface velocity from a surface position message.

    Args:
        msg (string): 28 bytes hexadecimal message string

    Returns:
        (int, float, int, string): speed (kt), ground track (degree),
        rate of climb/descend (ft/min), and speed type
        ('GS' for ground speed, 'AS' for airspeed)
    """
    if common.typecode(msg) < 5 or common.typecode(msg) > 8:
        raise RuntimeError("%s: Not a surface message, expecting 5<TC<8" % msg)

    mb = common.hex2bin(msg)[32:]

    # Ground track: 7-bit field, only valid when the status bit is set.
    trk_status = int(mb[12])
    if trk_status == 1:
        trk = common.bin2int(mb[13:20]) * 360.0 / 128.0
        trk = round(trk, 1)
    else:
        trk = None

    # Movement (encoded ground speed), 7 bits.
    mov = common.bin2int(mb[5:12])

    if mov == 0 or mov > 124:
        spd = None  # not available / reserved codes
    elif mov == 1:
        spd = 0  # aircraft stopped
    elif mov == 124:
        spd = 175  # maximum reported speed bucket
    else:
        # Piecewise-linear interpolation between encoding breakpoints.
        movs = [2, 9, 13, 39, 94, 109, 124]
        kts = [0.125, 1, 2, 15, 70, 100, 175]
        i = next(m[0] for m in enumerate(movs) if m[1] > mov)
        step = (kts[i] - kts[i-1]) * 1.0 / (movs[i] - movs[i-1])
        spd = kts[i-1] + (mov - movs[i-1]) * step
        spd = round(spd, 2)

    return spd, trk, 0, 'GS'
Decode surface velocity from a surface position message Args: msg (string): 28 bytes hexadecimal message string Returns: (int, float, int, string): speed (kt), ground track (degree), rate of climb/descend (ft/min), and speed type ('GS' for ground speed, 'AS' for airspeed)
juraj-google-style
def __init__(self,
             key_dtype,
             value_dtype,
             default_value,
             name='MutableHashTable',
             checkpoint=True,
             experimental_is_anonymous=False):
    """Creates an empty `MutableHashTable` object.

    The types of the keys and values are specified by key_dtype and
    value_dtype, respectively.

    Args:
      key_dtype: the type of the key tensors.
      value_dtype: the type of the value tensors.
      default_value: The value to use if a key is missing in the table.
      name: A name for the operation (optional).
      checkpoint: if True, the contents of the table are saved to and
        restored from checkpoints. If `shared_name` is empty for a
        checkpointed table, it is shared using the table node name.
      experimental_is_anonymous: Whether to use anonymous mode for the
        table (default is False). In anonymous mode, the table resource
        can only be accessed via a resource handle; it can't be looked
        up by name and is deleted automatically when all handles are
        gone.
    """
    self._default_value = ops.convert_to_tensor(default_value, dtype=value_dtype)
    self._value_shape = self._default_value.get_shape()
    self._checkpoint = checkpoint
    self._key_dtype = key_dtype
    self._value_dtype = value_dtype
    self._name = name
    self._is_anonymous = experimental_is_anonymous
    if not self._is_anonymous:
        self._shared_name = None
        if context.executing_eagerly():
            # In eager mode, generate a unique shared_name so the
            # resource is identifiable across executions.
            self._shared_name = 'table_%d' % (ops.uid(),)
    super(MutableHashTable, self).__init__(key_dtype, value_dtype)
    self._resource_handle = self._create_resource()
    if checkpoint:
        # Register a saveable so table contents participate in
        # checkpoint save/restore (graph collections; graph mode only).
        saveable = MutableHashTable._Saveable(self, name)
        if not context.executing_eagerly():
            ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
Creates an empty `MutableHashTable` object. Creates a table, the type of its keys and values are specified by key_dtype and value_dtype, respectively. Args: key_dtype: the type of the key tensors. value_dtype: the type of the value tensors. default_value: The value to use if a key is missing in the table. name: A name for the operation (optional). checkpoint: if True, the contents of the table are saved to and restored from checkpoints. If `shared_name` is empty for a checkpointed table, it is shared using the table node name. experimental_is_anonymous: Whether to use anonymous mode for the table (default is False). In anonymous mode, the table resource can only be accessed via a resource handle. It can't be looked up by a name. When all resource handles pointing to that resource are gone, the resource will be deleted automatically. Returns: A `MutableHashTable` object. Raises: ValueError: If checkpoint is True and no name was specified.
github-repos
def walk(self, walker):
    """Walk each step in the underlying graph, in topological order.

    Args:
        walker (func): a walker function to be passed to
            :class:`stacker.dag.DAG` to walk the graph.
    """
    def walk_func(step):
        failed_deps = [
            dep for dep in self.graph.downstream(step.name) if not dep.ok
        ]
        if failed_deps:
            # A dependency failed: mark this step failed and stop here.
            step.set_status(FailedStatus('dependency has failed'))
            return step.ok
        return step.run()

    return self.graph.walk(walker, walk_func)
Walks each step in the underlying graph, in topological order. Args: walker (func): a walker function to be passed to :class:`stacker.dag.DAG` to walk the graph.
codesearchnet
def get_output_dict(stack):
    """Return a dict of key/values for the outputs of a given CF stack.

    Args:
        stack (dict): The stack object to get outputs from.

    Returns:
        dict: A dictionary with key/values for each output on the stack.
    """
    if 'Outputs' not in stack:
        return {}
    outputs = {}
    for entry in stack['Outputs']:
        logger.debug(" %s %s: %s", stack['StackName'],
                     entry['OutputKey'], entry['OutputValue'])
        outputs[entry['OutputKey']] = entry['OutputValue']
    return outputs
Returns a dict of key/values for the outputs for a given CF stack. Args: stack (dict): The stack object to get outputs from. Returns: dict: A dictionary with key/values for each output on the stack.
juraj-google-style
def create_sonos_playlist(self, title):
    """Create a new empty Sonos playlist.

    Args:
        title: Name of the playlist.

    Returns:
        :py:class:`~.soco.data_structures.DidlPlaylistContainer` for the
        newly created playlist.
    """
    response = self.avTransport.CreateSavedQueue([
        ('InstanceID', 0),
        ('Title', title),
        ('EnqueuedURI', ''),
        ('EnqueuedURIMetaData', ''),
    ])
    item_id = response['AssignedObjectID']
    obj_id = item_id.split(':', 2)[1]
    # NOTE(review): this URI literal was truncated in the original text;
    # reconstructed from upstream SoCo -- confirm against the library.
    uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
    res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
    return DidlPlaylistContainer(
        resources=res, title=title,
        parent_id='SQ:', item_id=item_id)
Create a new empty Sonos playlist. Args: title: Name of the playlist :rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
juraj-google-style
class TvpLoss(nn.Module):
    """Computes the losses for `TvpForVideoGrounding`.

    Args:
        losses (`List[str]`): List of all the losses to be applied.
    """

    def __init__(self, losses):
        super().__init__()
        # Map of supported loss names to their implementations.
        self.loss_map = {'iou': self.loss_iou, 'distance': self.loss_distance, 'duration': self.loss_duration}
        for loss in losses:
            if loss not in self.loss_map:
                raise ValueError(f'Loss {loss} not supported')
        self.losses = losses

    def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
        """1 - temporal IoU between candidate and ground-truth spans."""
        inter = torch.min(candidates_end_time, end_time) - torch.max(candidates_start_time, start_time)
        union = torch.max(candidates_end_time, end_time) - torch.min(candidates_start_time, start_time)
        iou = 1 - inter.clamp(min=0) / union
        return iou

    def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
        """Distance between span midpoints, normalized by duration and
        floored at 0.2."""
        mid_candidates = torch.div(torch.add(candidates_start_time, candidates_end_time), 2.0)
        mid_groundtruth = torch.div(torch.add(start_time, end_time), 2.0)
        distance_diff = torch.div(torch.max(mid_candidates, mid_groundtruth) - torch.min(mid_candidates, mid_groundtruth), duration).clamp(min=0.2)
        return distance_diff

    def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
        """Squared normalized difference of span lengths, floored at 0.4."""
        duration_candidates = torch.sub(candidates_end_time, candidates_start_time)
        duration_groundtruth = torch.sub(end_time, start_time)
        duration_diff = torch.square(torch.div(torch.sub(duration_candidates, duration_groundtruth), duration))
        duration_diff = duration_diff.clamp(min=0.4)
        return duration_diff

    def forward(self, logits, labels):
        """Compute each configured loss for the predicted spans.

        Args:
            logits: predicted (start, end) values, scaled by duration to
                obtain candidate times.
            labels: tuple of (duration, start_time, end_time).

        Returns:
            dict mapping each configured loss name to its tensor.
        """
        duration, start_time, end_time = labels
        candidates = torch.mul(logits, duration)
        candidates_start_time, candidates_end_time = (candidates[:, 0].float(), candidates[:, 1].float())
        losses_dict = {}
        for loss in self.losses:
            losses_dict.update({loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)})
        return losses_dict
This class computes the losses for `TvpForVideoGrounding`. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: losses (`List[str]`): List of all the losses to be applied.
github-repos
def diff(self, a_ref, target=None, b_ref=None):
    """Generates diff information between two git references.

    Args:
        a_ref (str): first git reference (e.g. a tag).
        target (str): file/directory to restrict the diff to (optional).
        b_ref (str): second git reference (optional).

    Returns:
        dict: diff info containing the two ref names, an equality flag
        when the trees are identical, and otherwise a list of per-file
        diff entries.

    Raises:
        FileNotInCommitError: if `target` was given but not found in the
            commits.
    """
    result = {}
    diff_dct = self.scm.get_diff_trees(a_ref, b_ref=b_ref)
    result[DIFF_A_REF] = diff_dct[DIFF_A_REF]
    result[DIFF_B_REF] = diff_dct[DIFF_B_REF]
    if diff_dct[DIFF_EQUAL]:
        # Nothing changed between the two trees; short-circuit.
        result[DIFF_EQUAL] = True
        return result
    result[DIFF_LIST] = []
    diff_outs = _get_diff_outs(self, diff_dct)
    if (target is None):
        # No target given: produce a diff entry for every changed output.
        result[DIFF_LIST] = [_diff_royal(self, path, diff_outs[path]) for path in diff_outs]
    elif (target in diff_outs):
        result[DIFF_LIST] = [_diff_royal(self, target, diff_outs[target])]
    else:
        msg = "Have not found file/directory '{}' in the commits"
        raise FileNotInCommitError(msg.format(target))
    return result
Generates diff message string output Args: a_ref(str) - first git reference target(str) - file/directory to check diff of (optional) b_ref(str) - second git reference (optional) Returns: string: string of output message with diff info
codesearchnet
def get_input(self, value, _search=None):
    """Search the tree for a step, depth-first.

    Args:
        value: The value to search for. If value is a string, the search
            looks for a step of that name. If the value is a type, it
            looks for a step of that type.

    Returns:
        The first step found via a depth-first search, or None.
    """
    if _search is None:
        # Pick the match function based on what we were asked to find.
        if isinstance(value, string_types):
            _search = lambda s: s.name
        elif isinstance(value, type):
            _search = type
    for child in self.inputs:
        found = child.get_input(value, _search)
        if found is not None:
            return found
    if _search(self) == value:
        return self
Searches the tree for a step Args: value: The value to search for. If value is a string then the search looks for a step of that name. If the value is a type, it looks for a step of that type. Returns: The first step found via a depth-first search.
juraj-google-style
def send(self, conn):
    """Send the message on the given connection.

    Args:
        conn (WebSocketHandler): a WebSocketHandler to send messages on.

    Returns:
        int: number of bytes sent (delivered via ``gen.Return``).

    Raises:
        ValueError: if conn is None.
    """
    if conn is None:
        raise ValueError("Cannot send to connection None")
    # Hold the write lock so header/metadata/content/buffers go out as
    # one uninterrupted sequence.
    with (yield conn.write_lock.acquire()):
        sent = 0
        yield conn.write_message(self.header_json, locked=False)
        sent += len(self.header_json)
        yield conn.write_message(self.metadata_json, locked=False)
        sent += len(self.metadata_json)
        yield conn.write_message(self.content_json, locked=False)
        sent += len(self.content_json)
        sent += yield self.write_buffers(conn, locked=False)
        # Tornado-style coroutine return value.
        raise gen.Return(sent)
Send the message on the given connection. Args: conn (WebSocketHandler) : a WebSocketHandler to send messages Returns: int : number of bytes sent
juraj-google-style
def generate(self, information, timeout=-1):
    """Generate a self-signed or internal-CA-signed certificate for
    RabbitMQ clients.

    Args:
        information (dict): Information to generate the certificate for
            RabbitMQ clients.
        timeout: Timeout in seconds. Waits for task completion by
            default. The timeout does not abort the operation in
            OneView; it just stops waiting for its completion.

    Returns:
        dict: RabbitMQ certificate generated.
    """
    client = self._client
    return client.create(information, timeout=timeout)
Generates a self signed certificate or an internal CA signed certificate for RabbitMQ clients. Args: information (dict): Information to generate the certificate for RabbitMQ clients. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: RabbitMQ certificate generated
codesearchnet
async def disconnect(self, conn_id):
    """Asynchronously disconnect from a connected device.

    Args:
        conn_id (int): A unique identifier that refers to this
            connection.
    """
    self._ensure_connection(conn_id, True)
    dev = self._get_property(conn_id, 'device')
    # Mark the device object itself as disconnected before tearing down
    # the bookkeeping for the connection.
    dev.connected = False
    self._teardown_connection(conn_id)
Asynchronously disconnect from a connected device Args: conn_id (int): A unique identifier that will refer to this connection
codesearchnet
def get_predicted_structure(self, structure, icsd_vol=False):
    """Given a structure, return a copy scaled to the predicted volume.

    Args:
        structure (Structure): structure w/unknown volume.

    Returns:
        a Structure object with predicted volume.
    """
    scaled = structure.copy()
    scaled.scale_lattice(self.predict(structure, icsd_vol=icsd_vol))
    return scaled
Given a structure, returns back the structure scaled to predicted volume. Args: structure (Structure): structure w/unknown volume Returns: a Structure object with predicted volume
juraj-google-style
def truncate(self, new_count):
    """Truncate the filter as if only *new_count* predict/update steps
    had been performed.

    If *new_count* is greater than the stored state count this is a
    no-op. Truncated measurements, state estimates, process matrices
    and process covariances are discarded.

    Args:
        new_count (int): Number of states to retain.
    """
    keep = slice(None, new_count)
    self.posterior_state_estimates = self.posterior_state_estimates[keep]
    self.prior_state_estimates = self.prior_state_estimates[keep]
    self.measurements = self.measurements[keep]
    self.process_matrices = self.process_matrices[keep]
    self.process_covariances = self.process_covariances[keep]
Truncate the filter as if only *new_count* :py:meth:`.predict`, :py:meth:`.update` steps had been performed. If *new_count* is greater than :py:attr:`.state_count` then this function is a no-op. Measurements, state estimates, process matrices and process noises which are truncated are discarded. Args: new_count (int): Number of states to retain.
juraj-google-style
def __init__(self, name, **kwargs):
    """Creates a trace event in the profiler.

    Args:
        name: The name of the trace event.
        **kwargs: Keyword arguments added to the trace event; both keys
            and values must be convertible to strings.
    """
    # `enabled` is a module-level flag; when profiling is off, no native
    # TraceMe object is created and this is a cheap no-op.
    if enabled:
        self._traceme = _pywrap_traceme.TraceMe(name, **kwargs)
    else:
        self._traceme = None
Creates a trace event in the profiler. Args: name: The name of the trace event. **kwargs: Keyword arguments added to the trace event. Both the key and value are of types that can be converted to strings, which will be interpreted by the profiler according to the traceme name. Example usage: ```python tf.profiler.experimental.start('logdir') for step in range(num_steps): # Creates a trace event for each training step with the # step number. with tf.profiler.experimental.Trace("Train", step_num=step): train_fn() tf.profiler.experimental.stop() ``` The example above uses the keyword argument "step_num" to specify the training step being traced.
github-repos
def BuildServiceStub(self, cls):
    """Constructs the stub class.

    Args:
        cls: The class that will be constructed.
    """
    def _ServiceStubInit(stub, rpc_channel):
        # The stub's constructor only records the channel it talks over.
        stub.rpc_channel = rpc_channel
    self.cls = cls
    cls.__init__ = _ServiceStubInit
    # Attach one generated stub method per RPC method in the descriptor.
    for method in self.descriptor.methods:
        setattr(cls, method.name, self._GenerateStubMethod(method))
Constructs the stub class. Args: cls: The class that will be constructed.
juraj-google-style
async def send_script(self, conn_id, data):
    """Asynchronously send a script to this IOTile device.

    Progress is reported once at the start (0 of len) and once at the
    end (len of len) of the transfer.

    Args:
        conn_id (int): A unique identifier that will refer to this
            connection.
        data (bytes or bytearray): the script to send to the device.
    """
    self._ensure_connection(conn_id, True)
    dev = self._get_property(conn_id, 'device')
    conn_string = self._get_property(conn_id, 'connection_string')

    await self.notify_progress(conn_string, 'script', 0, len(data))
    await self.notify_progress(conn_string, 'script', len(data), len(data))
    dev.script = data
Asynchronously send a script to this IOTile device Args: conn_id (int): A unique identifier that will refer to this connection data (bytes or bytearray): the script to send to the device
juraj-google-style
def start_cluster_server(ctx, num_gpus=1, rdma=False):
    """Wrap the creation of a TensorFlow ``tf.train.Server`` for a node
    in a distributed TensorFlow cluster.

    Intended to be invoked from within the TF ``map_fun``, replacing
    explicit code to instantiate ``tf.train.ClusterSpec`` and
    ``tf.train.Server`` objects.

    Args:
        ctx: TFNodeContext containing the metadata specific to this node
            in the cluster.
        num_gpus: number of GPUs desired.
        rdma: boolean indicating if RDMA ('grpc+verbs') should be used
            for cluster communications.

    Returns:
        A tuple of (cluster_spec, server).
    """
    import tensorflow as tf
    from . import gpu_info
    logging.info('{0}: ======== {1}:{2} ========'.format(ctx.worker_num, ctx.job_name, ctx.task_index))
    cluster_spec = ctx.cluster_spec
    logging.info('{0}: Cluster spec: {1}'.format(ctx.worker_num, cluster_spec))
    if (tf.test.is_built_with_cuda() and (num_gpus > 0)):
        # Work out this node's index among processes on the same host so
        # that each local peer claims a distinct set of GPUs.
        my_addr = cluster_spec[ctx.job_name][ctx.task_index]
        my_host = my_addr.split(':')[0]
        flattened = [v for sublist in cluster_spec.values() for v in sublist]
        local_peers = [p for p in flattened if p.startswith(my_host)]
        my_index = local_peers.index(my_addr)
        gpu_initialized = False
        retries = 3
        # GPU allocation can fail transiently; retry up to 3 times with a
        # 10s pause between attempts.
        while ((not gpu_initialized) and (retries > 0)):
            try:
                if (ctx.job_name == 'ps'):
                    num_gpus = 1  # parameter servers claim a single GPU
                gpus_to_use = gpu_info.get_gpus(num_gpus, my_index)
                gpu_prompt = ('GPU' if (num_gpus == 1) else 'GPUs')
                logging.info('{0}: Using {1}: {2}'.format(ctx.worker_num, gpu_prompt, gpus_to_use))
                os.environ['CUDA_VISIBLE_DEVICES'] = gpus_to_use
                cluster = tf.train.ClusterSpec(cluster_spec)
                if rdma:
                    server = tf.train.Server(cluster, ctx.job_name, ctx.task_index, protocol='grpc+verbs')
                else:
                    server = tf.train.Server(cluster, ctx.job_name, ctx.task_index)
                gpu_initialized = True
            except Exception as e:
                print(e)
                logging.error('{0}: Failed to allocate GPU, trying again...'.format(ctx.worker_num))
                retries -= 1
                time.sleep(10)
        if (not gpu_initialized):
            raise Exception('Failed to allocate GPU')
    else:
        # CPU-only path: hide all GPUs from TensorFlow.
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        logging.info('{0}: Using CPU'.format(ctx.worker_num))
        cluster = tf.train.ClusterSpec(cluster_spec)
        server = tf.train.Server(cluster, ctx.job_name, ctx.task_index)
    return (cluster, server)
Function that wraps the creation of TensorFlow ``tf.train.Server`` for a node in a distributed TensorFlow cluster. This is intended to be invoked from within the TF ``map_fun``, replacing explicit code to instantiate ``tf.train.ClusterSpec`` and ``tf.train.Server`` objects. Args: :ctx: TFNodeContext containing the metadata specific to this node in the cluster. :num_gpus: number of GPUs desired :rdma: boolean indicating if RDMA ('grpc+verbs') should be used for cluster communications. Returns: A tuple of (cluster_spec, server)
codesearchnet
def register_event(self, direction, verb, child_fn, priority=10):
    """Register an event with all servers.

    Args:
        direction (str): `in`, `out`, `both`, or `girc`.
        verb (str): Event name, `all`, or `raw`.
        child_fn (function): Handler function.
        priority (int): Handler priority (lower priority executes
            first).

    Note:
        `all` will not match `raw` events. If you wish to receive both
        `raw` and all other events, register these separately.
    """
    targets = {
        'in': [self._events_in],
        'out': [self._events_out],
        'both': [self._events_in, self._events_out],
        'girc': [self._girc_events],
    }.get(direction, [])
    for manager in targets:
        manager.register(verb, child_fn, priority=priority)
Register an event with all servers. Args: direction (str): `in`, `out`, `both`, or `girc`. verb (str): Event name, `all`, or `raw`. child_fn (function): Handler function. priority (int): Handler priority (lower priority executes first). Note: `all` will not match `raw` events. If you wish to receive both `raw` and all other events, you need to register these separately.
juraj-google-style
def AFF4Path(self, client_urn):
    """Returns the AFF4 URN this pathspec will be stored under.

    Args:
        client_urn: A ClientURN.

    Returns:
        A urn that corresponds to this pathspec.

    Raises:
        ValueError: If pathspec is not of the correct type.
    """
    if not self.HasField("pathtype"):
        raise ValueError("Can't determine AFF4 path without a valid pathtype.")

    first_component = self[0]
    dev = first_component.path
    if first_component.HasField("offset"):
        # NOTE(review): the offset formatting was truncated in the
        # original text; reconstructed from upstream GRR, which divides
        # by 512 for prettier sector numbers -- confirm.
        dev += ":{}".format(first_component.offset // 512)

    # An OS path followed by a TSK path means the first component is the
    # raw device that the TSK filesystem lives on.
    if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
            self[1].pathtype == PathSpec.PathType.TSK):
        result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
        start = 1
    else:
        result = [self.AFF4_PREFIXES[first_component.pathtype]]
        start = 0

    # The original iterated `self[start]` (a single element); iterating
    # the slice from `start` onward covers every remaining component.
    for p in self[start:]:
        component = p.path
        if p.HasField("offset"):
            component += ":{}".format(p.offset // 512)
        if p.HasField("stream_name"):
            component += ":" + p.stream_name
        result.append(component)

    return client_urn.Add("/".join(result))
Returns the AFF4 URN this pathspec will be stored under. Args: client_urn: A ClientURN. Returns: A urn that corresponds to this pathspec. Raises: ValueError: If pathspec is not of the correct type.
juraj-google-style
def default_storable(python_type, exposes=None, version=None, storable_type=None, peek=default_peek):
    """Default mechanics for building the storable instance for a type.

    Arguments:
        python_type (type): type.
        exposes (iterable): attributes exposed by the type.
        version (tuple): version number.
        storable_type (str): universal string identifier for the type.
        peek (callable): peeking routine.

    Returns:
        Storable: storable instance.

    Raises:
        AttributeError: if `exposes` was not given and could not be
            derived from any registered expose extension.
    """
    if not exposes:
        # Try each registered extension until one yields a non-empty
        # `exposes` for this type.
        for extension in expose_extensions:
            try:
                exposes = extension(python_type)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                # HACK: deliberately best-effort -- a failing extension
                # is skipped and the next one is tried.
                pass
            else:
                if exposes:
                    break
    if not exposes:
        raise AttributeError('`exposes` required for type: {!r}'.format(python_type))
    return Storable(python_type, key=storable_type, \
        handlers=StorableHandler(version=version, exposes=exposes, \
        poke=poke(exposes), peek=peek(python_type, exposes)))
Default mechanics for building the storable instance for a type. Arguments: python_type (type): type. exposes (iterable): attributes exposed by the type. version (tuple): version number. storable_type (str): universal string identifier for the type. peek (callable): peeking routine. Returns: Storable: storable instance.
juraj-google-style
def export_model(self, export_formats, export_dir=None):
    """Export the model based on export_formats.

    Subclasses should override _export_model() to actually export the
    model to a local directory.

    Args:
        export_formats (list): List of formats that should be exported.
        export_dir (str): Optional dir to place the exported model.
            Defaults to self.logdir.

    Returns:
        A dict that maps ExportFormats to successfully exported models.
    """
    target_dir = export_dir if export_dir else self.logdir
    return self._export_model(export_formats, target_dir)
Exports model based on export_formats. Subclasses should override _export_model() to actually export model to local directory. Args: export_formats (list): List of formats that should be exported. export_dir (str): Optional dir to place the exported model. Defaults to self.logdir. Return: A dict that maps ExportFormats to successfully exported models.
juraj-google-style
def fill_tree(self, tree, input_dict):
    """Fills a tree view with nested parameters.

    Args:
        tree: QtGui.QTreeView to populate.
        input_dict: mapping of instrument name to a comma-separated
            string of probe names.
    """
    def removeAll(tree):
        # Clear all existing rows from the tree's model before refilling.
        if tree.model().rowCount() > 0:
            for i in range(0, tree.model().rowCount()):
                item = tree.model().item(i)
                del item
            tree.model().removeRows(0, tree.model().rowCount())
        tree.model().reset()

    def add_probe(tree, instrument, probes):
        # One non-editable top-level item per instrument, with one
        # draggable, selectable child item per probe.
        item = QtGui.QStandardItem(instrument)
        item.setEditable(False)
        for probe in probes.split(','):
            child_name = QtGui.QStandardItem(probe)
            child_name.setDragEnabled(True)
            child_name.setSelectable(True)
            child_name.setEditable(False)
            item.appendRow(child_name)
        tree.model().appendRow(item)

    removeAll(tree)
    for index, (instrument, probes) in enumerate(input_dict.items()):
        add_probe(tree, instrument, probes)
    tree.expandAll()
fills a tree with nested parameters Args: tree: QtGui.QTreeView parameters: dictionary or Parameter object Returns:
juraj-google-style
def github_belspec_files(spec_dir, force: bool = False):
    """Get BEL Specification files from the Github repo.

    Args:
        spec_dir: directory to store the BEL Specification and derived
            files.
        force: force update of BEL Specifications from Github - skipped
            if local files are less than 1 day old.
    """
    if not force:
        # Skip the download entirely if any local spec is < 1 day old.
        dtnow = datetime.datetime.utcnow()
        delta = datetime.timedelta(1)
        yesterday = dtnow - delta
        for fn in glob.glob(f'{spec_dir}/bel*yaml'):
            if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday:
                log.info('Skipping BEL Specification update - specs less than 1 day old')
                return

    # NOTE(review): this URL literal was truncated in the original text;
    # reconstructed from the BELbio spec repository -- confirm.
    repo_url = 'https://api.github.com/repos/belbio/bel_specifications/contents/spec_files'

    params = {}
    github_access_token = os.getenv('GITHUB_ACCESS_TOKEN', '')
    if github_access_token:
        params = {'access_token': github_access_token}

    r = requests.get(repo_url, params=params)
    if r.status_code != 200:
        sys.exit(f'Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}')

    for f in r.json():
        url = f['download_url']
        fn = os.path.basename(url)
        # Normalize the extension to .yaml
        if ('yaml' not in fn) and ('yml' in fn):
            fn = fn.replace('yml', 'yaml')
        r = requests.get(url, params=params, allow_redirects=True)
        if r.status_code != 200:
            sys.exit(f'Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}')
        # Use a context manager so the file handle is always closed (the
        # original leaked the handle via a bare open().write()).
        with open(f'{spec_dir}/{fn}', 'wb') as fh:
            fh.write(r.content)
Get belspec files from Github repo Args: spec_dir: directory to store the BEL Specification and derived files force: force update of BEL Specifications from Github - skipped if local files less than 1 day old
codesearchnet
def get_obj(self, objpath, metahash, dst_path):
    """Get object from cache, write it to dst_path.

    Args:
        objpath: filename relative to buildroot
            (example: mini-boot/blahblah/somefile.bin)
        metahash: metahash. See targets/base.py
        dst_path: Absolute path where the file should be written.

    Raises:
        CacheMiss: if the item is not in the cache.
    """
    incachepath = self.path_in_cache(objpath, metahash)
    if not os.path.exists(incachepath):
        raise CacheMiss('%s not in cache.' % incachepath)
    log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())
    dst_dir = os.path.dirname(dst_path)
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    # Hard-link instead of copying to avoid duplicating file contents.
    os.link(incachepath, dst_path)
Get object from cache, write it to dst_path. Args: objpath: filename relative to buildroot (example: mini-boot/blahblah/somefile.bin) metahash: metahash. See targets/base.py dst_path: Absolute path where the file should be written. Raises: CacheMiss: if the item is not in the cache
codesearchnet
def run(self, resources):
    """Actually send the trub script.

    Args:
        resources (dict): A dictionary containing the required resources
            that we needed access to in order to perform this step; must
            contain a 'connection' entry.
    """
    connection = resources['connection']
    device_updater = connection.hwman.app(name='device_updater')
    device_updater.run_script(self._script, no_reboot=self._no_reboot)
Actually send the trub script. Args: resources (dict): A dictionary containing the required resources that we needed access to in order to perform this step.
codesearchnet
def run_inference(self, batch: Sequence[numpy.ndarray], inference_session: ort.InferenceSession, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
    """Runs inferences on a batch of numpy arrays.

    Args:
        batch: A sequence of examples as numpy arrays. They should be
            single examples.
        inference_session: An onnx inference session. Must be runnable
            with input x where x is a sequence of numpy arrays.
        inference_args: Any additional arguments for an inference.

    Returns:
        An Iterable of type PredictionResult pairing each input with its
        prediction.
    """
    predictions = self._model_inference_fn(inference_session, batch, inference_args)
    return utils._convert_to_result(batch, predictions)
Runs inferences on a batch of numpy arrays. Args: batch: A sequence of examples as numpy arrays. They should be single examples. inference_session: An onnx inference session. Must be runnable with input x where x is sequence of numpy array inference_args: Any additional arguments for an inference. Returns: An Iterable of type PredictionResult.
github-repos
def mt_report(context, case_id, test, outpath=None):
    """Export all mitochondrial variants for each sample of a case and
    write them to an excel file per sample.

    Args:
        context: click context carrying the adapter (MongoAdapter).
        case_id (str): id of the case to export.
        test (bool): True if the function is called for testing purposes.
        outpath (str): path to output directory (defaults to cwd).

    Returns:
        written_files (int): number of written or simulated files.
    """
    LOG.info('exporting mitochondrial variants for case "{}"'.format(case_id))
    adapter = context.obj['adapter']
    query = {'chrom': 'MT'}
    case_obj = adapter.case(case_id=case_id)
    if (not case_obj):
        LOG.warning('Could not find a scout case with id "{}". No report was created.'.format(case_id))
        context.abort()
    samples = case_obj.get('individuals')
    # nr_of_variants=-1 fetches all matching variants.
    mt_variants = list(adapter.variants(case_id=case_id, query=query, nr_of_variants=(- 1), sort_key='position'))
    if (not mt_variants):
        LOG.warning('There are no MT variants associated to case {} in database!'.format(case_id))
        context.abort()
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    if (not outpath):
        outpath = str(os.getcwd())
    written_files = 0
    for sample in samples:
        sample_id = sample['individual_id']
        sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)
        document_name = ('.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx')
        workbook = Workbook(os.path.join(outpath, document_name))
        Report_Sheet = workbook.add_worksheet()
        if (test and sample_lines and workbook):
            # Test mode: count the file as written without writing it.
            written_files += 1
            continue
        # Write the header row, then one row per variant line.
        row = 0
        for (col, field) in enumerate(MT_EXPORT_HEADER):
            Report_Sheet.write(row, col, field)
        for (row, line) in enumerate(sample_lines, 1):
            for (col, field) in enumerate(line):
                Report_Sheet.write(row, col, field)
        workbook.close()
        if os.path.exists(os.path.join(outpath, document_name)):
            written_files += 1
    if test:
        LOG.info('Number of excel files that can be written to folder {0}: {1}'.format(outpath, written_files))
    else:
        LOG.info('Number of excel files written to folder {0}: {1}'.format(outpath, written_files))
    return written_files
Export all mitochondrial variants for each sample of a case and write them to an excel file Args: adapter(MongoAdapter) case_id(str) test(bool): True if the function is called for testing purposes outpath(str): path to output file Returns: written_files(int): number of written or simulated files
codesearchnet
def Create(self, request, global_params=None):
    """Creates a new `BuildTrigger`. This API is experimental.

    Args:
        request: (CloudbuildProjectsTriggersCreateRequest) input message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (BuildTrigger) The response message.
    """
    method_config = self.GetMethodConfig('Create')
    return self._RunMethod(
        method_config, request, global_params=global_params)
Creates a new `BuildTrigger`. This API is experimental. Args: request: (CloudbuildProjectsTriggersCreateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (BuildTrigger) The response message.
github-repos
def _country_level_time_zones_for_number(numobj):
    """Returns the list of time zones corresponding to the country
    calling code of a number.

    Arguments:
    numobj -- the phone number to look up

    Returns a list of the corresponding time zones, or a single-element
    list with the default unknown time zone if no other time zone was
    found or if the number was invalid.
    """
    cc = str(numobj.country_code)
    for prefix_len in range(TIMEZONE_LONGEST_PREFIX, 0, -1):
        # NOTE(review): the `1 +` offset assumes TIMEZONE_DATA keys are
        # one character longer than prefix_len -- confirm against the
        # timezone data generator.
        prefix = cc[:(1 + prefix_len)]
        if prefix in TIMEZONE_DATA:
            return TIMEZONE_DATA[prefix]
    return _UNKNOWN_TIME_ZONE_LIST
Returns the list of time zones corresponding to the country calling code of a number. Arguments: numobj -- the phone number to look up Returns a list of the corresponding time zones or a single element list with the default unknown time zone if no other time zone was found or if the number was invalid
juraj-google-style
def _parse_list(cls, args): argparser = ArgumentParser(prog="cluster list") group = argparser.add_mutually_exclusive_group() group.add_argument("--id", dest="cluster_id", help="show cluster with this id") group.add_argument("--label", dest="label", help="show cluster with this label") group.add_argument("--state", dest="state", action="store", choices=['up', 'down', 'pending', 'terminating'], help="list only clusters in the given state") pagination_group = group.add_argument_group() pagination_group.add_argument("--page", dest="page", action="store", type=int, help="page number") pagination_group.add_argument("--per-page", dest="per_page", action="store", type=int, help="number of clusters to be retrieved per page") arguments = argparser.parse_args(args) return vars(arguments)
Parse command line arguments to construct a dictionary of cluster parameters that can be used to determine which clusters to list. Args: `args`: sequence of arguments Returns: Dictionary that can be used to determine which clusters to list
juraj-google-style
def verified(context, collaborator, test, outpath=None):
    """Export variants which have been verified for an institute and
    write them to an excel file.

    Args:
        context: click context carrying the adapter.
        collaborator (str): institute id (defaults to 'cust000').
        test (bool): True if the function is called for testing purposes.
        outpath (str): path to output directory (defaults to cwd).

    Returns:
        written_files (int): number of written or simulated files, or
        None when there are no verified variants.
    """
    written_files = 0
    collaborator = (collaborator or 'cust000')
    LOG.info('Exporting verified variants for cust {}'.format(collaborator))
    adapter = context.obj['adapter']
    verified_vars = adapter.verified(institute_id=collaborator)
    LOG.info('FOUND {} verified variants for institute {}'.format(len(verified_vars), collaborator))
    if (not verified_vars):
        LOG.warning('There are no verified variants for institute {} in database!'.format(collaborator))
        return None
    document_lines = export_verified_variants(verified_vars)
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    document_name = ('.'.join(['verified_variants', collaborator, today]) + '.xlsx')
    if (test and document_lines):
        # Test mode: simulate the write and return early.
        written_files += 1
        LOG.info('Success. Verified variants file contains {} lines'.format(len(document_lines)))
        return written_files
    if (not outpath):
        outpath = str(os.getcwd())
    workbook = Workbook(os.path.join(outpath, document_name))
    Report_Sheet = workbook.add_worksheet()
    # Write the header row, then one row per variant line.
    row = 0
    for (col, field) in enumerate(VERIFIED_VARIANTS_HEADER):
        Report_Sheet.write(row, col, field)
    for (row, line) in enumerate(document_lines, 1):
        for (col, field) in enumerate(line):
            Report_Sheet.write(row, col, field)
    workbook.close()
    if os.path.exists(os.path.join(outpath, document_name)):
        LOG.info('Success. Verified variants file of {} lines was written to disk'.format(len(document_lines)))
        written_files += 1
    return written_files
Export variants which have been verified for an institute and write them to an excel file. Args: collaborator(str): institute id test(bool): True if the function is called for testing purposes outpath(str): path to output file Returns: written_files(int): number of written or simulated files
codesearchnet
def most_frequent_terms(self, depth):
    """Get the X most frequent terms in the text, then probe down to get
    any other terms that have the same count as the last term.

    Args:
        depth (int): The number of terms.

    Returns:
        set: The set of frequent terms.
    """
    ordered = list(self.term_counts().items())
    top_terms = {term for term, _ in ordered[:depth]}
    # Include every term tied with the count of the last selected term.
    cutoff_count = ordered[:depth][-1][1]
    tied_terms = self.term_count_buckets()[cutoff_count]
    return top_terms | set(tied_terms)
Get the X most frequent terms in the text, and then probe down to get any other terms that have the same count as the last term. Args: depth (int): The number of terms. Returns: set: The set of frequent terms.
juraj-google-style
def encrypt(self, mesg):
    """Wrap a message with a sequence number and encrypt it.

    Args:
        mesg: The mesg to encrypt.

    Returns:
        bytes: The encrypted message.
    """
    seqn = next(self._tx_sn)
    packed = s_msgpack.en((seqn, mesg))
    return self._tx_tinh.enc(packed)
Wrap a message with a sequence number and encrypt it. Args: mesg: The mesg to encrypt. Returns: bytes: The encrypted message.
juraj-google-style
def __init__(self, output_mediator):
    """Initializes an Elasticsearch output module.

    Args:
        output_mediator (OutputMediator): mediates interactions between
            output modules and other components, such as storage and
            dfvfs.
    """
    super(ElasticsearchOutputModule, self).__init__(output_mediator)
    # Raw-fields mode is off by default; presumably toggled elsewhere --
    # TODO confirm its semantics against the module's writer methods.
    self._raw_fields = False
Initializes an Elasticsearch output module. Args: output_mediator (OutputMediator): mediates interactions between output modules and other components, such as storage and dfvfs.
juraj-google-style
def _create_object_from_type_and_dict(cls, obj_dict): value = object.__new__(cls) value.__dict__.update(obj_dict) return value
Creates an object, bypassing the constructor. Creates an object of type `cls`, whose `__dict__` is updated to contain `obj_dict`. Args: cls: The type of the new object. obj_dict: A `Mapping` that should be used to initialize the new object's `__dict__`. Returns: An object of type `cls`.
github-repos
def ice_register_write(self, register_index, value, delay=False):
    """Writes a value to an ARM ICE register.

    Args:
        self (JLink): the ``JLink`` instance.
        register_index (int): the ICE register to write to.
        value (int): the value to write to the ICE register.
        delay (bool): boolean specifying if the write should be delayed.

    Returns:
        ``None``
    """
    # The DLL expects plain ints for both the value and the delay flag.
    self._dll.JLINKARM_WriteICEReg(register_index, int(value), int(delay))
    return None
Writes a value to an ARM ICE register. Args: self (JLink): the ``JLink`` instance register_index (int): the ICE register to write to value (int): the value to write to the ICE register delay (bool): boolean specifying if the write should be delayed Returns: ``None``
codesearchnet
def _get_parser_call_method(self, parser_to_method):
    """Return the parser special method 'call' that handles sub-command
    calling.

    Args:
        parser_to_method: mapping of the parser registered name to the
            method it is linked to.
    """
    def inner_call(args=None, instance=None):
        # Parse either the explicit args or the process command line.
        parser = self._cls.parser
        namespace = parser.parse_args(_get_args_to_parse(args, sys.argv))
        if instance is None:
            # No instance supplied: build one via the decorated __init__.
            if "__init__" not in parser_to_method:
                raise ParseThisError(("'__init__' method is not decorated. "
                                      "Please provide an instance to "
                                      "'{}.parser.call' or decorate the "
                                      "'__init___' method with "
                                      "'create_parser'"
                                      .format(self._cls.__name__)))
            instance = _call_method_from_namespace(self._cls, "__init__",
                                                   namespace)
        # Dispatch to the method the chosen sub-command maps to.
        method_name = parser_to_method[namespace.method]
        return _call_method_from_namespace(instance, method_name, namespace)
    return inner_call
Return the parser special method 'call' that handles sub-command calling. Args: parser_to_method: mapping of the parser registered name to the method it is linked to
juraj-google-style
def lookup_entity(self, entity=None):
    """Returns lookup data of an ADIF Entity.

    Args:
        entity (int): ADIF identifier of country.

    Returns:
        dict: Dictionary containing the country specific data.

    Raises:
        KeyError: No matching entity found, or the configured lookup
            type does not support entity lookup.

    Note:
        This method is available for the following lookup types:
        clublogxml, redis, qrz.com.
    """
    if (self._lookuptype == 'clublogxml'):
        entity = int(entity)
        if (entity in self._entities):
            return self._strip_metadata(self._entities[entity])
        else:
            raise KeyError
    elif (self._lookuptype == 'redis'):
        if (self._redis_prefix is None):
            raise KeyError('redis_prefix is missing')
        # Entities are stored under "<prefix>_entity_<id>" keys.
        json_data = self._redis.get(((self._redis_prefix + '_entity_') + str(entity)))
        if (json_data is not None):
            my_dict = self._deserialize_data(json_data)
            return self._strip_metadata(my_dict)
    elif (self._lookuptype == 'qrz'):
        result = self._lookup_qrz_dxcc(entity, self._apikey)
        return result
    # Fall through: unsupported lookup type or redis key not found.
    raise KeyError
Returns lookup data of an ADIF Entity Args: entity (int): ADIF identifier of country Returns: dict: Dictionary containing the country specific data Raises: KeyError: No matching entity found Example: The following code queries the the Clublog XML database for the ADIF entity Turkmenistan, which has the id 273. >>> from pyhamtools import LookupLib >>> my_lookuplib = LookupLib(lookuptype="clublogapi", apikey="myapikey") >>> print my_lookuplib.lookup_entity(273) { 'deleted': False, 'country': u'TURKMENISTAN', 'longitude': 58.4, 'cqz': 17, 'prefix': u'EZ', 'latitude': 38.0, 'continent': u'AS' } Note: This method is available for the following lookup type - clublogxml - redis - qrz.com
codesearchnet
def as_data_frame(self, **kwargs):
    """Summarize all Files tracked in the Layout as a pandas DataFrame.

    Args:
        kwargs: Optional filters forwarded to ``get()`` so that only a
            subset of files is exported.

    Returns:
        A pandas DataFrame with one row per file; the first column is
        'path' and the remaining columns are the tracked entities.
    """
    try:
        import pandas as pd
    except ImportError:
        raise ImportError("What are you doing trying to export a Layout "
                          "as a pandas DataFrame when you don't have "
                          "pandas installed? Eh? Eh?")
    if not kwargs:
        selected = self.files.values()
    else:
        selected = self.get(return_type='obj', **kwargs)
    selected = list(selected)
    frame = pd.DataFrame.from_records([item.entities for item in selected])
    frame.insert(0, 'path', [item.path for item in selected])
    return frame
Return information for all Files tracked in the Layout as a pandas DataFrame. Args: kwargs: Optional keyword arguments passed on to get(). This allows one to easily select only a subset of files for export. Returns: A pandas DataFrame, where each row is a file, and each column is a tracked entity. NaNs are injected whenever a file has no value for a given attribute.
juraj-google-style
def ProcessMessage(self, message):
    """Begin an enrollment flow for the client that sent this message.

    Args:
        message: The Certificate (CSR) sent by the client.  Note that this
            message is not authenticated.
    """
    cert = rdf_crypto.Certificate(message.payload)
    queue = self.well_known_session_id.Queue()
    client_id = message.source
    # Throttle: only start one enrollment per client while the cache entry
    # is alive.
    try:
        enrolment_cache.Get(client_id)
        return
    except KeyError:
        enrolment_cache.Put(client_id, 1)
    if data_store.AFF4Enabled():
        client = aff4.FACTORY.Create(
            client_id, aff4_grr.VFSGRRClient, mode="rw", token=self.token)
        client_cert = client.Get(client.Schema.CERT)
    if data_store.RelationalDBEnabled():
        # Relational store takes precedence over the AFF4 result when both
        # are enabled.
        try:
            md = data_store.REL_DB.ReadClientMetadata(client_id.Basename())
            client_cert = md.certificate
        except db.UnknownClientError:
            client_cert = None
    if data_store.RelationalDBEnabled():
        data_store.REL_DB.WriteClientMetadata(
            client_id.Basename(), fleetspeak_enabled=False)
    # Only enroll clients that do not yet have a certificate.
    if not client_cert:
        flow.StartAFF4Flow(
            client_id=client_id,
            flow_name=CAEnroler.__name__,
            csr=cert,
            queue=queue,
            token=self.token)
Begins an enrollment flow for this client. Args: message: The Certificate sent by the client. Note that this message is not authenticated.
juraj-google-style
def delete(self, entity):
    """Register *entity* for deletion from the datastore.

    Args:
        entity: an entity, model instance, or key to delete.  Entities that
            cannot be normalized to a key are delegated to ``ndb_delete``.
    """
    normalized = _normalize_key(entity)
    if normalized is not None:
        self.deletes.append(normalized)
        return
    return self.ndb_delete(entity)
Registers entity to delete from datastore. Args: entity: an entity, model instance, or key to delete.
codesearchnet
def baby_names(max_length=15):
    """Open the baby_names csv file and produce numpy arrays.

    Args:
        max_length: The maximum name length; 15 was the longest name when
            this was written.  Shorter entries are padded with the EOS
            marker.

    Returns:
        A numpy array of the names converted to ascii codes, the labels and
        an array of lengths.

    Raises:
        ValueError: if max_length is too small or a row's label values do
            not sum to 1.
    """
    names = []
    lengths = []
    targets = []
    csv_path = os.path.join(
        os.path.dirname(sys.modules[__name__].__file__), 'baby_names.csv')
    # Text mode with newline='' is what csv.reader expects on Python 3;
    # the original opened the file in 'rb', which makes csv.reader fail on
    # bytes input.
    with open(csv_path, 'r', newline='') as f:
        reader = csv.reader(f, delimiter=',')
        next(reader, None)  # Skip the header row.
        for row in reader:
            assert len(row) == 4, row
            name = row[0]
            if max_length < len(name):
                raise ValueError('Max length is too small: %d > %d' %
                                 (max_length, len(name)))
            chars = [convert_to_int(c) for c in name]
            names.append(chars + [EOS] * (max_length - len(chars)))
            lengths.append([len(name)])
            values = [float(row[2]), float(row[3])]
            if abs(sum(values) - 1) > 0.001:
                raise ValueError('Each row must sum to 1: %s' % row)
            targets.append(values)
    return (np.array(names), np.array(targets), np.array(lengths))
Opens the baby_names csv file and produces numpy array. Args: max_length: The maximum length, 15 was the longest name when this was written. Short entries will be padded with the EOS marker. Returns: A numpy array of the names converted to ascii codes, the labels and an array of lengths. Raises: ValueError: if max_length is too small.
codesearchnet
def push_datapackage(descriptor, backend, **backend_options):
    """Push a Data Package to storage (deprecated; use the Package class).

    Args:
        descriptor (str): path to descriptor.
        backend (str): backend name like `sql` or `bigquery`.
        backend_options (dict): backend options mentioned in backend docs.

    Returns:
        The backend storage object after all tables have been written.
    """
    warnings.warn('Functions "push/pull_datapackage" are deprecated. Please use "Package" class', UserWarning)
    tables = []
    schemas = []
    datamap = {}
    mapping = {}
    model = Package(descriptor)
    # Backend plugins live under jsontableschema.plugins.<backend>.
    plugin = import_module(('jsontableschema.plugins.%s' % backend))
    storage = plugin.Storage(**backend_options)
    for resource in model.resources:
        if (not resource.tabular):
            continue
        name = resource.descriptor.get('name', None)
        table = _convert_path(resource.descriptor['path'], name)
        schema = resource.descriptor['schema']
        data = resource.table.iter(keyed=True)
        # Lazily converts keyed rows into tuples ordered by the schema
        # fields; bound immediately below so late binding is not an issue.
        def values(schema, data):
            for item in data:
                row = []
                for field in schema['fields']:
                    row.append(item.get(field['name'], None))
                (yield tuple(row))
        tables.append(table)
        schemas.append(schema)
        datamap[table] = values(schema, data)
        if (name is not None):
            mapping[name] = table
    schemas = _convert_schemas(mapping, schemas)
    # Drop any pre-existing buckets before recreating them.
    for table in tables:
        if (table in storage.buckets):
            storage.delete(table)
    storage.create(tables, schemas)
    for table in storage.buckets:
        if (table in datamap):
            storage.write(table, datamap[table])
    return storage
Push Data Package to storage. All parameters should be used as keyword arguments. Args: descriptor (str): path to descriptor backend (str): backend name like `sql` or `bigquery` backend_options (dict): backend options mentioned in backend docs
codesearchnet
def dismiss(self, targets, exit_when=None, sleep_interval=0.5, appearance_timeout=20, timeout=120):
    """Automatically dismiss the target objects.

    Args:
        targets (:obj:`list`): list of poco objects to be dismissed.
        exit_when: termination condition; default None means exit
            automatically once no target remains.
        sleep_interval: time interval between actions, default 0.5s.
        appearance_timeout: how long to wait for the targets to appear
            before giving up, default 20s.
        timeout: overall dismiss timeout, default 120s.

    Raises:
        PocoTargetTimeout: when the overall dismiss timeout elapses.
    """
    try:
        self.wait_for_any(targets, timeout=appearance_timeout)
    except PocoTargetTimeout:
        # Bug fix: the original called .encode('utf-8').format(...), but
        # bytes has no .format on Python 3, raising AttributeError instead
        # of warning.  Format the text message directly.
        warnings.warn('Waiting timeout when trying to dismiss something before them appear. Targets are {}'.format(targets))
        return
    start_time = time.time()
    while True:
        no_target = True
        for t in targets:
            if t.exists():
                # Best-effort clicking: nodes may vanish mid-iteration, so
                # errors are deliberately swallowed.
                try:
                    for n in t:
                        try:
                            n.click(sleep_interval=sleep_interval)
                            no_target = False
                        except:
                            pass
                except:
                    pass
        time.sleep(sleep_interval)
        should_exit = (exit_when() if exit_when else False)
        if (no_target or should_exit):
            return
        if ((time.time() - start_time) > timeout):
            raise PocoTargetTimeout('dismiss', targets)
Automatically dismiss the target objects Args: targets (:obj:`list`): list of poco objects to be dropped exit_when: termination condition, default is None which means to automatically exit when list of ``targets`` is empty sleep_interval: time interval between each actions for the given targets, default is 0.5s appearance_timeout: time interval to wait for given target to appear on the screen, automatically exit when timeout, default is 20s timeout: dismiss function timeout, default is 120s Raises: PocoTargetTimeout: when dismiss time interval timeout, under normal circumstances, this should not happen and if happens, it will be reported
codesearchnet
def prefer_static_broadcast_shape(shape1, shape2, name='prefer_static_broadcast_shape'):
    """Statically broadcast two shapes when possible, else dynamically.

    Args:
        shape1: `1-D` integer `Tensor` or `TensorShape`.
        shape2: `1-D` integer `Tensor` or `TensorShape`.
        name: A string name to prepend to created ops.

    Returns:
        The broadcast shape, either as `TensorShape` (if the broadcast can
        be computed statically) or as a `Tensor`.

    Raises:
        ValueError: if a partially defined `TensorShape` must be broadcast
            dynamically.
    """
    with ops.name_scope(name, values=[shape1, shape2]):
        def make_shape_tensor(x):
            return ops.convert_to_tensor(x, name='shape', dtype=dtypes.int32)

        def get_tensor_shape(s):
            # Returns a fully static TensorShape, or None if not derivable.
            if isinstance(s, tensor_shape.TensorShape):
                return s
            s_ = tensor_util.constant_value(make_shape_tensor(s))
            if s_ is not None:
                return tensor_shape.TensorShape(s_)
            return None

        def get_shape_tensor(s):
            # Dynamic fallback: converts to an int32 shape tensor.
            if not isinstance(s, tensor_shape.TensorShape):
                return make_shape_tensor(s)
            if s.is_fully_defined():
                return make_shape_tensor(s.as_list())
            raise ValueError('Cannot broadcast from partially defined `TensorShape`.')

        shape1_ = get_tensor_shape(shape1)
        shape2_ = get_tensor_shape(shape2)
        if shape1_ is not None and shape2_ is not None:
            return array_ops.broadcast_static_shape(shape1_, shape2_)
        # At least one shape is unknown statically; broadcast at runtime.
        shape1_ = get_shape_tensor(shape1)
        shape2_ = get_shape_tensor(shape2)
        return array_ops.broadcast_dynamic_shape(shape1_, shape2_)
Convenience function which statically broadcasts shape when possible. Args: shape1: `1-D` integer `Tensor`. Already converted to tensor! shape2: `1-D` integer `Tensor`. Already converted to tensor! name: A string name to prepend to created ops. Returns: The broadcast shape, either as `TensorShape` (if broadcast can be done statically), or as a `Tensor`.
github-repos
def _package_to_staging(staging_package_url):
    """Repackage this package from its installed location and copy it to GCS.

    Args:
        staging_package_url: GCS path under which the tarball is staged.

    Returns:
        The GCS path of the uploaded trainer tarball.
    """
    import google.datalab.ml as ml

    here = os.path.dirname(__file__)
    package_root = os.path.abspath(os.path.join(here, '../../'))
    setup_path = os.path.abspath(os.path.join(here, 'master_setup.py'))
    tar_gz_path = os.path.join(staging_package_url, 'staging', 'trainer.tar.gz')
    print('Building package and uploading to %s' % tar_gz_path)
    ml.package_and_copy(package_root, setup_path, tar_gz_path)
    return tar_gz_path
Repackage this package from local installed location and copy it to GCS. Args: staging_package_url: GCS path.
juraj-google-style
def get_column(self, X, column):
    """Return a single column of the given matrix.

    Args:
        X: `numpy.ndarray` or `pandas.DataFrame`.
        column: `int` (ndarray) or `str` (DataFrame) column selector.

    Returns:
        np.ndarray: Selected column.
    """
    if isinstance(X, pd.DataFrame):
        return X[column].values
    # Bug fix: the original read ``X[(:, column)]``, which is a SyntaxError.
    # NumPy "all rows, one column" indexing is ``X[:, column]``.
    return X[:, column]
Return a column of the given matrix. Args: X: `numpy.ndarray` or `pandas.DataFrame`. column: `int` or `str`. Returns: np.ndarray: Selected column.
codesearchnet
def outer(vector1, vector2=None):
    """Construct the outer product of two vectors.

    The second vector argument is optional; if absent, the projector of the
    first vector (|v1><v1|) is returned.

    Args:
        vector1 (ndarray): the first vector.
        vector2 (ndarray): the (optional) second vector.

    Returns:
        np.array: The matrix |v1><v2|.
    """
    source = vector1 if vector2 is None else vector2
    conjugated = np.array(source).conj()
    return np.outer(vector1, conjugated)
Construct the outer product of two vectors. The second vector argument is optional, if absent the projector of the first vector will be returned. Args: vector1 (ndarray): the first vector. vector2 (ndarray): the (optional) second vector. Returns: np.array: The matrix |v1><v2|.
juraj-google-style
def from_api(cls, **kwargs):
    """Create a new instance from API response arguments.

    camelCase keys are switched to snake_case before instantiation, values
    are parsed into their declared property types, and unexpected
    properties are dropped (with a log entry) rather than failing.

    Returns:
        BaseModel: Instantiated model using the API values.
    """
    vals = cls.get_non_empty_vals({cls._to_snake_case(k): v for (k, v) in kwargs.items()})
    remove = []
    for (attr, val) in vals.items():
        try:
            vals[attr] = cls._parse_property(attr, val)
        except HelpScoutValidationException:
            # Collect unknown attributes; deleting while iterating the dict
            # would be unsafe, so removal happens after the loop.
            remove.append(attr)
            logger.info('Unexpected property received in API response', exc_info=True)
    for attr in remove:
        del vals[attr]
    return cls(**cls.get_non_empty_vals(vals))
Create a new instance from API arguments. This will switch camelCase keys into snake_case for instantiation. It will also identify any ``Instance`` or ``List`` properties, and instantiate the proper objects using the values. The end result being a fully Objectified and Pythonified API response. Returns: BaseModel: Instantiated model using the API values.
codesearchnet
def unpack(self, buff, offset=0):
    """Unpack a binary message into this object's attributes.

    Updates this object's attributes from the binary value *buff*, which is
    the message payload **without the header**.  Works in place.

    Args:
        buff (bytes): Binary data package to be unpacked, without the
            header.
        offset (int): Where to begin unpacking.
    """
    position = offset
    for attr_name, attr_value in self.get_class_attributes():
        # The header was already consumed by the caller, so skip it here.
        if type(attr_value).__name__ == "Header":
            continue
        consumed = self._unpack_attribute(attr_name, attr_value, buff, position)
        position += consumed
Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. It is an inplace method and it receives the binary data of the message **without the header**. Args: buff (bytes): Binary data package to be unpacked, without the header. offset (int): Where to begin unpacking.
juraj-google-style
def _get_def_class(self, class_obj, member_name): member_obj = getattr(class_obj, member_name) for def_class_obj in inspect.getmro(class_obj): if (member_name in def_class_obj.__dict__): if (def_class_obj.__name__ in self._excluded_classes): return class_obj return def_class_obj self._logger.warning('%s: Definition class not found for member %s.%s, defaulting to class %s', self._log_prefix, class_obj.__name__, member_name, class_obj.__name__) return class_obj
Return the class object in MRO order that defines a member. Args: class_obj: Class object that exposes (but not necessarily defines) the member. I.e. starting point of the search. member_name: Name of the member (method or attribute). Returns: Class object that defines the member.
codesearchnet
def set_config_variables(repo, variables):
    """Set git config variables on a repository.

    Args:
        repo (git.Repo): repository whose configuration is updated.
        variables (dict): entries of the form
            'user.email': 'you@example.com' — the key is 'section.option'.
    """
    with repo.config_writer() as writer:
        for dotted_key, value in variables.items():
            section, option = dotted_key.split('.')
            writer.set_value(section, option, value)
        writer.release()
Set config variables Args: repo (git.Repo): repo variables (dict): entries of the form 'user.email': 'you@example.com'
juraj-google-style
def set_integer(self, option, value):
    """Set an integer option.

    Args:
        option (str): name of option.
        value (int): value of the option; anything ``int()`` accepts.

    Raises:
        ValueError: Value must be an integer.
    """
    try:
        int_value = int(value)
    except ValueError as err:
        print(err.args)
        # Bug fix: the original swallowed the error and stored the raw,
        # non-integer value anyway, contradicting the documented contract.
        raise
    # Bug fix: store the parsed integer (the original computed int_value
    # but then stored the unparsed value).
    self.options[option] = int_value
Set an integer option. Args: option (str): name of option. value (int): value of the option. Raises: ValueError: Value must be an integer.
codesearchnet
def clone(self) -> 'Event':
    """Clone the event.

    Returns:
        :class:`slack.events.Event`: a new instance of the same class with
        deep copies of the event payload and metadata.
    """
    event_copy = copy.deepcopy(self.event)
    metadata_copy = copy.deepcopy(self.metadata)
    return self.__class__(event_copy, metadata_copy)
Clone the event Returns: :class:`slack.events.Event`
codesearchnet
def open(self, mode=None):
    """Open the container file if it is not already open.

    Args:
        mode (str): Either 'r' for read-only, 'w' for truncate-and-write or
            'a' for append.  If ``None``, uses ``self.mode``.

    Raises:
        ValueError: if an explicit mode is not one of 'r', 'w', 'a'.
    """
    if mode is None:
        mode = self.mode
    elif mode not in ('r', 'w', 'a'):
        raise ValueError("Invalid mode! Modes: ['a', 'r', 'w']")
    if self._file is not None:
        return
    self._file = h5py.File(self.path, mode=mode)
Open the container file. Args: mode (str): Either 'r' for read-only, 'w' for truncate and write or 'a' for append. (default: 'a'). If ``None``, uses ``self.mode``.
codesearchnet
def _CheckParserCanProcessFileEntry(self, parser, file_entry): for filter_object in parser.FILTERS: if filter_object.Match(file_entry): return True return False
Determines if a parser can process a file entry. Args: file_entry (dfvfs.FileEntry): file entry. parser (BaseParser): parser. Returns: bool: True if the file entry can be processed by the parser object.
juraj-google-style
def _name_search(cls, method, filters): filters = cls._get_name_filters(filters) return [ cls.deserialize(cls._zeep_to_dict(row)) for row in method(filters) ]
Helper for search methods that use name filters. Args: method (callable): The Five9 API method to call with the name filters. filters (dict): A dictionary of search parameters, keyed by the name of the field to search. This should conform to the schema defined in :func:`five9.Five9.create_criteria`. Returns: list[BaseModel]: A list of records representing the result.
juraj-google-style
def divide(x1, x2, output_shape=None, name=None):
    """Binary division with broadcasting.

    Args:
        x1: a Tensor.
        x2: a Tensor or a Python scalar.
        output_shape: an optional Shape.
        name: an optional string.

    Returns:
        a Tensor.
    """
    output_shape = convert_to_shape(output_shape)
    # Dividing by a scalar reduces to multiplication by its reciprocal.
    if not isinstance(x2, Tensor):
        return ScalarMultiplyOperation(x1, 1.0 / x2).outputs[0]
    with tf.name_scope(name, default_name="divide"):
        x1, x2 = binary_arguments_to_tensors(x1, x2)
        return multiply(x1, reciprocal(x2), output_shape=output_shape)
Binary division with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
juraj-google-style
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulate confusion matrix statistics.

    Args:
        y_true: The ground truth values.
        y_pred: The predicted values.
        sample_weight: Optional weighting of each example, broadcastable to
            `y_true`.  Defaults to 1.

    Returns:
        Update op.
    """
    deps = []
    if not self._built:
        self._build(tensor_shape.TensorShape(y_pred.shape))
    if self.multi_label or self.label_weights is not None:
        # Assert label-count consistency across inputs and state variables
        # before updating anything.
        shapes = [(y_true, ('N', 'L'))]
        if self.multi_label:
            shapes.extend([(self.true_positives, ('T', 'L')),
                           (self.true_negatives, ('T', 'L')),
                           (self.false_positives, ('T', 'L')),
                           (self.false_negatives, ('T', 'L'))])
        if self.label_weights is not None:
            shapes.append((self.label_weights, ('L',)))
        deps = [check_ops.assert_shapes(
            shapes, message='Number of labels is not consistent.')]
    # Label weights only apply in the single-label case here.
    label_weights = None if self.multi_label else self.label_weights
    if self._from_logits:
        y_pred = activations.sigmoid(y_pred)
    with ops.control_dependencies(deps):
        return metrics_utils.update_confusion_matrix_variables(
            {metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
             metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
             metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
             metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives},
            y_true,
            y_pred,
            self._thresholds,
            thresholds_distributed_evenly=self._thresholds_distributed_evenly,
            sample_weight=sample_weight,
            multi_label=self.multi_label,
            label_weights=label_weights)
Accumulates confusion matrix statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a `Tensor` whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true`. Returns: Update op.
github-repos
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
    """Run the decoder on ``decoder_input_ids`` given encoder outputs.

    Optionally continues from a ``past_key_values`` cache for incremental
    decoding; when a cache is used, updated cache entries are attached to
    the returned outputs.
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    return_dict = return_dict if return_dict is not None else self.config.return_dict
    encoder_hidden_states = encoder_outputs[0]
    if encoder_attention_mask is None:
        batch_size, sequence_length = encoder_hidden_states.shape[:2]
        encoder_attention_mask = jnp.ones((batch_size, sequence_length))
    batch_size, sequence_length = decoder_input_ids.shape
    if decoder_attention_mask is None:
        decoder_attention_mask = jnp.ones((batch_size, sequence_length))
    if decoder_position_ids is None:
        # With a cache, positions cannot be inferred from the (length-1)
        # input, so they must be supplied explicitly.
        if past_key_values is not None:
            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')
        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
    rngs = {}
    if dropout_rng is not None:
        rngs['dropout'] = dropout_rng
    inputs = {'params': params or self.params}
    # The cache collection must be declared mutable so Flax returns the
    # updated entries alongside the outputs.
    if past_key_values:
        inputs['cache'] = past_key_values
        mutable = ['cache']
    else:
        mutable = False

    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
        decoder_module = module._get_decoder_module()
        return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)

    outputs = self.module.apply(
        inputs,
        decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'),
        decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'),
        decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'),
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'),
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        deterministic=not train,
        rngs=rngs,
        mutable=mutable,
        method=_decoder_forward)
    # Append the updated cache to the model output when one was used.
    if past_key_values is not None and return_dict:
        outputs, past = outputs
        outputs['past_key_values'] = unfreeze(past['cache'])
        return outputs
    elif past_key_values is not None and (not return_dict):
        outputs, past = outputs
        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]
    return outputs
Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoTokenizer, FlaxMarianMTModel >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=64, return_tensors="jax") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```
github-repos
def cmd_rollback(context):
    """Roll back to the most recent "stable" tagged version.

    Finds the latest stable version and sets it again so that it becomes
    the new "current" version.

    Args:
        context: a populated EFVersionContext object.
    """
    last_stable = get_versions(context, return_stable=True)
    # Exactly one stable version is expected; anything else is fatal.
    if len(last_stable) != 1:
        fail("Didn't find a version marked stable for key: {} in env/service: {}/{}".format(
            context.key, context.env, context.service_name))
    context.value = last_stable[0].value
    context.commit_hash = last_stable[0].commit_hash
    context.build_number = last_stable[0].build_number
    context.location = last_stable[0].location
    context.stable = True
    # Re-setting the stable version makes it current again.
    cmd_set(context)
Roll back by finding the most recent "stable" tagged version, and putting it again, so that it's the new "current" version. Args: context: a populated EFVersionContext object
juraj-google-style
def solidity_resolve_address(hex_code, library_symbol, library_address):
    """Change the bytecode to use the given library address.

    Args:
        hex_code (bin): The bytecode encoded in hexadecimal.
        library_symbol (str): The library placeholder symbol to resolve.
        library_address (str): The address of the library.

    Returns:
        bin: The bytecode encoded in hexadecimal with the library
        references resolved.

    Raises:
        ValueError: if the address carries a 0x prefix, is not hex, or
            either symbol/address has the wrong length.
    """
    if library_address.startswith('0x'):
        raise ValueError('Address should not contain the 0x prefix')
    try:
        decode_hex(library_address)
    except TypeError:
        raise ValueError('library_address contains invalid characters, it must be hex encoded.')
    symbol_ok = len(library_symbol) == 40
    address_ok = len(library_address) == 40
    if not (symbol_ok and address_ok):
        raise ValueError('Address with wrong length')
    return hex_code.replace(library_symbol, library_address)
Change the bytecode to use the given library address. Args: hex_code (bin): The bytecode encoded in hexadecimal. library_symbol (str): The library placeholder symbol that will be resolved. library_address (str): The address of the library. Returns: bin: The bytecode encoded in hexadecimal with the library references resolved.
codesearchnet
def parent_suite(self):
    """Get the current parent suite.

    A parent suite exists when a context within a suite is active — e.g.
    during execution of a tool within a suite, or in an interactive shell
    entered via a suite alias such as 'tool +i'.

    Returns:
        `Suite` object, or None if there is no current parent suite.
    """
    ctx = self.context
    if not ctx or not ctx.parent_suite_path:
        return None
    return Suite.load(ctx.parent_suite_path)
Get the current parent suite. A parent suite exists when a context within a suite is active. That is, during execution of a tool within a suite, or after a user has entered an interactive shell in a suite context, for example via the command- line syntax 'tool +i', where 'tool' is an alias in a suite. Returns: `Suite` object, or None if there is no current parent suite.
codesearchnet
def present(name, parent=None, vlan=None):
    """Ensure that the named Open vSwitch bridge exists, creating it if not.

    Args:
        name: The name of the bridge.
        parent: The name of the parent bridge (for a fake bridge).  If
            specified, vlan must also be specified.
        vlan: The VLAN ID of the bridge (for a fake bridge).  If specified,
            parent must also be specified.

    Returns:
        dict: standard Salt state return (name/changes/result/comment).
    """
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    comment_bridge_created = 'Bridge {0} created.'.format(name)
    comment_bridge_notcreated = 'Unable to create bridge: {0}.'.format(name)
    comment_bridge_exists = 'Bridge {0} already exists.'.format(name)
    comment_bridge_mismatch = ('Bridge {0} already exists, but has a different'
                               ' parent or VLAN ID.').format(name)
    changes_bridge_created = {name: {'old': 'Bridge {0} does not exist.'.format(name),
                                     'new': 'Bridge {0} created'.format(name),
                                     }
                              }
    bridge_exists = __salt__['openvswitch.bridge_exists'](name)
    if bridge_exists:
        # Normalize "no parent"/"no vlan" to None so they compare cleanly
        # against the requested arguments.
        current_parent = __salt__['openvswitch.bridge_to_parent'](name)
        if current_parent == name:
            current_parent = None
        current_vlan = __salt__['openvswitch.bridge_to_vlan'](name)
        if current_vlan == 0:
            current_vlan = None
    # Dry run: report what would happen without creating anything.
    if __opts__['test']:
        if bridge_exists:
            if current_parent == parent and current_vlan == vlan:
                ret['result'] = True
                ret['comment'] = comment_bridge_exists
            else:
                ret['result'] = False
                ret['comment'] = comment_bridge_mismatch
        else:
            ret['result'] = None
            ret['comment'] = comment_bridge_created
        return ret
    if bridge_exists:
        if current_parent == parent and current_vlan == vlan:
            ret['result'] = True
            ret['comment'] = comment_bridge_exists
        else:
            ret['result'] = False
            ret['comment'] = comment_bridge_mismatch
    else:
        bridge_create = __salt__['openvswitch.bridge_create'](
            name, parent=parent, vlan=vlan)
        if bridge_create:
            ret['result'] = True
            ret['comment'] = comment_bridge_created
            ret['changes'] = changes_bridge_created
        else:
            ret['result'] = False
            ret['comment'] = comment_bridge_notcreated
    return ret
Ensures that the named bridge exists, eventually creates it. Args: name: The name of the bridge. parent: The name of the parent bridge (if the bridge shall be created as a fake bridge). If specified, vlan must also be specified. vlan: The VLAN ID of the bridge (if the bridge shall be created as a fake bridge). If specified, parent must also be specified.
juraj-google-style
def __init__(self):
    """Initialize the ``JLinkWatchpointInfo`` instance.

    Sets ``SizeOfStruct`` to the size of this ctypes structure, as required
    by the J-Link API.
    """
    super(JLinkWatchpointInfo, self).__init__()
    self.SizeOfStruct = ctypes.sizeof(self)
Initializes the ``JLinkWatchpointInfo`` instance. Sets the size of the structure. Args: self (JLinkWatchpointInfo): the ``JLinkWatchpointInfo`` instance Returns: ``None``
juraj-google-style
def GetHasher(cls, hasher_name):
    """Retrieve an instance of a specific hasher.

    Args:
        hasher_name (str): the name of the hasher to retrieve
            (case-insensitive).

    Returns:
        BaseHasher: hasher.

    Raises:
        KeyError: if no hasher class is registered for the given name.
    """
    lookup_name = hasher_name.lower()
    if lookup_name not in cls._hasher_classes:
        raise KeyError('hasher class not set for name: {0:s}.'.format(lookup_name))
    return cls._hasher_classes[lookup_name]()
Retrieves an instance of a specific hasher. Args: hasher_name (str): the name of the hasher to retrieve. Returns: BaseHasher: hasher. Raises: KeyError: if hasher class is not set for the corresponding name.
codesearchnet
def plot_residuals(self, plot=None):
    """Plot normalized fit residuals.

    The sum of the squares of the residuals equals ``self.chi2``.
    Individual residuals should be distributed about one, in a Gaussian
    distribution.

    Args:
        plot: :mod:`matplotlib` plotter.  If ``None``, uses
            ``matplotlib.pyplot``.

    Returns:
        Plotter ``plot``.
    """
    if (plot is None):
        import matplotlib.pyplot as plot
    x = numpy.arange(1, (len(self.residuals) + 1))
    y = _gvar.mean(self.residuals)
    yerr = _gvar.sdev(self.residuals)
    plot.errorbar(x=x, y=y, yerr=yerr, fmt='o', color='b')
    plot.ylabel('normalized residuals')
    xr = [x[0], x[(- 1)]]
    # Reference line at zero plus a +/-1 band for the expected spread.
    plot.plot([x[0], x[(- 1)]], [0, 0], 'r-')
    plot.fill_between(x=xr, y1=[(- 1), (- 1)], y2=[1, 1], color='r', alpha=0.075)
    return plot
Plot normalized fit residuals. The sum of the squares of the residuals equals ``self.chi2``. Individual residuals should be distributed about one, in a Gaussian distribution. Args: plot: :mod:`matplotlib` plotter. If ``None``, uses ``matplotlib.pyplot`. Returns: Plotter ``plot``.
codesearchnet
def delete_nsg(access_token, subscription_id, resource_group, nsg_name):
    """Delete a network security group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        nsg_name (str): Name of the NSG.

    Returns:
        HTTP response.
    """
    parts = [
        get_rm_endpoint(),
        '/subscriptions/', subscription_id,
        '/resourceGroups/', resource_group,
        '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name,
        '?api-version=', NETWORK_API,
    ]
    return do_delete(''.join(parts), access_token)
Delete network security group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. nsg_name (str): Name of the NSG. Returns: HTTP response.
codesearchnet
def cluster_spec(self):
    """Retrieve the current cluster state as a `tf.train.ClusterSpec`.

    Returns:
        A `tf.train.ClusterSpec` representing the state of the cluster at
        the moment this function is called.  Implementors must ensure the
        returned spec is up to date at call time, usually by querying the
        underlying cluster management system on every invocation rather
        than caching.

    Raises:
        NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError()
Retrieve the current state of the cluster and return a `tf.train.ClusterSpec`. Returns: A `tf.train.ClusterSpec` representing the state of the cluster at the moment this function is called. Implementors of this function must take care in ensuring that the ClusterSpec returned is up-to-date at the time of calling this function. This usually means retrieving the information from the underlying cluster management system every time this function is invoked and reconstructing a cluster_spec, rather than attempting to cache anything.
github-repos
def set_topic(self, topic):
    """Set the room topic.

    Args:
        topic (str): Topic.  A falsy value clears the topic.

    Returns:
        bool. Success
    """
    payload = {'room': {'topic': topic or ''}}
    result = self._connection.put('room/%s' % self.id, payload)
    success = result['success']
    if success:
        self._load()
    return success
Set the room topic. Args: topic (str): Topic Returns: bool. Success
codesearchnet
def make_layer_stack(layers=gin.REQUIRED, num_layers=6):
    """Configurable layer stack.

    Args:
        layers: a list of TransformerLayer subclasses.
        num_layers: an integer; each instantiated layer object is repeated
            this many times (instances are shared between repeats).

    Returns:
        a LayerStack.
    """
    instantiated = [layer_cls() for layer_cls in layers]
    return LayerStack(instantiated * num_layers)
Configurable layer stack. Args: layers: a list of subclasses of TransformerLayer num_layers: an integer Returns: a LayerStack
codesearchnet
def kaiser(x, beta):
    """Kaiser window function.

    Args:
        x: Scalar or 1D Tensor, the window length.
        beta: Float shape parameter for the Kaiser window.

    Returns:
        A 1D tensor containing the Kaiser window values.
    """
    # Symbolic (graph-mode) inputs are routed through the op class.
    if any_symbolic_tensors((x,)):
        return Kaiser(beta).symbolic_call(x)
    return backend.numpy.kaiser(x, beta)
Kaiser window function. The Kaiser window is defined as: `w[n] = I0(beta * sqrt(1 - (2n / (N - 1) - 1)^2)) / I0(beta)` where I0 is the modified zeroth-order Bessel function of the first kind. Args: x: Scalar or 1D Tensor. The window length. beta: Float. Shape parameter for the Kaiser window. Returns: A 1D tensor containing the Kaiser window values. Example: >>> x = keras.ops.convert_to_tensor(5) >>> keras.ops.kaiser(x, beta=14.0) array([7.7268669e-06, 1.6493219e-01, 1.0000000e+00, 1.6493219e-01, 7.7268669e-06], dtype=float32)
github-repos
def add_event(self, event):
    """Add an event to the event file.

    Args:
        event: An `Event` protocol buffer.  Ignored once the writer has
            been closed.
    """
    if self._closed:
        return
    self._try_put(event)
Adds an event to the event file. Args: event: An `Event` protocol buffer.
github-repos
def bessel_i1e(x, name=None):
    """Compute the exponentially scaled Bessel i1e function of `x` elementwise.

    Args:
        x: A `Tensor` or `SparseTensor` of type `half`, `float32` or
            `float64`.
        name: A name for the operation (optional).

    Returns:
        A `Tensor` or `SparseTensor`, respectively, with the same type as
        `x`.  Equivalent to scipy.special.i1e.
    """
    with ops.name_scope(name, 'bessel_i1e', [x]):
        return gen_special_math_ops.bessel_i1e(x)
Computes the Bessel i1e function of `x` element-wise. Modified Bessel function of order 1. >>> tf.math.special.bessel_i1e([-1., -0.5, 0.5, 1.]).numpy() array([-0.20791042, -0.15642083, 0.15642083, 0.20791042], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.i1e @end_compatibility
github-repos
def get_median(self):
    """Retrieve the current median value.

    Returns:
        float: The median of the values within the defined window.
        Returns `NaN` if the window is empty.
    """
    return self._median_tracker.get()
Retrieves the current median value. Returns: float: The median of the values within the defined window. Returns `NaN` if the window is empty.
github-repos
def get_unassigned_ports(self):
    """Get the member-interconnect ports eligible for analyzer assignment.

    Returns:
        dict: Collection of ports.
    """
    uri = '{}/unassignedPortsForPortMonitor'.format(self.data['uri'])
    raw_response = self._helper.do_get(uri)
    return self._helper.get_members(raw_response)
Gets the collection ports from the member interconnects which are eligible for assignment to an anlyzer port Returns: dict: Collection of ports
codesearchnet
def from_task(cls, task):
    """Create a new target representing a task and its parameters.

    Args:
        task: Task instance to create a target for; the task class has to
            inherit from :class:`ozelot.tasks.TaskBase`.

    Returns:
        ozelot.tasks.ORMTarget: a new target instance.
    """
    return cls(name=task.get_name(), params=task.get_param_string())
Create a new target representing a task and its parameters Args: task: Task instance to create target for; the task class has to inherit from :class:`ozelot.tasks.TaskBase`. Returns: ozelot.tasks.ORMTarget: a new target instance
codesearchnet
def setup_lookup_table(self, hamiltonian='nearest-neighbour'):
    """Create a jump-probability look-up table for the given Hamiltonian.

    Args:
        hamiltonian (Str, optional): String specifying the simulation
            Hamiltonian.  Valid values are 'nearest-neighbour' (default)
            and 'coordination_number'.

    Returns:
        None

    Raises:
        ValueError: if ``hamiltonian`` is not a recognised value.
    """
    expected_hamiltonian_values = ['nearest-neighbour', 'coordination_number']
    if hamiltonian not in expected_hamiltonian_values:
        # The original raised a bare ValueError; include the offending
        # value so the failure is diagnosable.
        raise ValueError(
            'Unexpected hamiltonian: {}. Valid values are {}'.format(
                hamiltonian, expected_hamiltonian_values))
    self.lattice.jump_lookup_table = lookup_table.LookupTable(self.lattice, hamiltonian)
Create a jump-probability look-up table corresponding to the appropriate Hamiltonian. Args: hamiltonian (Str, optional): String specifying the simulation Hamiltonian. valid values are 'nearest-neighbour' (default) and 'coordination_number'. Returns: None
codesearchnet
def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool=False) -> tf.Tensor: scale = (height * width if height > width: feat_width = int(np.round(width / scale)) feat_height = shape_list(attentions)[2] else: feat_height = int(np.round(height / scale)) feat_width = shape_list(attentions)[2] batch_size = shape_list(attentions)[0] groups = shape_list(attentions)[1] attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width)) attentions = tf.transpose(attentions, perm=(0, 2, 3, 1)) if align_corners: attentions = tf.compat.v1.image.resize(attentions, size=(height, width), method='bilinear', align_corners=align_corners) else: attentions = tf.image.resize(attentions, size=(height, width), method='bilinear') attentions = tf.transpose(attentions, perm=(0, 3, 1, 2)) return attentions
Args: attentions (`tf.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width] height (`int`): height of the output attention map width (`int`): width of the output attention map align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`. Returns: `tf.Tensor`: resized attention map of shape [batch_size, groups, height, width]
github-repos
def exists(self):
    """Check if the table exists.

    Returns:
        True if the table exists; False otherwise.

    Raises:
        Exception if there was an error requesting information about the
        table.
    """
    try:
        table_info = self._api.tables_get(self._name_parts)
    except google.datalab.utils.RequestException as e:
        if e.status != 404:
            raise e
        return False
    except Exception as e:
        raise e
    self._info = table_info
    return True
Checks if the table exists. Returns: True if the table exists; False otherwise. Raises: Exception if there was an error requesting information about the table.
codesearchnet
def remove_checkpoint(checkpoint_prefix, checkpoint_format_version=saver_pb2.SaverDef.V2, meta_graph_suffix='meta'):
    """Remove the checkpoint given by `checkpoint_prefix`.

    Args:
        checkpoint_prefix: The prefix of a V1 or V2 checkpoint, typically
            from `Saver.save()` or `tf.train.latest_checkpoint()`.
        checkpoint_format_version: `SaverDef.CheckpointFormatVersion`,
            defaults to `SaverDef.V2`.
        meta_graph_suffix: Suffix for the `MetaGraphDef` file; defaults to
            'meta'.
    """
    meta_path = meta_graph_filename(checkpoint_prefix, meta_graph_suffix)
    _delete_file_if_exists(meta_path)
    if checkpoint_format_version != saver_pb2.SaverDef.V2:
        _delete_file_if_exists(checkpoint_prefix)
        return
    # V2 checkpoints are sharded: an index file plus data shards.
    for suffix in ('.index', '.data-?????-of-?????'):
        _delete_file_if_exists(checkpoint_prefix + suffix)
Removes a checkpoint given by `checkpoint_prefix`. Args: checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to `SaverDef.V2`. meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
github-repos
def get_ethernet_networks(self):
    """Get the ethernet networks associated with this uplink set.

    The network URIs are read from this resource's 'networkUris' data
    entry.

    Returns:
        list: Associated ethernet networks.
    """
    uris = self.data.get('networkUris')
    if not uris:
        return []
    return [self._ethernet_networks.get_by_uri(uri) for uri in uris]
Gets a list of associated ethernet networks of an uplink set. The network URIs are read from this resource's 'networkUris' data entry; no arguments are required. Returns: list: Associated ethernet networks.
codesearchnet
def error_print(msg, color=colorama.Fore.RED, file=sys.stderr):
    """Print the error message to the file in the specified color.

    Args:
        msg: The error message to be printed.
        color: Optional colorama color string applied to the message; you
            can concatenate colorama color strings, but style strings will
            not be applied.
        file: A file object to which the bracketed text will be written,
            intended for CLI output streams such as sys.stderr.
    """
    if CLI_QUIET:
        return
    sep = _linesep_for_file(file)
    text = '{sep}{bright}{color}Error: {normal}{msg}{sep}{reset}'.format(
        sep=sep,
        bright=colorama.Style.BRIGHT,
        color=color,
        normal=colorama.Style.NORMAL,
        msg=msg,
        reset=colorama.Style.RESET_ALL)
    file.write(text)
    file.flush()
Print the error message to the file in the specified color. Args: msg: The error message to be printed. color: Optional colorama color string to be applied to the message. You can concatenate colorama color strings together here, but note that style strings will not be applied. file: A file object to which the baracketed text will be written. Intended for use with CLI output file objects, specifically sys.stderr.
codesearchnet
def apply_actions(self, actions):
    """Applies a list of actions to the FEFF input set and rewrites modified files.

    Args:
        actions: A list of action dicts, each either
            {'file': filename, 'action': modder modification} or
            {'dict': feffinput_key, 'action': modder modification}.

    Raises:
        ValueError: If an action dict has neither a 'dict' nor a 'file' key.
    """
    modified_keys = []
    for action_spec in actions:
        if 'dict' in action_spec:
            key = action_spec['dict']
            modified_keys.append(key)
            self.feffinp[key] = self.modify_object(action_spec['action'], self.feffinp[key])
        elif 'file' in action_spec:
            self.modify(action_spec['action'], action_spec['file'])
        else:
            raise ValueError('Unrecognized format: {}'.format(action_spec))
    if not modified_keys:
        return
    # Rewrite every FEFF card file plus the combined feff.inp, preserving
    # the canonical card order in the combined file.
    feff = self.feffinp
    card_order = ['HEADER', 'PARAMETERS', 'POTENTIALS', 'ATOMS']
    feff_input = '\n\n'.join(str(feff[card]) for card in card_order if card in feff)
    for card, contents in six.iteritems(feff):
        with open(os.path.join('.', card), 'w') as f:
            f.write(str(contents))
    with open(os.path.join('.', 'feff.inp'), 'w') as f:
        f.write(feff_input)
Applies a list of actions to the FEFF Input Set and rewrites modified files. Args: actions [dict]: A list of actions of the form {'file': filename, 'action': modder modification} or {'dict': feffinput_key, 'action': modder modification}
codesearchnet
def _send_receive(self, payload):
    """Sends `payload` over a fresh TCP connection and returns one reply buffer.

    Connects to (self.address, self.port) with Nagle disabled and
    self.connection_timeout applied, sends the full payload, and reads a
    single buffer of up to 1024 bytes.

    Fix: the socket is now closed in a ``finally`` block, so it is no longer
    leaked when connect/send/recv raises (e.g. on timeout), and ``sendall``
    replaces ``send`` so short writes cannot silently truncate the payload.

    Args:
        payload (bytes): Data to send.

    Returns:
        bytes: The single response buffer received (up to 1024 bytes).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Disable Nagle's algorithm so the small request is sent immediately.
        s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        s.settimeout(self.connection_timeout)
        s.connect((self.address, self.port))
        s.sendall(payload)
        return s.recv(1024)
    finally:
        s.close()
Send single buffer `payload` and receive a single buffer. Args: payload(bytes): Data to send.
juraj-google-style
def get_dimensions(js_dict, naming):
    """Gets dimensions from input JSON-stat data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        naming (string): dimension naming; 'label' or 'id'.

    Returns:
        dimensions (list): pandas data frames with dimension category data.
        dim_names (list): dimension names (labels when naming == 'label',
            otherwise the raw dimension ids).
    """
    # Version-2 documents carry the 'id' list at the top level; version-1
    # documents nest it under 'dimension'.
    dimension_dict = js_dict if check_version_2(js_dict) else js_dict['dimension']
    dimensions = []
    dim_names = []
    for dim in dimension_dict['id']:
        # Fall back to the raw id when the label is empty/missing.
        label = js_dict['dimension'][dim]['label'] or dim
        if naming == 'label':
            dimensions.append(get_dim_label(js_dict, dim))
            dim_names.append(label)
        else:
            dimensions.append(get_dim_index(js_dict, dim))
            dim_names.append(dim)
    return dimensions, dim_names
Get dimensions from input data. Args: js_dict (dict): dictionary containing dataset data and metadata. naming (string, optional): dimension naming. Possible values: 'label' \ or 'id'. Returns: dimensions (list): list of pandas data frames with dimension \ category data. dim_names (list): list of strings with dimension names.
juraj-google-style
def __init__(self, output_mediator):
    """Initializes a dynamic fields helper.

    Args:
        output_mediator (OutputMediator): output mediator.
    """
    super(DynamicFieldsHelper, self).__init__()
    # Keep a reference to the mediator; presumably queried by the helper's
    # field-formatting methods elsewhere in the class — not visible here.
    self._output_mediator = output_mediator
Initializes a dynamic fields helper. Args: output_mediator (OutputMediator): output mediator.
juraj-google-style
def parse(self) -> bool:
    """Parses and evaluates a complete feature expression.

    Returns:
        The boolean value of the feature disjunction.

    Raises:
        InvalidFeatureExpression: If the if-feature expression is not
            syntactically correct (i.e. input remains after the disjunction).
        UnknownPrefix: If a prefix of a feature name is not declared.
    """
    self.skip_ws()
    result = self._feature_disj()
    self.skip_ws()
    # Trailing, unconsumed input means the expression was malformed.
    if self.at_end():
        return result
    raise InvalidFeatureExpression(self)
Parse and evaluate a complete feature expression. Raises: InvalidFeatureExpression: If the if-feature expression is not syntactically correct. UnknownPrefix: If a prefix of a feature name is not declared.
codesearchnet
def sheetNames(book=None):
    """Returns the sheet names of a book.

    Args:
        book (str, optional): If given, pull names from that book (matched
            case-insensitively); otherwise use the active book.

    Returns:
        list of sheet names (typical case).
        None if the book has no sheets.
        False if the book doesn't exist (or no book is active).
    """
    if book:
        known_books = [name.lower() for name in bookNames()]
        if book.lower() not in known_books:
            return False
    else:
        book = activeBook()
        if not book:
            return False
    pages = PyOrigin.WorksheetPages(book)
    if len(pages) == 0:
        return None
    return [layer.GetName() for layer in pages.Layers()]
return sheet names of a book. Args: book (str, optional): If a book is given, pull names from that book. Otherwise, try the active one Returns: list of sheet names (typical case). None if book has no sheets. False if book doesn't exist.
codesearchnet
def get_kinds(start=None, end=None):
    """Returns all kinds in the specified range, for the current namespace.

    Args:
        start: only return kinds >= start if start is not None.
        end: only return kinds < end if end is not None.

    Returns:
        A list of kind names between the (optional) start and end values.
    """
    # An empty-string end bound can never match anything.
    if end == '':
        return []
    query = Kind.query()
    if start is not None and start != '':
        query = query.filter(Kind.key >= Kind.key_for_kind(start))
    if end is not None:
        query = query.filter(Kind.key < Kind.key_for_kind(end))
    return [entity.kind_name for entity in query]
Return all kinds in the specified range, for the current namespace. Args: start: only return kinds >= start if start is not None. end: only return kinds < end if end is not None. Returns: A list of kind names between the (optional) start and end values.
codesearchnet
def _remove_hidden_parts(projected_surface):
    """Removes parts of a projected surface that are not visible.

    Points outside the occlusion mask are replaced with NaN; the input
    array is left untouched.

    Args:
        projected_surface (surface): the surface to use.

    Returns:
        surface: A copy of the projected surface with hidden parts set to NaN.
    """
    visible_mask = _make_occlusion_mask(projected_surface)
    cleaned = np.copy(projected_surface)
    cleaned[np.logical_not(visible_mask)] = np.nan
    return cleaned
Removes parts of a projected surface that are not visible. Args: projected_surface (surface): the surface to use Returns: surface: A projected surface.
codesearchnet
def get_unstable_entries(self, charge_to_discharge=True):
    """Returns the unstable entries for the electrode.

    Fix: the original returned ``list_copy.reverse()`` in the discharge-first
    branch — ``list.reverse()`` reverses in place and returns ``None``, so
    callers passing ``charge_to_discharge=False`` always received ``None``.
    The list is now reversed first and returned in both branches.

    Args:
        charge_to_discharge: Order from most charged to most discharged
            state? Defaults to True.

    Returns:
        A list of unstable entries in the electrode, ordered by amount of
        the working ion.
    """
    list_copy = list(self._unstable_entries)
    if not charge_to_discharge:
        list_copy.reverse()
    return list_copy
Returns the unstable entries for the electrode. Args: charge_to_discharge: Order from most charge to most discharged state? Defaults to True. Returns: A list of unstable entries in the electrode, ordered by amount of the working ion.
codesearchnet
def get_tag_hash(self, tag_name):
    """Fetches the commit hash that was tagged with ``tag_name``.

    Args:
        tag_name (str): the name of the tag.

    Returns:
        str: the commit hash linked by the tag.
    """
    # Exactly one tag must match; the helper raises with the messages
    # below when zero or multiple tags are found.
    matching_tag = get_single_item_from_sequence(
        sequence=self._github_repository.tags(),
        condition=lambda candidate: candidate.name == tag_name,
        no_item_error_message='No tag "{}" exist'.format(tag_name),
        too_many_item_error_message='Too many tags "{}" found'.format(tag_name),
    )
    return matching_tag.commit.sha
Fetch the commit hash that was tagged with ``tag_name``. Args: tag_name (str): the name of the tag Returns: str: the commit hash linked by the tag
juraj-google-style
def __init__(self, global_rate_limit_qps: int, latency_per_request: float, max_concurrent_requests: int, use_metrics: bool):
    """Creates a RateLimit object.

    The number of shards is global_rate_limit_qps * latency_per_request
    (in seconds), clamped to [1, max_concurrent_requests]. Each shard can be
    processed in parallel and issues at most one request per
    latency_per_request. Note the max QPS may not be reached, depending on
    how many workers are scheduled.

    Args:
        global_rate_limit_qps: QPS to rate limit requests across all workers to.
        latency_per_request: The expected latency per request, in seconds.
        max_concurrent_requests: Maximum allowed concurrent API requests.
        use_metrics: Whether metrics collection is enabled.
    """
    self._rate_limit = global_rate_limit_qps
    self._latency_per_request = datetime.timedelta(seconds=latency_per_request)
    # Shards ~= in-flight requests needed to sustain the target QPS,
    # clamped to at least one and at most the concurrency cap.
    shard_estimate = int(self._rate_limit * self._latency_per_request.total_seconds())
    self._num_shards = max(1, min(shard_estimate, max_concurrent_requests))
    self.use_metrics = use_metrics
Creates a RateLimit object. global_rate_limit_qps and latency_per_request are used to determine how the data should be sharded via: global_rate_limit_qps * latency_per_request.total_seconds() For example, global_rate_limit_qps = 500 and latency_per_request=.5 seconds. Then the data will be sharded into 500*.5=250 groups. Each group can be processed in parallel and will call the 'process' function at most once every latency_per_request. It is important to note that the max QPS may not be reached, depending on how many workers are scheduled. Args: global_rate_limit_qps: QPS to rate limit requests across all workers to. latency_per_request: The expected latency per request. max_concurrent_requests: Maximum allowed concurrent api requests to EE.
github-repos
def fetch_friends(self, user):
    """Fetches friends from VKontakte using the access token obtained by
    django-social-auth.

    Note: `user` is not a User — it's a UserSocialAuth when using social
    auth, or a SocialAccount when using allauth.

    Returns:
        Collection of friend objects fetched from VKontakte.

    Raises:
        NotImplementedError: When django-allauth is in use.
    """
    if USING_ALLAUTH:
        raise NotImplementedError('VKontakte support is not implemented for django-allauth')
    backend = VKOAuth2Backend()
    access_token = backend.tokens(user)['access_token']
    api = vkontakte.API(token=access_token)
    return api.get('friends.get')
fetches friends from VKontakte using the access_token fetched by django-social-auth. Note - user isn't a user - it's a UserSocialAuth if using social auth, or a SocialAccount if using allauth Returns: collection of friend objects fetched from VKontakte
codesearchnet
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None):
    """Loads a checkpoint from a file or URI into `model`.

    Fixes:
      * The URI-prefix string literals were truncated in this copy
        (``'modelzoo:``, ``'open-mmlab:``, ``('http:``). They are restored to
        ``'modelzoo://'`` / ``'open-mmlab://'`` / ``('http://', 'https://')``,
        consistent with the existing ``filename[11:]`` and ``filename[13:]``
        slices (the lengths of those prefixes).
      * The ``module.`` prefix stripping read ``checkpoint['state_dict']``,
        which raised KeyError when the checkpoint was a bare OrderedDict; it
        now reads the already-resolved ``state_dict``.

    Args:
        model (Module): Module to load checkpoint into.
        filename (str): Either a filepath, an http(s) URL, or a
            modelzoo://xxx / open-mmlab://xxx identifier.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error messages.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        IOError: If a local `filename` is not an existing file.
        RuntimeError: If no state_dict is found in the checkpoint.
    """
    if filename.startswith('modelzoo://'):
        import torchvision
        # Collect model_urls from every non-package torchvision.models module.
        model_urls = dict()
        for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
            if not ispkg:
                _zoo = import_module('torchvision.models.{}'.format(name))
                _urls = getattr(_zoo, 'model_urls')
                model_urls.update(_urls)
        model_name = filename[11:]  # strip 'modelzoo://'
        checkpoint = model_zoo.load_url(model_urls[model_name])
    elif filename.startswith('open-mmlab://'):
        model_name = filename[13:]  # strip 'open-mmlab://'
        checkpoint = model_zoo.load_url(open_mmlab_model_urls[model_name])
    elif filename.startswith(('http://', 'https://')):
        checkpoint = model_zoo.load_url(filename)
    else:
        if not osp.isfile(filename):
            raise IOError('{} is not a checkpoint file'.format(filename))
        checkpoint = torch.load(filename, map_location=map_location)
    # Resolve the state dict regardless of checkpoint container shape.
    if isinstance(checkpoint, OrderedDict):
        state_dict = checkpoint
    elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        raise RuntimeError('No state_dict found in checkpoint file {}'.format(filename))
    if list(state_dict.keys())[0].startswith('module.'):
        # Strip the 'module.' prefix added by (Data)Parallel wrappers. Read
        # from state_dict (not checkpoint['state_dict']) so bare-OrderedDict
        # checkpoints work too.
        state_dict = {k[7:]: v for k, v in state_dict.items()}
    if hasattr(model, 'module'):
        load_state_dict(model.module, state_dict, strict, logger)
    else:
        load_state_dict(model, state_dict, strict, logger)
    return checkpoint
Load checkpoint from a file or URI. Args: model (Module): Module to load checkpoint. filename (str): Either a filepath or URL or modelzoo://xxxxxxx. map_location (str): Same as :func:`torch.load`. strict (bool): Whether to allow different params for the model and checkpoint. logger (:mod:`logging.Logger` or None): The logger for error message. Returns: dict or OrderedDict: The loaded checkpoint.
codesearchnet
def _CreatePerformanceTarget(client, campaign_group_id):
    """Creates a performance target for the campaign group.

    Args:
        client: an AdWordsClient instance.
        campaign_group_id: an integer ID for the campaign group.
    """
    cgpt_service = client.GetService('CampaignGroupPerformanceTargetService', version='v201809')
    # Target: keep CPC <= $3, spend at most $500, maximize clicks toward a
    # volume of 3000 over a 90-day window starting today.
    performance_target = {
        'efficiencyTargetType': 'CPC_LESS_THAN_OR_EQUAL_TO',
        'efficiencyTargetValue': 3000000,
        'spendTargetType': 'MAXIMUM',
        'spendTarget': {'microAmount': 500000000},
        'volumeGoalType': 'MAXIMIZE_CLICKS',
        'volumeTargetValue': 3000,
        'startDate': datetime.datetime.now().strftime('%Y%m%d'),
        'endDate': (datetime.datetime.now() + datetime.timedelta(90)).strftime('%Y%m%d'),
    }
    operation = {
        'operator': 'ADD',
        'operand': {
            'campaignGroupId': campaign_group_id,
            'performanceTarget': performance_target,
        },
    }
    cgpt = cgpt_service.mutate([operation])['value'][0]
    print ('Campaign performance target with ID "%d" was added for campaign '
           'group ID "%d".' % (cgpt['id'], cgpt['campaignGroupId']))
Creates a performance target for the campaign group. Args: client: an AdWordsClient instance. campaign_group_id: an integer ID for the campaign group.
juraj-google-style
def __mul__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Returns a TensorFluent for the multiplication arithmetic operator.

    Args:
        self: The first operand.
        other: The second operand.

    Returns:
        A TensorFluent wrapping the operator's output.
    """
    # Delegate to the shared binary-op helper with float32 as the cast dtype.
    product = self._binary_op(self, other, tf.multiply, tf.float32)
    return product
Returns a TensorFluent for the multiplication arithmetic operator. Args: self: The first operand. other: The second operand. Returns: A TensorFluent wrapping the operator's output.
juraj-google-style
def call_each(seq):
    """Calls each element of `seq` to invoke its side effect.

    Fixes two defects in the ``reduce``-based original:
      * ``reduce(lambda _, y: y(), seq)`` with no initializer uses the FIRST
        element as the seed without calling it, so the first callable was
        never invoked.
      * An empty sequence was detected by string-matching the ``TypeError``
        message ("reduce() of empty sequence with no initial value"), which
        is brittle across Python versions. A plain loop handles an empty
        iterable naturally.

    Args:
        seq: An iterable of zero-argument callables.

    Returns:
        None
    """
    for fn in seq:
        fn()
Calls each element of sequence to invoke the side effect. Args: seq: An iterable of zero-argument callables. Returns: None
codesearchnet