code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def list_sites():
    '''List the currently deployed IIS websites.

    Returns:
        dict: Site name -> properties (apppool, bindings,
        applicationDefaults, id, state, sourcepath).

    Raises:
        CommandExecutionError: If the PowerShell output cannot be parsed
            as JSON.
    '''
    sites = dict()
    ps_cmd = ['Get-ChildItem', '-Path', "'IIS:\\Sites'", '|', 'Select-Object applicationPool, applicationDefaults, Bindings, ID, Name, PhysicalPath, State']
    keep_keys = ('certificateHash', 'certificateStoreName', 'protocol', 'sslFlags')
    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
    try:
        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
    except ValueError:
        raise CommandExecutionError('Unable to parse return data as Json.')
    for item in items:
        site_bindings = dict()
        for binding in item['bindings']['Collection']:
            # Only http/https bindings carry the information reported here.
            if binding['protocol'] not in ['http', 'https']:
                continue
            # Keep only whitelisted attributes, with lower-cased key names.
            filtered = {key.lower(): binding[key] for key in binding if key in keep_keys}
            # bindingInformation has the form "<ip>:<port>:<hostheader>".
            ipaddress, port, hostheader = (part.strip() for part in binding['bindingInformation'].split(':', 2))
            filtered.update({'hostheader': hostheader, 'ipaddress': ipaddress, 'port': port})
            site_bindings[binding['bindingInformation']] = filtered
        app_defaults = {attr['Name']: attr['Value'] for attr in item['applicationDefaults']['Attributes']}
        sites[item['name']] = {'apppool': item['applicationPool'], 'bindings': site_bindings, 'applicationDefaults': app_defaults, 'id': item['id'], 'state': item['state'], 'sourcepath': item['physicalPath']}
    if not sites:
        log.warning('No sites found in output: %s', cmd_ret['stdout'])
    return sites
List all the currently deployed websites. Returns: dict: A dictionary of the IIS sites and their properties. CLI Example: .. code-block:: bash salt '*' win_iis.list_sites
codesearchnet
def set_name(self, name, anyway=False):
    '''Set the function's name.

    By default IDA raises an exception when setting a name that already
    exists in the IDB; pass ``anyway=True`` to let IDA append a numeric
    suffix automatically (same behavior as the GUI).

    Args:
        name: Desired name.
        anyway: ``True`` to force renaming on a name collision.
    '''
    # Delegates to the module-level `set_name` helper using this
    # function's start address.
    set_name(self.startEA, name, anyway=anyway)
Set Function Name. Default behavior throws an exception when setting to a name that already exists in the IDB. to make IDA automatically add a counter to the name (like in the GUI,) use `anyway=True`. Args: name: Desired name. anyway: `True` to set anyway.
codesearchnet
def walknset_vars(self, task_class=None, *args, **kwargs):
    '''Set the values of the ABINIT variables in the input files of the
    tasks of this node.

    Args:
        task_class: If not None, only tasks whose class is exactly
            `task_class` are modified.

    Example:
        flow.walknset_vars(ecut=10, kptopt=4)

    Raises:
        TypeError: if this object is neither a work nor a flow.
    '''
    def wants(task):
        # No filter means every task matches; otherwise require an exact
        # class match (subclasses are deliberately excluded, `is` check).
        return task_class is None or task.__class__ is task_class

    if self.is_work:
        tasks = iter(self)
    elif self.is_flow:
        tasks = self.iflat_tasks()
    else:
        raise TypeError("Don't know how to set variables for object class %s" % self.__class__.__name__)

    for task in tasks:
        if wants(task):
            task.set_vars(*args, **kwargs)
Set the values of the ABINIT variables in the input files of the nodes Args: task_class: If not None, only the input files of the tasks belonging to class `task_class` are modified. Example: flow.walknset_vars(ecut=10, kptopt=4)
juraj-google-style
def _wait_for_function(self, function_descriptor, driver_id, timeout=10):
    '''Block until the function to execute is registered on this worker.

    Loops until the import thread registers the relevant function (or,
    for actor workers, until the actor is defined).  If the wait exceeds
    `timeout` seconds a warning is pushed to the driver once; the loop
    keeps polling afterwards.

    Args:
        function_descriptor: The FunctionDescriptor of the function that
            we want to execute.
        driver_id (str): The ID of the driver to push the timeout warning
            to.
    '''
    deadline = time.time() + timeout
    warning_sent = False
    while True:
        with self.lock:
            is_actor = not self._worker.actor_id.is_nil()
            if not is_actor and function_descriptor.function_id in self._function_execution_info[driver_id]:
                break
            if is_actor and self._worker.actor_id in self._worker.actors:
                break
        if time.time() > deadline and not warning_sent:
            warning_message = 'This worker was asked to execute a function that it does not have registered. You may have to restart Ray.'
            ray.utils.push_error_to_driver(self._worker, ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR, warning_message, driver_id=driver_id)
            warning_sent = True
        # Busy-wait with a short sleep so newly imported functions are
        # picked up quickly.
        time.sleep(0.001)
Wait until the function to be executed is present on this worker. This method will simply loop until the import thread has imported the relevant function. If we spend too long in this loop, that may indicate a problem somewhere and we will push an error message to the user. If this worker is an actor, then this will wait until the actor has been defined. Args: function_descriptor : The FunctionDescriptor of the function that we want to execute. driver_id (str): The ID of the driver to push the error message to if this times out.
codesearchnet
def _model_setup():
    '''Set up a MNIST Keras model for testing purposes.

    Builds a synthetic-data MNIST Keras model under a collective
    all-reduce strategy scope.

    Returns:
        A tuple of (batch_size, steps, train_dataset, model).
    '''
    context.set_log_device_placement(True)
    batch_size, steps = 64, 2
    strategy = collective_strategy.CollectiveAllReduceStrategy()
    with strategy.scope():
        train_ds, _ = mnist_testing_utils.mnist_synthetic_dataset(batch_size, steps)
        model = mnist_testing_utils.get_mnist_model((28, 28, 1))
    return (batch_size, steps, train_ds, model)
Set up a MNIST Keras model for testing purposes. Builds a MNIST Keras model and returns model information. Returns: A tuple of (batch_size, steps, train_dataset, model)
github-repos
def norm_zero_one(array, dim=None):
    '''Normalize a numpy array to the [0, 1] range based on its extent.

    Args:
        array (ndarray): values to normalize.
        dim (int): axis along which min/max are taken (None = global).

    Returns:
        ndarray: float array rescaled so min -> 0 and max -> 1.
    '''
    if not util_type.is_float(array):
        # Integer input would truncate under division; promote first.
        array = array.astype(np.float32)
    lo = array.min(dim)
    hi = array.max(dim)
    extent = np.subtract(hi, lo)
    # NOTE(review): a constant array has extent 0, which yields a
    # divide-by-zero warning / NaNs — same as the original behavior.
    return np.divide(np.subtract(array, lo), extent)
normalizes a numpy array from 0 to 1 based in its extent Args: array (ndarray): dim (int): Returns: ndarray: CommandLine: python -m utool.util_alg --test-norm_zero_one Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> import utool as ut >>> array = np.array([ 22, 1, 3, 2, 10, 42, ]) >>> dim = None >>> array_norm = norm_zero_one(array, dim) >>> result = ut.repr2(list(array_norm), precision=3) >>> print(result) [0.512, 0.000, 0.049, 0.024, 0.220, 1.000]
codesearchnet
def __init__(self, operation, shape, dtype, name=None, index=0):
    '''Create a Tensor.

    Args:
        operation: the Operation that outputs this tensor.
        shape: a Shape.
        dtype: a tf.DType.
        name: an optional string; defaults to "<operation name>:<index>".
        index: optional integer, the index among operation's output tensors.

    Raises:
        ValueError: if `shape` is not a Shape or `dtype` is not a tf.DType.
    '''
    if not isinstance(shape, Shape):
        # Bug fix: the original formatted `shape.to_string`, which on the
        # very objects this branch rejects raises AttributeError (or prints
        # a bound method) instead of the intended ValueError message.
        raise ValueError("shape must be a Shape got %s" % (shape,))
    if not isinstance(dtype, tf.DType):
        raise ValueError("dtype must be a tf.DType got %s" % dtype)
    self._mesh = operation.mesh
    self._operation = operation
    self._shape = shape
    self._dtype = dtype
    if name is None:
        name = self.operation.name + ":" + str(index)
    self._name = name
Create a Tensor. Args: operation: the Operation that outputs this tensor shape: a Shape dtype: a tf.DType name: an optional string index: optional integer, the index among operation's output tensors
juraj-google-style
def getSpatialReferenceId(self, session):
    '''Retrieve the spatial reference id (SRID) of the geometry column.

    This method is a veneer for an SQL query that calls ``ST_SRID()`` on
    the geometry column.

    Args:
        session (sqlalchemy.orm.session.Session): SQLAlchemy session bound
            to a PostGIS enabled database.

    Returns:
        The PostGIS spatial reference ID of the first matching row, or
        None if the query returns no rows.
    '''
    # Bug fix: the SQL string literal was missing in the source as
    # received (`statement = .format(...)` is a syntax error).  It has
    # been reconstructed from the documented ST_SRID() contract —
    # TODO(review): confirm against the upstream query text.
    statement = 'SELECT ST_SRID({0}) AS srid FROM {1} WHERE id={2};'.format(
        self.geometryColumnName, self.tableName, self.id)
    result = session.execute(statement)
    for row in result:
        return row.srid
Retrieve the spatial reference id by which the geometry column is registered. This method is a veneer for an SQL query that calls the ``ST_SRID()`` function on the geometry column. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. Returns: str: PostGIS spatial reference ID.
juraj-google-style
def expand(tmpl, *args, **kwargs):
    '''Expand a path template with the given variables.

    ..code-block:: python
        >>> expand('users/*/messages/*', 'me', '123')
        users/me/messages/123
        >>> expand('/v1/{name=shelves/*/books/*}', name='shelves/1/books/3')
        /v1/shelves/1/books/3

    Args:
        tmpl (str): The path template.
        args: The positional variables for the path.
        kwargs: The named variables for the path.

    Returns:
        str: The expanded path.

    Raises:
        ValueError: If a positional or named variable is required by the
            template but not specified, or if an unexpected template
            expression is encountered.
    '''
    substitute = functools.partial(_expand_variable_match, list(args), kwargs)
    return _VARIABLE_RE.sub(substitute, tmpl)
Expand a path template with the given variables. ..code-block:: python >>> expand('users/*/messages/*', 'me', '123') users/me/messages/123 >>> expand('/v1/{name=shelves/*/books/*}', name='shelves/1/books/3') /v1/shelves/1/books/3 Args: tmpl (str): The path template. args: The positional variables for the path. kwargs: The named variables for the path. Returns: str: The expanded path Raises: ValueError: If a positional or named variable is required by the template but not specified or if an unexpected template expression is encountered.
codesearchnet
def add_logger(name, level=None, format=None):
    '''Set up (or re-fetch) a stdout logger.

    Args:
        name (str): name of the logger.
        level: logging level; defaults to logging.INFO.
        format (str): format string for logging output; defaults to
            ``%(filename)-11s %(lineno)-3d: %(message)s``.

    Returns:
        logging.Logger: the configured logger object.
    '''
    format = format or '%(filename)-11s %(lineno)-3d: %(message)s'
    log = logging.getLogger(name)
    log.setLevel(level or logging.INFO)
    # Bug fix: the original attached a fresh StreamHandler on every call,
    # so calling add_logger twice for the same name duplicated every
    # emitted log line.  Only attach a handler the first time.
    if not log.handlers:
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(logging.Formatter(format))
        log.addHandler(ch)
    return log
Set up a stdout logger. Args: name (str): name of the logger level: defaults to logging.INFO format (str): format string for logging output. defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``. Returns: The logger object.
juraj-google-style
async def vsetup(self, author):
    '''Create the voice client for the author's voice channel.

    Args:
        author (discord.Member): The user whose voice channel the voice
            ui will join.
    '''
    if self.vready:
        logger.warning('Attempt to init voice when already initialised')
        return
    if self.state != 'starting':
        logger.error("Attempt to init from wrong state ('{}'), must be 'starting'.".format(self.state))
        return
    self.logger.debug('Setting up voice')
    self.vchannel = author.voice.voice_channel
    # Guard clause: the author must be in a voice channel to join it.
    if not self.vchannel:
        self.statuslog.error("You're not connected to a voice channel.")
        return
    self.statuslog.info('Connecting to voice')
    try:
        self.vclient = await client.join_voice_channel(self.vchannel)
    except discord.ClientException as e:
        logger.exception(e)
        self.statuslog.warning("I'm already connected to a voice channel.")
        return
    except discord.opus.OpusNotLoaded as e:
        logger.exception(e)
        logger.error('Could not load Opus. This is an error with your FFmpeg setup.')
        self.statuslog.error('Could not load Opus.')
        return
    except discord.DiscordException as e:
        logger.exception(e)
        self.statuslog.error("I couldn't connect to the voice channel. Check my permissions.")
        return
    except Exception as e:
        self.statuslog.error('Internal error connecting to voice, disconnecting.')
        logger.error('Error connecting to voice {}'.format(e))
        return
    # Only mark ready once the client connected without error.
    self.vready = True
Creates the voice client Args: author (discord.Member): The user that the voice ui will seek
codesearchnet
def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None:
    '''Bind the buffer to a uniform block.

    Args:
        binding (int): The uniform block binding.

    Keyword Args:
        offset (int): Byte offset into the buffer.
        size (int): Number of bytes; ``-1`` means the whole buffer.
    '''
    # Thin veneer over the low-level ModernGL buffer object.
    self.mglo.bind_to_uniform_block(binding, offset, size)
Bind the buffer to a uniform block. Args: binding (int): The uniform block binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
codesearchnet
def get(self, section_name, key_name):
    '''Replicate configparser.get(): local config first, then global.

    Args:
        section_name (str): section name in config.
        key_name (str): key name in config.section_name.

    Returns:
        str: value from the local config when present, otherwise the
        global config value (defaults are not consulted).

    Raises:
        KeyError: unable to find option in either local or global config.
    '''
    try:
        return self.local_config.get(section_name, key_name)
    except Exception:
        self.logger.warning('%s.%s not found in local config', section_name, key_name)
    # Fall back to the global configuration.
    try:
        return self.global_config.get(section_name, key_name)
    except Exception:
        self.logger.error('%s.%s not found in global config', section_name, key_name)
        raise KeyError('Could not find option in local/global config')
Replicate configparser.get() functionality Args: section_name (str): section name in config key_name (str): key name in config.section_name Returns: str: do not check defaults, only return local value Raises: KeyError: unable to find option in either local or global config
juraj-google-style
def _LogForwardedIpChanges( self, configured, desired, to_add, to_remove, interface): if not to_add and not to_remove: return self.logger.info( 'Changing %s IPs from %s to %s by adding %s and removing %s.', interface, configured or None, desired or None, to_add or None, to_remove or None)
Log the planned IP address changes. Args: configured: list, the IP address strings already configured. desired: list, the IP address strings that will be configured. to_add: list, the forwarded IP address strings to configure. to_remove: list, the forwarded IP address strings to delete. interface: string, the output device to modify.
juraj-google-style
def ParseLSQuarantineRow(self, parser_mediator, query, row, **unused_kwargs):
    '''Parses a launch services quarantine event row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    '''
    # The query hash lets _GetRowValue cache column lookups per query.
    query_hash = hash(query)
    event_data = LsQuarantineEventData()
    event_data.agent = self._GetRowValue(query_hash, row, 'Agent')
    event_data.data = self._GetRowValue(query_hash, row, 'Data')
    event_data.query = query
    event_data.url = self._GetRowValue(query_hash, row, 'URL')
    # The 'Time' column is a Cocoa timestamp (seconds since 2001-01-01).
    timestamp = self._GetRowValue(query_hash, row, 'Time')
    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a launch services quarantine event row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
def read(self, size):
    '''Read data from the wrapped pipe connection.

    Args:
        size: Number of bytes to read.  The returned data is exactly
            `size` bytes long unless EOF is reached first.

    Returns:
        The bytes read (possibly shorter than `size` at EOF).
    '''
    chunks = []
    bytes_read = 0
    last_block_position = self.position
    while bytes_read < size:
        take = min(size - bytes_read, len(self.remaining))
        chunks.append(self.remaining[:take])
        self.remaining = self.remaining[take:]
        self.position += take
        bytes_read += take
        if not self.remaining:
            # Pull the next message from the pipe; EOF ends the read.
            try:
                self.remaining = self.conn.recv_bytes()
            except EOFError:
                break
    data = b''.join(chunks)
    if data:
        # Remember where this block started so the reader can be rewound.
        self.last_block_position = last_block_position
        self.last_block = data
    return data
Read data from the wrapped pipe connection. Args: size: Number of bytes to read. Actual number of bytes read is always equal to size unless EOF is reached. Returns: data read as bytes.
github-repos
def class_predictor(self, image_feats: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor]=None, query_mask: Optional[torch.Tensor]=None) -> Tuple[torch.FloatTensor]:
    '''Predict per-query class logits from image features.

    Args:
        image_feats: Features extracted from the `image_text_embedder`.
        query_embeds: Text query embeddings.
        query_mask: Must be provided with query_embeddings. A mask
            indicating which query embeddings are valid.

    Returns:
        Tuple of (pred_logits, image_class_embeds) from the class head.
    '''
    logits, class_embeds = self.class_head(image_feats, query_embeds, query_mask)
    return (logits, class_embeds)
Args: image_feats: Features extracted from the `image_text_embedder`. query_embeds: Text query embeddings. query_mask: Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
github-repos
def bessel_j1(x, name=None):
    '''Computes the Bessel j1 function of `x` element-wise.

    Bessel function of the first kind of order 1 (scipy equivalent:
    scipy.special.j1).

    Args:
        x: A `Tensor` or `SparseTensor` of type `half`, `float32`, or
            `float64`.
        name: A name for the operation (optional).

    Returns:
        A `Tensor` or `SparseTensor`, respectively, with the same type
        as `x`.
    '''
    with ops.name_scope(name, 'bessel_j1', [x]):
        return gen_special_math_ops.bessel_j1(x)
Computes the Bessel j1 function of `x` element-wise. Bessel function of the first kind of order 1. >>> tf.math.special.bessel_j1([0.5, 1., 2., 4.]).numpy() array([ 0.24226846, 0.44005059, 0.57672481, -0.06604333], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.j1 @end_compatibility
github-repos
def build_data(data_path, size, dataset):
    '''Creates the input pipeline and preprocessing ops for the dataset.

    Args:
        data_path: Glob pattern for the cifar binary data files.
        size: Batch size (images per returned batch).
        dataset: Either "cifar10" or "cifar100".

    Returns:
        The (image, label) tensors produced by a one-shot iterator over
        the batched dataset.
    '''
    image_size = 32
    # cifar100 records carry an extra coarse-label byte before the label.
    if dataset == "cifar10":
        label_bytes = 1
        label_offset = 0
    elif dataset == "cifar100":
        label_bytes = 1
        label_offset = 1
    depth = 3
    image_bytes = image_size * image_size * depth
    record_bytes = label_bytes + label_offset + image_bytes

    def load_transform(value):
        # Decode a fixed-length record into (image, label).
        record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])
        label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32)
        # NOTE(review): the image slice starts at label_bytes, not
        # label_bytes + label_offset, so for cifar100 it appears to
        # overlap the label byte — confirm against the record layout.
        depth_major = tf.reshape(
            tf.slice(record, [label_bytes], [image_bytes]),
            [depth, image_size, image_size])
        # CHW -> HWC, cast to float.
        image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
        return (image, label)

    data_files = tf.gfile.Glob(data_path)
    data = tf.contrib.data.FixedLengthRecordDataset(data_files, record_bytes=record_bytes)
    data = data.map(load_transform)
    data = data.batch(size)
    iterator = data.make_one_shot_iterator()
    return iterator.get_next()
Creates the input pipeline and preprocessing operations for the dataset. Args: data_path: Filename glob for the cifar data files. size: The number of images per batch. dataset: The dataset we are using ("cifar10" or "cifar100"). Returns: An operation that yields the next (image, label) batch from the dataset.
juraj-google-style
def constant(interval=1):
    '''Generator for intervals.

    Args:
        interval: A constant value to yield forever, or an iterable whose
            values are yielded in order.
    '''
    try:
        source = iter(interval)
    except TypeError:
        # Not iterable: repeat the scalar value indefinitely.
        source = itertools.repeat(interval)
    yield from source
Generator for constant intervals. Args: interval: A constant value to yield or an iterable of such values.
juraj-google-style
def cancel(self):
    '''Cancels the pipeline execution.

    Returns:
        The final state of the pipeline (in concrete runners).

    Raises:
        IOError: If there is a persistent problem getting job information.
        NotImplementedError: Always, in this base implementation; runners
            that support cancellation must override this method.
    '''
    raise NotImplementedError()
Cancels the pipeline execution. Raises: IOError: If there is a persistent problem getting job information. NotImplementedError: If the runner does not support this operation. Returns: The final state of the pipeline.
github-repos
def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5):
    '''Converts the output of [`DetrForSegmentation`] into image
    segmentation predictions. Only supports PyTorch.

    Deprecated in favor of `post_process_semantic_segmentation`.

    Args:
        outputs ([`DetrSegmentationOutput`]): Raw outputs of the model.
        target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or
            `List[Tuple]` of length `batch_size`): requested final (h, w)
            size of each prediction.
        threshold (`float`, *optional*, defaults to 0.9): score threshold
            used to filter out queries.
        mask_threshold (`float`, *optional*, defaults to 0.5): threshold
            used when binarizing the predicted masks.

    Returns:
        `List[Dict]`: one dict per image with keys 'scores', 'labels'
        and 'masks'.
    '''
    logger.warning_once('`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use `post_process_semantic_segmentation`.')
    out_logits, raw_masks = (outputs.logits, outputs.pred_masks)
    # The last logit column is the "no object" class.
    empty_label = out_logits.shape[-1] - 1
    preds = []

    def to_tuple(tup):
        # Accept either a tuple or a tensor/sequence with .tolist().
        if isinstance(tup, tuple):
            return tup
        return tuple(tup.tolist())

    for cur_logits, cur_masks, size in zip(out_logits, raw_masks, target_sizes):
        cur_scores, cur_labels = cur_logits.softmax(-1).max(-1)
        # Drop "no object" predictions and low-confidence queries.
        keep = cur_labels.ne(empty_label) & (cur_scores > threshold)
        cur_scores = cur_scores[keep]
        cur_labels = cur_labels[keep]
        cur_masks = cur_masks[keep]
        # Resize masks to the requested size, then binarize to 0/1.
        cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode='bilinear').squeeze(1)
        cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1
        predictions = {'scores': cur_scores, 'labels': cur_labels, 'masks': cur_masks}
        preds.append(predictions)
    return preds
Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch. Args: outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. threshold (`float`, *optional*, defaults to 0.9): Threshold to use to filter out queries. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model.
github-repos
def normalize_collaboration(collaboration):
    '''Normalize a collaboration string.

    Args:
        collaboration: a string containing collaboration(s), or None.

    Returns:
        list: List of extracted and normalized collaborations.

    Examples:
        >>> normalize_collaboration('for the CMS and ATLAS Collaborations')
        ['CMS', 'ATLAS']
    '''
    if not collaboration:
        return []
    text = collaboration.strip()
    # Drop a single pair of wrapping parentheses, if present.
    if text.startswith('(') and text.endswith(')'):
        text = text[1:-1]
    pieces = _RE_AND.split(text)
    # Strip leading/trailing boilerplate (e.g. "for the", "Collaboration").
    pieces = (_RE_COLLABORATION_LEADING.sub('', piece) for piece in pieces)
    pieces = (_RE_COLLABORATION_TRAILING.sub('', piece) for piece in pieces)
    return [piece.strip() for piece in pieces]
Normalize collaboration string. Args: collaboration: a string containing collaboration(s) or None Returns: list: List of extracted and normalized collaborations Examples: >>> from inspire_schemas.utils import normalize_collaboration >>> normalize_collaboration('for the CMS and ATLAS Collaborations') ['CMS', 'ATLAS']
codesearchnet
def get_src_folder(self):
    '''Get the app source folder from the settings.gradle file.

    Returns:
        str: the project source folder name (default is "app" when no
        included module matches).
    '''
    with open('%s/settings.gradle' % self.path) as gradle_file:
        for line in gradle_file.readlines():
            if not line.startswith('include'):
                continue
            # Extract quoted module names, e.g. ':app' -> 'app'.
            for folder in re.findall("\\'\\:?(.+?)\\'", line):
                if self.is_app_folder(folder):
                    return folder
    return 'app'
Gets the app source folder from settings.gradle file. Returns: A string containing the project source folder name (default is "app")
codesearchnet
def has_filename(self, filename):
    '''Return the self.data index position for the filename.

    Args:
        filename: Name of the file to search for in self.data.

    Returns:
        The self.data index for the filename, or None if the filename is
        not found in self.data.
    '''
    def canonical(path):
        # Compare canonicalized paths so case and symlinks don't matter.
        return osp.normcase(osp.realpath(path))

    target = canonical(filename)
    for index, finfo in enumerate(self.data):
        if canonical(finfo.filename) == target:
            return index
    return None
Return the self.data index position for the filename. Args: filename: Name of the file to search for in self.data. Returns: The self.data index for the filename. Returns None if the filename is not found in self.data.
juraj-google-style
def run_population(population, evolution, gpus):
    '''Train one population of configs, distributing models across devices.

    Saves each candidate config under its model save path, launches one
    training subprocess per device, and waits for each batch of processes
    to finish before starting the next batch.

    Args:
        population: list of dictionaries - configs of current population.
        evolution: ParamsEvolution.
        gpus: list of given devices (integers; -1 means CPU).

    Returns:
        None
    '''
    population_size = len(population)
    # Bug fix: the source line was truncated ("range(population_size");
    # restored the batching loop over device groups so that at most
    # len(gpus) training processes run concurrently.
    for k in range(population_size // len(gpus) + 1):
        procs = []
        for j in range(len(gpus)):
            i = k * len(gpus) + j
            if i < population_size:
                save_path = expand_path(
                    evolution.get_value_from_config(parse_config(population[i]),
                                                    evolution.path_to_models_save_path))
                save_path.mkdir(parents=True, exist_ok=True)
                f_name = save_path / "config.json"
                save_json(population[i], f_name)
                with save_path.joinpath('out.txt').open('w', encoding='utf8') as outlog, \
                        save_path.joinpath('err.txt').open('w', encoding='utf8') as errlog:
                    env = dict(os.environ)
                    # Pin the subprocess to its device unless running on a
                    # single CPU (-1) setup.
                    if len(gpus) > 1 or gpus[0] != -1:
                        env['CUDA_VISIBLE_DEVICES'] = str(gpus[j])
                    procs.append(Popen("{} -m deeppavlov train {}".format(sys.executable, str(f_name)),
                                       shell=True, stdout=outlog, stderr=errlog, env=env))
        # Wait for the whole batch before launching the next one.
        for j, proc in enumerate(procs):
            i = k * len(gpus) + j
            log.info(f'Waiting on {i}th proc')
            if proc.wait() != 0:
                save_path = expand_path(
                    evolution.get_value_from_config(parse_config(population[i]),
                                                    evolution.path_to_models_save_path))
                with save_path.joinpath('err.txt').open(encoding='utf8') as errlog:
                    log.warning(f'Population {i} returned an error code {proc.returncode} and an error log:\n'
                                + errlog.read())
    return None
Change save and load paths for obtained population, save config.json with model config, run population via current python executor (with which evolve.py already run) and on given devices (-1 means CPU, other integeres - visible for evolve.py GPUs) Args: population: list of dictionaries - configs of current population evolution: ParamsEvolution gpus: list of given devices (list of integers) Returns: None
juraj-google-style
def __init__(self, file_object, encoding='utf-8', end_of_line='\n'):
    '''Initializes the text file.

    Args:
        file_object (FileIO): a file-like object to read from.
        encoding (Optional[str]): text encoding.
        end_of_line (Optional[str]): end of line indicator.
    '''
    super(TextFile, self).__init__()
    self._file_object = file_object
    self._file_object_size = file_object.get_size()
    self._encoding = encoding
    # The line buffer is kept as bytes, so store the EOL marker encoded.
    self._end_of_line = end_of_line.encode(self._encoding)
    self._end_of_line_length = len(self._end_of_line)
    self._lines = []
    self._lines_buffer = b''
    self._lines_buffer_offset = 0
    self._current_offset = 0
Initializes the text file. Args: file_object (FileIO): a file-like object to read from. encoding (Optional[str]): text encoding. end_of_line (Optional[str]): end of line indicator.
juraj-google-style
def convert_legacy_structure(output_types, output_shapes, output_classes):
    '''Returns a `Structure` that represents the given legacy structure.

    Converts the legacy `output_types` / `output_shapes` /
    `output_classes` triple into the equivalent nested structure of
    `tf.TypeSpec` objects.

    Args:
        output_types: A nested structure of `tf.DType` objects.
        output_shapes: A nested structure of `tf.TensorShape` objects.
        output_classes: A nested structure of Python `type` objects.

    Returns:
        A `Structure`.

    Raises:
        TypeError: If a structure cannot be built because one of the
            component classes in `output_classes` is not supported.
    '''
    specs = []
    for dtype, shape, cls in zip(nest.flatten(output_types), nest.flatten(output_shapes), nest.flatten(output_classes)):
        if isinstance(cls, type_spec.TypeSpec):
            specs.append(cls)
        elif issubclass(cls, sparse_tensor.SparseTensor):
            specs.append(sparse_tensor.SparseTensorSpec(shape, dtype))
        elif issubclass(cls, tensor_lib.Tensor):
            specs.append(tensor_lib.TensorSpec(shape, dtype))
        elif issubclass(cls, tensor_array_ops.TensorArray):
            # For TensorArrays, shape[0] encodes dynamic_size and shape[1]
            # encodes infer_shape; the element shape starts at index 2.
            specs.append(tensor_array_ops.TensorArraySpec(
                shape[2:], dtype,
                dynamic_size=tensor_shape.dimension_value(shape[0]),
                infer_shape=tensor_shape.dimension_value(shape[1])))
        else:
            raise TypeError('Could not build a structure for output class {}. Make sure any component class in `output_classes` inherits from one of the following classes: `tf.TypeSpec`, `tf.sparse.SparseTensor`, `tf.Tensor`, `tf.TensorArray`.'.format(cls.__name__))
    return nest.pack_sequence_as(output_classes, specs)
Returns a `Structure` that represents the given legacy structure. This method provides a way to convert from the existing `Dataset` and `Iterator` structure-related properties to a `Structure` object. A "legacy" structure is represented by the `tf.data.Dataset.output_types`, `tf.data.Dataset.output_shapes`, and `tf.data.Dataset.output_classes` properties. TODO(b/110122868): Remove this function once `Structure` is used throughout `tf.data`. Args: output_types: A nested structure of `tf.DType` objects corresponding to each component of a structured value. output_shapes: A nested structure of `tf.TensorShape` objects corresponding to each component a structured value. output_classes: A nested structure of Python `type` objects corresponding to each component of a structured value. Returns: A `Structure`. Raises: TypeError: If a structure cannot be built from the arguments, because one of the component classes in `output_classes` is not supported.
github-repos
def guess_content_kind(path=None, web_video_data=None, questions=None):
    '''Determine what kind the content is.

    Args:
        path (str): file path associated with the content, if any.
        web_video_data: web video details, if any.
        questions: assessment questions, if any.

    Returns:
        A content-kind constant indicating the node's kind.

    Raises:
        InvalidFormatException: if `path` has an unrecognized extension.
    '''
    # Any questions make this an exercise regardless of attached files.
    if questions and len(questions) > 0:
        return content_kinds.EXERCISE
    if path:
        ext = os.path.splitext(path)[1][1:].lower()
        if ext in content_kinds.MAPPING:
            return content_kinds.MAPPING[ext]
        raise InvalidFormatException("Invalid file type: Allowed formats are {0}".format([key for key, value in content_kinds.MAPPING.items()]))
    if web_video_data:
        return content_kinds.VIDEO
    return content_kinds.TOPIC
guess_content_kind: determines what kind the content is Args: path (str): file path associated with the content, if any web_video_data: web video details, if any questions (list): assessment questions, if any Returns: string indicating node's kind
juraj-google-style
def load(self, label_lookup_path, uid_lookup_path):
    '''Loads a human readable English name for each softmax node.

    Args:
        label_lookup_path: file mapping integer node ID to string UID.
        uid_lookup_path: file mapping string UID to human-readable string.

    Returns:
        dict from integer node ID to human-readable string.
    '''
    if not tf.gfile.Exists(uid_lookup_path):
        tf.logging.fatal('File does not exist %s', uid_lookup_path)
    if not tf.gfile.Exists(label_lookup_path):
        tf.logging.fatal('File does not exist %s', label_lookup_path)
    # First pass: UID -> human-readable string.
    uid_to_human = {}
    pattern = re.compile('[n\\d]*[ \\S,]*')
    for line in tf.gfile.GFile(uid_lookup_path).readlines():
        parsed_items = pattern.findall(line)
        uid_to_human[parsed_items[0]] = parsed_items[2]
    # Second pass: integer node ID -> UID, from the label protobuf text.
    node_id_to_uid = {}
    for line in tf.gfile.GFile(label_lookup_path).readlines():
        if line.startswith(' target_class:'):
            target_class = int(line.split(': ')[1])
        if line.startswith(' target_class_string:'):
            # Strip the surrounding quote and trailing quote+newline.
            node_id_to_uid[target_class] = line.split(': ')[1][1:-2]
    # Final pass: integer node ID -> human-readable string.
    node_id_to_name = {}
    for key, val in node_id_to_uid.items():
        if val not in uid_to_human:
            tf.logging.fatal('Failed to locate: %s', val)
        node_id_to_name[key] = uid_to_human[val]
    return node_id_to_name
Loads a human readable English name for each softmax node. Args: label_lookup_path: string UID to integer node ID. uid_lookup_path: string UID to human-readable string. Returns: dict from integer node ID to human-readable string.
codesearchnet
def locator(self, value):
    '''Update the locator, and trigger a latitude and longitude update.

    Args:
        value (str): New Maidenhead locator string.
    '''
    self._locator = value
    # Derive the coordinate pair from the new grid locator.
    self._latitude, self._longitude = utils.from_grid_locator(value)
Update the locator, and trigger a latitude and longitude update. Args: value (str): New Maidenhead locator string
juraj-google-style
def center_slab(slab):
    '''Shift sites so the center of the slab region sits close to c=0.5.

    Centering makes it easier to find the surface sites and apply
    operations like doping.  Sites that spill across the periodic
    boundary along c are first folded back inside the cell, then the
    whole slab is translated so its center of mass is at c=0.5.

    Args:
        slab (Slab): Slab structure to center.

    Returns:
        Returns a centered slab structure (modified in place).
    '''
    # Shortest nonzero neighbor distance around the first site; 3x that
    # is used as the neighbor-search radius below.
    bdists = sorted([nn[1] for nn in slab.get_neighbors(slab[0], 10) if (nn[1] > 0)])
    r = (bdists[0] * 3)
    all_indices = [i for (i, site) in enumerate(slab)]
    for site in slab:
        # A neighbor distance larger than the c lattice parameter means
        # the slab wraps across the cell boundary at this site; shift all
        # sites so this one lands just inside the top of the cell.
        if any([(nn[1] > slab.lattice.c) for nn in slab.get_neighbors(site, r)]):
            shift = ((1 - site.frac_coords[2]) + 0.05)
            slab.translate_sites(all_indices, [0, 0, shift])
    # Finally move the mass-weighted center of the slab to c=0.5.
    weights = [s.species.weight for s in slab]
    center_of_mass = np.average(slab.frac_coords, weights=weights, axis=0)
    shift = (0.5 - center_of_mass[2])
    slab.translate_sites(all_indices, [0, 0, shift])
    return slab
The goal here is to ensure the center of the slab region is centered close to c=0.5. This makes it easier to find the surface sites and apply operations like doping. There are three cases where the slab in not centered: 1. The slab region is completely between two vacuums in the box but not necessarily centered. We simply shift the slab by the difference in its center of mass and 0.5 along the c direction. 2. The slab completely spills outside the box from the bottom and into the top. This makes it incredibly difficult to locate surface sites. We iterate through all sites that spill over (z>c) and shift all sites such that this specific site is now on the other side. Repeat for all sites with z>c. 3. This is a simpler case of scenario 2. Either the top or bottom slab sites are at c=0 or c=1. Treat as scenario 2. Args: slab (Slab): Slab structure to center Returns: Returns a centered slab structure
codesearchnet
def _check_default_values(method_signature, base_signature):
    '''Checks that default parameter values of the overriding method match.

    Args:
        method_signature: signature of the overriding method.
        base_signature: signature of the overridden method.

    Returns:
        SignatureError if a mismatch is detected. Otherwise returns None.
    '''
    for base_param_name, base_default_value in base_signature.defaults.items():
        if base_param_name in base_signature.kwonly_params:
            # Keyword-only in the base: the override must expose the same
            # name (positionally or keyword-only) for the check to apply.
            if base_param_name not in method_signature.kwonly_params and base_param_name not in method_signature.param_names:
                continue
            method_param_name = base_param_name
        else:
            # Positional in the base: match the override's parameter at
            # the same index, if it has one.
            base_param_index = base_signature.param_names.index(base_param_name)
            if base_param_index >= len(method_signature.param_names):
                continue
            method_param_name = method_signature.param_names[base_param_index]
        try:
            method_default_value = method_signature.defaults[method_param_name]
        except KeyError:
            return SignatureError(SignatureErrorType.DEFAULT_PARAMETER_MISMATCH, f"Parameter '{method_param_name}' must have a default value.")
        try:
            base_default = abstract_utils.get_atomic_python_constant(base_default_value)
            method_default = abstract_utils.get_atomic_python_constant(method_default_value)
        except abstract_utils.ConversionError:
            # Defaults that can't be reduced to concrete Python constants
            # are not comparable; skip them rather than guess.
            continue
        if base_default != method_default:
            return SignatureError(SignatureErrorType.DEFAULT_VALUE_MISMATCH, f"Parameter '{base_param_name}' must have the same default value.")
    return None
Checks that default parameter values of the overriding method match. Args: method_signature: signature of the overriding method. base_signature: signature of the overridden method. Returns: SignatureError if a mismatch is detected. Otherwise returns None.
github-repos
def process_tfma(schema_file, big_query_table=None, eval_model_dir=None, max_eval_rows=None, pipeline_args=None, publish_to_bq=False, project=None, metrics_table=None, metrics_dataset=None):
    '''Runs a batch job to evaluate the eval_model against the given input.

    Args:
        schema_file: A file containing a text-serialized Schema that
            describes the eval data.
        big_query_table: A BigQuery table name (DATASET.TABLE) used as the
            evaluation input.
        eval_model_dir: A directory where the eval model is located.
        max_eval_rows: Number of rows to query from BigQuery.
        pipeline_args: additional DataflowRunner or DirectRunner args
            passed to the beam pipeline.
        publish_to_bq: whether to publish pipeline metrics to BigQuery.
        project: GCP project used for the query and metrics publishing.
        metrics_dataset: BigQuery dataset for published metrics.
        metrics_table: BigQuery table (also used as the metrics namespace).

    Raises:
        ValueError: if big_query_table is not specified.
    '''
    # NOTE(review): this statement block was split mid-expression across
    # two physical rows in the source as received; the expression has
    # been rejoined without altering any token.
    if big_query_table is None:
        raise ValueError('--big_query_table should be provided.')
    # Evaluate both overall and sliced by trip_start_hour.
    slice_spec = [tfma.slicer.SingleSliceSpec(), tfma.slicer.SingleSliceSpec(columns=['trip_start_hour'])]
    metrics_namespace = metrics_table
    schema = taxi.read_schema(schema_file)
    eval_shared_model = tfma.default_eval_shared_model(eval_saved_model_path=eval_model_dir, add_metrics_callbacks=[tfma.post_export_metrics.calibration_plot_and_prediction_histogram(), tfma.post_export_metrics.auc_plots()])
    metrics_monitor = None
    if publish_to_bq:
        metrics_monitor = MetricsReader(publish_to_bq=publish_to_bq, project_name=project, bq_table=metrics_table, bq_dataset=metrics_dataset, namespace=metrics_namespace, filters=MetricsFilter().with_namespace(metrics_namespace))
    pipeline = beam.Pipeline(argv=pipeline_args)
    query = taxi.make_sql(big_query_table, max_eval_rows, for_eval=True)
    raw_feature_spec = taxi.get_raw_feature_spec(schema)
    # Read, time-stamp and clean the raw rows.
    raw_data = pipeline | 'ReadBigQuery' >> ReadFromBigQuery(query=query, project=project, use_standard_sql=True) | 'Measure time: Start' >> beam.ParDo(MeasureTime(metrics_namespace)) | 'CleanData' >> beam.Map(lambda x: taxi.clean_raw_data_dict(x, raw_feature_spec))
    coder = taxi.make_proto_coder(schema)
    extractors = tfma.default_extractors(eval_shared_model=eval_shared_model, slice_spec=slice_spec, desired_batch_size=None, materialize=False)
    evaluators = tfma.default_evaluators(eval_shared_model=eval_shared_model, desired_batch_size=None, num_bootstrap_samples=1)
    # Run the TFMA extraction + evaluation over serialized examples.
    _ = raw_data | 'ToSerializedTFExample' >> beam.Map(coder.encode) | 'Extract Results' >> tfma.InputsToExtracts() | 'Extract and evaluate' >> tfma.ExtractAndEvaluate(extractors=extractors, evaluators=evaluators) | 'Map Evaluations to PCollection' >> MapEvalToPCollection() | 'Measure time: End' >> beam.ParDo(MeasureTime(metrics_namespace))
    result = pipeline.run()
    result.wait_until_finish()
    if metrics_monitor:
        metrics_monitor.publish_metrics(result)
Runs a batch job to evaluate the eval_model against the given input. Args: schema_file: A file containing a text-serialized Schema that describes the eval data. big_query_table: A BigQuery table name specified as DATASET.TABLE which should be the input for evaluation. This can only be set if input_csv is None. eval_model_dir: A directory where the eval model is located. max_eval_rows: Number of rows to query from BigQuery. pipeline_args: additional DataflowRunner or DirectRunner args passed to the beam pipeline. publish_to_bq: project: metrics_dataset: metrics_table: Raises: ValueError: if input_csv and big_query_table are not specified correctly.
github-repos
def _refresh(self, http):
    '''Refreshes the access token.

    Skip all the storage hoops and just refresh using the metadata
    service API.

    Args:
        http: an object to be used to make HTTP requests.

    Raises:
        HttpAccessTokenRefreshError: When the refresh fails.
    '''
    try:
        self._retrieve_info(http)
        self.access_token, self.token_expiry = _metadata.get_token(
            http, service_account=self.service_account_email)
    except http_client.HTTPException as err:
        # Surface transport failures as the credential-refresh error type
        # callers expect.
        raise client.HttpAccessTokenRefreshError(str(err))
Refreshes the access token. Skip all the storage hoops and just refresh using the API. Args: http: an object to be used to make HTTP requests. Raises: HttpAccessTokenRefreshError: When the refresh fails.
juraj-google-style
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor]=None, chunk_size: Optional[int]=None, use_memory_efficient_kernel: bool=False, use_lma: bool=False, inplace_safe: bool=False) -> torch.Tensor:
    """Triangle attention over the pair representation.

    Args:
        x: [*, I, J, C_in] input tensor (e.g. the pair representation).
        mask: Optional [*, I, J] mask; defaults to all-ones.
        chunk_size: If set, attention is computed in chunks of this size.
        use_memory_efficient_kernel: Use the memory-efficient attention kernel.
        use_lma: Use low-memory attention.
        inplace_safe: Whether in-place ops are safe (chunked path only).

    Returns:
        [*, I, J, C_in] output tensor.
    """
    if mask is None:
        mask = x.new_ones(x.shape[:-1])
    # For "ending" (non-starting) attention, operate on a transposed view
    # and undo the transpose at the end.
    if not self.starting:
        x = x.transpose(-2, -3)
        mask = mask.transpose(-1, -2)
    x = self.layer_norm(x)
    # Large negative bias masks out padded positions in the attention logits.
    mask_bias = (self.inf * (mask - 1))[..., :, None, None, :]
    # Per-pair bias projected from the input, broadcast across heads.
    triangle_bias = permute_final_dims(self.linear(x), (2, 0, 1))
    triangle_bias = triangle_bias.unsqueeze(-4)
    biases = [mask_bias, triangle_bias]
    if chunk_size is not None:
        x = self._chunk(x, biases, chunk_size, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma, inplace_safe=inplace_safe)
    else:
        x = self.mha(q_x=x, kv_x=x, biases=biases, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma)
    if not self.starting:
        x = x.transpose(-2, -3)
    return x
Args: x: [*, I, J, C_in] input tensor (e.g. the pair representation) Returns: [*, I, J, C_in] output tensor
github-repos
def find_vasp_calculations():
    """Find all subdirectories (relative to the cwd) containing either a
    vasprun.xml file or a compressed vasprun.xml.gz file.

    Returns:
        (List): List of all VASP calculation subdirectory paths.
    """
    found = []
    for pattern, strip_re in (('**/vasprun.xml', 'vasprun\\.xml'),
                              ('**/vasprun.xml.gz', 'vasprun\\.xml\\.gz')):
        for path in glob.iglob(pattern, recursive=True):
            found.append('./' + re.sub(strip_re, '', path))
    return found
Returns a list of all subdirectories that contain either a vasprun.xml file or a compressed vasprun.xml.gz file. Args: None Returns: (List): list of all VASP calculation subdirectories.
codesearchnet
def disp(obj: Any, mode: str = '') -> None:
    """Display the object (functional form of the `;` auto-display magic).

    Args:
        obj: The object to display.
        mode: Any mode supported by `ecolab.auto_display()`.

    Raises:
        NotImplementedError: If line mode is requested.
    """
    if _Options.LINE not in mode:
        _display_and_return(obj, options=mode)
    else:
        raise NotImplementedError('Line mode not supported in `disp()`')
Display the object. This is the functional API for the `;` auto display magic. Args: obj: The object to display mode: Any mode supported by `ecolab.auto_display()`
github-repos
def find_all_documented_objects() -> tuple[list[str], dict[str, list[str]]]:
    """Parse all doc files to detect which objects and methods they document.

    Returns:
        A tuple of:
        - the list of all documented object names (last path component only);
        - a mapping from each autodoc'ed object name (full import path, e.g.
          `integrations.PeftAdapterMixin`) to its documented methods.
    """
    # BUGFIX: the return annotation previously claimed `List[str]` although
    # the function returns a (list, dict) tuple.
    documented_obj = []
    documented_methods_map = {}
    for doc_file in Path(PATH_TO_DOC).glob('**/*.md'):
        with open(doc_file, 'r', encoding='utf-8', newline='\n') as f:
            content = f.read()
        # Each `[[autodoc]] some.module.Object` directive documents one object.
        raw_doc_objs = re.findall('\\[\\[autodoc\\]\\]\\s+(\\S+)\\s+', content)
        documented_obj += [obj.split('.')[-1] for obj in raw_doc_objs]
        for obj in raw_doc_objs:
            # The indented "- method" lines following a directive list the
            # explicitly documented methods of that object.
            obj_public_methods = re.findall(f'\\[\\[autodoc\\]\\] {obj}((\\n\\s+-.*)+)', content)
            if obj_public_methods:
                documented_methods_map[obj] = re.findall('(?<=-\\s).*', obj_public_methods[0][0])
    return (documented_obj, documented_methods_map)
Parse the content of all doc files to detect which classes and functions they document.

Returns:
    `List[str]`: The list of all object names being documented.
    `Dict[str, List[str]]`: A dictionary mapping each object name (full import path, e.g. `integrations.PeftAdapterMixin`) to its documented methods.
github-repos
def get_appliance_by_name(self, appliance_name):
    """Get the particular Image Streamer resource matching a name.

    Args:
        appliance_name: The Image Streamer resource name.

    Returns:
        dict: The matching Image Streamer resource, or None if not found.
    """
    appliances = self.get_appliances()
    if not appliances:
        return None
    return next((appliance for appliance in appliances
                 if appliance['name'] == appliance_name), None)
Gets the particular Image Streamer resource based on its name. Args: appliance_name: The Image Streamer resource name. Returns: dict: Image Streamer resource.
juraj-google-style
def write(grp, out_path):
    """Write a GRP to a newline-delimited text file.

    Args:
        grp (list): GRP entries to write, one per line.
        out_path (string): Output file path.

    Returns:
        None
    """
    lines = [str(entry) + '\n' for entry in grp]
    with open(out_path, 'w') as f:
        f.writelines(lines)
Write a GRP to a text file. Args: grp (list): GRP object to write to new-line delimited text file out_path (string): output path Returns: None
codesearchnet
def _has_valid_catchup_replies(self, seq_no: int, txns_to_process: List[Tuple[(int, Any)]]) -> Tuple[(bool, str, int)]:
    """Verify that the cached catchup reply covering ``seq_no`` carries a
    valid consistency proof; transforms the covered txns for the ledger.

    Args:
        seq_no: Sequence number of the first transaction to process.
        txns_to_process: (seq_no, txn) pairs still awaiting processing.

    Returns:
        Tuple of (whether the proof verified, name of the node the reply
        came from, number of transactions ready to be processed).
    """
    assert (seq_no == txns_to_process[0][0])
    (node_name, catchup_rep) = self._find_catchup_reply_for_seq_no(seq_no)
    txns = catchup_rep.txns
    # Keep only the pending txns actually covered by this reply, in order,
    # transformed into their ledger representation.
    txns = [self._provider.transform_txn_for_ledger(txn) for (s, txn) in txns_to_process[:len(txns)] if (str(s) in txns)]
    # Build a temporary tree as if these txns were applied, then check the
    # reply's consistency proof against the catchup target size/root.
    temp_tree = self._ledger.treeWithAppliedTxns(txns)
    proof = catchup_rep.consProof
    final_size = self._catchup_till.final_size
    final_hash = self._catchup_till.final_hash
    try:
        logger.info('{} verifying proof for {}, {}, {}, {}, {}'.format(self, temp_tree.tree_size, final_size, temp_tree.root_hash, final_hash, proof))
        verified = self._provider.verifier(self._ledger_id).verify_tree_consistency(temp_tree.tree_size, final_size, temp_tree.root_hash, Ledger.strToHash(final_hash), [Ledger.strToHash(p) for p in proof])
    except Exception as ex:
        # Any verification failure (malformed proof, hash mismatch) counts
        # as an unverified reply rather than a hard error.
        logger.info('{} could not verify catchup reply {} since {}'.format(self, catchup_rep, ex))
        verified = False
    return (bool(verified), node_name, len(txns))
Checks whether the catchup replies covering seq_no form a valid consistency proof. Transforms the covered transactions for the ledger as a side effect.

Args:
    seq_no: Sequence number of the first transaction to process.
    txns_to_process: List of (seq_no, txn) pairs awaiting processing.

Returns:
    Whether the catchup reply corresponding to seq_no verified,
    the name of the node from which the txns came, and
    the number of transactions ready to be processed.
codesearchnet
class FlaxImageClassifierOutputWithNoAttention(ModelOutput):
    """Base class for outputs of image classification models (no attentions).

    Attributes:
        logits: Classification (or regression if config.num_labels==1)
            scores of shape `(batch_size, config.num_labels)`, pre-SoftMax.
        hidden_states: Optional tuple of per-stage feature maps, each of
            shape `(batch_size, num_channels, height, width)`; returned when
            `output_hidden_states=True`.
    """
    # Fields default to None so the ModelOutput machinery can omit them.
    logits: Optional[jnp.ndarray] = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
Base class for outputs of image classification models. Args: logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage.
github-repos
def save_as(self, filename: str) -> None:
    """Save the Image to a 32-bit .bmp or .png file.

    Args:
        filename (Text): File path to save this Image to; the format is
            derived from the extension.
    """
    # libtcod expects a UTF-8 encoded byte string path.
    lib.TCOD_image_save(self.image_c, filename.encode("utf-8"))
Save the Image to a 32-bit .bmp or .png file. Args: filename (Text): File path to same this Image.
juraj-google-style
def make_all(module_name, doc_string_modules=None):
    """Generate `__all__` from `@@`-references in module docstrings.

    Usage: `make_all(__name__)` or
    `make_all(__name__, [sys.modules(__name__), other_module])`.

    Args:
        module_name: The name of the module (usually `__name__`).
        doc_string_modules: Modules whose docstrings are scanned for `@@`
            references; defaults to the module named `module_name`.

    Returns:
        A list of symbol names suitable for use as `__all__`.
    """
    module = _sys.modules[module_name]
    if doc_string_modules is None:
        doc_string_modules = [module]
    # Only symbols that currently exist in the target module qualify.
    cur_members = {name for name, _ in _tf_inspect.getmembers(module)}
    results = set()
    for doc_module in doc_string_modules:
        for match in _reference_pattern.finditer(doc_module.__doc__):
            symbol = match.group(1)
            if symbol in cur_members:
                results.add(symbol)
    return list(results)
Generates `__all__` from the docstring of one or more modules. Usage: `make_all(__name__)` or `make_all(__name__, [sys.modules(__name__), other_module])`. The doc string modules must each a docstring, and `__all__` will contain all symbols with `@@` references, where that symbol currently exists in the module named `module_name`. Args: module_name: The name of the module (usually `__name__`). doc_string_modules: a list of modules from which to take docstring. If None, then a list containing only the module named `module_name` is used. Returns: A list suitable for use as `__all__`.
github-repos
def matches(self, spec):
    """Whether the spec applies to this object.

    Args:
        spec: A function, spec string/tuple, or type to check for a match:
            * A 'type[[.group].label]' string compared against the type,
              group and label of this object ('*' acts as a wildcard).
            * A function which is given the object and returns a boolean.
            * An object type matched using isinstance.

    Returns:
        bool: Whether the spec matched this object.
    """
    if callable(spec) and not isinstance(spec, type):
        return spec(self)
    elif isinstance(spec, type):
        return isinstance(self, spec)
    specification = (self.__class__.__name__, self.group, self.label)
    split_spec = tuple(spec.split('.')) if not isinstance(spec, tuple) else spec
    # '*'/None components are wildcards excluded from comparison.
    split_spec, nocompare = zip(*((None, True) if s == '*' or s is None else (s, False) for s in split_spec))
    if all(nocompare):
        return True
    # Select only the non-wildcard components for comparison.
    match_fn = itemgetter(*(idx for idx, nc in enumerate(nocompare) if not nc))
    self_spec = match_fn(split_spec)
    unescaped_match = match_fn(specification[:len(split_spec)]) == self_spec
    if unescaped_match:
        return True
    # Fall back to comparing sanitized identifiers.
    sanitizers = [util.sanitize_identifier, util.group_sanitizer, util.label_sanitizer]
    identifier_specification = tuple(fn(ident, escape=False) for ident, fn in zip(specification, sanitizers))
    identifier_match = match_fn(identifier_specification[:len(split_spec)]) == self_spec
    return identifier_match
Whether the spec applies to this object. Args: spec: A function, spec or type to check for a match * A 'type[[.group].label]' string which is compared against the type, group and label of this object * A function which is given the object and returns a boolean. * An object type matched using isinstance. Returns: bool: Whether the spec matched this object.
juraj-google-style
def _ParseQuery(self, parser_mediator, database, query, callback, cache):
    """Queries a database and parses the results.

    Args:
        parser_mediator (ParserMediator): parser mediator.
        database (SQLiteDatabase): database.
        query (str): query.
        callback (function): function to invoke to parse an individual row.
        cache (SQLiteCache): cache.
    """
    row_cache = cache.GetRowCache(query)
    try:
        rows = database.Query(query)
    except sqlite3.DatabaseError as exception:
        parser_mediator.ProduceExtractionWarning(
            'unable to run query: {0:s} on database with error: {1!s}'.format(
                query, exception))
        return
    for index, row in enumerate(rows):
        if parser_mediator.abort:
            break
        # Skip rows already processed in an earlier pass of this query.
        row_hash = self._HashRow(row)
        if row_hash in row_cache:
            continue
        try:
            callback(parser_mediator, query, row, cache=cache, database=database)
        except Exception as exception:
            parser_mediator.ProduceExtractionWarning((
                'unable to parse row: {0:d} with callback: {1:s} on database '
                'with error: {2!s}').format(
                    index, callback.__name__, exception))
            # A failing callback aborts processing of the remaining rows.
            return
        row_cache.add(row_hash)
Queries a database and parses the results. Args: parser_mediator (ParserMediator): parser mediator. database (SQLiteDatabase): database. query (str): query. callback (function): function to invoke to parse an individual row. cache (SQLiteCache): cache.
juraj-google-style
def make_noise_surface(dims=DEFAULT_DIMS, blur=10, seed=None):
    """Make a surface by generating random noise and blurring it.

    Args:
        dims (pair): The dimensions of the surface to create.
        blur (float): The amount of Gaussian blur to apply.
        seed (int): A random seed to use (optional).

    Returns:
        surface: A surface.
    """
    if seed is not None:
        np.random.seed(seed)
    noise = np.random.normal(size=dims)
    return gaussian_filter(noise, blur)
Makes a surface by generating random noise and blurring it. Args: dims (pair): the dimensions of the surface to create blur (float): the amount of Gaussian blur to apply seed (int): a random seed to use (optional) Returns: surface: A surface.
codesearchnet
def device_function(self, op):
    """Choose a device for `op`.

    Args:
        op: an `Operation` (or a `NodeDef`).

    Returns:
        The device string to use for the `Operation`.
    """
    # If merging is disabled, honor any device already assigned to the op.
    if not self._merge_devices and op.device:
        return op.device
    current_device = pydev.DeviceSpec.from_string(op.device or '')
    # `op` may already be a NodeDef rather than an Operation.
    node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
    if self._ps_tasks and self._ps_device and (node_def.op in self._ps_ops):
        ps_device = pydev.DeviceSpec.from_string(self._ps_device)
        current_job, ps_job = (current_device.job, ps_device.job)
        # Only place on a ps task when the job assignments are compatible.
        if ps_job and (not current_job or current_job == ps_job):
            ps_device = ps_device.replace(task=self._ps_strategy(op))
            ps_device = ps_device.make_merged_spec(current_device)
            return ps_device.to_string()
    # Everything else goes to the worker device, merged with any partial
    # device specification the op already carries.
    worker_device = pydev.DeviceSpec.from_string(self._worker_device or '')
    worker_device = worker_device.make_merged_spec(current_device)
    return worker_device.to_string()
Choose a device for `op`. Args: op: an `Operation`. Returns: The device to use for the `Operation`.
github-repos
def GetValueRepresentation(cls, value, version=sorted(_SERVICE_MAP.keys())[-1]):
    """Converts a single python value to its PQL representation.

    Args:
        value: A python value (str, bool, number, datetime, date, or list).
        version: A string identifying the Ad Manager version the value
            object is compatible with; defaults to the latest known version.

    Returns:
        The value formatted for PQL statements compatible with `version`.

    Raises:
        googleads.errors.GoogleAdsValueError: For naive datetimes,
            mixed-type lists, or unrepresentable types.
    """
    # NOTE(review): the `unicode` check implies this targets Python 2; on
    # Python 3 that name is undefined -- confirm the supported runtimes.
    if isinstance(value, str) or isinstance(value, unicode):
        return {'value': value, 'xsi_type': 'TextValue'}
    elif isinstance(value, bool):
        # bool must be tested before Number since bool subclasses int.
        return {'value': value, 'xsi_type': 'BooleanValue'}
    elif isinstance(value, numbers.Number):
        return {'value': value, 'xsi_type': 'NumberValue'}
    elif isinstance(value, datetime.datetime):
        if value.tzinfo is None:
            raise googleads.errors.GoogleAdsValueError(
                'Datetime %s is not timezone aware.' % value
            )
        return {
            'xsi_type': 'DateTimeValue',
            'value': {
                'date': {
                    'year': value.year,
                    'month': value.month,
                    'day': value.day,
                },
                'hour': value.hour,
                'minute': value.minute,
                'second': value.second,
                # The key spelling changed between API versions.
                'timeZoneId' if version >= 'v201811' else 'timeZoneID':
                    value.tzinfo.zone,
            }
        }
    elif isinstance(value, datetime.date):
        return {
            'xsi_type': 'DateValue',
            'value': {
                'year': value.year,
                'month': value.month,
                'day': value.day,
            }
        }
    elif isinstance(value, list):
        # PQL sets must be homogeneous.
        if value and not all(isinstance(x, type(value[0])) for x in value):
            raise googleads.errors.GoogleAdsValueError('Cannot pass more than one '
                                                       'type in a set.')
        return {
            'xsi_type': 'SetValue',
            'values': [cls.GetValueRepresentation(v, version) for v in value]
        }
    else:
        raise googleads.errors.GoogleAdsValueError(
            'Can\'t represent unknown type: %s.' % type(value))
Converts a single python value to its PQL representation. Args: value: A python value. version: A string identifying the Ad Manager version the value object is compatible with. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. Returns: The value formatted for PQL statements which are compatible with a particular API version.
juraj-google-style
def absolute_name(self):
    """Get the absolute, colon-separated name of ``self``.

    Returns:
        str: The slugified absolute name.
    """
    slug = utils.slugify(self.name)
    if self.is_root() or self.parent.is_root():
        return slug
    return '%s:%s' % (self.parent.absolute_name, slug)
Get the absolute name of ``self``. Returns: str: the absolute name.
codesearchnet
def get_all_pattern_variables(self, patternnumber):
    """Get all variables for a given pattern at one time.

    Args:
        patternnumber (integer): 0-7

    Returns:
        A descriptive multiline string.
    """
    _checkPatternNumber(patternnumber)
    parts = []
    for stepnumber in range(8):
        parts.append('SP{0}: {1} Time{0}: {2}\n'.format(
            stepnumber,
            self.get_pattern_step_setpoint(patternnumber, stepnumber),
            self.get_pattern_step_time(patternnumber, stepnumber)))
    parts.append('Actual step: {0}\n'.format(
        self.get_pattern_actual_step(patternnumber)))
    parts.append('Additional cycles: {0}\n'.format(
        self.get_pattern_additional_cycles(patternnumber)))
    parts.append('Linked pattern: {0}\n'.format(
        self.get_pattern_link_topattern(patternnumber)))
    return ''.join(parts)
Get all variables for a given pattern at one time. Args: patternnumber (integer): 0-7 Returns: A descriptive multiline string.
juraj-google-style
def _step(time, output_ta_t, prev_output, *states):
    """RNN step function (masked variant).

    Args:
        time: Current timestep value.
        output_ta_t: Tuple of TensorArrays holding outputs so far.
        prev_output: Tuple of outputs from time - 1 (reused where masked).
        *states: List of state tensors.

    Returns:
        Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
    """
    current_input = tuple((ta.read(time) for ta in input_ta))
    current_input = tree.pack_sequence_as(inputs, current_input)
    mask_t = masking_fn(time)
    output, new_states = step_function(current_input, tuple(states) + tuple(constants))
    flat_output = tree.flatten(output)
    # Where masked, either emit zeros or carry the previous output forward.
    flat_mask_output = flat_zero_output if zero_output_for_mask else tree.flatten(prev_output)
    flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)
    # Masked timesteps also retain their previous state.
    flat_state = tree.flatten(states)
    flat_new_state = tree.flatten(new_states)
    flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)
    new_states = tree.pack_sequence_as(new_states, flat_final_state)
    # When only the last output is wanted, keep overwriting index 0.
    ta_index_to_write = time if return_all_outputs else 0
    output_ta_t = tuple((ta.write(ta_index_to_write, out) for ta, out in zip(output_ta_t, flat_new_output)))
    return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)
RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. prev_output: tuple of outputs from time - 1. *states: List of states. Returns: Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
github-repos
def exit_code(self, code):
    """Set the App exit code.

    For TC Exchange Apps there are 3 supported exit codes:
    * 0 indicates a normal exit
    * 1 indicates a failure during execution
    * 3 indicates a partial failure

    Args:
        code (integer): The exit code value for the app.
    """
    if code in (0, 1, 3):
        self._exit_code = code
    else:
        # Unsupported codes (including None) are ignored with a warning.
        self.log.warning(u'Invalid exit code')
Set the App exit code. For TC Exchange Apps there are 3 supported exit codes. * 0 indicates a normal exit * 1 indicates a failure during execution * 3 indicates a partial failure Args: code (integer): The exit code value for the app.
juraj-google-style
def function_from_graph_def(graph_def, inputs, outputs, captures=None):
    """Creates a ConcreteFunction from a GraphDef.

    Args:
        graph_def: A GraphDef to make a function out of.
        inputs: A Tensor name or nested structure of names in `graph_def`
            which should be inputs to the function.
        outputs: A Tensor name or nested structure of names in `graph_def`
            which should be outputs of the function.
        captures: (Optional) A dictionary mapping node names in `graph_def`
            that should be captured as inputs to tensors containing the value
            of the captured inputs.

    Returns:
        A ConcreteFunction.
    """
    def _imports_graph_def():
        importer.import_graph_def(graph_def, name='')
        graph = ops.get_default_graph()
        if captures is not None:
            # Register each requested node's tensor as a captured input.
            for c in captures:
                graph.add_capture(captures[c], graph.get_tensor_by_name(str(c) + ':0'))
    wrapped_import = wrap_function(_imports_graph_def, [])
    import_graph = wrapped_import.graph
    # Prune the imported graph down to the requested inputs/outputs.
    return wrapped_import.prune(nest.map_structure(import_graph.as_graph_element, inputs), nest.map_structure(import_graph.as_graph_element, outputs))
Creates a ConcreteFunction from a GraphDef. Args: graph_def: A GraphDef to make a function out of. inputs: A Tensor name or nested structure of names in `graph_def` which should be inputs to the function. outputs: A Tensor name or nested structure of names in `graph_def` which should be outputs of the function. captures: (Optional) A dictionary mapping node names in `graph_def` that should be captured as inputs to tensors containing the value of the captured inputs. Returns: A ConcreteFunction.
github-repos
def init_logger(name='', handler_path_levels=None, level=logging.INFO,
                formatter=None, formatter_str=None,
                datefmt='%Y-%m-%d %H:%M:%S'):
    """Create (or reconfigure) a logger and attach handlers.

    Args:
        name: A logger name, or an existing ``logging.Logger`` instance.
        handler_path_levels: List of ``[path, level]`` pairs; an empty path
            creates a ``StreamHandler``, otherwise a ``FileHandler`` for the
            given path. Level may be a name ('DEBUG') or an int.
            Defaults to ``[['', 'INFO']]``.
        level: The least level for the logger itself.
        formatter: A ready ``logging.Formatter``; overrides ``formatter_str``.
        formatter_str: Format string used to build a formatter when
            ``formatter`` is not given; falls back to a default format.
        datefmt: Date format passed to the formatter.

    Returns:
        The configured ``logging.Logger``.
    """
    levels = {'NOTSET': logging.NOTSET, 'DEBUG': logging.DEBUG,
              'INFO': logging.INFO, 'WARNING': logging.WARNING,
              'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}
    if not formatter:
        # BUGFIX: removed the pointless `formatter_str = formatter_str`
        # self-assignment branch.
        if not formatter_str:
            formatter_str = ('%(asctime)s %(levelname)-5s [%(name)s] '
                             '%(filename)s(%(lineno)s): %(message)s')
        formatter = logging.Formatter(formatter_str, datefmt=datefmt)
    logger = name if isinstance(name, logging.Logger) else logging.getLogger(str(name))
    logger.setLevel(level)
    for path, handler_level in (handler_path_levels or [['', 'INFO']]):
        handler = logging.FileHandler(path) if path else logging.StreamHandler()
        if isinstance(handler_level, str):
            # Unknown level names fall back to 1 (just above NOTSET),
            # matching the original behavior.
            handler_level = levels.get(handler_level.upper(), 1)
        handler.setLevel(handler_level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
Add a default handler for logger. Args: name = '' or logger obj. handler_path_levels = [['loggerfile.log',13],['','DEBUG'],['','info'],['','notSet']] # [[path,level]] level = the least level for the logger. formatter = logging.Formatter( '%(levelname)-7s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s', "%Y-%m-%d %H:%M:%S") formatter_str = '%(levelname)-7s %(asctime)s %(name)s (%(funcName)s: %(lineno)s): %(message)s' custom formatter: %(asctime)s %(created)f %(filename)s %(funcName)s %(levelname)s %(levelno)s %(lineno)s %(message)s %(module)s %(name)s %(pathname)s %(process)s %(relativeCreated)s %(thread)s %(threadName)s
codesearchnet
def from_config(config_file, use_admin=False):
    """Initialize a QueryEngine from a JSON config file generated
    using mgdb init.

    Args:
        config_file: Filename of the JSON config file.
        use_admin: If True, the admin user and password in the config file
            are used; otherwise the readonly user and password are used.
            Defaults to False.

    Returns:
        QueryEngine
    """
    with open(config_file) as f:
        cfg = json.load(f)
    role = 'admin' if use_admin else 'readonly'
    return QueryEngine(host=cfg['host'],
                       port=cfg['port'],
                       database=cfg['database'],
                       user=cfg[role + '_user'],
                       password=cfg[role + '_password'],
                       collection=cfg['collection'],
                       aliases_config=cfg.get('aliases_config', None))
Initialize a QueryEngine from a JSON config file generated using mgdb init. Args: config_file: Filename of config file. use_admin: If True, the admin user and password in the config file is used. Otherwise, the readonly_user and password is used. Defaults to False. Returns: QueryEngine
codesearchnet
def split(self, n):
    """Evenly split this range into contiguous, non-overlapping subranges.

    Args:
        n: number of splits.

    Returns:
        A list of contiguous, non-overlapping sub PropertyRanges. May be
        fewer than n when there are not enough split points.
    """
    new_range_filters = []
    name = self.start[0]
    prop_cls = self.prop.__class__
    if (prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS):
        # Discrete properties: split points already account for inclusive
        # boundaries, so consecutive [>=, <) pairs cover the whole range.
        splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](self.start[2], self.end[2], n, (self.start[1] == '>='), (self.end[1] == '<='))
        start_filter = (name, '>=', splitpoints[0])
        for p in splitpoints[1:]:
            end_filter = (name, '<', p)
            new_range_filters.append([start_filter, end_filter])
            start_filter = (name, '>=', p)
    else:
        # Continuous properties: the last subrange keeps the original
        # (possibly inclusive) end filter.
        splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](self.start[2], self.end[2], n)
        start_filter = self.start
        for p in splitpoints:
            end_filter = (name, '<', p)
            new_range_filters.append([start_filter, end_filter])
            start_filter = (name, '>=', p)
        new_range_filters.append([start_filter, self.end])
    # Every subrange inherits the equality filters of the parent range.
    for f in new_range_filters:
        f.extend(self._equality_filters)
    return [self.__class__(f, self.model_class_path) for f in new_range_filters]
Evenly split this range into contiguous, non overlapping subranges. Args: n: number of splits. Returns: a list of contiguous, non overlapping sub PropertyRanges. Maybe less than n when not enough subranges.
codesearchnet
def least_loaded_node(self):
    """Choose the node with fewest outstanding requests, with fallbacks.

    Prefers a connected node with no in-flight requests. Failing that,
    picks the non-blacked-out node with the fewest in-flight requests
    (never-connected nodes count as zero in-flight).

    Returns:
        node_id or None if no suitable node was found.
    """
    nodes = [broker.nodeId for broker in self.cluster.brokers()]
    # Shuffle so ties are broken randomly rather than always picking the
    # same node.
    random.shuffle(nodes)
    inflight = float('inf')
    found = None
    for node_id in nodes:
        conn = self._conns.get(node_id)
        connected = ((conn is not None) and conn.connected())
        blacked_out = ((conn is not None) and conn.blacked_out())
        curr_inflight = (len(conn.in_flight_requests) if (conn is not None) else 0)
        if (connected and (curr_inflight == 0)):
            # Ideal case: connected and idle -- use it immediately.
            return node_id
        elif ((not blacked_out) and (curr_inflight < inflight)):
            inflight = curr_inflight
            found = node_id
    if (found is not None):
        return found
    return None
Choose the node with fewest outstanding requests, with fallbacks. This method will prefer a node with an existing connection and no in-flight-requests. If no such node is found, a node will be chosen randomly from disconnected nodes that are not "blacked out" (i.e., are not subject to a reconnect backoff). If no node metadata has been obtained, will return a bootstrap node (subject to exponential backoff). Returns: node_id or None if no suitable node was found
codesearchnet
def recursive_create_dir(dirname):
    """Creates a directory and all parent/intermediate directories.

    It succeeds if dirname already exists and is writable.

    Args:
        dirname: string, name of the directory to be created.

    Raises:
        errors.OpError: If the operation fails.
    """
    # Thin compatibility wrapper around the v2 API.
    recursive_create_dir_v2(dirname)
Creates a directory and all parent/intermediate directories. It succeeds if dirname already exists and is writable. Args: dirname: string, name of the directory to be created Raises: errors.OpError: If the operation fails.
github-repos
def populate(self, filename):
    """Find the file stats and populate the instance with them.

    No attributes are changed when ``filename`` is not a regular file.

    Args:
        filename (str): Name of the file.
    """
    if not os.path.isfile(filename):
        return
    stats = os.stat(filename)
    self.name = os.path.abspath(filename)
    self.full_name = filename
    self.size = stats.st_size
    self.last_modified = stats.st_mtime
    self.last_accessed = stats.st_atime
    self.last_info_changed = stats.st_ctime
    self.location = os.path.dirname(filename)
Finds the file-stats and populates the class with stat values. Args: filename (str): name of the file.
codesearchnet
def set(self, x: int, y: int, back_r: int, back_g: int, back_b: int, fore_r: int, fore_g: int, fore_b: int, char: str) -> None:
    """Set the background color, foreground color and character of one cell.

    Args:
        x (int): X position to change.
        y (int): Y position to change.
        back_r (int): Red background color, from 0 to 255.
        back_g (int): Green background color, from 0 to 255.
        back_b (int): Blue background color, from 0 to 255.
        fore_r (int): Red foreground color, from 0 to 255.
        fore_g (int): Green foreground color, from 0 to 255.
        fore_b (int): Blue foreground color, from 0 to 255.
        char (AnyStr): A single character str or bytes object.
    """
    # Cells are stored row-major in flat per-channel arrays.
    idx = y * self.width + x
    self.back_r[idx], self.back_g[idx], self.back_b[idx] = back_r, back_g, back_b
    self.fore_r[idx], self.fore_g[idx], self.fore_b[idx] = fore_r, fore_g, fore_b
    self.char[idx] = ord(char)
Set the background color, foreground color and character of one cell. Args: x (int): X position to change. y (int): Y position to change. back_r (int): Red background color, from 0 to 255. back_g (int): Green background color, from 0 to 255. back_b (int): Blue background color, from 0 to 255. fore_r (int): Red foreground color, from 0 to 255. fore_g (int): Green foreground color, from 0 to 255. fore_b (int): Blue foreground color, from 0 to 255. char (AnyStr): A single character str or bytes object.
codesearchnet
def compute_metrics(self, previous):
    """Compute the metrics of this point.

    Computes and updates the dt, dx, vel and acc attributes relative to
    the point before.

    Args:
        previous (:obj:`Point`): Point before.

    Returns:
        :obj:`Point`: Self.
    """
    dt = self.time_difference(previous)
    dx = self.distance(previous)
    if dt != 0:
        vel = dx / dt
        acc = (vel - previous.vel) / dt
    else:
        # Zero elapsed time: velocity and acceleration are undefined, use 0.
        vel = 0
        acc = 0
    self.dt = dt
    self.dx = dx
    self.acc = acc
    self.vel = vel
    return self
Computes the metrics of this point Computes and updates the dt, vel and acc attributes. Args: previous (:obj:`Point`): Point before Returns: :obj:`Point`: Self
juraj-google-style
def _get_state_cache_size_bytes(options):
    """Return the maximum size of the state cache in bytes.

    Args:
        options: PipelineOptions carrying WorkerOptions/DebugOptions.

    Returns:
        An int indicating the maximum number of bytes to cache.
    """
    max_cache_memory_usage_mb = options.view_as(WorkerOptions).max_cache_memory_usage_mb
    experiments = options.view_as(DebugOptions).experiments or []
    for experiment in experiments:
        # Legacy flag: --experiments=state_cache_size=X takes precedence
        # over the newer --max_cache_memory_usage_mb option.
        if re.match('state_cache_size=', experiment):
            _LOGGER.warning('--experiments=state_cache_size=X is deprecated and will be removed in future releases.Please use --max_cache_memory_usage_mb=X to set the cache size for user state API and side inputs.')
            return int(re.match('state_cache_size=(?P<state_cache_size>.*)', experiment).group('state_cache_size')) << 20
    # Convert MB to bytes.
    return max_cache_memory_usage_mb << 20
Return the maximum size of state cache in bytes. Returns: an int indicating the maximum number of bytes to cache.
github-repos
def set_memcache_policy(self, func):
    """Set the memcache policy function.

    Args:
        func: A function that accepts a Key instance as argument and
            returns a bool indicating if it should be cached; a bool to
            always/never cache; or None to use the default policy.
    """
    if func is None:
        func = self.default_memcache_policy
    elif isinstance(func, bool):
        # Freeze the flag in a closure so the policy ignores the key.
        flag = func
        func = lambda unused_key: flag
    self._memcache_policy = func
Set the memcache policy function. Args: func: A function that accepts a Key instance as argument and returns a bool indicating if it should be cached. May be None.
juraj-google-style
def group_structures(self, s_list, anonymous=False):
    """Given a list of structures, use fit to group them by structural
    equality.

    Args:
        s_list ([Structure]): List of structures to be grouped.
        anonymous (bool): Whether to use anonymous mode.

    Returns:
        A list of lists of matched structures.

    Assumption: if s1 == s2 but s1 != s3, then s2 and s3 will be put in
    different groups without comparison.
    """
    if self._subset:
        raise ValueError("allow_subset cannot be used with"
                         " group_structures")
    original_s_list = list(s_list)
    s_list = self._process_species(s_list)
    # Hash by (possibly anonymized) composition so only plausible matches
    # are ever compared with the expensive fit routine.
    if anonymous:
        c_hash = lambda c: c.anonymized_formula
    else:
        c_hash = self._comparator.get_hash
    s_hash = lambda s: c_hash(s[1].composition)
    sorted_s_list = sorted(enumerate(s_list), key=s_hash)
    all_groups = []
    # Within each composition bucket, greedily grow a group around a
    # reference structure, removing the matches from the pool.
    for k, g in itertools.groupby(sorted_s_list, key=s_hash):
        unmatched = list(g)
        while len(unmatched) > 0:
            i, refs = unmatched.pop(0)
            matches = [i]
            if anonymous:
                inds = filter(lambda i: self.fit_anonymous(refs, unmatched[i][1]),
                              list(range(len(unmatched))))
            else:
                inds = filter(lambda i: self.fit(refs, unmatched[i][1]),
                              list(range(len(unmatched))))
            inds = list(inds)
            matches.extend([unmatched[i][0] for i in inds])
            unmatched = [unmatched[i] for i in range(len(unmatched))
                         if i not in inds]
            all_groups.append([original_s_list[i] for i in matches])
    return all_groups
Given a list of structures, use fit to group them by structural equality.

Args:
    s_list ([Structure]): List of structures to be grouped
    anonymous (bool): Whether to use anonymous mode.

Returns:
    A list of lists of matched structures
    Assumption: if s1 == s2 but s1 != s3, then s2 and s3 will be put in different groups without comparison.
juraj-google-style
def _assert_not_running(self):
    """Asserts the logcat service is not running.

    Raises:
        Error: If the logcat service is already running.
    """
    if self.is_alive:
        raise Error(self._ad, 'Logcat thread is already running, cannot start another one.')
Asserts the logcat service is not running. Raises: Error, if the logcat service is running.
github-repos
def cmd_ssh_user(tar_aminame, inst_name):
    """Calculate the instance login-username based on the image name.

    Args:
        tar_aminame (str): Name of the image the instance was created with.
        inst_name (str): Name of the instance; used as a fallback when the
            image name is 'Unknown'.

    Returns:
        username (str): Login name for ssh based on the AMI name.
    """
    if tar_aminame == 'Unknown':
        tar_aminame = inst_name
    # Map distro-name fragments to their default login users.
    userlu = {'ubunt': 'ubuntu', 'debia': 'admin', 'fedor': 'root',
              'cento': 'centos', 'openb': 'root'}
    # First matching fragment wins; fall back to the Amazon Linux default.
    # (Replaces the original's fragile zip-into-dict construction, which
    # built nonsense pairs whenever two fragments matched.)
    username = next((value for key, value in userlu.items()
                     if key in tar_aminame.lower()), 'ec2-user')
    debg.dprint('loginuser Calculated: ', username)
    return username
Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name.
codesearchnet
def decrease_exponent_to(self, new_exp):
    """Return an EncryptedNumber with the same value but a lower exponent.

    Multiplying the encoded value by :attr:`EncodedNumber.BASE` while
    decrementing :attr:`exponent` leaves the decoded value unchanged, so
    the exponent can be ratcheted down -- useful for aligning exponents
    before addition, or for hiding precision information. The encoded
    integer may overflow without warning if pushed too far.

    Args:
        new_exp (int): The desired exponent; must not exceed the current one.

    Returns:
        EncryptedNumber: Instance with the same plaintext and desired
        exponent.

    Raises:
        ValueError: You tried to increase the exponent.
    """
    if new_exp > self.exponent:
        raise ValueError(('New exponent %i should be more negative than old exponent %i' % (new_exp, self.exponent)))
    factor = pow(EncodedNumber.BASE, self.exponent - new_exp)
    result = self * factor
    result.exponent = new_exp
    return result
Return an EncryptedNumber with same value but lower exponent. If we multiply the encoded value by :attr:`EncodedNumber.BASE` and decrement :attr:`exponent`, then the decoded value does not change. Thus we can almost arbitrarily ratchet down the exponent of an `EncryptedNumber` - we only run into trouble when the encoded integer overflows. There may not be a warning if this happens. When adding `EncryptedNumber` instances, their exponents must match. This method is also useful for hiding information about the precision of numbers - e.g. a protocol can fix the exponent of all transmitted `EncryptedNumber` instances to some lower bound(s). Args: new_exp (int): the desired exponent. Returns: EncryptedNumber: Instance with the same plaintext and desired exponent. Raises: ValueError: You tried to increase the exponent.
codesearchnet
def _evolve(self, state, qargs=None):
    """Evolve a quantum state by the QuantumChannel.

    Args:
        state (QuantumState): The input statevector or density matrix.
        qargs (list): a list of QuantumState subsystem positions to apply
            the operator on.

    Returns:
        DensityMatrix: the output quantum state as a density matrix.

    Raises:
        QiskitError: if the operator dimension does not match the specified
            QuantumState subsystem dimensions.
    """
    state = self._format_state(state, density_matrix=True)
    if qargs is None:
        if state.shape[0] != self._input_dim:
            raise QiskitError(
                "QuantumChannel input dimension is not equal to state dimension."
            )
        # Apply the superoperator matrix to the column-vectorized density
        # matrix, then reshape back (column-major to match vectorization).
        shape_in = self._input_dim * self._input_dim
        shape_out = (self._output_dim, self._output_dim)
        return np.reshape(
            np.dot(self._data, np.reshape(state, shape_in, order='F')),
            shape_out, order='F')
    # Subsystem evolution is delegated to the helper.
    return self._evolve_subsystem(state, qargs)
Evolve a quantum state by the QuantumChannel. Args: state (QuantumState): The input statevector or density matrix. qargs (list): a list of QuantumState subsystem positions to apply the operator on. Returns: DensityMatrix: the output quantum state as a density matrix. Raises: QiskitError: if the operator dimension does not match the specified QuantumState subsystem dimensions.
juraj-google-style
def deserialize_feature_column(config, custom_objects=None, columns_by_name=None):
    """Deserializes a `config` generated with `serialize_feature_column`.

    This method should only be used to deserialize parent FeatureColumns when
    implementing FeatureColumn.from_config(), else
    deserialize_feature_columns() is preferable. Returns a FeatureColumn for
    this config.

    Args:
        config: A Dict with the serialization of feature columns acquired by
            `serialize_feature_column`, or a string representing a raw column.
        custom_objects: A Dict from custom_object name to the associated
            keras serializable objects (FeatureColumns, classes or functions).
        columns_by_name: A Dict[String, FeatureColumn] of existing columns in
            order to avoid duplication.

    Raises:
        ValueError: if `config` has invalid format (e.g: expected keys
            missing, or refers to unknown classes).

    Returns:
        A FeatureColumn corresponding to the input `config`.
    """
    # Raw (string) columns deserialize to themselves.
    if isinstance(config, six.string_types):
        return config
    module_feature_column_classes = {cls.__name__: cls for cls in _FEATURE_COLUMNS}
    if columns_by_name is None:
        columns_by_name = {}
    cls, cls_config = _class_and_config_for_serialized_keras_object(config, module_objects=module_feature_column_classes, custom_objects=custom_objects, printable_module_name='feature_column_v2')
    if not issubclass(cls, fc_types.FeatureColumn):
        raise ValueError('Expected FeatureColumn class, instead found: {}'.format(cls))
    new_instance = cls.from_config(cls_config, custom_objects=custom_objects, columns_by_name=columns_by_name)
    # Deduplicate: reuse an existing identical column when one was already
    # deserialized under the same name.
    return columns_by_name.setdefault(_column_name_with_class_name(new_instance), new_instance)
Deserializes a `config` generated with `serialize_feature_column`. This method should only be used to deserialize parent FeatureColumns when implementing FeatureColumn.from_config(), else deserialize_feature_columns() is preferable. Returns a FeatureColumn for this config. Args: config: A Dict with the serialization of feature columns acquired by `serialize_feature_column`, or a string representing a raw column. custom_objects: A Dict from custom_object name to the associated keras serializable objects (FeatureColumns, classes or functions). columns_by_name: A Dict[String, FeatureColumn] of existing columns in order to avoid duplication. Raises: ValueError if `config` has invalid format (e.g: expected keys missing, or refers to unknown classes). Returns: A FeatureColumn corresponding to the input `config`.
github-repos
def UsageText(component, trace=None, verbose=False):
    """Returns usage text for the given component.

    Args:
        component: The component to determine the usage text for.
        trace: The Fire trace object containing all metadata of current
            execution.
        verbose: Whether to display the usage text in verbose mode.

    Returns:
        String suitable for display in an error screen.
    """
    if trace:
        command = trace.GetCommand()
        needs_separating_hyphen_hyphen = trace.NeedsSeparatingHyphenHyphen()
    else:
        command = None
        needs_separating_hyphen_hyphen = False
    if not command:
        command = ''
    continued_command = command

    spec = inspectutils.GetFullArgSpec(component)
    metadata = decorators.GetMetadata(component)

    actions_grouped_by_kind = _GetActionsGroupedByKind(component, verbose=verbose)
    possible_actions = _GetPossibleActions(actions_grouped_by_kind)

    continuations = []
    if possible_actions:
        continuations.append(_GetPossibleActionsUsageString(possible_actions))

    availability_lines = _UsageAvailabilityLines(actions_grouped_by_kind)

    if callable(component):
        callable_items = _GetCallableUsageItems(spec, metadata)
        if callable_items:
            continuations.append(' '.join(callable_items))
        elif trace:
            continuations.append(trace.separator)
        availability_lines.extend(_GetCallableAvailabilityLines(spec))

    if continuations:
        continued_command += ' ' + ' | '.join(continuations)

    help_command = command + (' -- ' if needs_separating_hyphen_hyphen else ' ') + '--help'
    # BUGFIX: hoisted out of the f-string -- a same-quote nested expression
    # like f'...{''.join(xs)}...' only parses on Python >= 3.12 (PEP 701).
    availability = ''.join(availability_lines)
    return (f'Usage: {continued_command}\n'
            f'{availability}\n'
            'For detailed information on this command, run:\n'
            f'  {help_command}')
Returns usage text for the given component. Args: component: The component to determine the usage text for. trace: The Fire trace object containing all metadata of current execution. verbose: Whether to display the usage text in verbose mode. Returns: String suitable for display in an error screen.
github-repos
def from_string(cls, s, name=None, modules=None, active=None):
    """Instantiate a REPP from a string.

    Args:
        s: The REPP definition as a string.
        name (str, optional): The name of the REPP module.
        modules (dict, optional): A mapping from identifiers to REPP modules.
        active (iterable, optional): An iterable of default module
            activations.

    Returns:
        The populated REPP instance.
    """
    repp = cls(name=name, modules=modules, active=active)
    _parse_repp(s.splitlines(), repp, None)
    return repp
Instantiate a REPP from a string. Args: name (str, optional): the name of the REPP module modules (dict, optional): a mapping from identifiers to REPP modules active (iterable, optional): an iterable of default module activations
codesearchnet
def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value):
    """Write a 2D array to an ESRI ASCII grid (.asc) raster file.

    Args:
        filename: output ASCII filename.
        data: 2D array data, indexed as data[row][col].
        xsize: column count (NCOLS).
        ysize: row count (NROWS).
        geotransform: GDAL-style geotransform; [0]/[3] give the top-left
            origin and [1] the cell size (square cells assumed).
        nodata_value: NODATA value recorded in the header.
    """
    # Make sure the destination directory exists before opening the file.
    UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename)))
    header = ('NCOLS %d\n'
              'NROWS %d\n'
              'XLLCENTER %f\n'
              'YLLCENTER %f\n'
              'CELLSIZE %f\n'
              'NODATA_VALUE %f' % (xsize, ysize,
                                   geotransform[0] + 0.5 * geotransform[1],
                                   geotransform[3] - (ysize - 0.5) * geotransform[1],
                                   geotransform[1],
                                   nodata_value))
    with open(filename, 'w', encoding='utf-8') as f:
        # BUG FIX: the original wrote the first data row on the same line as
        # NODATA_VALUE; the ESRI ASCII grid format requires a newline between
        # the header and the data section.
        f.write(header + '\n')
        for row in range(ysize):
            # Preserve the original cell format: repr(value) followed by a tab.
            f.write(''.join('%s\t' % repr(value) for value in data[row]))
            f.write('\n')
        # NOTE: the redundant f.close() inside the with-block was removed;
        # the context manager already closes the file.
Output Raster to ASCII file. Args: filename: output ASCII filename. data: 2D array data. xsize: Col count. ysize: Row count. geotransform: geographic transformation. nodata_value: nodata value.
juraj-google-style
def clip_and_copy_attack_outputs(self, attack_name, is_targeted):
    """Clips one attack's output images to per-image bounds and pools them.

    Reads every .png/.jpg produced by the attack, clips each image to the
    precomputed [min, max] clipping arrays for its dataset image, renumbers
    it with a sequential 8-digit basename (hiding which attack produced it),
    records the mapping, and saves the clipped PNG into the shared pool
    directory.

    Args:
        attack_name: name of the attack.
        is_targeted: if True then attack is targeted, otherwise non-targeted.
    """
    # Remember that this attack has been processed, per attack type.
    if is_targeted:
        self._targeted_attack_names.add(attack_name)
    else:
        self._attack_names.add(attack_name)
    attack_dir = os.path.join((self.targeted_attacks_output_dir if is_targeted else self.attacks_output_dir), attack_name)
    for fname in os.listdir(attack_dir):
        # Only PNG/JPG files are treated as adversarial images.
        if (not (fname.endswith('.png') or fname.endswith('.jpg'))):
            continue
        # Strip the 4-char extension to recover the dataset image id.
        image_id = fname[:(- 4)]
        # Skip images with no clipping bounds (not part of the dataset).
        if (image_id not in self.dataset_max_clip):
            continue
        image_max_clip = self.dataset_max_clip[image_id]
        image_min_clip = self.dataset_min_clip[image_id]
        adversarial_image = np.array(Image.open(os.path.join(attack_dir, fname)).convert('RGB'))
        # Enforce the per-image perturbation bounds element-wise.
        clipped_adv_image = np.clip(adversarial_image, image_min_clip, image_max_clip)
        # Sequential zero-padded basename for the pooled copy.
        output_basename = '{0:08d}'.format(self._output_image_idx)
        self._output_image_idx += 1
        self._output_to_attack_mapping[output_basename] = (attack_name, is_targeted, image_id)
        if is_targeted:
            self._targeted_attack_image_count += 1
        else:
            self._attack_image_count += 1
        Image.fromarray(clipped_adv_image).save(os.path.join(self.all_adv_examples_dir, (output_basename + '.png')))
Clips results of attack and copy it to directory with all images. Args: attack_name: name of the attack. is_targeted: if True then attack is targeted, otherwise non-targeted.
codesearchnet
def get_servo_temperature(self):
    """Read the raw temperature register of the Herkulex servo.

    Sends a RAM_READ request for the TEMPERATURE_RAM register over the
    shared serial port and extracts the register byte from the reply.

    Returns:
        int: the raw temperature register value (byte 9 of the reply).

    Raises:
        HerkulexError: if the serial exchange fails.
    """
    # Assemble the RAM_READ request packet: size, servo id, command,
    # register address, and byte count.
    data = []
    data.append(9)
    data.append(self.servoid)
    data.append(RAM_READ_REQ)
    data.append(TEMPERATURE_RAM)
    data.append(BYTE2)
    send_data(data)
    rxdata = []
    try:
        # Reply is a fixed 13-byte packet; the register value is at index 9.
        rxdata = SERPORT.read(13)
        # NOTE(review): ord() on an indexed read result is Python-2 style;
        # on Python 3, bytes indexing already yields an int and ord() would
        # raise TypeError — confirm the target interpreter.
        return ord(rxdata[9])
    except HerkulexError:
        # NOTE(review): SERPORT.read more plausibly raises a serial-port
        # exception (the docstring mentions SerialException) than
        # HerkulexError — verify this handler actually fires.
        raise HerkulexError('Could not communicate with motors')
Gets the current temperature of Herkulex Args: none Returns: int: the current temperature register of Herkulex Raises: SerialException: Error occurred while opening serial port
codesearchnet
def max_validator(max_value):
    """Build a validator enforcing an inclusive upper bound on a number.

    The returned function raises ValidationError when the checked value
    exceeds ``max_value`` and returns None otherwise.

    Args:
        max_value: maximum value for new validator.
    """
    def validator(value):
        # Values up to and including the bound are accepted silently.
        if value <= max_value:
            return
        raise ValidationError('{} is not <= {}'.format(value, max_value))
    return validator
Return validator function that ensures upper bound of a number. Result validation function will validate the internal value of resource instance field with the ``value >= min_value`` check. Args: max_value: maximum value for new validator
codesearchnet
def _GetAttributeScripts(self, attribute_data, dest_dir):
    """Extract metadata scripts from attribute data into local files.

    Looks for both an inline '<type>-script' value (written to a temp file
    in dest_dir) and a '<type>-script-url' value (downloaded to dest_dir).

    Args:
        attribute_data: dict, the contents of the attributes metadata.
        dest_dir: string, the path to a directory for storing metadata
            scripts.

    Returns:
        dict, mapping each metadata key found to the file holding its script.
    """
    attribute_data = attribute_data or {}
    found_scripts = {}

    inline_key = '%s-script' % self.script_type
    inline_value = attribute_data.get(inline_key)
    if inline_value:
        self.logger.info('Found %s in metadata.', inline_key)
        # Persist the inline script body; keep the file after close so the
        # caller can execute it.
        with tempfile.NamedTemporaryFile(mode='w', dir=dest_dir, delete=False) as dest:
            dest.write(inline_value.lstrip())
        found_scripts[inline_key] = dest.name

    url_key = '%s-script-url' % self.script_type
    url_value = attribute_data.get(url_key)
    if url_value:
        self.logger.info('Found %s in metadata.', url_key)
        found_scripts[url_key] = self._DownloadScript(url_value, dest_dir)

    return found_scripts
Retrieve the scripts from attribute metadata. Args: attribute_data: dict, the contents of the attributes metadata. dest_dir: string, the path to a directory for storing metadata scripts. Returns: dict, a dictionary mapping metadata keys to files storing scripts.
codesearchnet
def to(self, new_unit):
    """Convert this quantity into *new_unit*.

    Only 1-to-1 mappings between units of the same type are supported.

    Args:
        new_unit: New unit type.

    Returns:
        A FloatWithUnit object expressed in the new units.

    Example usage:
        >>> e = Energy(1.1, "eV")
        >>> e = Energy(1.1, "Ha")
        >>> e.to("eV")
        29.932522246 eV
    """
    factor = self.unit.get_conversion_factor(new_unit)
    converted_value = self * factor
    return FloatWithUnit(converted_value, unit_type=self._unit_type, unit=new_unit)
Conversion to a new_unit. Right now, only supports 1 to 1 mapping of units of each type. Args: new_unit: New unit type. Returns: A FloatWithUnit object in the new units. Example usage: >>> e = Energy(1.1, "eV") >>> e = Energy(1.1, "Ha") >>> e.to("eV") 29.932522246 eV
codesearchnet
def get_sparsity_modes(model_object):
    """Get sparsity modes used in a tflite model.

    The sparsity modes are listed in conversion_metadata.fbs file.

    Args:
        model_object: A tflite model in object form.

    Returns:
        The list of sparsity modes used in the model.
    """
    if not model_object or not model_object.metadata:
        return []
    result = set()
    for subgraph in model_object.subgraphs:
        for tensor in subgraph.tensors:
            if not tensor.sparsity:
                continue
            # BUG FIX: the original evaluated `blockMap.size` before checking
            # for a missing blockMap, raising AttributeError when blockMap is
            # None. Check for absence first, then emptiness.
            block_map = tensor.sparsity.blockMap
            if block_map is None or block_map.size == 0:
                result.add(conversion_metadata_fb.ModelOptimizationMode.RANDOM_SPARSITY)
            else:
                result.add(conversion_metadata_fb.ModelOptimizationMode.BLOCK_SPARSITY)
    return list(result)
Get sparsity modes used in a tflite model. The sparsity modes are listed in conversion_metadata.fbs file. Args: model_object: A tflite model in object form. Returns: The list of sparsity modes used in the model.
github-repos
def write(self, x: int, y: int, text: str, transposed_text: 'Optional[str]'=None):
    """Append text at the given diagram position, accumulating with any
    text already written there.

    Args:
        x: The column in which to write the text.
        y: The row in which to write the text.
        text: The text to write at location (x, y).
        transposed_text: Optional text to write instead, if the text diagram
            is transposed.
    """
    previous = self.entries.get((x, y), _DiagramText('', ''))
    # Falsy transposed_text (None or '') falls back to the plain text,
    # matching the original conditional-expression semantics.
    transposed_addition = transposed_text or text
    self.entries[(x, y)] = _DiagramText(
        previous.text + text,
        previous.transposed_text + transposed_addition,
    )
Adds text to the given location. Args: x: The column in which to write the text. y: The row in which to write the text. text: The text to write at location (x, y). transposed_text: Optional text to write instead, if the text diagram is transposed.
codesearchnet
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Windows Registry file-like object.

    Opens the file as a Windows Registry hive, maps it to its well-known
    key path prefix, then either recurses all keys or — when artifact
    filters are configured — parses only the keys matching the find specs.

    Args:
        parser_mediator (ParserMediator): parser mediator.
        file_object (dfvfs.FileIO): a file-like object.
    """
    win_registry_reader = FileObjectWinRegistryFileReader()
    try:
        registry_file = win_registry_reader.Open(file_object)
    except IOError as exception:
        # Open failures are surfaced as extraction warnings, not raised, so
        # one unreadable file does not abort the overall extraction.
        parser_mediator.ProduceExtractionWarning('unable to open Windows Registry file with error: {0!s}'.format(exception))
        return
    win_registry = dfwinreg_registry.WinRegistry()
    # Determine the hive's key path prefix (e.g. HKEY_LOCAL_MACHINE\...).
    key_path_prefix = win_registry.GetRegistryFileMapping(registry_file)
    registry_file.SetKeyPathPrefix(key_path_prefix)
    root_key = registry_file.GetRootKey()
    if (not root_key):
        # Nothing to parse in an empty hive.
        return
    # Artifact filters, when present, restrict parsing to matching keys.
    registry_find_specs = getattr(parser_mediator.artifacts_filter_helper, 'registry_find_specs', None)
    if (not registry_find_specs):
        try:
            self._ParseRecurseKeys(parser_mediator, root_key)
        except IOError as exception:
            parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception))
    else:
        artifacts_filter_helper = artifact_filters.ArtifactDefinitionsFilterHelper
        if (not artifacts_filter_helper.CheckKeyCompatibility(key_path_prefix)):
            # Filters cannot be applied to this hive type; log and skip.
            logger.warning('Artifacts filters are not supported for Windows Registry file with key path prefix: "{0:s}".'.format(key_path_prefix))
        else:
            try:
                win_registry.MapFile(key_path_prefix, registry_file)
                self._ParseKeysFromFindSpecs(parser_mediator, win_registry, registry_find_specs)
            except IOError as exception:
                parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception))
Parses a Windows Registry file-like object. Args: parser_mediator (ParserMediator): parser mediator. file_object (dfvfs.FileIO): a file-like object.
codesearchnet
def copy_image_on_background(image, color=WHITE):
    """Create a new RGB image by compositing *image* onto a solid background.

    Args:
        image (PIL.Image.Image): Image to copy; if it carries an alpha
            channel, transparent areas show the background color.
        color (tuple): Background color, usually WHITE or BLACK.

    Returns:
        PIL.Image.Image: the composited RGB image.
    """
    background = Image.new("RGB", image.size, color)
    if image.mode in ("RGBA", "LA"):
        # Use the alpha channel (last band) as the paste mask.
        background.paste(image, mask=image.split()[-1])
    else:
        # ROBUSTNESS: the original unconditionally indexed band 3, which
        # raises IndexError for images without an alpha channel; paste
        # opaquely instead.
        background.paste(image)
    return background
Create a new image by copying the image on a *color* background. Args: image (PIL.Image.Image): Image to copy color (tuple): Background color usually WHITE or BLACK Returns: PIL.Image.Image
juraj-google-style
def _fill_table_entry(self, row, col):
    """Populate one cell of the observation table via a membership query.

    Args:
        row (str): The row of the observation table.
        col (str): The column of the observation table.

    Returns:
        None
    """
    # The queried word is the row prefix concatenated with the column suffix.
    query_word = row + col
    membership = self._membership_query(query_word)
    self.observation_table[row, col] = membership
Fill an entry of the observation table. Args: row (str): The row of the observation table col (str): The column of the observation table Returns: None
juraj-google-style
def _read_csv_from_file_pandas_on_ray(cls, filepath, kwargs={}):
    """Construct a query compiler from a CSV file by partitioned Ray reads.

    Splits the file into byte ranges (aligned to line boundaries), reads
    each range remotely with pandas, then assembles the partitions and index
    into a query compiler.

    NOTE(review): `kwargs={}` is a mutable default that this function also
    mutates via pop()/item assignment — callers sharing a kwargs dict will
    see it modified; confirm intent.

    Args:
        filepath (str): path to the CSV file.
        kwargs (dict): args excluding filepath provided to read_csv.

    Returns:
        DataFrame or Series constructed from CSV file.
    """
    names = kwargs.get("names", None)
    index_col = kwargs.get("index_col", None)
    if names is None:
        # Temporarily disable index_col so the sniffed header yields all
        # column names, then restore it.
        kwargs["index_col"] = None
        names = pandas.read_csv(
            file_open(filepath, "rb"), **dict(kwargs, nrows=0, skipfooter=0)
        ).columns
        kwargs["index_col"] = index_col
    # Zero-row read to capture dtypes/columns without loading data.
    empty_pd_df = pandas.read_csv(
        file_open(filepath, "rb"), **dict(kwargs, nrows=0, skipfooter=0)
    )
    column_names = empty_pd_df.columns
    skipfooter = kwargs.get("skipfooter", None)
    # skiprows/parse_dates are handled globally, not per-partition.
    skiprows = kwargs.pop("skiprows", None)
    parse_dates = kwargs.pop("parse_dates", False)
    partition_kwargs = dict(
        kwargs,
        header=None,
        names=names,
        skipfooter=0,
        skiprows=None,
        parse_dates=parse_dates,
    )
    with file_open(filepath, "rb") as f:
        # For encoded files, ship the first line (header) to every partition
        # as a prefix so each remote read decodes consistently.
        prefix = b""
        if kwargs.get("encoding", None) is not None:
            prefix = f.readline()
            partition_kwargs["skiprows"] = 1
            f.seek(0, os.SEEK_SET)
        prefix_id = ray.put(prefix)
        partition_kwargs_id = ray.put(partition_kwargs)
        kwargs["skiprows"] = skiprows
        cls._skip_header(f, kwargs)
        partition_ids = []
        index_ids = []
        total_bytes = file_size(f)
        num_parts = cls.frame_mgr_cls._compute_num_partitions()
        num_splits = min(len(column_names), num_parts)
        # NOTE(review): this line appears truncated in the source (unbalanced
        # parentheses); most likely "// num_parts)" was lost during
        # extraction — restore before use.
        chunk_size = max(1, (total_bytes - f.tell())
        while f.tell() < total_bytes:
            start = f.tell()
            # Advance roughly one chunk, then extend to the next newline so
            # partitions never split a row.
            f.seek(chunk_size, os.SEEK_CUR)
            f.readline()
            partition_id = cls.read_csv_remote_task._remote(
                args=(
                    filepath,
                    num_splits,
                    start,
                    f.tell(),
                    partition_kwargs_id,
                    prefix_id,
                ),
                num_return_vals=num_splits + 1,
            )
            partition_ids.append(
                [cls.frame_partition_cls(obj) for obj in partition_id[:-1]]
            )
            index_ids.append(partition_id[-1])
    if index_col is None:
        # Remote tasks returned row counts; sum them into a RangeIndex.
        new_index = pandas.RangeIndex(sum(ray.get(index_ids)))
    else:
        new_index_ids = get_index.remote([empty_pd_df.index.name], *index_ids)
        new_index = ray.get(new_index_ids)
    if parse_dates is not None:
        # Combined date columns replace their source columns at position 0,
        # mirroring pandas' parse_dates list-of-lists / dict behavior.
        if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
            for group in parse_dates:
                new_col_name = "_".join(group)
                column_names = column_names.drop(group).insert(0, new_col_name)
        elif isinstance(parse_dates, dict):
            for new_col_name, group in parse_dates.items():
                column_names = column_names.drop(group).insert(0, new_col_name)
    new_query_compiler = cls.query_compiler_cls(
        cls.frame_mgr_cls(np.array(partition_ids)), new_index, column_names
    )
    if skipfooter:
        # Footer rows were read by the partitions; drop them afterwards.
        new_query_compiler = new_query_compiler.drop(
            new_query_compiler.index[-skipfooter:]
        )
    if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
        return new_query_compiler[new_query_compiler.columns[0]]
    return new_query_compiler
Constructs a DataFrame from a CSV file. Args: filepath (str): path to the CSV file. npartitions (int): number of partitions for the DataFrame. kwargs (dict): args excluding filepath provided to read_csv. Returns: DataFrame or Series constructed from CSV file.
juraj-google-style
def resize_image(buf, width, height, num_channels, new_width, new_height):
    """Resize a raw interleaved pixel buffer via stb_image_resize (cffi).

    Args:
        buf (Buffer): Buffer coming from `load_image`.
        width (int): Width of `buf`.
        height (int): Height of `buf`.
        num_channels (int): Number of channels in `buf` (RGBA=4).
        new_width (int): Desired width.
        new_height (int): Desired height.

    Returns:
        Buffer: Resized image.

    Raises:
        ResizeError: If an error occurs during resize.
    """
    new_size = new_width * new_height * num_channels
    input_pixels = ffi.from_buffer(buf)
    # Allocate the output pixel array on the cffi side.
    output_pixels = ffi.new('unsigned char[]', new_size)
    # The two 0 arguments are input/output row strides; 0 means the rows
    # are tightly packed (width * num_channels bytes each).
    result = lib.stbir_resize_uint8(
        ffi.cast('unsigned char*', input_pixels),
        width, height, 0,
        output_pixels, new_width, new_height, 0,
        num_channels
    )
    # stbir_resize_uint8 returns non-zero on success.
    if not result:
        raise ResizeError()
    return ffi.buffer(output_pixels, new_size)
Resize an image Args: buf (Buffer): Buffer coming from `load_image` width (int): Width of `buf` height (int): Height of `buf` num_channels (int): Number of channels in `buf` (RGBA=4) new_width (int): Desired width new_height (int): Desired height Returns: Buffer: Resized image Raises: ResizeError: If an error occurs during resize
juraj-google-style
def animate_cli(animation_, step, event):
    """Stream animation frames to stdout until *event* is set.

    Intended for synchronous code: run this in a thread and set *event* to
    stop it. On exit the last frame is erased and the generator is reset.

    Args:
        animation_ (generator): Endless generator producing animation frame
            strings; must also provide get_erase_frame() and reset().
        step (float): Seconds between each animation frame.
        event: Object with is_set() signalling when to stop.
    """
    finished = False
    while not finished:
        time.sleep(step)
        sys.stdout.write(next(animation_))
        sys.stdout.flush()
        finished = event.is_set()
    # Clear the final frame and rewind the animation for reuse.
    sys.stdout.write(animation_.get_erase_frame())
    sys.stdout.flush()
    animation_.reset()
Print out the animation cycle to stdout. This function is for use with synchronous functions and must be run in a thread. Args: animation_ (generator): A generator that produces strings for the animation. Should be endless. step (float): Seconds between each animation frame.
juraj-google-style
def get_template(self, template_id):
    """Fetch a Template, including the list of Accounts that can access it.

    Args:
        template_id (str): The id of the template to retrieve.

    Returns:
        A Template object.
    """
    url = self.TEMPLATE_GET_URL + template_id
    return self._get_request().get(url)
Gets a Template which includes a list of Accounts that can access it Args: template_id (str): The id of the template to retrieve Returns: A Template object
juraj-google-style
def new_reviewer(self, name, anomalous=None):
    """Create, register, and return a new reviewer node.

    The reviewer is added to the graph and to the reviewer list.

    Args:
        name: name of the new reviewer.
        anomalous: initial anomalous score. (default: None)

    Returns:
        A new reviewer instance.
    """
    reviewer = self._reviewer_cls(
        self, name=name, credibility=self.credibility, anomalous=anomalous)
    self.graph.add_node(reviewer)
    self.reviewers.append(reviewer)
    return reviewer
Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance.
codesearchnet
def send_rpc_request(self, request):
    """Sends the JSON RPC request to the server and gets a response.

    Note that the request and response are both strings; transports whose
    connections exchange bytes must convert in their implementation.

    NOTE(review): abstract hook with no body here — concrete client
    subclasses are expected to override it; confirm against the subclass
    implementations.

    Args:
        request: str, a string of the RPC request.

    Returns:
        A string of the RPC response.

    Raises:
        errors.ProtocolError: something went wrong when exchanging data
            with the server.
    """
Sends the JSON RPC request to the server and gets a response. Note that the request and response are both in string format. So if the connection with server provides interfaces in bytes format, please transform them to string in the implementation of this function. Args: request: str, a string of the RPC request. Returns: A string of the RPC response. Raises: errors.ProtocolError: something went wrong when exchanging data with the server.
github-repos
def assemble_buffer(self, buf_header, buf_payload):
    """Record one (header, payload) buffer pair read from the socket.

    Unlike add_buffer(), this validates against the message header's
    declared num_buffers instead of filling the header in.

    Args:
        buf_header (``JSON``): a buffer header.
        buf_payload (``JSON`` or bytes): a buffer payload.

    Returns:
        None

    Raises:
        ProtocolError: if more buffers arrive than the header declared.
    """
    declared = self.header.get('num_buffers', 0)
    if declared <= len(self._buffers):
        raise ProtocolError(('too many buffers received expecting ' + str(self.header['num_buffers'])))
    self._buffers.append((buf_header, buf_payload))
Add a buffer header and payload that we read from the socket. This differs from add_buffer() because we're validating vs. the header's num_buffers, instead of filling in the header. Args: buf_header (``JSON``) : a buffer header buf_payload (``JSON`` or bytes) : a buffer payload Returns: None Raises: ProtocolError
codesearchnet
def upload_file(self, url, file, callback=None, extra_headers={}):
    """Uploads a file to W&B with failure resumption.

    Args:
        url (str): The destination url for the PUT upload.
        file: Open file object to upload (its .name is stat'ed for size).
        callback (func, optional): A callback which is passed the number of
            bytes uploaded since the last time it was called, used to
            report progress.
        extra_headers (dict, optional): Additional HTTP headers to send.
            NOTE(review): mutable default argument — only safe because it
            is copied immediately below; confirm no caller relies on it.

    Returns:
        The requests library response object.

    Raises:
        CommError: if the file is empty.
    """
    extra_headers = extra_headers.copy()
    response = None
    # Empty files cannot be resumed meaningfully; fail fast.
    if os.stat(file.name).st_size == 0:
        raise CommError("%s is an empty file" % file.name)
    try:
        # Progress wraps the file so requests streams it while reporting
        # bytes sent through the callback.
        progress = Progress(file, callback=callback)
        response = requests.put(
            url, data=progress, headers=extra_headers)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        total = progress.len
        # Ask the server how much it received; statuses indicating a
        # resumable/transient failure are re-raised as TransientException
        # so the retry layer resumes the upload.
        status = self._status_request(url, total)
        if status.status_code in (308, 408, 500, 502, 503, 504):
            util.sentry_reraise(retry.TransientException(exc=e))
        else:
            util.sentry_reraise(e)
    return response
Uploads a file to W&B with failure resumption Args: url (str): The url to upload to file (str): The path to the file you want to upload callback (:obj:`func`, optional): A callback which is passed the number of bytes uploaded since the last time it was called, used to report progress Returns: The requests library response object
juraj-google-style
def get_padding(x, padding_value=0):
    """Return float tensor representing the padding values in x.

    Args:
        x: int tensor with any shape.
        padding_value: int value that denotes padding in x.

    Returns:
        float tensor with same shape as x containing values 0 or 1.
        0 -> non-padding, 1 -> padding.
    """
    with tf.name_scope('padding'):
        # tf.to_float was deprecated and removed in TF 2.x;
        # tf.cast(..., tf.float32) is the exact equivalent.
        return tf.cast(tf.equal(x, padding_value), tf.float32)
Return float tensor representing the padding values in x. Args: x: int tensor with any shape padding_value: int value that denotes padding in x Returns: float tensor with same shape as x containing values 0 or 1. 0 -> non-padding, 1 -> padding
codesearchnet
def _get_tensors(graph, signature_def_tensor_names=None, user_tensor_names=None):
    """Resolve tensor objects from either user-supplied or SignatureDef names.

    User-provided names take precedence; otherwise the SignatureDef names
    are used. Names are sorted for deterministic ordering.

    Args:
        graph: GraphDef representing graph.
        signature_def_tensor_names: Tensor names stored in either the inputs
            or outputs of a SignatureDef. (default None)
        user_tensor_names: Tensor names provided by the user. (default None)

    Returns:
        List of tensors.

    Raises:
        ValueError: signature_def_tensors and user_tensor_names are undefined
            or empty. user_tensor_names are not valid.
    """
    if user_tensor_names:
        ordered_names = sorted(user_tensor_names)
        return util.get_tensors_from_tensor_names(graph, ordered_names)
    if signature_def_tensor_names:
        return [graph.get_tensor_by_name(name)
                for name in sorted(signature_def_tensor_names)]
    raise ValueError('Specify either signature_def_tensor_names or user_tensor_names')
Gets the tensors associated with the tensor names. Either signature_def_tensor_names or user_tensor_names should be provided. If the user provides tensors, the tensors associated with the user provided tensor names are provided. Otherwise, the tensors associated with the names in the SignatureDef are provided. Args: graph: GraphDef representing graph. signature_def_tensor_names: Tensor names stored in either the inputs or outputs of a SignatureDef. (default None) user_tensor_names: Tensor names provided by the user. (default None) Returns: List of tensors. Raises: ValueError: signature_def_tensors and user_tensor_names are undefined or empty. user_tensor_names are not valid.
github-repos
def generate_nearest_neighbour_lookup_table(self):
    """Construct a look-up table of relative jump probabilities for a
    nearest-neighbour interaction Hamiltonian.

    Populates self.jump_probability as a nested mapping
    [site_1][site_2][coordination_1][coordination_2] -> relative probability,
    where coordination_1 ranges over 0..max-1 for the origin site and
    coordination_2 over 1..max for the destination site.

    Args:
        None.

    Returns:
        None.
    """
    self.jump_probability = {
        site_1: {
            site_2: {
                coord_1: {
                    coord_2: self.relative_probability(site_1, site_2, coord_1, coord_2)
                    for coord_2 in range(1, self.max_coordination_per_site[site_2] + 1)
                }
                for coord_1 in range(self.max_coordination_per_site[site_1])
            }
            for site_2 in self.connected_site_pairs[site_1]
        }
        for site_1 in self.connected_site_pairs
    }
Construct a look-up table of relative jump probabilities for a nearest-neighbour interaction Hamiltonian. Args: None. Returns: None.
juraj-google-style
def sample_static_prior(self, samples, batch_size, fixed=False):
    """Sample the static latent prior.

    Args:
        samples: Number of samples to draw from the latent distribution.
        batch_size: Number of sequences to sample.
        fixed: Boolean for whether or not to share the same random sample
            across all sequences.

    Returns:
        A tuple of a sample tensor of shape
        [samples, batch_size, latent_size], and a MultivariateNormalDiag
        distribution from which the tensor was sampled, with event shape
        [latent_size], and batch shape [].
    """
    dist = self.static_prior()
    if fixed:
        # One shared draw per sample, broadcast across the batch dimension
        # by adding a zeros tensor of shape [batch_size, 1].
        shared_draw = dist.sample((samples, 1))
        sample = shared_draw + tf.zeros([batch_size, 1])
    else:
        sample = dist.sample((samples, batch_size))
    return sample, dist
Sample the static latent prior. Args: samples: Number of samples to draw from the latent distribution. batch_size: Number of sequences to sample. fixed: Boolean for whether or not to share the same random sample across all sequences. Returns: A tuple of a sample tensor of shape [samples, batch_size, latent_size], and a MultivariateNormalDiag distribution from which the tensor was sampled, with event shape [latent_size], and batch shape [].
codesearchnet
def read(self, size=(- 1)):
    """Read data from RAW file.

    Satisfies the request from the in-memory buffer first, swapping in a
    prefetched buffer when one is ready, and falling back to synchronous
    segment fetches otherwise. Schedules the next prefetch before
    returning.

    NOTE(review): returns '' and joins str pieces — this looks Python-2
    era; on Python 3 byte data would need b'' semantics. Confirm.

    Args:
        size: Number of bytes to read as integer. Actual number of bytes
            read is always equal to size unless EOF is reached. If size is
            negative or unspecified, read the entire file.

    Returns:
        data read as str.

    Raises:
        IOError: When this buffer is closed.
    """
    self._check_open()
    if (not self._remaining()):
        return ''
    data_list = []
    while True:
        remaining = self._buffer.remaining()
        if ((size >= 0) and (size < remaining)):
            # The current buffer alone satisfies the request.
            data_list.append(self._buffer.read(size))
            self._offset += size
            break
        else:
            # Drain the current buffer; size goes negative for
            # read-everything requests, keeping the else branch active.
            size -= remaining
            self._offset += remaining
            data_list.append(self._buffer.read())
            if (self._buffer_future is None):
                # No prefetch in flight: fetch whatever is still needed
                # synchronously and finish.
                if ((size < 0) or (size >= self._remaining())):
                    needs = self._remaining()
                else:
                    needs = size
                data_list.extend(self._get_segments(self._offset, needs))
                self._offset += needs
                break
        if self._buffer_future:
            # Swap in the prefetched buffer and loop again.
            self._buffer.reset(self._buffer_future.get_result())
            self._buffer_future = None
    if (self._buffer_future is None):
        # Kick off the next asynchronous prefetch for subsequent reads.
        self._request_next_buffer()
    return ''.join(data_list)
Read data from RAW file. Args: size: Number of bytes to read as integer. Actual number of bytes read is always equal to size unless EOF is reached. If size is negative or unspecified, read the entire file. Returns: data read as str. Raises: IOError: When this buffer is closed.
codesearchnet
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
    """Retrieves sequence ids from a token list that has no special tokens
    added. This method is called when adding special tokens using the
    tokenizer `prepare_for_model` or `encode_plus` methods.

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the token list is already formatted with special
            tokens for the model.

    Returns:
        `List[int]`: A list of integers in the range [0, 1]: 1 for a
        special token, 0 for a sequence token.
    """
    # Delegate to the base implementation when the input already carries
    # special tokens, or when this tokenizer adds no BOS token at all.
    if already_has_special_tokens:
        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    if not self.add_bos_token:
        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False)
    # A leading 1 marks the BOS token prepended to each sequence.
    bos_mask = [1]
    first_sequence = [0] * len(token_ids_0)
    if token_ids_1 is None:
        return bos_mask + first_sequence
    return bos_mask + first_sequence + [1] + [0] * len(token_ids_1)
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
def disease_terms(self, hgnc_id=None):
    """Return all disease terms, optionally restricted to one gene.

    If no gene is given, every disease term is returned.

    Args:
        hgnc_id(int)

    Returns:
        iterable(dict): A list with all disease terms that match.
    """
    criteria = {}
    if hgnc_id:
        LOG.debug("Fetching all diseases for gene %s", hgnc_id)
        criteria['genes'] = hgnc_id
    else:
        LOG.info("Fetching all disease terms")
    cursor = self.disease_term_collection.find(criteria)
    return list(cursor)
Return all disease terms that overlaps a gene If no gene, return all disease terms Args: hgnc_id(int) Returns: iterable(dict): A list with all disease terms that match
juraj-google-style
def list_address(self, id=None, endpoint=None):
    """Lists all the addresses in the current wallet.

    Args:
        id (int, optional): id to use for response tracking.
            NOTE: shadows the builtin `id`, kept for keyword-API
            compatibility with callers.
        endpoint (RPCEndpoint, optional): endpoint to specify to use.

    Returns:
        json object of the result or the error encountered in the RPC call.
    """
    return self._call_endpoint(LIST_ADDRESS, id=id, endpoint=endpoint)
Lists all the addresses in the current wallet. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
juraj-google-style