code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def read_excitation_energies(self):
    """Read the excitation energies after a TD-DFT calculation.

    Returns:
        list: One ``(energy_eV, lambda_nm, oscillator_strength)`` tuple
        per excited state found after the TD section header.
    """
    transitions = list()
    in_td_section = False
    with zopen(self.filename, 'r') as handle:
        for raw_line in handle:
            # Everything before this header is ignored.
            if re.search('^\\sExcitation energies and oscillator strengths:', raw_line):
                in_td_section = True
            if in_td_section and re.search('^\\sExcited State\\s*\\d', raw_line):
                numbers = [float(tok) for tok in float_patt.findall(raw_line)]
                transitions.append(tuple(numbers[:3]))
    return transitions
Read the excitation energies after a TD-DFT calculation. Returns: A list: A list of tuples, one per transition, such as [(energy (eV), lambda (nm), oscillator strength), ... ]
codesearchnet
def __init__(self, callback):
    """Initialize a threaded XML-RPC server.

    Args:
        callback (function): Callback function to invoke on a get-status
            RPC request; forwarded to the parent class.
    """
    super(ThreadedXMLRPCServer, self).__init__(callback)
    # Created later when the server is actually started.
    self._rpc_thread = None
    self._xmlrpc_server = None
Initialize a threaded RPC server. Args: callback (function): callback function to invoke on get status RPC request.
juraj-google-style
def load_filename(self, filename, index=None):
    """Load a file, given its filename, into a tab.

    Args:
        filename: Path of the file to load (converted to str, so
            path-like objects are accepted).
        index: Tab index to load the file into. If not passed, loads
            into the current tab.
    """
    filename = str(filename)
    if index is None:
        index = self._get_tab_index()
    page = self.pages[index]
    # Remember the directory for subsequent load operations.
    self.load_dir, _ = os.path.split(filename)
    clss = page.clss_load
    if len(clss) == 1:
        # Single candidate class: instantiate and load directly.
        f = clss[0]()
        f.load(filename)
    else:
        # Let f311 try each candidate class in turn.
        f = f311.load_with_classes(filename, page.clss_load)
        if f is None:
            raise RuntimeError("Could not load '{0!s}'".format(filename))
    self.load(f, index)
Loads file given filename Args: filename: index: tab index to load file into. If not passed, loads into current tab
juraj-google-style
def orient_averaged_adaptive(tm):
    """Compute orientation-averaged amplitude and phase matrices.

    Uses a slow adaptive quadrature (for reference purposes) over the
    scatterer orientation PDF, ignoring the alpha and beta attributes.

    Args:
        tm: TMatrix (or descendant) instance.

    Returns:
        tuple: The amplitude (S, 2x2 complex) and phase (Z, 4x4 real)
        matrices.
    """
    amplitude = np.zeros((2, 2), dtype=complex)
    phase = np.zeros((4, 4))

    def amplitude_integrand(beta, alpha, i, j, real_part):
        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
        component = S_ang[i, j].real if real_part else S_ang[i, j].imag
        return component * tm.or_pdf(beta)

    def phase_integrand(beta, alpha, i, j):
        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
        return Z_ang[i, j] * tm.or_pdf(beta)

    beta_low = lambda x: 0.0
    beta_high = lambda x: 180.0

    # Real and imaginary parts are integrated separately because
    # dblquad only handles real integrands.
    for i in range(2):
        for j in range(2):
            amplitude.real[i, j] = dblquad(
                amplitude_integrand, 0.0, 360.0, beta_low, beta_high,
                (i, j, True))[0] / 360.0
            amplitude.imag[i, j] = dblquad(
                amplitude_integrand, 0.0, 360.0, beta_low, beta_high,
                (i, j, False))[0] / 360.0

    for i in range(4):
        for j in range(4):
            phase[i, j] = dblquad(
                phase_integrand, 0.0, 360.0, beta_low, beta_high,
                (i, j))[0] / 360.0

    return (amplitude, phase)
Compute the T-matrix using variable orientation scatterers. This method uses a very slow adaptive routine and should mainly be used for reference purposes. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance Returns: The amplitude (S) and phase (Z) matrices.
juraj-google-style
class GroundingDinoImageLoss(ImageLoss):
    """Computes the losses for `GroundingDinoForObjectDetection`.

    The process happens in two steps: 1) compute the hungarian assignment
    between ground-truth boxes and model outputs, 2) supervise each pair
    of matched ground-truth / prediction (class and box).

    Args:
        matcher (`GroundingDinoHungarianMatcher`):
            Module able to compute a matching between targets and proposals.
        focal_alpha (`float`):
            Alpha parameter in focal loss.
        losses (`List[str]`):
            List of all the losses to be applied.
    """

    def __init__(self, matcher, focal_alpha, losses):
        # Call nn.Module.__init__ directly, deliberately skipping the
        # parent ImageLoss initializer.
        nn.Module.__init__(self)
        self.matcher = matcher
        self.focal_alpha = focal_alpha
        self.losses = losses

    def _get_target_classes_one_hot(self, outputs, targets, indices):
        """Build a one-hot target tensor aligned with the text-token logits."""
        logits = outputs['logits']
        # Offset class labels of later batch elements so they index into
        # the concatenated label maps.
        # NOTE(review): the offset adds only len(label_maps[i]) rather than
        # a cumulative sum over all previous maps -- confirm upstream.
        class_labels = torch.cat([target['class_labels'][J] + len(outputs['label_maps'][i]) if i > 0 else target['class_labels'][J] for i, (target, (_, J)) in enumerate(zip(targets, indices))])
        label_maps = torch.cat(outputs['label_maps'], dim=0)
        idx = self._get_source_permutation_idx(indices)
        target_classes_onehot = torch.zeros_like(logits, device=logits.device, dtype=torch.long)
        target_classes_onehot[idx] = label_maps[class_labels].to(torch.long)
        return target_classes_onehot

    def loss_labels(self, outputs, targets, indices, num_boxes):
        """Classification loss (sigmoid focal loss) over text tokens.

        Args:
            outputs (dict): Must contain 'logits', 'text_mask' and
                'label_maps'.
            targets (list): Per-image target dicts with 'class_labels'.
            indices: Matching indices from the Hungarian matcher.
            num_boxes: Normalization factor for the focal loss.

        Returns:
            dict: {'loss_ce': focal classification loss}.

        Raises:
            KeyError: If 'logits' or 'text_mask' is missing from outputs.
        """
        if 'logits' not in outputs:
            raise KeyError('No logits were found in the outputs')
        if 'text_mask' not in outputs:
            raise KeyError('No text_mask were found in the outputs')
        target_classes_onehot = self._get_target_classes_one_hot(outputs, targets, indices)
        source_logits = outputs['logits']
        text_mask = outputs['text_mask']
        # Keep only positions corresponding to real text tokens.
        source_logits = torch.masked_select(source_logits, text_mask)
        target_classes_onehot = torch.masked_select(target_classes_onehot, text_mask)
        target_classes_onehot = target_classes_onehot.float()
        loss_ce = sigmoid_focal_loss(inputs=source_logits, targets=target_classes_onehot, num_boxes=num_boxes, alpha=self.focal_alpha, gamma=2)
        losses = {'loss_ce': loss_ce}
        return losses
This class computes the losses for `GroundingDinoForObjectDetection`. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`GroundingDinoHungarianMatcher`): Module able to compute a matching between targets and proposals. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses.
github-repos
def __init__(self, minimum=None, maximum=None):
    """Initialize an integer type checker.

    Args:
        minimum (int): Minimum allowed value (inclusive), or None for
            no lower bound.
        maximum (int): Maximum allowed value (inclusive), or None for
            no upper bound.
    """
    super(IntegerTypeChecker, self).__init__(base_type=int)
    self.minimum = minimum
    self.maximum = maximum
Initialization method. Args: minimum (int): a minimum value (included). maximum (int): a maximum value (included).
juraj-google-style
def fill_datetime(self):
    """Return the time at which this slot was filled.

    Returns:
        datetime.datetime: When the slot was filled.

    Raises:
        SlotNotFilledError: If the value has not been filled yet.
    """
    if self.filled:
        return self._fill_datetime
    raise SlotNotFilledError(
        'Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key))
Returns when the slot was filled. Returns: A datetime.datetime. Raises: SlotNotFilledError if the value hasn't been filled yet.
codesearchnet
def shannon_entropy(time_series):
    """Return the Shannon entropy (in bits) of the sample data.

    Args:
        time_series: Vector or string of the sample data.

    Returns:
        float: The Shannon entropy; 0.0 for empty input.
    """
    from collections import Counter

    if not isinstance(time_series, str):
        time_series = list(time_series)
    total = len(time_series)
    if total == 0:
        # An empty sample carries no information (the original code
        # divided by zero here).
        return 0.0
    # Counter gives O(n) frequency counting instead of the original
    # O(n * distinct) rescan per unique symbol.
    ent = 0.0
    for count in Counter(time_series).values():
        freq = float(count) / total
        ent -= freq * np.log2(freq)
    return ent
Return the Shannon Entropy of the sample data. Args: time_series: Vector or string of the sample data Returns: The Shannon Entropy as float value
codesearchnet
def rank_internal(input, name=None, optimize=True):
    """Returns the rank of a tensor.

    Args:
        input: A `Tensor` or `SparseTensor`.
        name: A name for the operation (optional).
        optimize: If true, encode the rank as a constant when the static
            shape is fully known.

    Returns:
        A `Tensor` of type `int32`.
    """
    with ops.name_scope(name, 'Rank', [input]) as name:
        if isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
            # A SparseTensor's rank equals the length of its dense_shape
            # vector.
            return gen_array_ops.size(input.dense_shape, name=name)
        else:
            input = ops.convert_to_tensor(input)
            input_shape = input.get_shape()
            if optimize and input_shape.ndims is not None:
                # Rank is statically known: fold it into a constant op.
                return constant(input_shape.ndims, dtypes.int32, name=name)
            return gen_array_ops.rank(input, name=name)
Returns the rank of a tensor. Args: input: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). optimize: if true, encode the rank as a constant when possible. Returns: A `Tensor` of type `int32`.
github-repos
def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
    """Run only the encoder part of the model.

    Args:
        input_ids: Token ids to encode.
        attention_mask: Optional mask; defaults to attending over every
            token.
        position_ids: Optional positions; default is 0..seq_len-1
            broadcast over the batch.
        output_attentions / output_hidden_states / return_dict: Override
            the corresponding config flags when not None.
        train: When True, dropout is active (deterministic=False).
        params: Optional parameter dict; defaults to self.params.
        dropout_rng: Optional PRNG key for dropout.

    Returns:
        The encoder outputs.
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    return_dict = return_dict if return_dict is not None else self.config.return_dict
    if attention_mask is None:
        attention_mask = jnp.ones_like(input_ids)
    if position_ids is None:
        batch_size, sequence_length = input_ids.shape
        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
    rngs = {}
    if dropout_rng is not None:
        rngs['dropout'] = dropout_rng

    def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
        # Apply only the encoder submodule instead of the full model.
        encode_module = module._get_encoder_module()
        return encode_module(input_ids, attention_mask, position_ids, **kwargs)

    return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)
Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax") >>> encoder_outputs = model.encode(**inputs) ```
github-repos
def create_bagit_stream(dir_name, payload_info_list):
    """Create a stream containing a BagIt zip archive.

    Args:
        dir_name (str): Name of the root directory in the zip file under
            which all files are placed (avoids "zip bombs").
        payload_info_list (list): List of payload_info_dict, each
            describing a file. Keys: pid, filename, iter, checksum,
            checksum_algorithm. If filename is None, the pid is used as
            the filename.

    Returns:
        zipstream.ZipFile: Streamable zip archive with payload, tag and
        manifest files added.
    """
    zip_file = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    # NOTE(review): _add_path() is not passed zip_file -- presumably it
    # rewrites the paths in payload_info_list in place; confirm.
    _add_path(dir_name, payload_info_list)
    payload_byte_count, payload_file_count = _add_payload_files(
        zip_file, payload_info_list
    )
    tag_info_list = _add_tag_files(
        zip_file, dir_name, payload_info_list, payload_byte_count,
        payload_file_count
    )
    _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list)
    _add_tag_manifest_file(zip_file, dir_name, tag_info_list)
    return zip_file
Create a stream containing a BagIt zip archive. Args: dir_name : str The name of the root directory in the zip file, under which all the files are placed (avoids "zip bombs"). payload_info_list: list List of payload_info_dict, each dict describing a file. - keys: pid, filename, iter, checksum, checksum_algorithm - If the filename is None, the pid is used for the filename.
juraj-google-style
def process_document_events(events, use_buffers=True):
    """Serialize document events into a JSON patch string.

    Args:
        events: List of events to be translated into patches.
        use_buffers: When True, collect binary buffers separately
            instead of inlining them.

    Returns:
        tuple: (JSON string describing the patch, list of optional
        buffers).
    """
    references = set()
    buffers = [] if use_buffers else None
    # Each event records the references and buffers it needs as a side
    # effect of generating its JSON form.
    json_events = [event.generate(references, buffers) for event in events]
    json = {
        'events': json_events,
        'references': references_json(references),
    }
    return serialize_json(json), buffers if use_buffers else []
Create a JSON string describing a patch to be applied as well as any optional buffers. Args: events : list of events to be translated into patches Returns: str, list : JSON string which can be applied to make the given updates to obj as well as any optional buffers
juraj-google-style
def get_distance_and_image(self, frac_coords1: Vector3Like, frac_coords2: Vector3Like, jimage: Optional[Union[(List[int], np.ndarray)]]=None) -> Tuple[(float, np.ndarray)]:
    """Get the distance between two fractional coordinates assuming
    periodic boundary conditions.

    If jimage is None, the periodic image of frac_coords2 nearest to
    frac_coords1 is selected and both the distance and that image (in
    lattice-vector translations) are returned. Otherwise the distance to
    the specified image is returned along with the given jimage.

    Args:
        frac_coords1 (3x1 array): Reference fractional coords.
        frac_coords2 (3x1 array): Fractional coords to get distance from.
        jimage (3x1 array): Specific periodic image, e.g. [1, 0, 0], or
            None to select the nearest image.

    Returns:
        (distance, jimage): Distance and the periodic lattice
        translations of the other site, such that the distance between
        frac_coords1 and (jimage + frac_coords2) equals ``distance``.
    """
    if jimage is None:
        (v, d2) = pbc_shortest_vectors(self, frac_coords1, frac_coords2, return_d2=True)
        fc = self.get_fractional_coords(v[0][0]) + frac_coords1 - frac_coords2
        # np.int was removed in NumPy 1.24; the builtin int is the
        # equivalent integer dtype.
        fc = np.array(np.round(fc), dtype=int)
        return (np.sqrt(d2[0, 0]), fc)
    jimage = np.array(jimage)
    mapped_vec = self.get_cartesian_coords(jimage + frac_coords2 - frac_coords1)
    return (np.linalg.norm(mapped_vec), jimage)
Gets distance between two frac_coords assuming periodic boundary conditions. If the index jimage is not specified it selects the j image nearest to the i atom and returns the distance and jimage indices in terms of lattice vector translations. If the index jimage is specified it returns the distance between the frac_coords1 and the specified jimage of frac_coords2, and the given jimage is also returned. Args: frac_coords1 (3x1 array): Reference frac_coords to get distance from. frac_coords2 (3x1 array): frac_coords to get distance from. jimage (3x1 array): Specific periodic image in terms of lattice translations, e.g., [1,0,0] implies to take periodic image that is one a-lattice vector away. If jimage is None, the image that is nearest to the site is found. Returns: (distance, jimage): distance and periodic lattice translations of the other site for which the distance applies. This means that the distance between frac_coords1 and (jimage + frac_coords2) is equal to distance.
codesearchnet
def get_default_backend_config(appdirs):
    """Return a default config dictionary.

    Args:
        appdirs (HamsterAppDirs): ``HamsterAppDirs`` instance
            encapsulating the app details.

    Returns:
        dict: Dictionary with a default configuration.

    Note:
        These defaults are independent of the particular config-store.
    """
    data_dir = appdirs.user_data_dir
    app_name = appdirs.appname
    return {
        'store': 'sqlalchemy',
        'day_start': datetime.time(5, 30, 0),
        'fact_min_delta': 1,
        'tmpfile_path': os.path.join(data_dir, '{}.tmp'.format(app_name)),
        'db_engine': 'sqlite',
        'db_path': os.path.join(data_dir, '{}.sqlite'.format(app_name)),
    }
Return a default config dictionary. Args: appdirs (HamsterAppDirs): ``HamsterAppDirs`` instance encapsulating the apps details. Returns: dict: Dictionary with a default configuration. Note: Those defaults are independent of the particular config-store.
juraj-google-style
def get(self, context_id, address_list):
    """Get the values for a list of addresses within a context.

    Args:
        context_id (str): The return value of create_context,
            referencing a particular context.
        address_list (list): A list of address strs.

    Returns:
        list: (address, value) tuples, ordered as in address_list.

    Raises:
        AuthorizationException: When an address is invalid or not
            authorized for reading in this context.
    """
    # Unknown context: nothing to return.
    if (context_id not in self._contexts):
        return []
    # Fail fast on malformed addresses before touching any state.
    for add in address_list:
        if (not self.address_is_valid(address=add)):
            raise AuthorizationException(address=add)
    context = self._contexts[context_id]
    addresses_in_ctx = [add for add in address_list if (add in context)]
    addresses_not_in_ctx = list((set(address_list) - set(addresses_in_ctx)))
    values = context.get(addresses_in_ctx)
    values_list = list(zip(addresses_in_ctx, values))
    if addresses_not_in_ctx:
        # Addresses missing from this context must still be authorized
        # for reading.
        for address in addresses_not_in_ctx:
            context.validate_read(address)
        try:
            (address_values, reads) = self._find_address_values_in_chain(base_contexts=[context_id], addresses_to_find=addresses_not_in_ctx)
        except KeyError:
            return []
        values_list.extend(address_values)
        if reads:
            # Fall back to the merkle tree at this context's state root
            # for addresses not found anywhere in the context chain.
            tree = MerkleDatabase(self._database, context.merkle_root)
            add_values = []
            for add in reads:
                value = None
                try:
                    value = tree.get(add)
                except KeyError:
                    # Address not in state: report it with value None.
                    pass
                add_values.append((add, value))
            values_list.extend(add_values)
    # Preserve the caller's requested ordering.
    values_list.sort(key=(lambda x: address_list.index(x[0])))
    return values_list
Get the values associated with list of addresses, for a specific context referenced by context_id. Args: context_id (str): the return value of create_context, referencing a particular context. address_list (list): a list of address strs Returns: values_list (list): a list of (address, value) tuples Raises: AuthorizationException: Raised when an address in address_list is not authorized either by not being in the inputs for the txn associated with this context, or it is under a namespace but the characters that are under the namespace are not valid address characters.
codesearchnet
def ResolveFlats(dem, in_place=False):
    """Attempt to resolve flats by imposing a local gradient.

    Args:
        dem (rdarray): An elevation model.
        in_place (bool): If True, the DEM is modified in place and there
            is no return; otherwise a new, altered DEM is returned.

    Returns:
        rdarray: DEM modified such that all flats drain (only when
        ``in_place`` is False).

    Raises:
        Exception: If ``dem`` is not a richdem rdarray.
    """
    if (type(dem) is not rdarray):
        raise Exception('A richdem.rdarray or numpy.ndarray is required!')
    if (not in_place):
        # Work on a copy so the caller's array is untouched.
        dem = dem.copy()
    # Record this operation in the DEM's processing history.
    _AddAnalysis(dem, 'ResolveFlats(dem, in_place={in_place})'.format(in_place=in_place))
    demw = dem.wrap()
    _richdem.rdResolveFlatsEpsilon(demw)
    dem.copyFromWrapped(demw)
    if (not in_place):
        return dem
Attempts to resolve flats by imposing a local gradient Args: dem (rdarray): An elevation model in_place (bool): If True, the DEM is modified in place and there is no return; otherwise, a new, altered DEM is returned. Returns: DEM modified such that all flats drain.
codesearchnet
def __init__(self, var_config, scope_config):
    """Initializes the substitution environment.

    Args:
        var_config: A configuration (concrete values) of pattern
            variables, mapping variable id -> value.
        scope_config: A configuration (concrete values) of pattern
            scopes, mapping scope id -> {variable id: value}.
    """
    self._substs = {}
    self._var_config = var_config
    self._scope_config = scope_config
    # Global variables substitute as %%<var>%%.
    for var_id, var_value in iteritems(var_config):
        key = "%%{var}%%".format(var=var_id)
        self._substs[key] = str(var_value)
    # Scoped variables substitute as %%<scope>.<var>%%. The inner
    # mapping gets its own name so it no longer shadows the var_config
    # parameter (latent bug source in the original).
    for scope_id, scoped_vars in iteritems(scope_config):
        for var_id, var_value in iteritems(scoped_vars):
            key = "%%{scope}.{var}%%".format(scope=scope_id, var=var_id)
            self._substs[key] = str(var_value)
Initializes the substitution environment. Args: var_config: A configuration (concrete values) of pattern variables. scope_config: A configuration (concrete values) of pattern scopes.
juraj-google-style
def get_actions(self, parent_environ=None):
    """Get the list of rex.Action objects resulting from interpreting
    this context. Provided mainly for testing purposes.

    Args:
        parent_environ: Environment to interpret the context within;
            defaults to os.environ if None.

    Returns:
        A list of rex.Action subclass instances.
    """
    # Passive interpreter: records actions without applying them.
    interp = Python(target_environ={}, passive=True)
    executor = self._create_executor(interp, parent_environ)
    self._execute(executor)
    return executor.actions
Get the list of rex.Action objects resulting from interpreting this context. This is provided mainly for testing purposes. Args: parent_environ Environment to interpret the context within, defaults to os.environ if None. Returns: A list of rex.Action subclass instances.
juraj-google-style
def get_heading_encoding(response):
    """Return the document encoding from a HTTP header.

    Args:
        response (Response): An instance of :class:`.http.Response`.

    Returns:
        ``str``, ``None``: The normalized codec name, or None when the
        Content-Type header declares no charset.
    """
    content_type = response.fields.get('content-type', '')
    encoding = wpull.protocol.http.util.parse_charset(content_type)
    if not encoding:
        return None
    return wpull.string.normalize_codec_name(encoding)
Return the document encoding from a HTTP header. Args: response (Response): An instance of :class:`.http.Response`. Returns: ``str``, ``None``: The codec name.
juraj-google-style
def request(self, session=None):
    """Return an instance of the Request Class.

    A wrapper on the Python Requests module that provides a different
    interface for creating requests, with built-in logging, session
    level retries, and preconfigured proxy configuration.

    Args:
        session: Optional requests session to reuse.

    Returns:
        (object): An instance of Request Class.
    """
    try:
        # Imported lazily so the dependency is only needed when used.
        from .tcex_request import TcExRequest
        r = TcExRequest(self, session)
        if ((session is None) and self.default_args.tc_proxy_external):
            self.log.info('Using proxy server for external request {}:{}.'.format(self.default_args.tc_proxy_host, self.default_args.tc_proxy_port))
            r.proxies = self.proxies
        return r
    except ImportError as e:
        self.handle_error(105, [e])
Return an instance of the Request Class. A wrapper on the Python Requests module that provides a different interface for creating requests. The session property of this instance has built-in logging, session level retries, and preconfigured proxy configuration. Returns: (object): An instance of Request Class
codesearchnet
def build_variant_query(self, query=None, category='snv', variant_type=None):
    """Build a mongo query across multiple cases.

    Translate query options from a form into a complete mongo query
    dictionary. Beware that unindexed queries against a large variant
    collection will be extremely slow. Currently indexed options:
    hgnc_symbols, rank_score, variant_type, category.

    Args:
        query(dict): A query dictionary for the database, from a query
            form.
        category(str): 'snv', 'sv', 'str' or 'cancer'.
        variant_type(list): Variant types to match; defaults to
            ['clinical'].

    Returns:
        mongo_query: A dictionary in the mongo query format.
    """
    query = (query or {})
    if variant_type is None:
        # Default resolved here instead of a shared mutable default
        # argument in the signature.
        variant_type = ['clinical']
    mongo_variant_query = {}
    LOG.debug(('Building a mongo query for %s' % query))
    if query.get('hgnc_symbols'):
        mongo_variant_query['hgnc_symbols'] = {'$in': query['hgnc_symbols']}
    mongo_variant_query['variant_type'] = {'$in': variant_type}
    mongo_variant_query['category'] = category
    # Default rank-score cutoff of 15 when none was requested.
    rank_score = (query.get('rank_score') or 15)
    mongo_variant_query['rank_score'] = {'$gte': rank_score}
    LOG.debug(('Querying %s' % mongo_variant_query))
    return mongo_variant_query
Build a mongo query across multiple cases. Translate query options from a form into a complete mongo query dictionary. Beware that unindexed queries against a large variant collection will be extremely slow. Currently indexed query options: hgnc_symbols rank_score variant_type category Args: query(dict): A query dictionary for the database, from a query form. category(str): 'snv', 'sv', 'str' or 'cancer' variant_type(str): 'clinical' or 'research' Returns: mongo_query : A dictionary in the mongo query format.
codesearchnet
def period_start_day(self, value=None):
    """Set the value of IDD Field `period_start_day`.

    Args:
        value (str): Value for IDD Field `period_start_day`. If None it
            is stored unchecked and treated as a missing value.

    Raises:
        ValueError: If `value` is not a valid value.
    """
    if value is None:
        # Missing value: store as-is without validation.
        self._period_start_day = None
        return
    try:
        text = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str for field `period_start_day`'.format(value))
    if ',' in text:
        raise ValueError('value should not contain a comma for field `period_start_day`')
    self._period_start_day = text
Corresponds to IDD Field `period_start_day` Args: value (str): value for IDD Field `period_start_day` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def loss_labels(self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array]) -> Dict[str, Tensor]:
    """Compute the losses related to the labels using cross entropy.

    Args:
        class_queries_logits (`torch.Tensor`): A tensor of shape
            `batch_size, num_queries, num_labels`.
        class_labels (`List[torch.Tensor]`): List of class labels of
            shape `(labels)`.
        indices (`Tuple[np.array]`): The indices computed by the
            Hungarian matcher.

    Returns:
        `Dict[str, Tensor]`: A dict with key **loss_cross_entropy**.
    """
    pred_logits = class_queries_logits
    batch_size, num_queries, _ = pred_logits.shape
    # empty_weight down-weights the "no object" class.
    criterion = nn.CrossEntropyLoss(weight=self.empty_weight)
    idx = self._get_predictions_permutation_indices(indices)
    # Matched target classes, ordered to align with permuted predictions.
    target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)])
    # Default every query to the "no object" class, then fill matches.
    target_classes = torch.full((batch_size, num_queries), fill_value=self.num_classes, dtype=torch.int64, device=pred_logits.device)
    target_classes[idx] = target_classes_o
    # CrossEntropyLoss expects (batch, classes, ...) layout.
    pred_logits_transposed = pred_logits.transpose(1, 2)
    loss_ce = criterion(pred_logits_transposed, target_classes)
    losses = {'loss_cross_entropy': loss_ce}
    return losses
Compute the losses related to the labels using cross entropy. Args: class_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, num_labels` class_labels (`List[torch.Tensor]`): List of class labels of shape `(labels)`. indices (`Tuple[np.array])`: The indices computed by the Hungarian matcher. Returns: `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key: - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
github-repos
def ensure_dir(path):
    """Ensure that the directory component of ``path`` exists.

    Args:
        path(str): File path whose parent directory should exist. A
            bare filename (no directory part) is a no-op.
    """
    dirpath = os.path.dirname(path)
    if dirpath:
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() guard.
        os.makedirs(dirpath, exist_ok=True)
Ensure directory exists. Args: path(str): dir path
juraj-google-style
def normalize_whitespace(text):
    """Collapse runs of inner whitespace to single spaces and strip the
    outer whitespace.

    Args:
        text (str): The text to normalize.

    Returns:
        str: The normalized text.
    """
    collapsed = re.sub('\\s+', ' ', text, flags=re.UNICODE)
    return collapsed.strip()
Returns the given text with outer whitespace removed and inner whitespace collapsed. Args: text (str): The text to normalize. Returns: str: The normalized text.
codesearchnet
def group_device_names(devices, group_size):
    """Group device names into groups of group_size.

    Args:
        devices: list of strings naming devices.
        group_size: int >= 1, size of each group.

    Returns:
        list of lists of devices, where each inner list is group_size
        long, and each device appears at least once in an inner list.
        If len(devices) % group_size == 0 then each device will appear
        exactly once.

    Raises:
        ValueError: group_size > len(devices)
    """
    num_devices = len(devices)
    if group_size > num_devices:
        raise ValueError(
            "only %d devices, but group_size=%d" % (num_devices, group_size))
    # Round up so every device is covered even when the division is not
    # exact. (The expression here was garbled in the original source and
    # did not parse; this is the ceiling-division it was computing.)
    num_groups = num_devices // group_size + (1 if num_devices % group_size != 0 else 0)
    groups = [[] for _ in range(num_groups)]
    # Deal devices round-robin, wrapping over the device list so the
    # last group is padded back to group_size.
    for i in range(num_groups * group_size):
        groups[i % num_groups].append(devices[i % num_devices])
    return groups
Group device names into groups of group_size. Args: devices: list of strings naming devices. group_size: int >= 1 Returns: list of lists of devices, where each inner list is group_size long, and each device appears at least once in an inner list. If len(devices) % group_size = 0 then each device will appear exactly once. Raises: ValueError: group_size > len(devices)
juraj-google-style
def _random_stateless_uniform(shape: types.IntTensor, num_digits: types.IntTensor, seed: int, validate_args: bool=False, dtype: tf.DType=None, name: str=None) -> types.IntTensor:
    """Returns a `Tensor` drawn from a uniform distribution with a given
    `shape`.

    Args:
        shape: Positive scalar `Tensor` of integers with rank 1. The
            shape of the returned `Tensor`.
        num_digits: Positive scalar `Tensor` of integers with rank 0.
            The base-2 precision of the sampled points.
        seed: Positive scalar `Tensor` with shape [2] and dtype `int32`
            used as seed for the random generator.
        validate_args: Python `bool` indicating whether to validate
            arguments. Default value: `False`.
        dtype: Optional `dtype` of the output (`tf.int32` or
            `tf.int64`). Default: `None`, mapping to `tf.int32`.
        name: Python `str` name prefixed to created ops. Default:
            `None`, mapping to `random_stateless_uniform`.

    Returns:
        A `Tensor` with the requested `shape`.
    """
    with tf.name_scope(name or 'random_stateless_uniform'):
        dtype = dtype or tf.int32
        shape = tf.convert_to_tensor(shape, dtype=dtype, name='dim')
        num_digits = tf.convert_to_tensor(num_digits, dtype=dtype, name='num_digits')
        control_deps = []
        if validate_args:
            control_deps.append(tf.debugging.assert_positive(shape, message='shape must be positive'))
            control_deps.append(tf.debugging.assert_positive(num_digits, message='num_digits must be positive'))
        with tf.control_dependencies(control_deps):
            # Sample in [2**(num_digits-1), 2**num_digits), i.e. integers
            # with exactly num_digits binary digits.
            minval = tf.cast(utils.exp2(num_digits - 1), dtype=dtype)
            maxval = tf.cast(utils.exp2(num_digits), dtype=dtype)
            return tf.random.stateless_uniform(shape, seed, minval=minval, maxval=maxval, dtype=dtype)
Returns a `Tensor` drawn from a uniform distribution with a given `shape`. Args: shape: Positive scalar `Tensor` of integers with rank 1. The shape of the returned `Tensor`. num_digits: Positive scalar `Tensor` of integers with rank 0. the base-2 precision of the points which can be sampled from `generating_matrices`. seed: Positive scalar `Tensor` with shape [2] and dtype `int32` used as seed for the random generator. validate_args: Python `bool` indicating whether to validate arguments. Default value: `False`. dtype: Optional `dtype`. The `dtype` of the output `Tensor` (either `tf.int32` or `tf.int64`). Default value: `None` which maps to `tf.int32`. name: Python `str` name prefixed to ops created by this function. Default value: `None` which maps to `random_stateless_uniform`. Returns: A `Tensor` with the requested `shape`.
github-repos
def post_process_depth_estimation(self, outputs: 'DepthProDepthEstimatorOutput', target_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]]=None) -> Dict[str, List[TensorType]]:
    """Post-process raw depth predictions into calibrated, optionally
    resized depth maps.

    Args:
        outputs ([`DepthProDepthEstimatorOutput`]): Raw outputs of the
            model (`predicted_depth` and `field_of_view`).
        target_sizes: Optional per-image `(height, width)` sizes to
            resize the depth predictions to; `None` skips resizing.

    Returns:
        `List[Dict[str, TensorType]]`: Per-image dicts with the
        processed depth prediction, field of view (degrees) and focal
        length (pixels) when computable.

    Raises:
        `ValueError`: If the lengths of `predicted_depth` and
            `target_sizes` are mismatched.
    """
    requires_backends(self, 'torch')
    predicted_depth = outputs.predicted_depth
    fov = outputs.field_of_view
    batch_size = len(predicted_depth)
    if target_sizes is not None and batch_size != len(target_sizes):
        # NOTE(review): the message mentions fov values but the check is
        # on target_sizes length.
        raise ValueError('Make sure that you pass in as many fov values as the batch dimension of the predicted depth')
    results = []
    # Normalize the optional inputs to per-image lists.
    fov = [None] * batch_size if fov is None else fov
    target_sizes = [None] * batch_size if target_sizes is None else target_sizes
    for depth, fov_value, target_size in zip(predicted_depth, fov, target_sizes):
        focal_length = None
        if target_size is not None:
            if fov_value is not None:
                # focal = w / (2 * tan(fov / 2)); rescale depth to keep
                # the metric calibration at the new width.
                width = target_size[1]
                focal_length = 0.5 * width / torch.tan(0.5 * torch.deg2rad(fov_value))
                depth = depth * width / focal_length
            depth = torch.nn.functional.interpolate(input=depth.unsqueeze(0).unsqueeze(1), size=target_size, mode=pil_torch_interpolation_mapping[self.resample].value).squeeze()
        # The model predicts inverse depth; clamp to avoid div-by-zero.
        depth = 1.0 / torch.clamp(depth, min=0.0001, max=10000.0)
        results.append({'predicted_depth': depth, 'field_of_view': fov_value, 'focal_length': focal_length})
    return results
Post-processes the raw depth predictions from the model to generate final depth predictions which is caliberated using the field of view if provided and resized to specified target sizes if provided. Args: outputs ([`DepthProDepthEstimatorOutput`]): Raw outputs of the model. target_sizes (`Optional[Union[TensorType, List[Tuple[int, int]], None]]`, *optional*, defaults to `None`): Target sizes to resize the depth predictions. Can be a tensor of shape `(batch_size, 2)` or a list of tuples `(height, width)` for each image in the batch. If `None`, no resizing is performed. Returns: `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth predictions, and field of view (degrees) and focal length (pixels) if `field_of_view` is given in `outputs`. Raises: `ValueError`: If the lengths of `predicted_depths`, `fovs`, or `target_sizes` are mismatched.
github-repos
def _handle_stop_workflow(self, request):
    """Handle the stop_workflow request.

    Flags the workflow as stopping and schedules every currently
    running dag to be stopped. The dags then stop queueing new tasks,
    which terminates the dags and, in turn, the workflow.

    Args:
        request (Request): Reference to the incoming request object.

    Returns:
        Response: Success response echoing the request uid.
    """
    self._stop_workflow = True
    for dag_name in self._dags_running:
        if dag_name not in self._stop_dags:
            self._stop_dags.append(dag_name)
    return Response(success=True, uid=request.uid)
The handler for the stop_workflow request. The stop_workflow request adds all running dags to the list of dags that should be stopped and prevents new dags from being started. The dags will then stop queueing new tasks, which will terminate the dags and in turn the workflow. Args: request (Request): Reference to a request object containing the incoming request. Returns: Response: A response object containing the following fields: - success: True if the dags were added successfully to the list of dags that should be stopped.
codesearchnet
def initialize(self, table):
    """Initializes the table from a text file.

    Args:
        table: The table to be initialized.

    Returns:
        The operation that initializes the table.

    Raises:
        TypeError: when the keys and values data types do not match the
            table key and value data types.
    """
    check_table_dtypes(table, self.key_dtype, self.value_dtype)
    with ops.name_scope(self._name, 'text_file_init', (table.resource_handle,)):
        filename = ops.convert_to_tensor(self._filename, dtypes.string, name='asset_filepath')
        # -1 tells the kernel to use the whole vocabulary file.
        init_op = gen_lookup_ops.initialize_table_from_text_file_v2(table.resource_handle, filename, self._key_index, self._value_index, -1 if self._vocab_size is None else self._vocab_size, self._delimiter, self._offset)
        ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
        # Track constant filenames as assets so they are exported with
        # the graph (graph mode only).
        if not context.executing_eagerly() and constant_op.is_constant(filename):
            ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
        return init_op
Initializes the table from a text file. Args: table: The table to be initialized. Returns: The operation that initializes the table. Raises: TypeError: when the keys and values data types do not match the table key and value data types.
github-repos
def potential_jumps( self ):
    """All nearest-neighbour jumps not blocked by volume exclusion
    (i.e. from occupied to neighbouring unoccupied sites).

    Args:
        None

    Returns:
        (List(Jump)): List of possible jumps.
    """
    jumps = []
    # Iterate over whichever species is sparser to reduce work.
    if self.number_of_occupied_sites <= self.number_of_sites / 2:
        for occupied_site in self.occupied_sites():
            unoccupied_neighbours = [ site for site in [ self.site_with_id( n ) for n in occupied_site.neighbours ] if not site.is_occupied ]
            for vacant_site in unoccupied_neighbours:
                jumps.append( jump.Jump( occupied_site, vacant_site, self.nn_energy, self.cn_energies, self.jump_lookup_table ) )
    else:
        for vacant_site in self.vacant_sites():
            occupied_neighbours = [ site for site in [ self.site_with_id( n ) for n in vacant_site.neighbours ] if site.is_occupied ]
            for occupied_site in occupied_neighbours:
                jumps.append( jump.Jump( occupied_site, vacant_site, self.nn_energy, self.cn_energies, self.jump_lookup_table ) )
    return jumps
All nearest-neighbour jumps not blocked by volume exclusion (i.e. from occupied to neighbouring unoccupied sites). Args: None Returns: (List(Jump)): List of possible jumps.
juraj-google-style
def from_event(cls, ion_event):
    """Construct the given native extension from the properties of an
    event.

    Args:
        ion_event (IonEvent): The event to construct the native value
            from.
    """
    if ion_event.value is None:
        args, kwargs = (), {}
    else:
        args, kwargs = cls._to_constructor_args(ion_event.value)
    value = cls(*args, **kwargs)
    # Carry the event metadata over onto the constructed value.
    value.ion_event = ion_event
    value.ion_type = ion_event.ion_type
    value.ion_annotations = ion_event.annotations
    return value
Constructs the given native extension from the properties of an event. Args: ion_event (IonEvent): The event to construct the native value from.
codesearchnet
def HandleExceptionsAndRebuildHttpConnections(retry_args):
    """Exception handler for http failures.

    Catches known transient failures, rebuilds the underlying HTTP
    connections, and sleeps before the caller retries. Unknown
    exceptions are re-raised.

    Args:
        retry_args: An ExceptionRetryArgs tuple.
    """
    # Only RetryAfterError carries an explicit server-requested delay.
    retry_after = None
    if isinstance(retry_args.exc, (http_client.BadStatusLine, http_client.IncompleteRead, http_client.ResponseNotReady)):
        logging.debug('Caught HTTP error %s, retrying: %s', type(retry_args.exc).__name__, retry_args.exc)
    elif isinstance(retry_args.exc, socket.error):
        logging.debug('Caught socket error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, socket.gaierror):
        logging.debug(
            'Caught socket address error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, socket.timeout):
        logging.debug(
            'Caught socket timeout error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, httplib2.ServerNotFoundError):
        logging.debug(
            'Caught server not found error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, ValueError):
        logging.debug('Response content was invalid (%s), retrying', retry_args.exc)
    elif (isinstance(retry_args.exc, TokenRefreshError) and hasattr(retry_args.exc, 'status') and (retry_args.exc.status == TOO_MANY_REQUESTS or retry_args.exc.status >= 500)):
        # Credential refresh errors are retried only for throttling or
        # server-side (5xx) statuses.
        logging.debug(
            'Caught transient credential refresh error (%s), retrying',
            retry_args.exc)
    elif isinstance(retry_args.exc, exceptions.RequestError):
        logging.debug('Request returned no response, retrying')
    elif isinstance(retry_args.exc, exceptions.BadStatusCodeError):
        logging.debug('Response returned status %s, retrying', retry_args.exc.status_code)
    elif isinstance(retry_args.exc, exceptions.RetryAfterError):
        logging.debug('Response returned a retry-after header, retrying')
        retry_after = retry_args.exc.retry_after
    else:
        # Not a known-retryable failure: propagate to the caller.
        raise retry_args.exc
    RebuildHttpConnections(retry_args.http)
    logging.debug('Retrying request to url %s after exception %s', retry_args.http_request.url, retry_args.exc)
    # Honor the server's retry-after when given, else back off.
    time.sleep(
        retry_after or util.CalculateWaitForRetry(
            retry_args.num_retries, max_wait=retry_args.max_retry_wait))
Exception handler for http failures. This catches known failures and rebuilds the underlying HTTP connections. Args: retry_args: An ExceptionRetryArgs tuple.
juraj-google-style
def getObjective(self, name):
    """Get the objective with the corresponding name.

    Args:
        name: Name of the objective to be found.

    Raises:
        TypeError: if the specified objective does not exist.
    """
    def _fetch():
        return Objective(self._impl.getObjective(name))
    # Serialize access to the underlying implementation.
    return lock_and_call(_fetch, self._lock)
Get the objective with the corresponding name. Args: name: Name of the objective to be found. Raises: TypeError: if the specified objective does not exist.
juraj-google-style
def _convert_to_dict(data):
    """Convert `data` to a dictionary, trying to make sense of
    multidimensional arrays.

    Args:
        data: List/dict/tuple of variable dimension.

    Returns:
        dict: If the data can be converted to dictionary.

    Raises:
        MetaParsingException: When the data are unconvertible to dict.
    """
    if isinstance(data, dict):
        return data
    if isinstance(data, (list, tuple)):
        if _all_correct_list(data):
            return dict(data)
        # Flat sequence: pair up consecutive key/value items.
        paired = zip(data[::2], data[1::2])
        return dict(paired)
    raise MetaParsingException("Can't decode provided metadata - unknown structure.")
Convert `data` to dictionary. Tries to get sense in multidimensional arrays. Args: data: List/dict/tuple of variable dimension. Returns: dict: If the data can be converted to dictionary. Raises: MetaParsingException: When the data are unconvertible to dict.
codesearchnet
def file_md5(filename):
    """Generate the md5 checksum for a file.

    Args:
        filename (Str): The file to be checksummed.

    Returns:
        (Str): The hex checksum.

    Notes:
        If the file is gzipped, the returned checksum is for the
        uncompressed ASCII file.
    """
    with zopen(filename, 'r') as handle:
        contents = handle.read()
        # Some streams yield bytes, others str: normalize to str.
        try:
            contents = contents.decode()
        except AttributeError:
            pass
        return md5sum(contents)
Generate the md5 checksum for a file Args: filename (Str): The file to be checksummed. Returns: (Str): The hex checksum Notes: If the file is gzipped, the md5 checksum returned is for the uncompressed ASCII file.
codesearchnet
def load_architecture(self, name, arch_info, disassembler, translator):
    """Load an architecture's components into this instance.

    Args:
        name (str): Architecture's name.
        arch_info (ArchitectureInformation): Architecture information
            object.
        disassembler (Disassembler): Disassembler for the architecture.
        translator (Translator): Translator to REIL instructions for
            the architecture.
    """
    self.name = name
    self.arch_info = arch_info
    self.disassembler = disassembler
    self.ir_translator = translator
    # Analysis modules depend on the architecture, so (re)build them now.
    self._setup_analysis_modules()
Translate to REIL instructions. Args: name (str): Architecture's name. arch_info (ArchitectureInformation): Architecture information object. disassembler (Disassembler): Disassembler for the architecture. translator (Translator): Translator for the architecture.
juraj-google-style
def get_pattern_actual_step(self, patternnumber):
    """Get the 'actual step' parameter for a given pattern.

    Args:
        patternnumber (int): 0-7

    Returns:
        int: The 'actual step' parameter.
    """
    _checkPatternNumber(patternnumber)
    register = _calculateRegisterAddress('actualstep', patternnumber)
    return self.read_register(register, 0)
Get the 'actual step' parameter for a given pattern. Args: patternnumber (integer): 0-7 Returns: The 'actual step' parameter (int).
juraj-google-style
def validate(self, corpus):
    """Perform the validation on the given corpus.

    Args:
        corpus (Corpus): The corpus to test/validate.

    Returns:
        InvalidUtterancesResult: Validation result.
    """
    offending = {}

    for utterance in corpus.utterances.values():
        segments = self.validate_utterance(utterance)
        if len(segments) > 0:
            offending[utterance.idx] = segments

    info = {
        'Label-List ID': self.label_list_idx,
        'Threshold': str(self.threshold),
    }

    # Validation passes only when no utterance had overflowing segments.
    return LabelOverflowValidationResult(
        not offending,
        offending,
        self.name(),
        info
    )
Perform the validation on the given corpus. Args: corpus (Corpus): The corpus to test/validate. Returns: InvalidUtterancesResult: Validation result.
juraj-google-style
def add(self, layer, rebuild=True):
    """Adds a layer instance on top of the layer stack.

    Args:
        layer: layer instance.
        rebuild: whether to rebuild the model's internal graph immediately
            after adding the layer; if False the model is only marked as
            unbuilt.
    """
    # If the first layer carries an `input_shape` argument, create an
    # explicit InputLayer from it before adding the layer itself.
    if not self._layers:
        if getattr(layer, '_input_shape_arg', None) is not None:
            self.add(InputLayer(shape=layer._input_shape_arg))
    # If a Keras tensor produced by an InputLayer was passed in, unwrap
    # the originating InputLayer from its history and add that instead.
    if hasattr(layer, '_keras_history'):
        origin_layer = layer._keras_history[0]
        if isinstance(origin_layer, InputLayer):
            layer = origin_layer
    if not isinstance(layer, Layer):
        raise ValueError(f'Only instances of `keras.Layer` can be added to a Sequential model. Received: {layer} (of type {type(layer)})')
    if not self._is_layer_name_unique(layer):
        raise ValueError(f"All layers added to a Sequential model should have unique names. Name '{layer.name}' is already the name of a layer in this model. Update the `name` argument to pass a unique name.")
    # Only one InputLayer is allowed, and only as the first layer.
    if isinstance(layer, InputLayer) and self._layers and isinstance(self._layers[0], InputLayer):
        raise ValueError(f"Sequential model '{self.name}' has already been configured to use input shape {self._layers[0].batch_shape}. You cannot add a different Input layer to it.")
    self._layers.append(layer)
    if rebuild:
        self._maybe_rebuild()
    else:
        # Defer building; invalidate any cached functional graph.
        self.built = False
        self._functional = None
Adds a layer instance on top of the layer stack. Args: layer: layer instance.
github-repos
def _test_streaming(self, with_attributes):
    """Runs IT pipeline with message verifier.

    Args:
        with_attributes: False - Reads and writes message data only.
            True - Reads and writes message data and attributes. Also
            verifies id_label and timestamp_attribute features.
    """
    # Verify the pipeline actually reaches the RUNNING state.
    state_verifier = PipelineStateMatcher(PipelineState.RUNNING)
    expected_messages = self.EXPECTED_OUTPUT_MESSAGES[self.runner_name]
    if not with_attributes:
        # Compare raw payloads only when attributes are not exercised.
        expected_messages = [pubsub_msg.data for pubsub_msg in expected_messages]
    if self.runner_name == 'TestDirectRunner':
        strip_attributes = None
    else:
        # Runner-injected attributes must be stripped before comparison.
        strip_attributes = [self.ID_LABEL, self.TIMESTAMP_ATTRIBUTE]
    pubsub_msg_verifier = PubSubMessageMatcher(self.project, self.output_sub.name, expected_messages, timeout=MESSAGE_MATCHER_TIMEOUT_S, with_attributes=with_attributes, strip_attributes=strip_attributes)
    extra_opts = {'input_subscription': self.input_sub.name, 'output_topic': self.output_topic.name, 'wait_until_finish_duration': TEST_PIPELINE_DURATION_MS, 'on_success_matcher': all_of(state_verifier, pubsub_msg_verifier)}
    # Publish the input messages before launching the pipeline.
    for msg in self.INPUT_MESSAGES[self.runner_name]:
        self.pub_client.publish(self.input_topic.name, msg.data, **msg.attributes).result()
    pubsub_it_pipeline.run_pipeline(argv=self.test_pipeline.get_full_options_as_args(**extra_opts), with_attributes=with_attributes, id_label=self.ID_LABEL, timestamp_attribute=self.TIMESTAMP_ATTRIBUTE)
Runs IT pipeline with message verifier. Args: with_attributes: False - Reads and writes message data only. True - Reads and writes message data and attributes. Also verifies id_label and timestamp_attribute features.
github-repos
def process_tokens(self, tokens):
    """Process the token stream.

    This is required to override the parent class' implementation.

    Args:
        tokens: the tokens from the token stream to process.
    """
    for token_type, text, start, _, _ in tokens:
        # Only string literals are of interest here.
        if token_type == tokenize.STRING:
            row, col = start
            self._process_string_token(text, row, col)
Process the token stream. This is required to override the parent class' implementation. Args: tokens: the tokens from the token stream to process.
codesearchnet
def to_numbers(self, flatten: bool=True) -> Union[List[Union[int, float, str]], utils.Nestable[Union[int, float, str]]]:
    """Returns a (maybe) nested structure of numbers as decisions.

    Args:
        flatten: If True, the hierarchy of the numbers will not be
            preserved. Decisions will be returned as a flat list in DFS
            order. Otherwise, a nestable structure of numbers will be
            returned.

    Returns:
        A flat list or a hierarchical structure of numbers as the decisions
        made for each decision point.
    """
    if flatten:
        # DFS order: this node's value first, then each child's decisions.
        decisions = [self.value] if self.value is not None else []
        for c in self.children:
            decisions.extend(c.to_numbers(flatten))
        return decisions
    elif self.value is None:
        # Valueless root: return only the children's structures.
        return [c.to_numbers(flatten) for c in self.children]
    elif not self.children:
        # Leaf node: just the value.
        return self.value
    elif len(self.children) == 1:
        # Single child: pair value with the child's structure; if the child
        # is itself a (value, children) tuple, wrap it in a list to keep the
        # nesting unambiguous.
        child = self.children[0].to_numbers(flatten)
        if isinstance(child, tuple):
            return tuple([self.value, list(child)])
        else:
            return (self.value, child)
    else:
        assert len(self.children) > 1
        return (self.value, [c.to_numbers(flatten) for c in self.children])
Returns a (maybe) nested structure of numbers as decisions. Args: flatten: If True, the hierarchy of the numbers will not be preserved. Decisions will be returned as a flat list in DFS order. Otherwise, a nestable structure of numbers will be returned. Returns: A flat list or a hierarchical structure of numbers as the decisions made for each decision point.
github-repos
def parse_done(self, buf: memoryview) -> Tuple[bool, memoryview]:
    """Parse the continuation line sent by the client to end the
    ``IDLE`` command.

    Args:
        buf: The continuation line to parse.

    Returns:
        A ``(done, remainder)`` tuple.

    Raises:
        NotParseable: if the buffer does not match the expected pattern.
    """
    match = self._pattern.match(buf)
    if match is None:
        raise NotParseable(buf)
    # The command ends when the (case-insensitive) continuation word is seen.
    is_done = match.group(1).upper() == self.continuation
    remainder = buf[match.end(0):]
    return is_done, remainder
Parse the continuation line sent by the client to end the ``IDLE`` command. Args: buf: The continuation line to parse.
juraj-google-style
def export_model(model, model_type, export_dir, model_column_fn):
    """Export to SavedModel format.

    Args:
        model: Estimator object
        model_type: string indicating model type. "wide", "deep" or
            "wide_deep"
        export_dir: directory to export the model.
        model_column_fn: Function to generate model feature columns.
    """
    wide_columns, deep_columns = model_column_fn()
    if model_type == 'wide':
        selected_columns = wide_columns
    elif model_type == 'deep':
        selected_columns = deep_columns
    else:
        # "wide_deep": serve on the union of both column sets.
        selected_columns = wide_columns + deep_columns

    feature_spec = tf.feature_column.make_parse_example_spec(selected_columns)
    serving_input_fn = (
        tf.estimator.export.build_parsing_serving_input_receiver_fn(
            feature_spec))
    model.export_savedmodel(export_dir, serving_input_fn,
                            strip_default_attrs=True)
Export to SavedModel format. Args: model: Estimator object model_type: string indicating model type. "wide", "deep" or "wide_deep" export_dir: directory to export the model. model_column_fn: Function to generate model feature columns.
juraj-google-style
def comments(self, case_id=None, variant_id=None, username=None):
    """Return comments for a case or variant.

    Args:
        case_id (str): id for a related case
        variant_id (Optional[str]): id for a related variant

    Returns:
        Query over the matching Comment rows.
    """
    logger.debug("Looking for comments")
    query = self.query(Comment)

    if case_id:
        query = query.filter_by(case_id=case_id)

    if variant_id:
        query = query.filter_by(variant_id=variant_id)
    elif case_id:
        # Case-level comments only: exclude variant-specific ones.
        query = query.filter_by(variant_id=None)

    return query
Return comments for a case or variant. Args: case_id (str): id for a related case variant_id (Optional[str]): id for a related variant
juraj-google-style
def put(self, url, params=None, data=None, files=None, **kwargs):
    """Call the API with a PUT request.

    Args:
        url (str): Resource location relative to the base URL.
        params (dict or None): Query-string parameters.
        data (dict or None): Request body contents.
        files (dict or None): Files to be passed to the request.

    Returns:
        An instance of ResultParser or ErrorParser.
    """
    # Delegate to the generic dispatcher with the PUT verb.
    return self.call_api(
        "PUT", url, params=params, data=data, files=files, **kwargs)
Call the API with a PUT request. Args: url (str): Resource location relative to the base URL. params (dict or None): Query-string parameters. data (dict or None): Request body contents. files (dict or None: Files to be passed to the request. Returns: An instance of ResultParser or ErrorParser.
juraj-google-style
def _load_config_file(path):
    """Loads a test config file.

    The test config file has to be in YAML format.

    Args:
        path: A string that is the full path to the config file, including
            the file name.

    Returns:
        A dict that represents info in the config file.
    """
    full_path = utils.abs_path(path)
    with io.open(full_path, 'r', encoding='utf-8') as config_file:
        return yaml.safe_load(config_file)
Loads a test config file. The test config file has to be in YAML format. Args: path: A string that is the full path to the config file, including the file name. Returns: A dict that represents info in the config file.
github-repos
def read_local_files(*file_paths: str) -> str:
    """Reads one or more text files and returns them joined together.

    A title is automatically created based on the file name (the path
    without its extension), underlined with '=' characters.

    Args:
        *file_paths: list of files to aggregate

    Returns:
        content of files
    """

    def _read_single_file(file_path):
        with open(file_path) as f:
            filename = os.path.splitext(file_path)[0]
            # Title is the file name (sans extension) underlined to match
            # its length; previously a hard-coded '(unknown)' placeholder
            # was used while the underline already tracked the file name.
            title = f'{filename}\n{"=" * len(filename)}'
            return '\n\n'.join((title, f.read()))

    return '\n' + '\n\n'.join(map(_read_single_file, file_paths))
Reads one or more text files and returns them joined together. A title is automatically created based on the file name. Args: *file_paths: list of files to aggregate Returns: content of files
juraj-google-style
def distance_to_line(a, b, p):
    """Closest distance between a line segment and a point.

    Args:
        a ([float, float]): x and y coordinates. Line start
        b ([float, float]): x and y coordinates. Line end
        p ([float, float]): x and y coordinates. Point to compute the
            distance

    Returns:
        float
    """
    # Distance from p to its projection onto the segment.
    nearest = closest_point(a, b, p)
    return distance(nearest, p)
Closest distance between a line segment and a point Args: a ([float, float]): x and y coordinates. Line start b ([float, float]): x and y coordinates. Line end p ([float, float]): x and y coordinates. Point to compute the distance Returns: float
juraj-google-style
def apply_inverse(self, y, in_place=False):
    r"""Apply the inverse of the covariance matrix to the input by solving

    .. math:: K\,x = y

    Args:
        y (ndarray[nsamples] or ndarray[nsamples, nrhs]): The vector or
            matrix :math:`y`.
        in_place (Optional[bool]): Should the data in ``y`` be overwritten
            with the result :math:`x`? (default: ``False``)

    Returns:
        The solution :math:`x` computed from the cached Cholesky factor.
    """
    # Reuse the precomputed Cholesky factorization; overwrite_b avoids a
    # copy of y when in_place is requested.
    return cho_solve(self._factor, y, overwrite_b=in_place)
r""" Apply the inverse of the covariance matrix to the input by solving .. math:: K\,x = y Args: y (ndarray[nsamples] or ndadrray[nsamples, nrhs]): The vector or matrix :math:`y`. in_place (Optional[bool]): Should the data in ``y`` be overwritten with the result :math:`x`? (default: ``False``)
codesearchnet
def ParseLastVisitedRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):
    """Parses a last visited row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
        cache (SQLiteCache): cache which contains cached results from
            querying the visits and urls tables.
        database (Optional[SQLiteDatabase]): database.
    """
    query_hash = hash(query)
    hidden = self._GetRowValue(query_hash, row, 'hidden')
    transition = self._GetRowValue(query_hash, row, 'transition')
    visit_identifier = self._GetRowValue(query_hash, row, 'visit_id')
    from_visit = self._GetRowValue(query_hash, row, 'from_visit')
    event_data = ChromeHistoryPageVisitedEventData()
    event_data.from_visit = self._GetUrl(from_visit, cache, database)
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    # Mask off the qualifier bits to keep only the core transition type.
    event_data.page_transition_type = (transition & self._PAGE_TRANSITION_CORE_MASK)
    event_data.title = self._GetRowValue(query_hash, row, 'title')
    event_data.typed_count = self._GetRowValue(query_hash, row, 'typed_count')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    event_data.url_hidden = (hidden == '1')
    event_data.visit_source = self._GetVisitSource(visit_identifier, cache, database)
    # Chrome stores visit times as WebKit timestamps.
    timestamp = self._GetRowValue(query_hash, row, 'visit_time')
    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a last visited row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. cache (SQLiteCache): cache which contains cached results from querying the visits and urls tables. database (Optional[SQLiteDatabase]): database.
codesearchnet
def button_state(self):
    """The button state that triggered this event.

    For pointer events that are not of type
    :attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property
    raises :exc:`AttributeError`.

    Returns:
        ~libinput.constant.ButtonState: The button state triggering this
        event.

    Raises:
        AttributeError
    """
    if (self.type != EventType.POINTER_BUTTON):
        raise AttributeError(_wrong_prop.format(self.type))
    return self._libinput.libinput_event_pointer_get_button_state(self._handle)
The button state that triggered this event. For pointer events that are not of type :attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property raises :exc:`AttributeError`. Returns: ~libinput.constant.ButtonState: The button state triggering this event. Raises: AttributeError
codesearchnet
def __register_services(api_name_version_map, api_config_registry):
    """Register & return a list of each URL and class that handles that URL.

    This finds every service class in api_name_version_map, registers it
    with the given ApiConfigRegistry, builds the URL for that class, and
    adds the URL and its factory to a list that's returned.

    Args:
        api_name_version_map: A mapping from (api name, api version) to a
            list of service factories, as returned by
            __create_name_version_map.
        api_config_registry: The ApiConfigRegistry where service classes
            will be registered.

    Returns:
        A list of (URL, service_factory) for each service class in
        api_name_version_map.

    Raises:
        ApiConfigurationError: If a Service class appears more than once in
            api_name_version_map.
    """
    generator = api_config.ApiConfigGenerator()
    protorpc_services = []
    # NOTE: itervalues() means this module targets Python 2.
    for service_factories in api_name_version_map.itervalues():
        service_classes = [service_factory.service_class for service_factory in service_factories]
        config_dict = generator.get_config_dict(service_classes)
        api_config_registry.register_backend(config_dict)
        for service_factory in service_factories:
            protorpc_class_name = service_factory.service_class.__name__
            root = ('%s%s' % (service_factory.service_class.api_info.base_path, protorpc_class_name))
            # Reject duplicate roots or reuse of the same factory.
            if any((((service_map[0] == root) or (service_map[1] == service_factory)) for service_map in protorpc_services)):
                raise api_config.ApiConfigurationError(("Can't reuse the same class in multiple APIs: %s" % protorpc_class_name))
            protorpc_services.append((root, service_factory))
    return protorpc_services
Register & return a list of each URL and class that handles that URL. This finds every service class in api_name_version_map, registers it with the given ApiConfigRegistry, builds the URL for that class, and adds the URL and its factory to a list that's returned. Args: api_name_version_map: A mapping from (api name, api version) to a list of service factories, as returned by __create_name_version_map. api_config_registry: The ApiConfigRegistry where service classes will be registered. Returns: A list of (URL, service_factory) for each service class in api_name_version_map. Raises: ApiConfigurationError: If a Service class appears more than once in api_name_version_map. This could happen if one class is used to implement multiple APIs.
codesearchnet
def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False):
    """Concat each block with the margins of adjacent blocks.

    Get left and right blocks_dim and concatenate along block_size_dim.

    Args:
        x: a Tensor.
        blocks_dim: a Dimension in x.shape
        block_size_dim: a Dimension in x.shape
        halo_size: an integer
        wrap: a boolean

    Returns:
        a Tensor with the same shape as x, other than in block_size_dim,
        whose size is increased by 2*halo_size.
    """
    if halo_size == 0:
        return x

    block_size = block_size_dim.size
    # A halo consists of some number of whole neighboring blocks plus a
    # partial margin from the block one step further out.
    # BUG FIX: was `num_complete_blocks = halo_size`, dropping the integer
    # division; partial_size below is the matching remainder.
    num_complete_blocks = halo_size
    partial_size = halo_size % block_size

    parts = [x]
    for i in xrange(1, num_complete_blocks + 1):
        parts = ([shift(x, i, blocks_dim, wrap)] + parts +
                 [shift(x, -i, blocks_dim, wrap)])
    if partial_size > 0:
        left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name)
        right_margin = mtf_slice(
            x, block_size_dim.size - partial_size, partial_size,
            block_size_dim.name)
        parts = (
            [shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)]
            + parts +
            [shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)])
    return concat(parts, block_size_dim.name)
Concat each block with the margins of adjacent blocks. Get left and right blocks_dim and concatenate along block_size_dim. Args: x: a Tensor. blocks_dim: a Dimension in x.shape block_size_dim: a Dimension in x.shape halo_size: an integer wrap: a boolean Returns: a Tensor with the same shape as x, other than in block_size_dim, whose size is increased by 2*halo_size.
juraj-google-style
def plot_correlation(self, freq=None, title=None, figsize=(12, 6), **kwargs):
    """Utility function to plot correlations.

    Args:
        * freq (str): Pandas data frequency alias string
        * title (str): Plot title
        * figsize (tuple (x,y)): figure size
        * kwargs: passed to Pandas' plot_corr_heatmap function
    """
    if title is None:
        title = self._get_default_plot_title(
            freq, 'Return Correlation Matrix')

    # Correlations are computed on the return series, not the levels.
    returns = self._get_series(freq).to_returns().dropna()
    return returns.plot_corr_heatmap(title=title, figsize=figsize, **kwargs)
Utility function to plot correlations. Args: * freq (str): Pandas data frequency alias string * title (str): Plot title * figsize (tuple (x,y)): figure size * kwargs: passed to Pandas' plot_corr_heatmap function
codesearchnet
def tag(self, name, formatter=None):
    """Return instance of Tag.

    Args:
        name (str): The value for this tag.
        formatter (method, optional): A method that take a tag value and
            returns a formatted tag.

    Returns:
        obj: An instance of Tag.
    """
    # Reuse an existing tag with the same name if one was registered.
    for existing in self._tags:
        if existing.name == name:
            return existing
    new_tag = Tag(name, formatter)
    self._tags.append(new_tag)
    return new_tag
Return instance of Tag. Args: name (str): The value for this tag. formatter (method, optional): A method that take a tag value and returns a formatted tag. Returns: obj: An instance of Tag.
codesearchnet
def colored(cls, color, message):
    """Small function to wrap a string around a color.

    Args:
        color (str): name of the color to wrap the string with, must be
            one of the class properties
        message (str): String to wrap with the color

    Returns:
        str: the colored string
    """
    # Look up the escape code by its upper-cased attribute name, then
    # reset back to the default color afterwards.
    prefix = getattr(cls, color.upper())
    return prefix + message + cls.DEFAULT
Small function to wrap a string around a color Args: color (str): name of the color to wrap the string with, must be one of the class properties message (str): String to wrap with the color Returns: str: the colored string
codesearchnet
def get_vmss(access_token, subscription_id, resource_group, vmss_name):
    """Get virtual machine scale set details.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        vmss_name (str): Name of the virtual machine scale set.

    Returns:
        HTTP response. JSON body of scale set properties.
    """
    endpoint = (
        '{}/subscriptions/{}/resourceGroups/{}'
        '/providers/Microsoft.Compute/virtualMachineScaleSets/{}'
        '?api-version={}'
    ).format(get_rm_endpoint(), subscription_id, resource_group,
             vmss_name, COMP_API)
    return do_get(endpoint, access_token)
Get virtual machine scale set details. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. JSON body of scale set properties.
juraj-google-style
def _send_notification(self, handle, payload):
    """Send a notification over BLE.

    It is executed in the baBLE working thread: should not be blocking.

    Args:
        handle (int): The handle to notify on
        payload (bytearray): The value to notify
    """
    self.bable.notify(
        connection_handle=self._connection_handle,
        attribute_handle=handle,
        value=payload
    )
Send a notification over BLE It is executed in the baBLE working thread: should not be blocking. Args: handle (int): The handle to notify on payload (bytearray): The value to notify
juraj-google-style
def get_timestamped_export_dir(export_dir_base):
    """Builds a path to a new subdirectory within the base directory.

    Each export is written into a new subdirectory named using the current
    time, which guarantees monotonically increasing version numbers even
    across multiple runs of the pipeline. The timestamp used is the number
    of seconds since epoch UTC.

    Args:
        export_dir_base: A string containing a directory to write the
            exported graph and checkpoints.

    Returns:
        The full path of the new subdirectory (which is not actually
        created yet).

    Raises:
        RuntimeError: if repeated attempts fail to obtain a unique
            timestamped directory name.
    """
    for attempt in range(MAX_DIRECTORY_CREATION_ATTEMPTS):
        timestamp = int(time.time())
        candidate = os.path.join(
            compat.as_bytes(export_dir_base),
            compat.as_bytes(str(timestamp)))
        if not gfile.Exists(candidate):
            return candidate
        # Directory for this second already exists; wait for the clock to
        # tick before trying again.
        time.sleep(1)
        logging.warning(
            'Directory {} already exists; retrying (attempt {}/{})'.format(
                compat.as_str(candidate), attempt + 1,
                MAX_DIRECTORY_CREATION_ATTEMPTS))
    raise RuntimeError(
        'Failed to obtain a unique export directory name after '
        '{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))
Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name.
github-repos
def _unpack(formatstring, packed):
    """Unpack a bytestring into a value.

    Uses the built-in :mod:`struct` Python module.

    Args:
        * formatstring (str): String for the packing. See the :mod:`struct`
          module for details.
        * packed (str): The bytestring to be unpacked.

    Returns:
        A value. The type depends on the formatstring.

    Raises:
        ValueError

    Note that the :mod:`struct` module wants byte buffers for Python3, but
    bytestrings for Python2. This is compensated for automatically.
    """
    _checkString(formatstring, description='formatstring', minlength=1)
    _checkString(packed, description='packed string', minlength=1)

    if sys.version_info[0] > 2:
        # Python 3: struct.unpack needs bytes; latin1 maps 1:1 to 0-255.
        packed = bytes(packed, encoding='latin1')

    try:
        value = struct.unpack(formatstring, packed)[0]
    except struct.error:
        # Narrowed from a bare `except:` which would also have swallowed
        # KeyboardInterrupt/SystemExit.
        errortext = ('The received bytestring is probably wrong, as the '
                     'bytestring-to-num conversion failed.')
        errortext += ' Bytestring: {0!r} Struct format code is: {1}'
        raise ValueError(errortext.format(packed, formatstring))
    return value
Unpack a bytestring into a value. Uses the built-in :mod:`struct` Python module. Args: * formatstring (str): String for the packing. See the :mod:`struct` module for details. * packed (str): The bytestring to be unpacked. Returns: A value. The type depends on the formatstring. Raises: ValueError Note that the :mod:`struct` module wants byte buffers for Python3, but bytestrings for Python2. This is compensated for automatically.
codesearchnet
def resolve_object_property(obj, path: str):
    """Resolves the value of a property on an object.

    Is able to resolve nested properties; for example, a path can be
    specified as 'other.beer.name'.

    Raises:
        AttributeError: In case the property could not be resolved.

    Returns:
        The value of the specified property.
    """
    # Walk the attribute chain one dotted component at a time.
    current = obj
    for attribute_name in path.split('.'):
        current = getattr(current, attribute_name)
    return current
Resolves the value of a property on an object. Is able to resolve nested properties. For example, a path can be specified: 'other.beer.name' Raises: AttributeError: In case the property could not be resolved. Returns: The value of the specified property.
codesearchnet
def _WriteAttributeContainer(self, attribute_container):
    """Writes an attribute container.

    The table for the container type must exist.

    Args:
        attribute_container (AttributeContainer): attribute container.
    """
    # Events are buffered in a heap so they are written in timestamp order.
    if attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT:
      timestamp, serialized_data = self._serialized_event_heap.PopEvent()
    else:
      serialized_data = self._SerializeAttributeContainer(attribute_container)

    if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
      compressed_data = zlib.compress(serialized_data)
      serialized_data = sqlite3.Binary(compressed_data)
    else:
      compressed_data = ''

    if self._storage_profiler:
      self._storage_profiler.Sample(
          'write', attribute_container.CONTAINER_TYPE, len(serialized_data),
          len(compressed_data))

    if attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT:
      query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)'
      self._cursor.execute(query, (timestamp, serialized_data))
    else:
      query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(
          attribute_container.CONTAINER_TYPE)
      self._cursor.execute(query, (serialized_data, ))

    # Record the row id so the container can be referenced later.
    identifier = identifiers.SQLTableIdentifier(
        attribute_container.CONTAINER_TYPE, self._cursor.lastrowid)
    attribute_container.SetIdentifier(identifier)
Writes an attribute container. The table for the container type must exist. Args: attribute_container (AttributeContainer): attribute container.
juraj-google-style
def load(f, _dict=dict, decoder=None):
    """Parses named file or files as toml and returns a dictionary.

    Args:
        f: Path to the file to open, array of files to read into single
            dict or a file descriptor
        _dict: (optional) Specifies the class of the returned toml
            dictionary
        decoder: (optional) decoder instance used to parse; a new
            TomlDecoder is created when omitted

    Returns:
        Parsed toml file represented as a dictionary

    Raises:
        TypeError -- When f is invalid type
        TomlDecodeError: Error while decoding toml
        IOError / FileNotFoundError -- When an array with no valid
        (existing) (Python 2 / Python 3) file paths is passed
    """
    if _ispath(f):
        with io.open(_getpath(f), encoding='utf-8') as ffile:
            return loads(ffile.read(), _dict, decoder)
    elif isinstance(f, list):
        from os import path as op
        from warnings import warn
        # At least one of the listed paths must exist.
        if (not [path for path in f if op.exists(path)]):
            error_msg = 'Load expects a list to contain filenames only.'
            error_msg += linesep
            error_msg += 'The list needs to contain the path of at least one existing file.'
            raise FNFError(error_msg)
        if (decoder is None):
            decoder = TomlDecoder()
        d = decoder.get_empty_table()
        for l in f:
            if op.exists(l):
                # Later files override earlier keys via dict.update.
                d.update(load(l, _dict, decoder))
            else:
                warn('Non-existent filename in list with at least one valid filename')
        return d
    else:
        # Fall back to treating f as an open file descriptor.
        try:
            return loads(f.read(), _dict, decoder)
        except AttributeError:
            raise TypeError('You can only load a file descriptor, filename or list')
Parses named file or files as toml and returns a dictionary Args: f: Path to the file to open, array of files to read into single dict or a file descriptor _dict: (optional) Specifies the class of the returned toml dictionary Returns: Parsed toml file represented as a dictionary Raises: TypeError -- When f is invalid type TomlDecodeError: Error while decoding toml IOError / FileNotFoundError -- When an array with no valid (existing) (Python 2 / Python 3) file paths is passed
codesearchnet
def _read_output(self, stream, callback, output_file):
    """Read one line of process output, run the callback and save it.

    Args:
        stream: A file object pointing to the output stream that should
            be read.
        callback(callable, None): A callback function that is called for
            each new line of output.
        output_file: A file object to which the full output is written.

    Returns:
        bool: True if a line was read from the output, otherwise False.
    """
    # Nothing to do when nobody consumes the output or the stream is gone.
    no_consumer = callback is None and output_file is None
    if no_consumer or stream.closed:
        return False

    line = stream.readline()
    if not line:
        return False

    if callback is not None:
        callback(line.decode(), self._data, self._store,
                 self._signal, self._context)
    if output_file is not None:
        output_file.write(line)
    return True
Read the output of the process, executed the callback and save the output. Args: stream: A file object pointing to the output stream that should be read. callback(callable, None): A callback function that is called for each new line of output. output_file: A file object to which the full output is written. Returns: bool: True if a line was read from the output, otherwise False.
juraj-google-style
def hurst_compare_nvals(data, nvals=None):
    """Creates a plot that compares the results of different choices for
    nvals for the function hurst_rs.

    Args:
        data (array-like of float): the input data from which the hurst
            exponent should be estimated

    Kwargs:
        nvals (array of int): a manually selected value for the nvals
            parameter that should be plotted in comparison to the default
            choices
    """
    import matplotlib.pyplot as plt
    data = np.asarray(data)
    # Three built-in strategies: every possible n, the nolds default
    # choice, and only divisors of the data length.
    n_all = np.arange(2,len(data)+1)
    dd_all = nolds.hurst_rs(data, nvals=n_all, debug_data=True, fit="poly")
    dd_def = nolds.hurst_rs(data, debug_data=True, fit="poly")
    n_def = np.round(np.exp(dd_def[1][0])).astype("int32")
    n_div = n_all[np.where(len(data) % n_all[:-1] == 0)]
    dd_div = nolds.hurst_rs(data, nvals=n_div, debug_data=True, fit="poly")

    def corr(nvals):
        # Anis-Lloyd correction: subtract the expected (R/S)_n.
        return [np.log(nolds.expected_rs(n)) for n in nvals]

    l_all = plt.plot(dd_all[1][0], dd_all[1][1] - corr(n_all), "o")
    l_def = plt.plot(dd_def[1][0], dd_def[1][1] - corr(n_def), "o")
    l_div = plt.plot(dd_div[1][0], dd_div[1][1] - corr(n_div), "o")
    l_cst = []
    t_cst = []
    if nvals is not None:
        dd_cst = nolds.hurst_rs(data, nvals=nvals, debug_data=True, fit="poly")
        l_cst = plt.plot(dd_cst[1][0], dd_cst[1][1] - corr(nvals), "o")
        l_cst = l_cst
        t_cst = ["custom"]
    plt.xlabel("log(n)")
    plt.ylabel("log((R/S)_n - E[(R/S)_n])")
    plt.legend(l_all + l_def + l_div + l_cst, ["all", "default", "divisors"] + t_cst)
    # Print the estimated exponents for each strategy.
    labeled_data = zip([dd_all[0], dd_def[0], dd_div[0]], ["all", "def", "div"])
    for data, label in labeled_data:
        print("%s: %.3f" % (label, data))
    if nvals is not None:
        print("custom: %.3f" % dd_cst[0])
    plt.show()
Creates a plot that compares the results of different choices for nvals for the function hurst_rs. Args: data (array-like of float): the input data from which the hurst exponent should be estimated Kwargs: nvals (array of int): a manually selected value for the nvals parameter that should be plotted in comparison to the default choices
juraj-google-style
def valUserCert(self, byts, cacerts=None):
    """Validate the PEM encoded x509 user certificate bytes and return it.

    Args:
        byts (bytes): The bytes for the User Certificate.
        cacerts (tuple): A tuple of OpenSSL.crypto.X509 CA Certificates.

    Raises:
        OpenSSL.crypto.X509StoreContextError: If the certificate is not
            valid.

    Returns:
        OpenSSL.crypto.X509: The certificate, if it is valid.
    """
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, byts)

    if cacerts is None:
        cacerts = self.getCaCerts()

    store = crypto.X509Store()
    # Use a plain loop; the previous list comprehension built a throwaway
    # list purely for its side effects.
    for cacert in cacerts:
        store.add_cert(cacert)

    ctx = crypto.X509StoreContext(store, cert)
    ctx.verify_certificate()

    return cert
Validate the PEM encoded x509 user certificate bytes and return it. Args: byts (bytes): The bytes for the User Certificate. cacerts (tuple): A tuple of OpenSSL.crypto.X509 CA Certificates. Raises: OpenSSL.crypto.X509StoreContextError: If the certificate is not valid. Returns: OpenSSL.crypto.X509: The certificate, if it is valid.
codesearchnet
def add_arguments(self, parser):
    """Adds the arguments for the emulator command.

    Args:
        self (EmulatorCommand): the ``EmulatorCommand`` instance
        parser (argparse.ArgumentParser): parser to add the commands to

    Returns:
        ``None``
    """
    # Exactly one of the three options must be supplied.
    exclusive = parser.add_mutually_exclusive_group(required=True)
    exclusive.add_argument(
        '-l', '--list', nargs='?', type=str.lower, default='_',
        choices=['usb', 'ip'],
        help='list all the connected emulators')
    exclusive.add_argument(
        '-s', '--supported', nargs=1,
        help='query whether a device is supported')
    exclusive.add_argument(
        '-t', '--test', action='store_true',
        help='perform a self-test')
    return None
Adds the arguments for the emulator command. Args: self (EmulatorCommand): the ``EmulatorCommand`` instance parser (argparse.ArgumentParser): parser to add the commands to Returns: ``None``
juraj-google-style
def download_apcor(self, uri):
    """Downloads apcor data.

    Args:
        uri: The URI of the apcor data file.

    Returns:
        apcor: ossos.downloads.core.ApcorData
    """
    # Prefer a local copy of the file when one exists in the working dir.
    local_file = os.path.basename(uri)
    if os.access(local_file, os.F_OK):
        fobj = open(local_file)
    else:
        fobj = storage.vofile(uri, view='data')
    fobj.seek(0)
    str = fobj.read()
    fobj.close()
    apcor_str = str
    return ApcorData.from_string(apcor_str)
Downloads apcor data. Args: uri: The URI of the apcor data file. Returns: apcor: ossos.downloads.core.ApcorData
codesearchnet
def read_local_files(*file_paths: str) -> str:
    """Reads one or more text files and returns them joined together.

    A title is automatically created based on the file name (the path
    without its extension), underlined with '=' characters.

    Args:
        *file_paths: list of files to aggregate

    Returns:
        content of files
    """

    def _read_single_file(file_path):
        with open(file_path) as f:
            filename = os.path.splitext(file_path)[0]
            # BUG FIX: `title = f` assigned the file object itself; build
            # the underlined title string from the file name instead.
            title = f'{filename}\n{"=" * len(filename)}'
            return '\n\n'.join((title, f.read()))

    return ('\n' + '\n\n'.join(map(_read_single_file, file_paths)))
Reads one or more text files and returns them joined together. A title is automatically created based on the file name. Args: *file_paths: list of files to aggregate Returns: content of files
codesearchnet
def _zip_request_params(self, urls, query_params, data):
    """Massages inputs and returns a list of 3-tuples zipping them up.

    This is all the smarts behind deciding how many requests to issue.
    It's fine for an input to have 0, 1, or a list of values. If there are
    two inputs each with a list of values, the cardinality of those lists
    must match.

    Args:
        urls - 1 string URL or a list of URLs
        query_params - None, 1 dict, or a list of dicts
        data - None, 1 dict or string, or a list of dicts or strings

    Returns:
        A list of 3-tuples (url, query_param, data)

    Raises:
        InvalidRequestError - if cardinality of lists does not match
    """
    # Normalize all three inputs to lists.
    if (not isinstance(urls, list)):
        urls = [urls]
    if (not isinstance(query_params, list)):
        query_params = [query_params]
    if (not isinstance(data, list)):
        data = [data]
    url_count = len(urls)
    query_param_count = len(query_params)
    data_count = len(data)
    max_count = max(url_count, query_param_count, data_count)
    # Any input that has more than one value but fewer than max_count is a
    # cardinality mismatch; single values are broadcast below instead.
    if ((max_count > url_count > 1) or (max_count > query_param_count > 1) or (max_count > data_count > 1)):
        raise InvalidRequestError('Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}', url_count, query_param_count, data_count, max_count)
    # Broadcast singleton inputs up to max_count.
    if (url_count < max_count):
        urls = (urls * max_count)
    if (query_param_count < max_count):
        query_params = (query_params * max_count)
    if (data_count < max_count):
        data = (data * max_count)
    return list(zip(urls, query_params, data))
Massages inputs and returns a list of 3-tuples zipping them up. This is all the smarts behind deciding how many requests to issue. It's fine for an input to have 0, 1, or a list of values. If there are two inputs each with a list of values, the cardinality of those lists much match. Args: urls - 1 string URL or a list of URLs query_params - None, 1 dict, or a list of dicts data - None, 1 dict or string, or a list of dicts or strings Returns: A list of 3-tuples (url, query_param, data) Raises: InvalidRequestError - if cardinality of lists does not match
codesearchnet
def register(self, user_dict):
    """Send an user_dict to NApps server using POST request.

    Args:
        user_dict(dict): Dictionary with user attributes.

    Returns:
        result(string): Return the response of Napps server.
    """
    # Trailing '' keeps the endpoint ending in a slash.
    users_endpoint = os.path.join(
        self._config.get('napps', 'api'), 'users', '')
    response = self.make_request(
        users_endpoint, method='POST', json=user_dict)
    return response.content.decode('utf-8')
Send an user_dict to NApps server using POST request. Args: user_dict(dict): Dictionary with user attributes. Returns: result(string): Return the response of Napps server.
codesearchnet
def _set_details(self, content):
    """Sets the `details` field.

    Args:
        content: the content to extract details from.
    """
    try:
        self.details = str(content)
    except UnicodeEncodeError:
        # Python 2: str() fails on non-ASCII; fall back to unicode.
        if (sys.version_info < (3, 0)):
            self.details = unicode(content)
        else:
            # Python 3: str() raising here is unexpected; store utf-8 bytes.
            logging.error('Unable to decode "%s" in Py3, encoding in utf-8.', content)
            self.details = content.encode('utf-8')
Sets the `details` field. Args: content: the content to extract details from.
codesearchnet
def _get_req_fp(self, op):
    """Decisions on what verb to use and content headers happen here.

    Args:
        op: a string specifying a http verb (case-insensitive).

    Returns:
        tuple: (requests function, content-type headers dict or None).

    Raises:
        NotImplementedError: if `op` is empty/None or not a supported verb.
    """
    if not op:
        raise NotImplementedError('Operation {} is not supported!'.format(op))

    op = op.lower()
    if op == 'get':
        return requests.get, None
    if op == 'put':
        return requests.put, {'Content-Type':
                              'application/x-www-form-urlencoded'}
    if op == 'post':
        return requests.post, {'Content-Type': 'application/json'}
    if op == 'delete':
        return requests.delete, None

    # BUG FIX: unknown non-empty verbs previously fell through and
    # returned None silently instead of raising.
    raise NotImplementedError('Operation {} is not supported!'.format(op))
Decisions on what verb to use and content headers happen here Args: op a string specifying a http verb
juraj-google-style
def _handle_message_for_stream(self, stream_transport, message, timeout):
    """Handle an incoming message, check if it's for the given stream.

    If the message is not for the stream, then add it to the appropriate
    message queue.

    Args:
        stream_transport: AdbStreamTransport currently waiting on a message.
        message: Message to check and handle.
        timeout: Timeout to use for the operation, should be an instance of
            timeouts.PolledTimeout.

    Returns:
        The message read if it was for this stream, None otherwise.

    Raises:
        AdbProtocolError: If we receive an unexpected message type.
    """
    if (message.command not in ('OKAY', 'CLSE', 'WRTE')):
        raise usb_exceptions.AdbProtocolError('%s received unexpected message: %s', self, message)
    if (message.arg1 == stream_transport.local_id):
        # Message is for the waiting stream.
        if (message.command == 'WRTE'):
            # A WRTE is only valid after the stream has been opened.
            if (not stream_transport.remote_id):
                raise usb_exceptions.AdbProtocolError('%s received WRTE before OKAY/CLSE: %s', stream_transport, message)
            # Acknowledge the received data.
            self.transport.write_message(adb_message.AdbMessage('OKAY', stream_transport.local_id, stream_transport.remote_id), timeout)
        elif (message.command == 'CLSE'):
            self.close_stream_transport(stream_transport, timeout)
        return message
    else:
        # Message belongs to a different stream; route it to its queue.
        with self._stream_transport_map_lock:
            dest_transport = self._stream_transport_map.get(message.arg1)
        if dest_transport:
            if (message.command == 'CLSE'):
                self.close_stream_transport(dest_transport, timeout)
            dest_transport.enqueue_message(message, timeout)
        else:
            _LOG.warning('Received message for unknown local-id: %s', message)
Handle an incoming message, check if it's for the given stream. If the message is not for the stream, then add it to the appropriate message queue. Args: stream_transport: AdbStreamTransport currently waiting on a message. message: Message to check and handle. timeout: Timeout to use for the operation, should be an instance of timeouts.PolledTimeout. Returns: The message read if it was for this stream, None otherwise. Raises: AdbProtocolError: If we receive an unexpected message type.
codesearchnet
def ParseDict(js_dict, message, ignore_unknown_fields=False):
    """Parses a JSON dictionary representation into a message.

    Args:
      js_dict: Dict representation of a JSON message.
      message: A protocol buffer message to merge into.
      ignore_unknown_fields: If True, do not raise errors for unknown
        fields.

    Returns:
      The same message passed as argument.
    """
    # Merge the dict contents into the message in place.
    _Parser(ignore_unknown_fields).ConvertMessage(js_dict, message)
    return message
Parses a JSON dictionary representation into a message. Args: js_dict: Dict representation of a JSON message. message: A protocol buffer message to merge into. ignore_unknown_fields: If True, do not raise errors for unknown fields. Returns: The same message passed as argument.
juraj-google-style
def GetZipInfo(self):
    """Retrieves the ZIP info object.

    Returns:
        zipfile.ZipInfo: a ZIP info object or None if not available.

    Raises:
        PathSpecError: if the path specification is incorrect.
    """
    if self._zip_info:
        return self._zip_info

    location = getattr(self.path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')
    if not location.startswith(self._file_system.LOCATION_ROOT):
        raise errors.PathSpecError('Invalid location in path specification.')
    # The root itself has no corresponding archive member.
    if len(location) == 1:
        return None

    zip_file = self._file_system.GetZipFile()
    try:
        self._zip_info = zip_file.getinfo(location[1:])
    except KeyError:
        # Deliberate best effort: a missing member yields None.
        pass

    return self._zip_info
Retrieves the ZIP info object. Returns: zipfile.ZipInfo: a ZIP info object or None if not available. Raises: PathSpecError: if the path specification is incorrect.
codesearchnet
def stop(self, name: str) -> None:
    """Stop a named timer.

    Args:
        name: timer to stop
    """
    if not self._timing:
        return
    now = get_now_utc_pendulum()
    if not self._stack:
        raise AssertionError('MultiTimer.stop() when nothing running')
    current = self._stack[-1]
    # Only the innermost (most recently started) timer may be stopped.
    if current != name:
        raise AssertionError(
            'MultiTimer.stop({}) when {} is running'.format(
                repr(name), repr(current)))
    self._totaldurations[name] += now - self._starttimes[name]
    self._stack.pop()
    # Resume timing for whatever is now on top of the stack.
    if self._stack:
        self._starttimes[self._stack[-1]] = now
Stop a named timer. Args: name: timer to stop
codesearchnet
def word_list(sowpods=False, start="", end=""):
    """Opens the word list file.

    Args:
        sowpods: a boolean to declare using the sowpods list or TWL
            (default)
        start: a string of starting characters to find anagrams based on
        end: a string of ending characters to find anagrams based on

    Yields:
        a word at a time out of 178691 words for TWL, 267751 for sowpods.
        Much less if either start or end are used (filtering is applied
        here)
    """
    location = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "wordlists",
    )
    filename = "sowpods.txt" if sowpods else "twl.txt"
    filepath = os.path.join(location, filename)
    with open(filepath) as wordfile:
        # Iterate lazily instead of readlines() to avoid loading the
        # whole list into memory.
        for word in wordfile:
            word = word.strip()
            # startswith("")/endswith("") are always True, so this single
            # test replaces the original four-way elif chain.
            if word.startswith(start) and word.endswith(end):
                yield word
Opens the word list file. Args: sowpods: a boolean to declare using the sowpods list or TWL (default) start: a string of starting characters to find anagrams based on end: a string of ending characters to find anagrams based on Yeilds: a word at a time out of 178691 words for TWL, 267751 for sowpods. Much less if either start or end are used (filtering is applied here)
juraj-google-style
def __init__(self, n, key=None, reverse=False):
    """Creates a per-key Top operation.

    The arguments 'key' and 'reverse' have the same meaning as for
    Python's sort functions.

    Args:
      n: number of elements to extract from pcoll.
      key: (optional) a mapping of elements to a comparable key, similar
        to the key argument of Python's sorting methods.
      reverse: (optional) whether to order things smallest to largest,
        rather than largest to smallest
    """
    # Store the configuration; comparison semantics mirror Python's sorting.
    self._n, self._key, self._reverse = n, key, reverse
Creates a per-key Top operation. The arguments 'key' and 'reverse' may be passed as keyword arguments, and have the same meaning as for Python's sort functions. Args: n: number of elements to extract from pcoll. key: (optional) a mapping of elements to a comparable key, similar to the key argument of Python's sorting methods. reverse: (optional) whether to order things smallest to largest, rather than largest to smallest
github-repos
def set_data(self, data):
    """Fills form with data.

    Fields listed in ``self._fields`` but absent from ``data`` are set
    to ``None``.

    Args:
        data (dict): Data to assign form fields.

    Returns:
        Self. Form object (allows call chaining).
    """
    for field_name in self._fields:
        field_value = data.get(field_name)
        setattr(self, field_name, field_value)
    return self
Fills form with data Args: data (dict): Data to assign form fields. Returns: Self. Form object.
juraj-google-style
def Compile(self, filter_implementation):
    """Compiles the filter into an operator of the given implementation.

    Args:
        filter_implementation: a filter object (instance of objectfilter.TODO).

    Returns:
        A filter operator (instance of TODO).

    Raises:
        ParseError: if an unknown operator is provided.
    """
    # Map aliased attribute names onto their canonical source attribute.
    self.attribute = self.swap_source.get(self.attribute, self.attribute)
    arguments = [self.attribute]
    # Operator lookup is case-insensitive.
    op_str = self.operator.lower()
    operator = filter_implementation.OPS.get(op_str, None)
    if (not operator):
        raise errors.ParseError('Unknown operator {0:s} provided.'.format(self.operator))
    if (self.attribute == 'timestamp'):
        # Wrap raw timestamp arguments so they compare as dates.
        args = []
        for argument in self.args:
            args.append(DateCompareObject(argument))
        self.args = args
    for argument in self.args:
        if isinstance(argument, DateCompareObject):
            # Record the comparison bound in the global time-range cache:
            # a "Less" operator constrains the upper bound, anything else
            # the lower bound.
            if ('Less' in str(operator)):
                TimeRangeCache.SetUpperTimestamp(argument.data)
            else:
                TimeRangeCache.SetLowerTimestamp(argument.data)
    arguments.extend(self.args)
    expander = filter_implementation.FILTERS['ValueExpander']
    ops = operator(arguments=arguments, value_expander=expander)
    # Negated expressions flip the operator's boolean result when supported.
    if (not self.bool_value):
        if hasattr(ops, 'FlipBool'):
            ops.FlipBool()
    return ops
Compiles the filter implementation. Args: filter_implementation: a filter object (instance of objectfilter.TODO). Returns: A filter operator (instance of TODO). Raises: ParseError: if an unknown operator is provided.
codesearchnet
def _unsorted_segment_N(data, segment_ids, num_segments):
    """Helper for unsorted_segment_mean/_sqrtN: per-segment entry counts.

    Computes the number of entries in each segment, with empty segments
    clamped to 1 so the caller can divide by N without hitting zero.

    Args:
        data: A `Tensor` with data that will be assembled in the output.
        segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
        num_segments: An integer scalar `Tensor`, the number of distinct
            segment IDs.

    Returns:
        A `Tensor` of per-segment counts, broadcastable against `data`
        reductions, with 0-entries set to 1.
    """
    num_segments = ops.convert_to_tensor(num_segments)
    segment_ids_shape = array_ops.shape_internal(segment_ids)
    # Summing a tensor of ones per segment yields each segment's entry count.
    ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
    n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
    # Append trailing singleton dims so the counts broadcast against the
    # segment-reduced data (which keeps data's trailing dimensions).
    broadcastable_shape = array_ops.concat([num_segments[array_ops.newaxis], array_ops.ones([array_ops.rank(data) - array_ops.rank(segment_ids)], dtype=num_segments.dtype)], axis=0)
    n = array_ops.reshape(n, broadcastable_shape)
    # Clamp empty segments to 1 to make the subsequent division safe.
    return gen_math_ops.maximum(n, 1)
Helper function for unsorted_segment_mean/_sqrtN. Computes the number of segment entries with 0-entries set to 1 to allow division by N. Args: data: A `Tensor` with data that will be assembled in the output. segment_ids: An integer tensor whose shape is a prefix of `data.shape`. The values must be in the range `[0, num_segments)`. The values are always validated to be in range on CPU, never validated on TPU/GPU. num_segments: An integer scalar `Tensor`. The number of distinct segment IDs. Returns: A `Tensor` with the number of segment entries with 0-entries set to 1.
github-repos
def set_epsilon(value):
    """Set the value of the fuzz factor used in numeric expressions.

    Args:
        value: float. New value of epsilon (default is 1e-07).
    """
    global _EPSILON
    _EPSILON = value
Set the value of the fuzz factor used in numeric expressions. Args: value: float. New value of epsilon. Examples: >>> keras.config.epsilon() 1e-07 >>> keras.config.set_epsilon(1e-5) >>> keras.config.epsilon() 1e-05 >>> # Set it back to the default value. >>> keras.config.set_epsilon(1e-7)
github-repos
async def addNode(self, name, valu, props=None):
    """Add a node by form name and value with optional props.

    Args:
        name (str): The form of node to add.
        valu (obj): The value for the node.
        props (dict): Optional secondary properties for the node.

    Returns:
        The added node, or None on failure when not in strict mode.
    """
    try:
        fnib = self._getNodeFnib(name, valu)
        retn = await self._addNodeFnib(fnib, props=props)
        return retn
    except asyncio.CancelledError:
        # Cancellation must always propagate; never swallow it below.
        raise
    except Exception:
        mesg = f'Error adding node: {name} {valu!r} {props!r}'
        logger.exception(mesg)
        # In strict mode failures are fatal; otherwise best-effort: log
        # the error and report no node was added.
        if self.strict:
            raise
        return None
Add a node by form name and value with optional props. Args: name (str): The form of node to add. valu (obj): The value for the node. props (dict): Optional secondary properties for the node.
juraj-google-style
def are_equal_xml(a_xml, b_xml):
    """Normalize and compare XML documents for equality.

    The documents may or may not be DataONE types.

    Args:
        a_xml: str
        b_xml: str
            XML documents to compare for equality.

    Returns:
        bool: ``True`` if the XML documents are semantically equivalent.
    """
    # Parse both documents and compare their root elements.
    parse = xml.dom.minidom.parseString
    root_a = parse(a_xml).documentElement
    root_b = parse(b_xml).documentElement
    return are_equal_elements(root_a, root_b)
Normalize and compare XML documents for equality. The document may or may not be a DataONE type. Args: a_xml: str b_xml: str XML documents to compare for equality. Returns: bool: ``True`` if the XML documents are semantically equivalent.
juraj-google-style
def coresight_configure(self, ir_pre=0, dr_pre=0, ir_post=0, dr_post=0, ir_len=0, perform_tif_init=True):
    """Prepares target and J-Link for CoreSight function usage.

    Must be called before ``coresight_read()`` or ``coresight_write()``.
    The JTAG chain parameters are only meaningful for the JTAG interface;
    under SWD an empty configuration string is used.

    Args:
        self (JLink): the ``JLink`` instance
        ir_pre (int): sum of IR lengths of all JTAG devices closer to TDO
            than the target device
        dr_pre (int): number of JTAG devices closer to TDO than the target
        ir_post (int): sum of IR lengths of all JTAG devices following the
            target device
        dr_post (int): number of JTAG devices following the target
        ir_len (int): instruction register length of the target device
        perform_tif_init (bool): if ``False``, do not output the switching
            sequence on completion

    Returns:
        ``None``

    Raises:
        JLinkException: if the DLL reports an error (negative return code).
    """
    if (self.tif == enums.JLinkInterfaces.SWD):
        # SWD has no JTAG chain; configure with an empty string.
        res = self._dll.JLINKARM_CORESIGHT_Configure('')
        if (res < 0):
            raise errors.JLinkException(res)
        return None
    # Build the semicolon-separated JTAG chain description.
    config_string = 'IRPre=%s;DRPre=%s;IRPost=%s;DRPost=%s;IRLenDevice=%s;'
    config_string = (config_string % (ir_pre, dr_pre, ir_post, dr_post, ir_len))
    if (not perform_tif_init):
        config_string = (config_string + 'PerformTIFInit=0;')
    res = self._dll.JLINKARM_CORESIGHT_Configure(config_string.encode())
    if (res < 0):
        raise errors.JLinkException(res)
    return None
Prepares target and J-Link for CoreSight function usage. Args: self (JLink): the ``JLink`` instance ir_pre (int): sum of instruction register length of all JTAG devices in the JTAG chain, close to TDO than the actual one, that J-Link shall communicate with dr_pre (int): number of JTAG devices in the JTAG chain, closer to TDO than the actual one, that J-Link shall communicate with ir_post (int): sum of instruction register length of all JTAG devices in the JTAG chain, following the actual one, that J-Link shall communicate with dr_post (int): Number of JTAG devices in the JTAG chain, following the actual one, J-Link shall communicate with ir_len (int): instruction register length of the actual device that J-Link shall communicate with perform_tif_init (bool): if ``False``, then do not output switching sequence on completion Returns: ``None`` Note: This must be called before calling ``coresight_read()`` or ``coresight_write()``.
codesearchnet
def FormatTypeSummaryTable(self, level_name, name_to_problist):
    """Return an HTML table listing the number of problems by class name.

    Args:
        level_name: string such as "Error" or "Warning"
        name_to_problist: dict mapping class name to a BoundedProblemList
            object

    Returns:
        HTML in a string
    """
    output = []
    output.append('<table>')
    for classname in sorted(name_to_problist.keys()):
        problist = name_to_problist[classname]
        human_name = MaybePluralizeWord(problist.count, UnCamelCase(classname))
        # NOTE(review): the string literal on the next line appears truncated
        # (unterminated href attribute) — likely lost during extraction.
        # Presumably it emitted a count cell plus an anchor built from
        # level_name/classname/human_name; restore from the original source.
        output.append(('<tr><td>%d</td><td><a href="
    output.append('</table>\n')
    return ''.join(output)
Return an HTML table listing the number of problems by class name. Args: level_name: string such as "Error" or "Warning" name_to_problist: dict mapping class name to an BoundedProblemList object Returns: HTML in a string
codesearchnet
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # Registers a unary-unary RPC stub for /health.Health/Health; both the
    # request and response are google.protobuf.Empty messages.
    self.Health = channel.unary_unary(
        '/health.Health/Health',
        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def escape(inp, quote='"'):
    """Escape `quote` in string `inp` by prefixing it with a backslash.

    Args:
        inp (str): String in which `quote` will be escaped.
        quote (str, default "): Single character that will be escaped.

    Returns:
        str: Escaped string.
    """
    # str.replace scans the string in C; the original built the result one
    # character at a time with quadratic ``+=`` concatenation.
    return inp.replace(quote, '\\' + quote)
Escape `quote` in string `inp`. Example usage:: >>> escape('hello "') 'hello \\"' >>> escape('hello \\"') 'hello \\\\"' Args: inp (str): String in which `quote` will be escaped. quote (char, default "): Specify which character will be escaped. Returns: str: Escaped string.
juraj-google-style
def _create_controller_info_record(self, controller_module_name):
    """Creates controller info record for a particular controller type.

    Info is retrieved from all the controller objects spawned from the
    specified module, using the controller module's `get_info` function.

    Args:
        controller_module_name: string, the name of the controller module
            to retrieve info from.

    Returns:
        A records.ControllerInfoRecord object.
    """
    module = self._controller_modules[controller_module_name]
    controller_info = None
    try:
        # Pass a shallow copy so get_info cannot mutate the tracked objects.
        controller_info = module.get_info(copy.copy(self._controller_objects[controller_module_name]))
    except AttributeError:
        # get_info is optional for controller modules.
        logging.warning('No optional debug info found for controller %s. To provide it, implement `get_info`.', controller_module_name)
    try:
        # Dry-run serialization; records are persisted as YAML later.
        yaml.dump(controller_info)
    except TypeError:
        logging.warning('The info of controller %s in class "%s" is not YAML serializable! Coercing it to string.', controller_module_name, self._class_name)
        controller_info = str(controller_info)
    return records.ControllerInfoRecord(self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME, controller_info)
Creates controller info record for a particular controller type. Info is retrieved from all the controller objects spawned from the specified module, using the controller module's `get_info` function. Args: controller_module_name: string, the name of the controller module to retrieve info from. Returns: A records.ControllerInfoRecord object.
github-repos
def save_b26_file(filename, instruments=None, scripts=None, probes=None, overwrite=False, verbose=False):
    """Save instruments, scripts and probes as a json file.

    Unless ``overwrite`` is True, the contents of an existing file at
    ``filename`` are loaded first and the new entries are merged in.

    Args:
        filename: path of the output (json) file
        instruments: dict of instrument settings to store
        scripts: dict of script settings to store
        probes: dictionary of the form
            {instrument_name: 'probe_1_of_instrument,probe_2_of_instrument,...'}
        overwrite: if True, do not merge with an existing file's contents
        verbose: if True, print progress information

    Returns:
        None
    """
    # Start from the existing file so saves are cumulative, unless overwriting.
    if os.path.isfile(filename) and not overwrite:
        data_dict = load_b26_file(filename)
    else:
        data_dict = {}

    if instruments is not None:
        if 'instruments' in data_dict:
            data_dict['instruments'].update(instruments)
        else:
            data_dict['instruments'] = instruments

    if scripts is not None:
        if 'scripts' in data_dict:
            data_dict['scripts'].update(scripts)
        else:
            data_dict['scripts'] = scripts

    if probes is not None:
        # Union of instruments that have probes either on disk or in the
        # new ``probes`` argument.
        probe_instruments = list(probes.keys())
        if 'probes' in data_dict:
            probe_instruments = set(probe_instruments + list(data_dict['probes'].keys()))
        else:
            data_dict['probes'] = {}
        for instrument in probe_instruments:
            if instrument in data_dict['probes'] and instrument in probes:
                # Merge the comma-separated probe names, dropping duplicates.
                data_dict['probes'][instrument] = ','.join(set(
                    data_dict['probes'][instrument].split(',') + probes[instrument].split(',')))
            else:
                # NOTE(review): this inserts *all* new probe entries, not just
                # the current instrument's — preserved from the original logic.
                data_dict['probes'].update(probes)

    if verbose:
        print('writing', filename)

    if data_dict:
        if verbose:
            print('filename', filename)
            print('exists', os.path.exists(os.path.dirname(filename)))
        # Create the target directory if needed; exist_ok avoids the
        # check-then-create race, and the guard avoids makedirs('') when
        # filename has no directory component.
        dirname = os.path.dirname(filename)
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        with open(filename, 'w') as outfile:
            json.dump(data_dict, outfile, indent=4)
Save instruments, scripts and probes as a json file. Args: filename: path of the output file instruments: dictionary of instrument settings scripts: dictionary of script settings probes: dictionary of the form {instrument_name : probe_1_of_instrument, probe_2_of_instrument, ...} Returns: None
juraj-google-style
def validate(self, corpus):
    """Perform validation on the given corpus.

    Runs every registered validator and aggregates their results; the
    combined result passes only if every sub-validator passed.

    Args:
        corpus (Corpus): The corpus to test/validate.
    """
    results = {}
    for validator in self.validators:
        results[validator.name()] = validator.validate(corpus)
    passed = all(result.passed for result in results.values())
    return CombinedValidationResult(passed, results)
Perform validation on the given corpus. Args: corpus (Corpus): The corpus to test/validate.
juraj-google-style
def GetSortedEvents(self, time_range=None):
    """Retrieves the events in increasing chronological order.

    This includes all events written to the storage including those
    pending being flushed (written) to the storage.

    Args:
        time_range (Optional[TimeRange]): time range used to filter events
            that fall in a specific period.

    Returns:
        generator(EventObject): event generator.

    Raises:
        IOError: when the storage writer is closed.
        OSError: when the storage writer is closed.
    """
    storage_file = self._storage_file
    if not storage_file:
        raise IOError('Unable to read from closed storage writer.')
    # Delegate to the underlying storage file.
    return storage_file.GetSortedEvents(time_range=time_range)
Retrieves the events in increasing chronological order. This includes all events written to the storage including those pending being flushed (written) to the storage. Args: time_range (Optional[TimeRange]): time range used to filter events that fall in a specific period. Returns: generator(EventObject): event generator. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
juraj-google-style
def validate(request: Union[(Dict, List)], schema: dict) -> Union[(Dict, List)]:
    """Wraps jsonschema.validate, returning the same object passed in.

    Args:
        request: The deserialized-from-json request.
        schema: The jsonschema schema to validate against.

    Returns:
        The ``request`` object, unchanged (allows call chaining).

    Raises:
        jsonschema.ValidationError: if ``request`` does not conform to
            ``schema``.
    """
    jsonschema_validate(request, schema)
    return request
Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError
codesearchnet
def __init__(self, min_obs=10):
    """Initialize the OneHotEncoder class object.

    Args:
        min_obs (int): minimum number of observations required to create
            a dummy variable
    """
    self.min_obs = min_obs
    # The same threshold is forwarded to the underlying LabelEncoder, which
    # maps categorical values to integer labels before one-hot encoding.
    self.label_encoder = LabelEncoder(min_obs)
Initialize the OneHotEncoder class object. Args: min_obs (int): minimum number of observations required to create a dummy variable label_encoder (LabelEncoder): LabelEncoder that transforms categorical values into integer labels
juraj-google-style
def removeRow(self, triggered):
    """Removes the currently selected rows from the model.

    This method is also a slot.

    Args:
        triggered (bool): If the corresponding button was activated, the
            selected rows will be removed from the model.
    """
    if triggered:
        model = self.tableView.model()
        selection = self.tableView.selectedIndexes()
        # Collect the row number of every selected cell; the set() below
        # deduplicates rows with multiple selected cells.
        rows = [index.row() for index in selection]
        model.removeDataFrameRows(set(rows))
        # Un-check the triggering action so the removal is one-shot.
        self.sender().setChecked(False)
Removes a row from the model. This method is also a slot. Args: triggered (bool): If the corresponding button was activated, the selected row will be removed from the model.
juraj-google-style
def overlay(self, feature, color='Blue', opacity=0.6):
    """Overlays ``feature`` on the map. Returns a new Map.

    Args:
        ``feature``: a ``Table`` of map features, a list of map features,
            a Map, a Region, or a circle marker map table. The features
            will be overlaid on the Map with the specified ``color``.
        ``color`` (``str``): Color of feature. Defaults to 'Blue'.
        ``opacity`` (``float``): Opacity of overlaid feature.
            Defaults to 0.6.

    Returns:
        A new ``Map`` with the overlaid ``feature``.
    """
    # Work on a copy so the receiver is left untouched.
    result = self.copy()
    # NOTE: dispatch deliberately uses exact type() equality, not
    # isinstance(), so subclasses are not matched.
    if (type(feature) == Table):
        # A table either carries ready-made features in a 'feature' column
        # or is interpreted as a circle-marker table.
        if ('feature' in feature):
            feature = feature['feature']
        else:
            feature = Circle.map_table(feature)
    if (type(feature) in [list, np.ndarray]):
        for f in feature:
            f._attrs['fill_color'] = color
            f._attrs['fill_opacity'] = opacity
            f.draw_on(result._folium_map)
    elif (type(feature) == Map):
        # Draw every feature of the other map onto the copy.
        for i in range(len(feature._features)):
            f = feature._features[i]
            f._attrs['fill_color'] = color
            f._attrs['fill_opacity'] = opacity
            f.draw_on(result._folium_map)
    elif (type(feature) == Region):
        feature._attrs['fill_color'] = color
        feature._attrs['fill_opacity'] = opacity
        feature.draw_on(result._folium_map)
    return result
Overlays ``feature`` on the map. Returns a new Map. Args: ``feature``: a ``Table`` of map features, a list of map features, a Map, a Region, or a circle marker map table. The features will be overlayed on the Map with specified ``color``. ``color`` (``str``): Color of feature. Defaults to 'Blue' ``opacity`` (``float``): Opacity of overlain feature. Defaults to 0.6. Returns: A new ``Map`` with the overlain ``feature``.
codesearchnet
def get_vulnerability(source, sink, triggers, lattice, cfg, interactive, blackbox_mapping):
    """Get vulnerability between source and sink if it exists.

    Uses triggers to find sanitisers.

    Args:
        source(TriggerNode): TriggerNode of the source.
        sink(TriggerNode): TriggerNode of the sink.
        triggers(Triggers): Triggers of the CFG.
        lattice(Lattice): the lattice we're analysing.
        cfg(CFG): .blackbox_assignments used in is_unknown,
            .nodes used in build_def_use_chain.
        interactive(bool): determines if we ask the user about blackbox
            functions not in the mapping file.
        blackbox_mapping(dict): map of blackbox functions recording whether
            or not they propagate taint.

    Returns:
        A tuple (Vulnerability or None, interactive) — ``interactive`` is
        returned because it may be toggled while querying the user.
    """
    # Secondary nodes of the source that are still in-constraint at the
    # sink, i.e. the taint has not been killed along the way.
    nodes_in_constraint = [secondary for secondary in reversed(source.secondary_nodes) if lattice.in_constraint(secondary, sink.cfg_node)]
    nodes_in_constraint.append(source.cfg_node)
    # Determine which sink arguments can carry taint into the sink.
    if sink.trigger.all_arguments_propagate_taint:
        sink_args = get_sink_args(sink.cfg_node)
    else:
        sink_args = get_sink_args_which_propagate(sink, sink.cfg_node.ast_node)
    tainted_node_in_sink_arg = get_tainted_node_in_sink_args(sink_args, nodes_in_constraint)
    if tainted_node_in_sink_arg:
        vuln_deets = {'source': source.cfg_node, 'source_trigger_word': source.trigger_word, 'sink': sink.cfg_node, 'sink_trigger_word': sink.trigger_word}
        # Collect sanitiser nodes: assignments sanitise definitively,
        # an IfNode only *potentially* sanitises (conditional branch).
        sanitiser_nodes = set()
        potential_sanitiser = None
        if sink.sanitisers:
            for sanitiser in sink.sanitisers:
                for cfg_node in triggers.sanitiser_dict[sanitiser]:
                    if isinstance(cfg_node, AssignmentNode):
                        sanitiser_nodes.add(cfg_node)
                    elif isinstance(cfg_node, IfNode):
                        potential_sanitiser = cfg_node
        def_use = build_def_use_chain(cfg.nodes, lattice)
        # Walk every def-use chain from source to sink; report the first
        # chain that is not classified as a false positive.
        for chain in get_vulnerability_chains(source.cfg_node, sink.cfg_node, def_use):
            (vulnerability_type, interactive) = how_vulnerable(chain, blackbox_mapping, sanitiser_nodes, potential_sanitiser, cfg.blackbox_assignments, interactive, vuln_deets)
            if (vulnerability_type == VulnerabilityType.FALSE):
                continue
            vuln_deets['reassignment_nodes'] = chain
            return (vuln_factory(vulnerability_type)(**vuln_deets), interactive)
    return (None, interactive)
Get vulnerability between source and sink if it exists. Uses triggers to find sanitisers. Note: When a secondary node is in_constraint with the sink but not the source, the secondary is a save_N_LHS node made in process_function in expr_visitor. Args: source(TriggerNode): TriggerNode of the source. sink(TriggerNode): TriggerNode of the sink. triggers(Triggers): Triggers of the CFG. lattice(Lattice): the lattice we're analysing. cfg(CFG): .blackbox_assignments used in is_unknown, .nodes used in build_def_use_chain interactive(bool): determines if we ask the user about blackbox functions not in the mapping file. blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint. Returns: A Vulnerability if it exists, else None
codesearchnet