code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def add(self, layer):
    """Adds a layer instance on top of the layer stack.

    Args:
        layer: layer instance.

    Raises:
        TypeError: If `layer` is not a layer instance.
        ValueError: In case the `layer` argument does not know its input
            shape, or has multiple output tensors, or is already connected
            somewhere else (forbidden in `Sequential` models).
    """
    # If the caller passed a `keras.Input` tensor, recover the InputLayer
    # that created it and add that instead.
    if hasattr(layer, '_keras_history'):
        origin_layer = layer._keras_history[0]
        if isinstance(origin_layer, input_layer.InputLayer):
            layer = origin_layer
            logging.warning('Please add `keras.layers.InputLayer` instead of `keras.Input` to Sequential model. `keras.Input` is intended to be used by Functional model.')
    if isinstance(layer, module.Module):
        # Wrap raw tf.Module instances so they behave like Keras layers.
        if not isinstance(layer, base_layer.Layer):
            layer = functional.ModuleWrapper(layer)
    else:
        raise TypeError('The added layer must be an instance of class Layer. Found: ' + str(layer))
    tf_utils.assert_no_legacy_layers([layer])
    if not self._is_layer_name_unique(layer):
        raise ValueError('All layers added to a Sequential model should have unique names. Name "%s" is already the name of a layer in this model. Update the `name` argument to pass a unique name.' % (layer.name,))
    self.built = False
    set_inputs = False
    self._maybe_create_attribute('_self_tracked_trackables', [])
    if not self._self_tracked_trackables:
        # First layer: try to establish the model's inputs eagerly.
        if isinstance(layer, input_layer.InputLayer):
            set_inputs = True
        else:
            batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)
            if batch_shape:
                # The layer knows its input shape, so build an Input for it.
                x = input_layer.Input(batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')
                layer(x)
                set_inputs = True
        if set_inputs:
            outputs = nest.flatten(layer._inbound_nodes[-1].outputs)
            if len(outputs) != 1:
                raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
            self.outputs = outputs
            self.inputs = layer_utils.get_source_inputs(self.outputs[0])
            self.built = True
            self._has_explicit_input_shape = True
    elif self.outputs:
        # Subsequent layers are called on the previous layer's output.
        output_tensor = layer(self.outputs[0])
        if len(nest.flatten(output_tensor)) != 1:
            raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
        self.outputs = [output_tensor]
        self.built = True
    if set_inputs or self._graph_initialized:
        self._init_graph_network(self.inputs, self.outputs)
        self._graph_initialized = True
    else:
        # Deferred-build mode: just track the layer for now.
        self._self_tracked_trackables.append(layer)
        self._handle_deferred_layer_dependencies([layer])
    self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
Adds a layer instance on top of the layer stack. Args: layer: layer instance. Raises: TypeError: If `layer` is not a layer instance. ValueError: In case the `layer` argument does not know its input shape. ValueError: In case the `layer` argument has multiple output tensors, or is already connected somewhere else (forbidden in `Sequential` models).
github-repos
def _InfoBackup(component):
    """Build an info dict describing *component*.

    Fallback used only when IPython's oinspect module is unavailable; it
    may carry less information than oinspect's equivalent.

    Args:
        component: The component to analyze.

    Returns:
        A dict with type name, string form, file, line, docstring and,
        when available, the component's length.
    """
    filename, lineno = GetFileAndLine(component)
    info = {
        'type_name': type(component).__name__,
        'string_form': str(component),
        'file': filename,
        'line': lineno,
        'docstring': inspect.getdoc(component),
    }
    try:
        info['length'] = str(len(component))
    except (TypeError, AttributeError):
        # Not every component supports len(); simply omit the key.
        pass
    return info
Returns a dict with information about the given component. This function is to be called only in the case that IPython's oinspect module is not available. The info dict it produces may contain less information than that contained in the info dict produced by oinspect. Args: component: The component to analyze. Returns: A dict with information about the component.
github-repos
def selfSignCert(self, cert, pkey):
    """Self-sign a certificate, making it its own issuer.

    Args:
        cert (OpenSSL.crypto.X509): The certificate to sign.
        pkey (OpenSSL.crypto.PKey): The PKey with which to sign it.

    Returns:
        None
    """
    subject = cert.get_subject()
    cert.set_issuer(subject)
    cert.sign(pkey, self.signing_digest)
Self-sign a certificate. Args: cert (OpenSSL.crypto.X509): The certificate to sign. pkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate. Examples: Sign a given certificate with a given private key: cdir.selfSignCert(mycert, myotherprivatekey) Returns: None
juraj-google-style
def data(self, rows=None):
    """Access a batch of episodes from the memory.

    Padding elements after the length of each episode are unspecified and
    might contain old data.

    Args:
        rows: 1-D tensor of episode indices to select; defaults to all.

    Returns:
        Tuple containing a tuple of transition quantities with batch and
        time dimensions, and a batch of sequence lengths.
    """
    if rows is None:
        rows = tf.range(self._capacity)
    assert rows.shape.ndims == 1
    episode = tools.nested.map(
        lambda var: tf.gather(var, rows), self._buffers)
    length = tf.gather(self._length, rows)
    return episode, length
Access a batch of episodes from the memory. Padding elements after the length of each episode are unspecified and might contain old data. Args: rows: Episodes to select, defaults to all. Returns: Tuple containing a tuple of transition quantities with batch and time dimensions, and a batch of sequence lengths.
juraj-google-style
def bind_parameters(self, value_dict):
    """Assign parameters to values, yielding a new circuit.

    Args:
        value_dict (dict): {parameter: value, ...}

    Raises:
        QiskitError: If value_dict contains parameters not present in the
            circuit.

    Returns:
        QuantumCircuit: copy of self with assignment substitution.
    """
    bound_circuit = self.copy()
    if value_dict.keys() > self.parameters:
        unknown = value_dict.keys() - self.parameters
        raise QiskitError(
            'Cannot bind parameters ({}) not present in the circuit.'.format(
                [str(p) for p in unknown]))
    for parameter, value in value_dict.items():
        bound_circuit._bind_parameter(parameter, value)
    # Bound parameters are no longer free, so drop them from the table.
    for parameter in value_dict:
        del bound_circuit._parameter_table[parameter]
    return bound_circuit
Assign parameters to values yielding a new circuit. Args: value_dict (dict): {parameter: value, ...} Raises: QiskitError: If value_dict contains parameters not present in the circuit Returns: QuantumCircuit: copy of self with assignment substitution.
codesearchnet
def acos(cls, x: 'TensorFluent') -> 'TensorFluent':
    """Returns a TensorFluent for the arccos function.

    Args:
        x: The input fluent.

    Returns:
        A TensorFluent wrapping the arccos function.
    """
    return cls._unary_op(x, tf.acos, tf.float32)
Returns a TensorFluent for the arccos function. Args: x: The input fluent. Returns: A TensorFluent wrapping the arccos function.
juraj-google-style
def get_array_for_fit(observables: dict, track_pt_bin: int, jet_pt_bin: int) -> 'histogram.Histogram1D':
    """Get a Histogram1D associated with the selected jet and track pt bins.

    This is often used to retrieve data for fitting.

    Args:
        observables (dict): The observables from which the hist should be
            retrieved.
        track_pt_bin (int): Track pt bin of the desired hist.
        jet_pt_bin (int): Jet pt bin of the desired hist.

    Returns:
        Histogram1D: Converted TH1 or uproot histogram.

    Raises:
        ValueError: If the requested observable couldn't be found.
    """
    # Only the values are needed; the dict keys are irrelevant here.
    for observable in observables.values():
        if observable.track_pt_bin == track_pt_bin and observable.jet_pt_bin == jet_pt_bin:
            return histogram.Histogram1D.from_existing_hist(observable.hist)
    # Bug fix: the original message lacked the `f` prefix, so the bin
    # values were never interpolated into the error text.
    raise ValueError(
        f'Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}')
Get a Histogram1D associated with the selected jet and track pt bins. This is often used to retrieve data for fitting. Args: observables (dict): The observables from which the hist should be retrieved. track_pt_bin (int): Track pt bin of the desired hist. jet_pt_bin (int): Jet pt bin of the desired hist. Returns: Histogram1D: Converted TH1 or uproot histogram. Raises: ValueError: If the requested observable couldn't be found.
codesearchnet
def GetNumberOfEventSources(self):
    """Retrieves the number of event sources.

    Returns:
        int: number of event sources, summing the stored and the
        serialized attribute containers.
    """
    stored = self._CountStoredAttributeContainers(
        self._CONTAINER_TYPE_EVENT_SOURCE)
    serialized = self._GetNumberOfSerializedAttributeContainers(
        self._CONTAINER_TYPE_EVENT_SOURCE)
    return stored + serialized
Retrieves the number of event sources. Returns: int: number of event sources.
codesearchnet
def index_buffer(self, buffer, index_element_size=4):
    """Set the index buffer for this VAO.

    Args:
        buffer: ``moderngl.Buffer``, ``numpy.ndarray`` or ``bytes``.

    Keyword Args:
        index_element_size (int): Byte size of each element: 1, 2 or 4.

    Raises:
        VAOError: If *buffer* is not one of the accepted types.
    """
    if type(buffer) not in [moderngl.Buffer, numpy.ndarray, bytes]:
        raise VAOError('buffer parameter must be a moderngl.Buffer, numpy.ndarray or bytes instance')
    # Normalize ndarray/bytes inputs into a moderngl buffer.
    if isinstance(buffer, numpy.ndarray):
        buffer = self.ctx.buffer(buffer.tobytes())
    if isinstance(buffer, bytes):
        buffer = self.ctx.buffer(data=buffer)
    self._index_buffer = buffer
    self._index_element_size = index_element_size
Set the index buffer for this VAO Args: buffer: ``moderngl.Buffer``, ``numpy.array`` or ``bytes`` Keyword Args: index_element_size (int): Byte size of each element. 1, 2 or 4
codesearchnet
def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs): vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy if vision_feature_select_strategy not in ['default', 'full']: raise ValueError(f'Unexpected select feature strategy: {self.config.vision_feature_select_strategy}') kwargs = {k: v for k, v in kwargs.items() if v is not None} image_outputs = self.vision_tower(pixel_values, output_hidden_states=True, **kwargs) if isinstance(vision_feature_layer, int): selected_image_feature = image_outputs.hidden_states[vision_feature_layer] if vision_feature_select_strategy == 'default': selected_image_feature = selected_image_feature[:, 1:] else: hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer] if vision_feature_select_strategy == 'default': hs_pool = [hs[:, 1:] for hs in hs_pool] selected_image_feature = torch.cat(hs_pool, dim=-1) image_features = self.multi_modal_projector(selected_image_feature) if 'image_sizes' in kwargs: split_sizes = [height image_features = torch.split(image_features.squeeze(0), split_sizes) else: image_features = list(image_features) return image_features
Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`): The tensors corresponding to the input images. vision_feature_layer (`Union[int, List[int]]`, *optional*): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Can be one of `"default"` or `"full"` Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
github-repos
def get_changeset(changeset): url = 'https: return ET.fromstring(requests.get(url).content)
Get the changeset using the OSM API and return the content as a XML ElementTree. Args: changeset: the id of the changeset.
codesearchnet
def stitch_values(values_and_indices_list):
    """Stitch values together according to their indices.

    Args:
        values_and_indices_list: a list of (values, indices) tuples; each
            index gives the position of the matching value in the result.

    Returns:
        a stitched list of values.
    """
    total = sum(len(pair[0]) for pair in values_and_indices_list)
    result = [None] * total
    for pair in values_and_indices_list:
        if pair and pair[0]:
            for value, index in zip(*pair):
                # Each slot must be filled exactly once.
                assert result[index] is None
                result[index] = value
    return result
Stitch values together according to their indices. Args: values_and_indices_list: a list of tuples of values and indices indicating the values and positions in the returned list. Returns: a stitched list of values.
github-repos
def _ref(self):
    """Returns a reference to this variable.

    You usually do not need to call this method as all ops that need a
    reference to the variable call it automatically. The returned `Tensor`
    holds a reference to the variable; you can assign a new value to the
    variable by passing the tensor to an assign op. See
    `tf.Variable.value` if you want to get the value of the variable.

    Returns:
        A `Tensor` that is a reference to the variable.
    """
    return self._variable
Returns a reference to this variable. You usually do not need to call this method as all ops that need a reference to the variable call it automatically. Returns is a `Tensor` which holds a reference to the variable. You can assign a new value to the variable by passing the tensor to an assign op. See `tf.Variable.value` if you want to get the value of the variable. Returns: A `Tensor` that is a reference to the variable.
github-repos
def peek(self) -> str:
    """Return the next character without advancing the offset.

    Raises:
        EndOfInput: If past the end of `self.input`.
    """
    try:
        return self.input[self.offset]
    except IndexError:
        raise EndOfInput(self)
Return the next character without advancing offset. Raises: EndOfInput: If past the end of `self.input`.
codesearchnet
def device_id_to_slug(did):
    """Convert a device id into a canonical device slug.

    Args:
        did (long): A device id
        did (string): A device slug in the form of XXXX, XXXX-XXXX-XXXX,
            d--XXXX, d--XXXX-XXXX-XXXX-XXXX

    Returns:
        str: The device slug in the d--XXXX-XXXX-XXXX-XXXX format

    Raises:
        ArgumentError: if the ID is not in the [1, 16**12] range, or if
            not a valid string
    """
    try:
        slug = IOTileDeviceSlug(did, allow_64bits=False)
    except ValueError:
        raise ArgumentError('Unable to recognize {} as a device id'.format(did))
    return str(slug)
Converts a device id into a correct device slug. Args: did (long) : A device id did (string) : A device slug in the form of XXXX, XXXX-XXXX-XXXX, d--XXXX, d--XXXX-XXXX-XXXX-XXXX Returns: str: The device slug in the d--XXXX-XXXX-XXXX-XXXX format Raises: ArgumentError: if the ID is not in the [1, 16**12] range, or if not a valid string
codesearchnet
def _detect(self):
    """Detect `view`/`pure` functions that may change state.

    Flags functions declared constant that either contain assembly code
    (whose effects cannot be verified) or write state variables.

    Returns:
        list: JSON result dicts, one per offending function.
    """
    results = []
    for c in self.contracts:
        for f in c.functions:
            # Skip functions inherited from a parent contract.
            if (f.contract != c):
                continue
            if (f.view or f.pure):
                if f.contains_assembly:
                    attr = ('view' if f.view else 'pure')
                    info = '{}.{} ({}) is declared {} but contains assembly code\n'
                    info = info.format(f.contract.name, f.name, f.source_mapping_str, attr)
                    json = self.generate_json_result(info)
                    self.add_function_to_json(f, json)
                    json['elements'].append({'type': 'info', 'contains_assembly': True})
                    results.append(json)
                variables_written = f.all_state_variables_written()
                if variables_written:
                    attr = ('view' if f.view else 'pure')
                    info = '{}.{} ({}) is declared {} but changes state variables:\n'
                    info = info.format(f.contract.name, f.name, f.source_mapping_str, attr)
                    for variable_written in variables_written:
                        info += '\t- {}.{}\n'.format(variable_written.contract.name, variable_written.name)
                    json = self.generate_json_result(info)
                    self.add_function_to_json(f, json)
                    self.add_variables_to_json(variables_written, json)
                    json['elements'].append({'type': 'info', 'contains_assembly': False})
                    results.append(json)
    return results
Detect the constant function changing the state Recursively visit the calls Returns: list: {'vuln', 'filename,'contract','func','#varsWritten'}
codesearchnet
def extract_cookies(self, response, request, referrer_host=None):
    """Extract cookies from *response* into the wrapped cookie jar.

    Args:
        response: An instance of :class:`.http.request.Response`.
        request: An instance of :class:`.http.request.Request`.
        referrer_host (str): A hostname or IP address of the referrer URL.
    """
    wrapped_response = HTTPResponseInfoWrapper(response)
    wrapped_request = convert_http_request(request, referrer_host)
    self._cookie_jar.extract_cookies(wrapped_response, wrapped_request)
Wrapped ``extract_cookies``. Args: response: An instance of :class:`.http.request.Response`. request: An instance of :class:`.http.request.Request`. referrer_host (str): An hostname or IP address of the referrer URL.
codesearchnet
def Serialize(self, writer):
    """Serialize object.

    Args:
        writer (neo.IO.BinaryWriter): destination writer.
    """
    writer.WriteHashes(self.HashStart)
    stop_hash = self.HashStop
    # HashStop is optional and only written when present.
    if stop_hash is not None:
        writer.WriteUInt256(stop_hash)
Serialize object. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def save_checkpoint(model, filename, optimizer=None, meta=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.

    Raises:
        TypeError: If *meta* is neither a dict nor None.
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError('meta must be a dict or None, but got {}'.format(
            type(meta)))
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())

    mmcv.mkdir_or_exist(osp.dirname(filename))
    # Unwrap (Distributed)DataParallel containers before export.
    if hasattr(model, 'module'):
        model = model.module

    checkpoint = {'meta': meta,
                  'state_dict': weights_to_cpu(model.state_dict())}
    if optimizer is not None:
        checkpoint['optimizer'] = optimizer.state_dict()
    torch.save(checkpoint, filename)
Save checkpoint to file. The checkpoint will have 3 fields: ``meta``, ``state_dict`` and ``optimizer``. By default ``meta`` will contain version and time info. Args: model (Module): Module whose params are to be saved. filename (str): Checkpoint filename. optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. meta (dict, optional): Metadata to be saved in checkpoint.
juraj-google-style
def create_alias(alias_name, alias_command):
    """Create an alias.

    Args:
        alias_name: The name of the alias.
        alias_command: The command that the alias points to.
    """
    alias_name = alias_name.strip()
    alias_command = alias_command.strip()
    alias_table = get_alias_table()
    # Create the section on first use; overwrite the command otherwise.
    if alias_name not in alias_table.sections():
        alias_table.add_section(alias_name)
    alias_table.set(alias_name, 'command', alias_command)
    _commit_change(alias_table)
Create an alias. Args: alias_name: The name of the alias. alias_command: The command that the alias points to.
codesearchnet
def sg_transpose(tensor, opt):
    r"""Permutes the dimensions according to `opt.perm`.

    See `tf.transpose()` in tensorflow.

    Args:
        tensor: A `Tensor` (automatically given by chain).
        opt:
            perm: A permutation of the dimensions of `tensor` (mandatory).
            name: If provided, replace current tensor's name.

    Returns:
        A `Tensor`.
    """
    assert opt.perm is not None, 'perm is mandatory'
    return tf.transpose(tensor, opt.perm, name=opt.name)
r"""Permutes the dimensions according to `opt.perm`. See `tf.transpose()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: perm: A permutation of the dimensions of `tensor`. The target shape. name: If provided, replace current tensor's name. Returns: A `Tensor`.
codesearchnet
def _get_format(format, fname, inp=None):
    """Try to guess markup format of given input.

    Args:
        format: explicit format override to use
        fname: name of file, if a file was used to read `inp`
        inp: optional bytestring to guess format of (can be None, if
            markup format is to be guessed only from `format` and `fname`)

    Returns:
        guessed format (a key of fmt_to_exts dict)

    Raises:
        AnyMarkupError: if explicit format override has unsupported value
            or if it's impossible to guess the format
    """
    fmt = None
    err = True
    # 1) An explicit override wins, provided it names a known format.
    if (format is not None):
        if (format in fmt_to_exts):
            fmt = format
            err = False
    # 2) Otherwise match the file extension against known formats.
    elif fname:
        file_ext = os.path.splitext(fname)[1][len(os.path.extsep):]
        for (fmt_name, exts) in fmt_to_exts.items():
            if (file_ext in exts):
                fmt = fmt_name
                err = False
    # 3) As a last resort, sniff the raw bytes.
    if (fmt is None):
        if (inp is not None):
            fmt = _guess_fmt_from_bytes(inp)
            err = False
    if err:
        # Report which hints were available but unusable.
        err_string = 'Failed to guess markup format based on: '
        what = []
        for (k, v) in {format: 'specified format argument', fname: 'filename', inp: 'input string'}.items():
            if k:
                what.append(v)
        if (not what):
            what.append('nothing to guess format from!')
        err_string += ', '.join(what)
        raise AnyMarkupError(err_string)
    return fmt
Try to guess markup format of given input. Args: format: explicit format override to use fname: name of file, if a file was used to read `inp` inp: optional bytestring to guess format of (can be None, if markup format is to be guessed only from `format` and `fname`) Returns: guessed format (a key of fmt_to_exts dict) Raises: AnyMarkupError if explicit format override has unsupported value or if it's impossible to guess the format
codesearchnet
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:
    """Calculate accuracy in terms of absolute coincidence.

    Args:
        y_true: array of true values
        y_predicted: array of predicted values

    Returns:
        portion of absolutely coincidental samples (0 for empty input)
    """
    total = len(y_true)
    if not total:
        return 0
    matches = sum(1 for true, pred in zip(y_true, y_predicted) if true == pred)
    return matches / total
Calculate accuracy in terms of absolute coincidence Args: y_true: array of true values y_predicted: array of predicted values Returns: portion of absolutely coincidental samples
codesearchnet
def AddForwardedIp(self, address, interface):
    """Configure a new IP address on the network interface.

    Args:
        address: string, the IP address to configure.
        interface: string, the output device to use.
    """
    # Bare addresses get an explicit /32 suffix; alias forms are kept as-is.
    if not IP_ALIAS_REGEX.match(address):
        address = '%s/32' % address
    args = ['add', 'to', 'local', address]
    options = self._CreateRouteOptions(dev=interface)
    self._RunIpRoute(args=args, options=options)
Configure a new IP address on the network interface. Args: address: string, the IP address to configure. interface: string, the output device to use.
juraj-google-style
def convert_drive(self, shift, instruction):
    """Return converted `PulseInstruction`.

    Args:
        shift (int): Offset time.
        instruction (PulseInstruction): drive instruction.

    Returns:
        dict: Dictionary of required parameters.
    """
    command_dict = {
        'name': instruction.command.name,
        't0': shift + instruction.start_time,
        'ch': instruction.channels[0].name,
    }
    return self._qobj_model(**command_dict)
Return converted `PulseInstruction`. Args: shift(int): Offset time. instruction (PulseInstruction): drive instruction. Returns: dict: Dictionary of required parameters.
codesearchnet
def __directory_list_descriptor(self, configs): descriptor = {'kind': 'discovery items = [] for config in configs: item_descriptor = self.__item_descriptor(config) if item_descriptor: items.append(item_descriptor) if items: descriptor['items'] = items return descriptor
Builds a directory list for an API. Args: configs: List of dicts containing the service configurations to list. Returns: A dictionary that can be deserialized into JSON in discovery list format. Raises: ApiConfigurationError: If there's something wrong with the API configuration, such as a multiclass API decorated with different API descriptors (see the docstring for api()), or a repeated method signature.
codesearchnet
def _LastEntryTimestamp(dct, upper_bound_timestamp): if upper_bound_timestamp is None: upper_bound = lambda _: True else: upper_bound = lambda key: key <= upper_bound_timestamp try: return max(filter(upper_bound, iterkeys(dct))) except ValueError: return None
Searches for greatest timestamp lower than the specified one. Args: dct: A dictionary from timestamps to some items. upper_bound_timestamp: An upper bound for timestamp to be returned. Returns: Greatest timestamp that is lower than the specified one. If no such value exists, `None` is returned.
juraj-google-style
def AddSerializedFile(self, serialized_file_desc_proto):
    """Adds the FileDescriptorProto and its types to this pool.

    Args:
        serialized_file_desc_proto: A bytes string, serialization of the
            FileDescriptorProto to add.
    """
    # Imported locally to avoid a cyclic dependency at module import time.
    from google.protobuf import descriptor_pb2
    file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
        serialized_file_desc_proto)
    self.Add(file_desc_proto)
Adds the FileDescriptorProto and its types to this pool. Args: serialized_file_desc_proto: A bytes string, serialization of the FileDescriptorProto to add.
codesearchnet
def _parse_description(details):
    """Parse description of the book.

    Args:
        details (obj): HTMLElement containing slice of the page with
            details.

    Returns:
        str/None: Details as string with currency or None if not found.
    """
    matches = details.find("div", {"class": "detailPopis"})
    if not matches:
        return None
    description = matches[0]

    # Strip the e-book block and the category detail, which are not part
    # of the description text.
    ekniha = description.find("div", {"class": "ekniha"})
    if ekniha:
        ekniha[0].replaceWith(dhtmlparser.HTMLElement(""))
    detail = description.find("p", {"class": "detailKat"})
    if detail:
        detail[0].replaceWith(dhtmlparser.HTMLElement(""))

    text = dhtmlparser.removeTags(description).strip()
    return text or None
Parse description of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: str/None: Details as string with currency or None if not found.
juraj-google-style
def encode(self):
    """Encode this record into a binary blob.

    This binary blob could be parsed via a call to FromBinary().

    Returns:
        bytearray: The binary encoded script — a magic+length header and
        the record payload, prefixed with the first 16 bytes of the
        SHA-256 hash of the rest of the blob.
    """
    payload = bytearray()
    for record in self.records:
        payload += record.encode()
    header = struct.pack('<LL', self.SCRIPT_MAGIC,
                         len(payload) + self.SCRIPT_HEADER_LENGTH)
    blob = header + payload
    # Integrity prefix: truncated SHA-256 of header + payload.
    digest = hashlib.sha256(blob).digest()
    return bytearray(digest[:16]) + blob
Encode this record into a binary blob. This binary blob could be parsed via a call to FromBinary(). Returns: bytearray: The binary encoded script.
codesearchnet
def EncodeMessages(self, message_list, result, destination=None, timestamp=None, api_version=3):
    """Accepts a list of messages and encodes for transmission.

    This function signs and then encrypts the payload.

    Args:
        message_list: A MessageList rdfvalue containing a list of
            GrrMessages.
        result: A ClientCommunication rdfvalue which will be filled in.
        destination: The CN of the remote system this should go to.
        timestamp: A timestamp to use for the signed messages. If None -
            use the current time.
        api_version: The api version which this should be encoded in.

    Returns:
        A nonce (based on time) which is inserted to the encrypted
        payload. The client can verify that the server is able to decrypt
        the message and return the nonce.

    Raises:
        RuntimeError: If we do not support this api version.
    """
    if (api_version not in [3]):
        raise RuntimeError(('Unsupported api version: %s, expected 3.' % api_version))
    # Choose the cipher: the cached server cipher for the default
    # destination, or a fresh one keyed to the remote's public key.
    if (destination is None):
        destination = self.server_name
        cipher = self._GetServerCipher()
    else:
        remote_public_key = self._GetRemotePublicKey(destination)
        cipher = Cipher(self.common_name, self.private_key, remote_public_key)
    # Timestamp doubles as the nonce; microsecond resolution.
    if (timestamp is None):
        self.timestamp = timestamp = int((time.time() * 1000000))
    packed_message_list = rdf_flows.PackedMessageList(timestamp=timestamp)
    self.EncodeMessageList(message_list, packed_message_list)
    result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata
    result.encrypted_cipher = cipher.encrypted_cipher
    serialized_message_list = packed_message_list.SerializeToString()
    (result.packet_iv, result.encrypted) = cipher.Encrypt(serialized_message_list)
    # HMAC over the payload only, plus a full HMAC covering the cipher
    # envelope, IV and api version to authenticate the whole packet.
    result.hmac = cipher.HMAC(result.encrypted)
    result.full_hmac = cipher.HMAC(result.encrypted, result.encrypted_cipher, result.encrypted_cipher_metadata, result.packet_iv.SerializeToString(), struct.pack('<I', api_version))
    result.api_version = api_version
    if isinstance(result, rdfvalue.RDFValue):
        result.num_messages = len(message_list)
    return timestamp
Accepts a list of messages and encodes for transmission. This function signs and then encrypts the payload. Args: message_list: A MessageList rdfvalue containing a list of GrrMessages. result: A ClientCommunication rdfvalue which will be filled in. destination: The CN of the remote system this should go to. timestamp: A timestamp to use for the signed messages. If None - use the current time. api_version: The api version which this should be encoded in. Returns: A nonce (based on time) which is inserted to the encrypted payload. The client can verify that the server is able to decrypt the message and return the nonce. Raises: RuntimeError: If we do not support this api version.
codesearchnet
def get_poi_types(self, **kwargs):
    """Obtain POI types.

    Args:
        lang (str): Language code (*es* or *en*).

    Returns:
        Status boolean and parsed response (list[PoiType]), or message
        string in case of error.
    """
    params = {'cultureInfo': util.language_code(kwargs.get('lang'))}
    result = self.make_request('geo', 'get_poi_types', **params)
    raw_types = result.get('types', [])
    return True, [emtype.PoiType(**entry) for entry in raw_types]
Obtain POI types. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[PoiType]), or message string in case of error.
juraj-google-style
def lookup_symbol(self, name, namespace_stack):
    """Returns AST node and module for symbol if found.

    Args:
        name: 'name of the symbol to lookup'
        namespace_stack: None or ['namespaces', 'in', 'current', 'scope']

    Returns:
        (ast.Node, module (ie, any object stored with symbol)) if found

    Raises:
        Error if the symbol cannot be found.
    """
    symbol = Symbol(name, name.split('::'), namespace_stack)
    assert symbol.parts
    if symbol.parts[0] == '':
        # Fully qualified name (leading ::): drop the empty root part and
        # fall through to a global lookup.
        symbol.parts = symbol.parts[1:]
    elif namespace_stack is not None:
        result = self._lookup_in_all_namespaces(symbol)
        if result:
            return result
    return self._lookup_global(symbol)
Returns AST node and module for symbol if found. Args: name: 'name of the symbol to lookup' namespace_stack: None or ['namespaces', 'in', 'current', 'scope'] Returns: (ast.Node, module (ie, any object stored with symbol)) if found Raises: Error if the symbol cannot be found.
codesearchnet
def get_summary(self):
    """Return the function summary.

    Returns:
        (str, list, list, list, list): (name, inheritance, variables,
        function summaries, modifier summaries)
    """
    function_summaries = [function.get_summary() for function in self.functions]
    modifier_summaries = [modifier.get_summary() for modifier in self.modifiers]
    inheritance_names = [str(item) for item in self.inheritance]
    variable_names = [str(item) for item in self.variables]
    return (self.name, inheritance_names, variable_names,
            function_summaries, modifier_summaries)
Return the function summary Returns: (str, list, list, list, list): (name, inheritance, variables, fuction summaries, modifier summaries)
codesearchnet
def join_dags(self, names=None):
    """Wait for the specified dags to terminate.

    This function blocks until the specified dags terminate. If no dags
    are specified, wait for all dags of the workflow, except the dag of
    the task calling this signal, to terminate.

    Args:
        names (list): The names of the dags that have to terminate.

    Returns:
        bool: True if the signal was sent successfully.
    """
    request = Request(action='join_dags', payload={'names': names})
    return self._client.send(request).success
Wait for the specified dags to terminate. This function blocks until the specified dags terminate. If no dags are specified wait for all dags of the workflow, except the dag of the task calling this signal, to terminate. Args: names (list): The names of the dags that have to terminate. Returns: bool: True if all the signal was sent successfully.
codesearchnet
def ParseLines(lines, message, allow_unknown_extension=False, allow_field_number=False):
    """Parses a text representation of a protocol message into a message.

    Args:
        lines: An iterable of lines of a message's text representation.
        message: A protocol buffer message to merge into.
        allow_unknown_extension: if True, skip over missing extensions and
            keep parsing
        allow_field_number: if True, both field number and field name are
            allowed.

    Returns:
        The same message passed as argument.

    Raises:
        ParseError: On text parsing problems.
    """
    parser = _Parser(allow_unknown_extension, allow_field_number)
    return parser.ParseLines(lines, message)
Parses a text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems.
juraj-google-style
def getUserForHost(self, user, host):
    """Gets the name of the first existing user cert for a user and host.

    Args:
        user (str): The name of the user.
        host (str): The name of the host.

    Returns:
        str: The cert name, if one exists; otherwise None.
    """
    # Walk the FQDN from most to least specific and return the first
    # matching user cert.
    for name in iterFqdnUp(host):
        usercert = '%s@%s' % (user, name)
        if self.isUserCert(usercert):
            return usercert
Gets the name of the first existing user cert for a given user and host. Args: user (str): The name of the user. host (str): The name of the host. Examples: Get the name for the "myuser" user cert at "cool.vertex.link": usercertname = cdir.getUserForHost('myuser', 'cool.vertex.link') Returns: str: The cert name, if exists.
juraj-google-style
def postprocess(self, args: argparse.Namespace):
    """Postprocesses the subset of pytype_single_args that appear in args.

    Args:
        args: an argparse.Namespace.
    """
    names = {name for name in self.pytype_single_args if hasattr(args, name)}
    opt_map = {name: self._pytype_arg_map[name].long_opt for name in names}
    pytype_config.Postprocessor(names, opt_map, args).process()
Postprocesses the subset of pytype_single_args that appear in args. Args: args: an argparse.Namespace.
github-repos
def get_controller(self, path):
    """Return controller that handles the given path.

    Args:
        path: requested path, like: /blog/post_view/15

    Returns:
        The matching controller, or the index controller for bare paths.
    """
    segments = path.lstrip('/').split('/', 2)
    try:
        # Two-segment routes take precedence: "controller/action".
        return self._routes.get(segments[0] + '/' + segments[1])
    except (IndexError, KeyError):
        # Single segment (or empty path) falls back to a flat route.
        return self._routes.get(segments[0] or 'index')
Return controller that handle given path. Args: - path: requested path, like: /blog/post_view/15
codesearchnet
def plot_title(ax, pretitle='', title='Figure', posttitle='', title_fontsize=14, title_arg=None):
    """Set title options of a matplotlib plot.

    Args:
        ax: matplotlib axes
        pretitle (str): String to include before the general title of the
            figure
        posttitle (str): String to include after the general title of the
            figure
        title (str): Set the title for the figure
        title_fontsize (int): Defines the size of the title's font
        title_arg (dict): Additional arguments for the matplotlib
            title() call
    """
    current_title = ax.get_title()
    # Only compose a new title when the axes has none set yet.
    if not current_title:
        current_title = pretitle + title + posttitle
    title_arg = dict_if_none(title_arg)
    ax.set_title(current_title, fontsize=title_fontsize, **title_arg)
Set title options of a matplotlib plot Args: ax: matplotlib axes pretitle(str): String to include before the general title of the figure posttitle (str): String to include after the general title of the figure title (str): Set the title for the figure title_fontsize (int): Defines the size of the title's font title_arg (dict): Addition arguments for matplotlib.title() call
codesearchnet
def get_available_palettes(chosen_palette):
    """Given a chosen palette, return the tuple of those available.

    Because palette support of a particular level is almost always a
    superset of lower levels, this returns every palette up to and
    including the chosen one, or None when it is not found.

    Returns:
        tuple or None: available palettes, or None if not found.
    """
    try:
        cutoff = ALL_PALETTES.index(chosen_palette) + 1
    except ValueError:
        return None
    return ALL_PALETTES[:cutoff]
Given a chosen palette, returns tuple of those available, or None when not found. Because palette support of a particular level is almost always a superset of lower levels, this should return all available palettes. Returns: Boolean, None: is tty or None if not found.
codesearchnet
def load(cls, fh):
    """Load json or yaml data from file handle.

    Tries JSON first and falls back to YAML when JSON parsing fails.

    Args:
        fh (file): File handle to load from.

    Example:
        >>> with open('data.json', 'r') as json:
        >>>     jsdata = composite.load(json)
        >>>
        >>> with open('data.yml', 'r') as yml:
        >>>     ymldata = composite.load(yml)
    """
    dat = fh.read()
    try:
        ret = cls.from_json(dat)
    except Exception:
        # Bug fix: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only parsing failures should trigger the
        # YAML fallback.
        ret = cls.from_yaml(dat)
    return ret
Load json or yaml data from file handle. Args: fh (file): File handle to load from. Example: >>> with open('data.json', 'r') as json: >>> jsdata = composite.load(json) >>> >>> with open('data.yml', 'r') as yml: >>> ymldata = composite.load(yml)
juraj-google-style
def sample(self, num_rows=1):
    """Create synthetic values statistically similar to the original
    dataset.

    Args:
        num_rows: `int` amount of samples to generate.

    Returns:
        pandas.DataFrame: Sampled data, one column per fitted variable.
    """
    self.check_fit()
    dimensions = self.covariance.shape[0]
    means = np.zeros(dimensions)
    clean_cov = np.nan_to_num(self.covariance)
    samples = np.random.multivariate_normal(means, clean_cov, size=(num_rows,))

    columns = {}
    for index, (label, distrib) in enumerate(self.distribs.items()):
        # Map each gaussian marginal back through the fitted distribution.
        cdf = stats.norm.cdf(samples[:, index])
        columns[label] = distrib.percent_point(cdf)
    return pd.DataFrame(data=columns)
Creates synthetic values statistically similar to the original dataset. Args: num_rows: `int` amount of samples to generate. Returns: pandas.DataFrame: Sampled data.
juraj-google-style
def image_summary(predictions, targets, hparams):
    """Reshapes predictions and passes it to tensorboard.

    Args:
        predictions: The predicted image (logits).
        targets: The ground truth.
        hparams: model hparams (unused).

    Returns:
        summary_proto: containing the summary images.
        weights: A Tensor of zeros of the same shape as predictions.
    """
    del hparams
    predicted = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
    gold = tf.cast(targets, tf.uint8)
    prediction_summary = tf.summary.image("prediction", predicted, max_outputs=2)
    data_summary = tf.summary.image("data", gold, max_outputs=2)
    merged = tf.summary.merge([prediction_summary, data_summary])
    return merged, tf.zeros_like(predictions)
Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions.
juraj-google-style
def get_status(self, batch_id):
    """Returns the status enum for a batch.

    Args:
        batch_id (str): The id of the batch to get the status for.

    Returns:
        int: The status enum.
    """
    # Checks are ordered from most to least final state.
    with self._lock:
        if self._batch_committed(batch_id):
            return ClientBatchStatus.COMMITTED
        if batch_id in self._invalid:
            return ClientBatchStatus.INVALID
        if batch_id in self._pending:
            return ClientBatchStatus.PENDING
        return ClientBatchStatus.UNKNOWN
Returns the status enum for a batch. Args: batch_id (str): The id of the batch to get the status for Returns: int: The status enum
codesearchnet
class IntSoftmax(nn.Module):
    """Quantized version of `torch.nn.Softmax`.

    Adds quantization-specific arguments on top of `torch.nn.Softmax`.

    Args:
        output_bit (`int`):
            Bitwidth for the layer output activation.
        quant_mode (`bool`, *optional*, defaults to `False`):
            Whether or not the layer is quantized.
        force_dequant (`str`, *optional*, defaults to `"none"`):
            Force dequantize the layer if either "softmax" or "nonlinear"
            is given.
    """

    def __init__(self, output_bit, quant_mode=False, force_dequant='none'):
        super().__init__()
        self.output_bit = output_bit
        self.max_bit = 32
        self.quant_mode = quant_mode
        # Allow selectively disabling quantization for this nonlinearity.
        if force_dequant in ['nonlinear', 'softmax']:
            logger.info('Force dequantize softmax')
            self.quant_mode = False
        self.act = QuantAct(16, quant_mode=self.quant_mode)
        # Constants of the integer polynomial approximation of exp():
        # x0 ~= -ln(2); `const` is the fixed-point precision budget.
        self.x0 = -0.6931
        self.const = 30
        self.coef = [0.35815147, 0.96963238, 1.0]
        self.coef[1] /= self.coef[0]
        self.coef[2] /= self.coef[0]

    def int_polynomial(self, x_int, scaling_factor):
        # Integer-only evaluation of a*(x + b)*x + c on quantized inputs;
        # returns the new (value, scaling_factor) pair.
        with torch.no_grad():
            b_int = torch.floor(self.coef[1] / scaling_factor)
            c_int = torch.floor(self.coef[2] / scaling_factor ** 2)
        z = (x_int + b_int) * x_int + c_int
        scaling_factor = self.coef[0] * scaling_factor ** 2
        return (z, scaling_factor)

    def int_exp(self, x_int, scaling_factor):
        # Integer-only exp() via range reduction: x = r + q*x0, so
        # exp(x) = exp(r) * 2**(-q), with exp(r) from int_polynomial.
        with torch.no_grad():
            x0_int = torch.floor(self.x0 / scaling_factor)
        x_int = torch.max(x_int, self.const * x0_int)
        q = floor_ste.apply(x_int / x0_int)
        r = x_int - x0_int * q
        exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
        exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
        scaling_factor = exp_scaling_factor / 2 ** self.const
        return (exp_int, scaling_factor)

    def forward(self, x, scaling_factor):
        # Fall back to floating-point softmax when not quantized.
        if not self.quant_mode:
            return (nn.functional.softmax(x, dim=-1), None)
        x_int = x / scaling_factor
        # Subtract the per-row max for numerical stability.
        x_int_max, _ = x_int.max(dim=-1, keepdim=True)
        x_int = x_int - x_int_max
        exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)
        exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
        exp_int = exp / exp_scaling_factor
        exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
        # Normalize in fixed point, then requantize to `output_bit`.
        factor = floor_ste.apply(2 ** self.max_bit / exp_int_sum)
        exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
        scaling_factor = 1 / 2 ** self.output_bit
        return (exp_int * scaling_factor, scaling_factor)
Quantized version of `torch.nn.Softmax`. Adds quantization-specific arguments on top of `torch.nn.Softmax`. Args: output_bit (`int`): Bitwidth for the layer output activation. quant_mode (`bool`, *optional*, defaults to `False`): Whether or not the layer is quantized. force_dequant (`str`, *optional*, defaults to `"none"`): Force dequantize the layer if either "softmax" or "nonlinear" is given.
github-repos
def _merge_doc(original, to_merge):
    """Merge two usage strings together.

    Args:
        original: The source of headers and initial section lines.
        to_merge: The source for the additional section lines to append.

    Returns:
        A new usage string that contains information from both usage
        strings.
    """
    if not original:
        return to_merge or ''
    if not to_merge:
        return original or ''
    merged_sections = []
    for section_name in ('usage', 'arguments', 'options'):
        merged_sections.append(_merge_section(
            _get_section(section_name, original),
            _get_section(section_name, to_merge),
        ))
    return format_usage('\n\n'.join(s for s in merged_sections).rstrip())
Merge two usage strings together. Args: original: The source of headers and initial section lines. to_merge: The source for the additional section lines to append. Returns: A new usage string that contains information from both usage strings.
juraj-google-style
def recipe_dynamic_costs(config, dcm_account, auth_read, configuration_sheet_url, auth_write, bigquery_dataset): dynamic_costs(config, {'auth': auth_read, 'account': dcm_account, 'sheet': {'template': {'url': 'https:
Calculate DV360 cost at the dynamic creative combination level. Args: dcm_account (string) - NA auth_read (authentication) - Credentials used for reading data. configuration_sheet_url (string) - NA auth_write (authentication) - Credentials used for writing data. bigquery_dataset (string) - NA
github-repos
def sample_with_temperature(logits, temperature, sampling_keep_top_k=-1):
    """Either argmax or random sampling.

    Args:
        logits: a Tensor.
        temperature: a float; 0.0 means argmax, 1.0 means plain sampling.
        sampling_keep_top_k: If not -1, only sample from the top k logits.

    Returns:
        a Tensor with one fewer dimension than logits.
    """
    if temperature == 0.0:
        # Greedy decoding: flatten to 2-D, argmax over vocab, restore shape.
        logits_shape = shape_list(logits)
        argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1)
        return tf.reshape(argmax, logits_shape[:-1])

    assert temperature > 0.0
    if sampling_keep_top_k != -1:
        if sampling_keep_top_k <= 0:
            raise ValueError("sampling_keep_top_k must either be -1 or positive.")
        vocab_size = shape_list(logits)[1]
        # Value of the k-th largest logit per row, broadcast across the vocab.
        k_largest = tf.contrib.nn.nth_element(
            logits, n=sampling_keep_top_k, reverse=True)
        k_largest = tf.tile(tf.reshape(k_largest, [-1, 1]), [1, vocab_size])
        # Mask out everything strictly below the k-th largest logit.
        logits = tf.where(tf.less_equal(logits, k_largest),
                          tf.ones_like(logits) * -1e6, logits)
    reshaped_logits = tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature
    choices = tf.multinomial(reshaped_logits, 1)
    return tf.reshape(choices,
                      shape_list(logits)[:logits.get_shape().ndims - 1])
Either argmax or random sampling. Args: logits: a Tensor. temperature: a float 0.0=argmax 1.0=random sampling_keep_top_k: If not -1, only sample from the top k logits. Returns: a Tensor with one fewer dimension than logits.
juraj-google-style
def unzip_file(source_file, dest_dir=None, mkdir=False):
    """Unzip a compressed file.

    Args:
        source_file: Full path to a valid compressed file
            (e.g. c:/ladybug/testPts.zip)
        dest_dir: Target folder to extract to (e.g. c:/ladybug). Default is
            set to the same directory as the source file.
        mkdir: Set to True to create the directory if doesn't exist
            (Default: False)
    """
    if dest_dir is None:
        dest_dir, _ = os.path.split(source_file)
    elif not os.path.isdir(dest_dir):
        if mkdir:
            preparedir(dest_dir)
        else:
            created = preparedir(dest_dir, False)
            if not created:
                raise ValueError("Failed to find %s." % dest_dir)
    with zipfile.ZipFile(source_file) as zf:
        for member in zf.infolist():
            # Bug fix: the original mutated dest_dir inside this loop, so
            # every member after the first was extracted into the previous
            # member's sanitized sub-path. Use a per-member target instead.
            target_dir = dest_dir
            words = member.filename.split('\\')
            for word in words[:-1]:
                # Strip drive letters and unsafe path elements from
                # Windows-style member names.
                drive, word = os.path.splitdrive(word)
                head, word = os.path.split(word)
                if word in (os.curdir, os.pardir, ''):
                    continue
                target_dir = os.path.join(target_dir, word)
            zf.extract(member, target_dir)
Unzip a compressed file. Args: source_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip) dest_dir: Target folder to extract to (e.g. c:/ladybug). Default is set to the same directory as the source file. mkdir: Set to True to create the directory if doesn't exist (Default: False)
juraj-google-style
def best_case(self, matrix, m_list, indices_left):
    """Computes a best case given a matrix and manipulation list.

    Args:
        matrix: the current matrix (with some permutations already performed)
        m_list: [(multiplication fraction, number_of_indices, indices,
            species)] describing the manipulation
        indices_left: Set of indices which haven't had a permutation
            performed on them.
    """
    m_indices = []
    fraction_list = []
    for m in m_list:
        m_indices.extend(m[2])
        fraction_list.extend([m[0]] * m[1])
    indices = list(indices_left.intersection(m_indices))
    interaction_matrix = matrix[indices, :][:, indices]

    # Pad with 1s for sites that keep their full occupancy.
    fractions = np.zeros(len(interaction_matrix)) + 1
    fractions[:len(fraction_list)] = fraction_list
    fractions = np.sort(fractions)

    # Row sums for the candidate indices (factor 2 counts both triangle
    # halves of the symmetric matrix).
    sums = 2 * np.sum(matrix[indices], axis=1)
    sums = np.sort(sums)

    # Pairwise interaction correction, assuming the most favourable pairing
    # of fractions with sorted interaction strengths.
    step1 = np.sort(interaction_matrix) * (1 - fractions)
    step2 = np.sort(np.sum(step1, axis=1))
    step3 = step2 * (1 - fractions)
    interaction_correction = np.sum(step3)

    if self._algo == self.ALGO_TIME_LIMIT:
        # Blend toward a cheap average correction as the 30-minute budget
        # is consumed, trading accuracy for speed.
        elapsed_time = datetime.utcnow() - self._start_time
        speedup_parameter = elapsed_time.total_seconds() / 1800
        avg_int = np.sum(interaction_matrix, axis=None)
        avg_frac = np.average(np.outer(1 - fractions, 1 - fractions))
        average_correction = avg_int * avg_frac
        interaction_correction = (average_correction * speedup_parameter
                                  + interaction_correction * (1 - speedup_parameter))

    return (np.sum(matrix) + np.inner(sums[::-1], fractions - 1)
            + interaction_correction)
Computes a best case given a matrix and manipulation list. Args: matrix: the current matrix (with some permutations already performed) m_list: [(multiplication fraction, number_of_indices, indices, species)] describing the manipulation indices: Set of indices which haven't had a permutation performed on them.
juraj-google-style
def get_tag(self, main_type, sub_type, unique_id, tag, owner=None, params=None):
    """Retrieve a tag for a resource by delegating to ``self.tag``.

    Args:
        main_type: The main resource type.
        sub_type: The resource sub type.
        unique_id: The unique identifier of the resource.
        tag: The tag to retrieve.
        owner: Optional owner.
        params: Optional query parameters.

    Return:
        The result of ``self.tag``.
    """
    # Pass a concrete dict when no params were supplied.
    return self.tag(main_type, sub_type, unique_id, tag, owner=owner,
                    params=params or {})
Args: owner: main_type: sub_type: unique_id: tag: params: Return:
juraj-google-style
def get_component(self, colour, tolerance=0, default=None):
    """Get the component corresponding to a display colour. This is for
    generating a Striplog object from a colour image of a striplog.

    Args:
        colour (str): The hex colour string to look up.
        tolerance (float): The colourspace distance within which to match.
        default (component or None): The component to return in the event
            of no match.

    Returns:
        component. The component best matching the provided colour.
    """
    # sqrt(195075) == sqrt(3 * 255**2), the maximum possible RGB distance.
    if not (0 <= tolerance <= np.sqrt(195075)):
        raise LegendError('Tolerance must be between 0 and 441.67')

    # Exact-match fast path.
    for decor in self.__list:
        if colour.lower() == decor.colour:
            return decor.component

    # Fall back to the nearest colour in RGB space.
    r1, g1, b1 = utils.hex_to_rgb(colour)
    # The original initialized best_match with a corrupted/unterminated
    # string literal and left best_match_colour undefined when no decor
    # improved on the initial distance; initialize both defensively.
    best_match = None
    best_match_colour = None
    best_match_dist = np.sqrt(r1**2. + g1**2. + b1**2.)
    for decor in self.__list:
        r2, g2, b2 = decor.rgb
        distance = np.sqrt((r2 - r1)**2. + (g2 - g1)**2. + (b2 - b1)**2.)
        if distance < best_match_dist:
            best_match = decor.component
            best_match_dist = distance
            best_match_colour = decor.colour

    if best_match is not None and best_match_dist <= tolerance:
        return best_match

    with warnings.catch_warnings():
        warnings.simplefilter("always")
        w = "No match found for {0} ".format(colour.lower())
        w += "with tolerance of {0}.".format(tolerance)
        if best_match is not None:
            w += " Best match is "
            w += "{0}, {1}".format(best_match.summary(), best_match_colour)
            w += ", d={0}".format(best_match_dist)
        warnings.warn(w)
    return default
Get the component corresponding to a display colour. This is for generating a Striplog object from a colour image of a striplog. Args: colour (str): The hex colour string to look up. tolerance (float): The colourspace distance within which to match. default (component or None): The component to return in the event of no match. Returns: component. The component best matching the provided colour.
juraj-google-style
def load(hdf5_filename):
    """Import a HDF5 file into a numpy array.

    Arguments:
        hdf5_filename: A string filename of a HDF5 datafile

    Returns:
        A numpy array with data from the HDF5 file

    Raises:
        ValueError: If the file cannot be opened or read.
    """
    hdf5_filename = os.path.expanduser(hdf5_filename)
    try:
        # Copy into memory before the file closes; h5py datasets are lazy.
        # The context manager also fixes the original's leaked file handle.
        with h5py.File(hdf5_filename, "r") as f:
            return numpy.array(f.get('image').get('CUTOUT'))
    except Exception as e:
        # The original format string mixed "{0}" with "{}", which itself
        # raised a ValueError, and was followed by an unreachable bare raise.
        raise ValueError("Could not load file {0} for conversion. {1}".format(
            hdf5_filename, e))
Import a HDF5 file into a numpy array. Arguments: hdf5_filename: A string filename of a HDF5 datafile Returns: A numpy array with data from the HDF5 file
juraj-google-style
def search(self, resources_request=None):
    """Search for resources.

    Args:
        resources_request (str): Resource to search; glob-style patterns
            are supported. If None, returns all matching resource types.

    Returns:
        2-tuple:
        - str: resource type (family, package, variant);
        - List of `ResourceSearchResult`: Matching resources. Will be in
          alphabetical order if families, and version ascending for
          packages or variants.
    """
    name_pattern, version_range = self._parse_request(resources_request)

    # All family names matching the glob pattern, alphabetically sorted.
    family_names = sorted(set(
        x.name for x in iter_package_families(paths=self.package_paths)
        if fnmatch.fnmatch(x.name, name_pattern)
    ))

    # Decide what kind of resource to return.
    if self.resource_type:
        resource_type = self.resource_type
    elif version_range or len(family_names) == 1:
        resource_type = "package"
    else:
        resource_type = "family"

    if not family_names:
        return resource_type, []

    if resource_type == "family":
        return "family", [ResourceSearchResult(x, "family") for x in family_names]

    results = []
    for name in family_names:
        packages = sorted(
            iter_packages(name, version_range, paths=self.package_paths),
            key=lambda x: x.version)
        if self.latest and packages:
            packages = [packages[-1]]

        for package in packages:
            # Apply time-window filters and optional validation.
            try:
                if package.timestamp:
                    if self.after_time and package.timestamp < self.after_time:
                        continue
                    if self.before_time and package.timestamp >= self.before_time:
                        continue
                if self.validate:
                    package.validate_data()
            except ResourceContentError as e:
                if resource_type == "package":
                    results.append(ResourceSearchResult(package, "package", str(e)))
                continue

            if resource_type == "package":
                results.append(ResourceSearchResult(package, "package"))
                continue

            # resource_type == "variant": expand into individual variants.
            try:
                for variant in package.iter_variants():
                    if self.validate:
                        try:
                            variant.validate_data()
                        except ResourceContentError as e:
                            results.append(
                                ResourceSearchResult(variant, "variant", str(e)))
                            continue
                    results.append(ResourceSearchResult(variant, "variant"))
            except ResourceContentError:
                # Broken package definition; variants cannot be enumerated.
                continue

    return resource_type, results
Search for resources. Args: resources_request (str): Resource to search, glob-style patterns are supported. If None, returns all matching resource types. Returns: 2-tuple: - str: resource type (family, package, variant); - List of `ResourceSearchResult`: Matching resources. Will be in alphabetical order if families, and version ascending for packages or variants.
juraj-google-style
def not_storable(_type):
    """Helper for tagging unserializable types.

    Arguments:
        _type (type): type to be ignored.

    Returns:
        Storable: storable instance that does not poke.
    """
    # Pokes are faked and peeks always fail for this type.
    handlers = StorableHandler(poke=fake_poke, peek=fail_peek(_type))
    return Storable(_type, handlers=handlers)
Helper for tagging unserializable types. Arguments: _type (type): type to be ignored. Returns: Storable: storable instance that does not poke.
juraj-google-style
def read_from_file(self, filename, negative_occupancies='warn'):
    """Reads the projected wavefunction character of each band from a VASP
    PROCAR file.

    Args:
        filename (str): Filename of the PROCAR file.
        negative_occupancies (:obj:`str`, optional): Sets the behaviour for
            handling negative occupancies. Default is `warn`.

    Returns:
        None

    Note:
        Valid options for `negative_occupancies` are:
            `warn` (default): Warn that some partial occupancies are
                negative, but do not alter any values.
            `raise`: Raise a ValueError.
            `ignore`: Do nothing.
            `zero`: Negative partial occupancies will be set to zero.
    """
    valid_negative_occupancies = ['warn', 'raise', 'ignore', 'zero']
    if negative_occupancies not in valid_negative_occupancies:
        raise ValueError('"{}" is not a valid value for the keyword '
                         '`negative_occupancies`.'.format(negative_occupancies))
    with open(filename, 'r') as file_in:
        file_in.readline()  # skip the PROCAR header line
        (self.number_of_k_points, self.number_of_bands,
         self.number_of_ions) = [int(f) for f in
                                 get_numbers_from_string(file_in.readline())]
        self.read_in = file_in.read()
    self.parse_k_points()
    self.parse_bands()
    self.parse_occupancy()
    # Column 1 of self.occupancy holds the occupation numbers. The original
    # source was corrupted with invalid subscripts like `[(:, 1)]`; these
    # are restored to standard numpy indexing.
    if np.any(self.occupancy[:, 1] < 0):
        if negative_occupancies == 'warn':
            warnings.warn('One or more occupancies in your PROCAR file are negative.')
        elif negative_occupancies == 'raise':
            raise ValueError('One or more occupancies in your PROCAR file are negative.')
        elif negative_occupancies == 'zero':
            self.occupancy[self.occupancy < 0] = 0.0
    self.parse_projections()
    self.sanity_check()
    self.read_in = None  # free the raw file contents
    # Drop the "tot" column (index 0 of the ion axis) and put axes in
    # (k-point, band, spin, ion, projection) order.
    if self.calculation['spin_polarised']:
        self.data = self.projection_data.reshape(
            self.spin_channels, self.number_of_k_points, self.number_of_bands,
            self.number_of_ions + 1, self.number_of_projections
        )[:, :, :, :, 1:].swapaxes(0, 1).swapaxes(1, 2)
    else:
        self.data = self.projection_data.reshape(
            self.number_of_k_points, self.number_of_bands, self.spin_channels,
            self.number_of_ions + 1, self.number_of_projections
        )[:, :, :, :, 1:]
Reads the projected wavefunction character of each band from a VASP PROCAR file. Args: filename (str): Filename of the PROCAR file. negative_occupancies (:obj:Str, optional): Sets the behaviour for handling negative occupancies. Default is `warn`. Returns: None Note: Valid options for `negative_occupancies` are: `warn` (default): Warn that some partial occupancies are negative, but do not alter any values. `raise`: Raise an AttributeError. `ignore`: Do nothing. `zero`: Negative partial occupancies will be set to zero.
codesearchnet
def regroup_if_changed(group, op_list, name=None):
    """Creates a new group for op_list if it has changed.

    Args:
        group: The current group. It is returned if op_list is unchanged.
        op_list: The list of operations to check.
        name: The name to use if a new group is created.

    Returns:
        Either group or a new group (or if op_list is empty then no_op).
    """
    has_deltas = isinstance(op_list, sequence_with_deltas.SequenceWithDeltas)
    # The existing group is reusable only if it exists, matches op_list in
    # size and (for delta-tracking sequences) op_list has not changed.
    unchanged = (group is not None
                 and len(group.control_inputs) == len(op_list)
                 and not (has_deltas and op_list.has_changed()))
    if unchanged:
        return group
    if has_deltas:
        op_list.mark()
    if op_list:
        return tf.group(*op_list, name=name)
    return tf.no_op(name=name)
Creates a new group for op_list if it has changed. Args: group: The current group. It is returned if op_list is unchanged. op_list: The list of operations to check. name: The name to use if a new group is created. Returns: Either group or a new group (or if op_list is empty then no_op).
juraj-google-style
def Scalars(self, run, tag):
    """Retrieve the scalar events associated with a run and tag.

    Args:
        run: A string name of the run for which values are retrieved.
        tag: A string name of the tag for which values are retrieved.

    Raises:
        KeyError: If the run is not found, or the tag is not available for
            the given run.

    Returns:
        An array of `event_accumulator.ScalarEvents`.
    """
    return self.GetAccumulator(run).Scalars(tag)
Retrieve the scalar events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.ScalarEvents`.
juraj-google-style
def period_neighborhood_probability(self, radius, smoothing, threshold, stride,
                                    start_time, end_time):
    """Calculate the neighborhood probability over the full period of the
    forecast.

    Args:
        radius: circular radius from each point in km
        smoothing: width of Gaussian smoother in km
        threshold: intensity of exceedance
        stride: number of grid points to skip for reduced neighborhood grid
        start_time: first forecast index (inclusive)
        end_time: last forecast index (exclusive)

    Returns:
        neighborhood probabilities, one grid per ensemble member
    """
    # Reduced grid on which the probabilities are evaluated.
    neighbor_x = self.x[::stride, ::stride]
    neighbor_y = self.y[::stride, ::stride]
    neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(),
                                          neighbor_y.ravel())).T)
    neighbor_prob = np.zeros((self.data.shape[0],
                              neighbor_x.shape[0], neighbor_x.shape[1]))
    print('Forecast Hours: {0}-{1}'.format(start_time, end_time))
    for m in range(len(self.members)):
        # Maximum intensity at each grid point over the forecast period.
        period_max = self.data[m, start_time:end_time, :, :].max(axis=0)
        valid_i, valid_j = np.where(period_max >= threshold)
        print(self.members[m], len(valid_i))
        if len(valid_i) > 0:
            var_kd_tree = cKDTree(np.vstack((self.x[valid_i, valid_j],
                                             self.y[valid_i, valid_j])).T)
            # Every reduced-grid point within `radius` of an exceedance
            # gets probability 1.
            exceed_points = np.unique(np.concatenate(
                var_kd_tree.query_ball_tree(neighbor_kd_tree, radius))).astype(int)
            exceed_i, exceed_j = np.unravel_index(exceed_points,
                                                  neighbor_x.shape)
            neighbor_prob[m][exceed_i, exceed_j] = 1
            if smoothing > 0:
                neighbor_prob[m] = gaussian_filter(neighbor_prob[m], smoothing,
                                                   mode='constant')
    return neighbor_prob
Calculate the neighborhood probability over the full period of the forecast Args: radius: circular radius from each point in km smoothing: width of Gaussian smoother in km threshold: intensity of exceedance stride: number of grid points to skip for reduced neighborhood grid Returns: (neighborhood probabilities)
juraj-google-style
def _convert_to_sparse_tensors(sp_inputs):
    """Convert `sp_inputs` to `SparseTensor` objects and return them.

    Args:
        sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
            objects.

    Returns:
        `sp_inputs` converted to `SparseTensor` objects, preserving the
        container type (list in, list out; tuple in, tuple out).

    Raises:
        TypeError: if `sp_inputs` is neither a `list` nor a `tuple`.
    """
    if isinstance(sp_inputs, list):
        return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
    if isinstance(sp_inputs, tuple):
        # Bug fix: the original returned the bare generator expression, so
        # tuple callers got a one-shot iterator instead of a tuple.
        return tuple(
            _convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
    raise TypeError('Inputs must be a list or tuple.')
Convert `sp_inputs` to `SparseTensor` objects and return them. Args: sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue` objects. Returns: `sp_inputs` converted to `SparseTensor` objects. Raises: ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor `SparseTensorValue`.
github-repos
def depth_june_average_ground_temperature(self, value=None):
    """Corresponds to IDD Field `depth_june_average_ground_temperature`

    Args:
        value (float): value for IDD Field
            `depth_june_average_ground_temperature`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None marks a missing value and bypasses validation.
        self._depth_june_average_ground_temperature = None
        return
    try:
        coerced = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `depth_june_average_ground_temperature`'.format(value))
    self._depth_june_average_ground_temperature = coerced
Corresponds to IDD Field `depth_june_average_ground_temperature` Args: value (float): value for IDD Field `depth_june_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def write(self, brightness):
    """Set the brightness of the LED to `brightness`.

    `brightness` can be a boolean for on/off, or integer value for a
    specific brightness.

    Args:
        brightness (bool, int): Brightness value to set.

    Raises:
        LEDError: if an I/O or OS error occurs.
        TypeError: if `brightness` type is not bool or int.
    """
    if not isinstance(brightness, (bool, int)):
        raise TypeError("Invalid brightness type, should be bool or int.")

    if isinstance(brightness, bool):
        # Booleans map to full brightness or off.
        brightness = self._max_brightness if brightness else 0
    elif not 0 <= brightness <= self._max_brightness:
        raise ValueError("Invalid brightness value, should be between 0 and %d." % self._max_brightness)

    try:
        os.write(self._fd, b"%d\n" % brightness)
    except OSError as e:
        raise LEDError(e.errno, "Writing LED brightness: " + e.strerror)

    try:
        # Rewind so the next write starts at the beginning of the file.
        os.lseek(self._fd, 0, os.SEEK_SET)
    except OSError as e:
        raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
Set the brightness of the LED to `brightness`. `brightness` can be a boolean for on/off, or integer value for a specific brightness. Args: brightness (bool, int): Brightness value to set. Raises: LEDError: if an I/O or OS error occurs. TypeError: if `brightness` type is not bool or int.
juraj-google-style
def __contains__(self, nurest_object):
    """Verify if the fetcher contains the given NURESTObject

    Args:
        nurest_object (bambou.NURESTObject): the NURESTObject object to
            verify

    Returns:
        Returns True if the object has been found. False otherwise
    """
    # Equality is delegated to each object's own equals() method.
    return any(obj.equals(nurest_object) for obj in self)
Verify if the fetcher contains the given NURESTObject Args: nurest_object (bambou.NURESTObject): the NURESTObject object to verify Returns: Returns True if the object has been found. False otherwise
juraj-google-style
def fuzzy_match(self, proc):
    """Are there any commands that contain the given text?

    Returns:
        boolean: ``True`` if the word ``proc`` appears in the command
        column.

    .. note::
        'proc' can match anywhere in the command path, name or arguments.
    """
    for row in self.data:
        if proc in row[self.command_name]:
            return True
    return False
Are there any commands that contain the given text? Returns: boolean: ``True`` if the word ``proc`` appears in the command column. .. note:: 'proc' can match anywhere in the command path, name or arguments.
codesearchnet
def _linear(self, inputs):
    """Computes logits by running inputs through a linear layer.

    Args:
        inputs: A float32 tensor with shape [..., hidden_size]

    Returns:
        float32 tensor with shape [..., vocab_size].
    """
    batch_dims = shape_list(inputs)[:-1]
    flat = tf.reshape(inputs, [-1, self.hidden_size])
    # Project onto the vocabulary using self.weight transposed.
    logits = tf.matmul(flat, self.weight, transpose_b=True)
    return tf.reshape(logits, batch_dims + [self.vocab_size])
Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [..., hidden_size] Returns: float32 tensor with shape [..., vocab_size].
github-repos
def write_compartments(self, stream, compartments, adjacencies, properties=None):
    """Write iterable of compartments as YAML object to stream.

    Args:
        stream: File-like object.
        compartments: Iterable of compartment entries.
        adjacencies: Dictionary mapping IDs to adjacent compartment IDs.
        properties: Set of compartment properties to output (or None to
            output all).
    """
    def convert(entry):
        # Pair each entry with its adjacency list (None when absent).
        return self.convert_compartment_entry(entry, adjacencies.get(entry.id))

    self._write_entries(stream, compartments, convert, properties)
Write iterable of compartments as YAML object to stream. Args: stream: File-like object. compartments: Iterable of compartment entries. adjacencies: Dictionary mapping IDs to adjacent compartment IDs. properties: Set of compartment properties to output (or None to output all).
codesearchnet
def get():
    """Get local facts about this machine.

    Returns:
        json-compatible dict with all facts of this host
    """
    result = runCommand('facter --json', raise_error_on_fail=True)
    # result[1] presumably holds facter's JSON stdout — see runCommand.
    return json.loads(result[1])
Get local facts about this machine. Returns: json-compatible dict with all facts of this host
codesearchnet
def take_profit_replace(self, accountID, orderID, **kwargs):
    """Shortcut to replace a pending Take Profit Order in an Account

    Args:
        accountID : The ID of the Account
        orderID : The ID of the Take Profit Order to replace
        kwargs : The arguments to create a TakeProfitOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    replacement = TakeProfitOrderRequest(**kwargs)
    return self.replace(accountID, orderID, order=replacement)
Shortcut to replace a pending Take Profit Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Take Profit Order to replace kwargs : The arguments to create a TakeProfitOrderRequest Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the Credential struct and decode it into its
    constituent parts.

    Args:
        input_stream (stream): A data stream containing encoded object
            data, supporting a read method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: Raised if either the credential type or value are
            missing from the encoding.
    """
    super(Credential, self).read(input_stream, kmip_version=kmip_version)
    # Restrict further reads to this struct's own encoding.
    local_stream = BytearrayStream(input_stream.read(self.length))

    if self.is_tag_next(enums.Tags.CREDENTIAL_TYPE, local_stream):
        self._credential_type = primitives.Enumeration(
            enum=enums.CredentialType,
            tag=enums.Tags.CREDENTIAL_TYPE)
        self._credential_type.read(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError('Credential encoding missing the credential type.')

    if self.is_tag_next(enums.Tags.CREDENTIAL_VALUE, local_stream):
        # Instantiate the credential value struct matching the decoded type.
        if self.credential_type == enums.CredentialType.USERNAME_AND_PASSWORD:
            self._credential_value = UsernamePasswordCredential()
        elif self.credential_type == enums.CredentialType.DEVICE:
            self._credential_value = DeviceCredential()
        elif self.credential_type == enums.CredentialType.ATTESTATION:
            self._credential_value = AttestationCredential()
        else:
            raise ValueError(
                'Credential encoding includes unrecognized credential type.')
        self._credential_value.read(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError('Credential encoding missing the credential value.')

    self.is_oversized(local_stream)
Read the data encoding the Credential struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if either the credential type or value are missing from the encoding.
codesearchnet
def get_graph(self, item_ids, language=None):
    """Get a subgraph of items reachable from the given set of items
    through any relation.

    Args:
        item_ids (list): items which are taken as roots for the
            reachability
        language (str): if specified, filter out items which are not
            available in the given language

    Returns:
        dict: item id -> list of items (parent items), root items are
        referenced by None key
    """
    def _related(item_ids):
        # Resolve parent/child adjacency for the given items (all active
        # items when item_ids is None).
        if item_ids is None:
            items = Item.objects.filter(active=True).prefetch_related('parents', 'children')
        else:
            item_ids = [ii for iis in item_ids.values() for ii in iis]
            items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents', 'children')
        return {
            item.id: sorted([_item.id
                             for rel in [item.parents.all(), item.children.all()]
                             for _item in rel])
            for item in items
        }

    if item_ids is None:
        return self._reachable_graph(None, _related, language=language)
    # Build the full graph first, then restrict it to the requested roots.
    graph = self.get_graph(None, language)
    return self._subset_graph(graph, item_ids)
Get a subgraph of items reachable from the given set of items through any relation. Args: item_ids (list): items which are taken as roots for the reachability language (str): if specified, filter out items which are not available in the given language Returns: dict: item id -> list of items (parent items), root items are referenced by None key
juraj-google-style
def register_mbr_plugin(self, fs_id, plugin):
    """Used in plugin's registration routine, to associate its detection
    method with given filesystem id.

    Args:
        fs_id: filesystem id that is read from MBR partition entry
        plugin: plugin that supports this filesystem
    """
    plugin_name = self.__get_plugin_name(plugin)
    self.logger.debug('MBR: {}, FS ID: {}'.format(plugin_name, fs_id))
    self.__mbr_plugins[fs_id].append(plugin)
Used in a plugin's registration routine, to associate its detection method with the given filesystem id Args: fs_id: filesystem id that is read from MBR partition entry plugin: plugin that supports this filesystem
codesearchnet
def hwvtep_add_loopback_interface(self, **kwargs):
    """Add loopback interface to the overlay-gateway

    Args:
        name (str): gateway-name
        int_id (int): loopback interface id
        callback (function): A function executed upon completion of the
            method.

    Returns:
        Return value of `callback`.

    Raises:
        None
    """
    ip_args = dict(name=kwargs.pop('name'), loopback_id=kwargs.pop('int_id'))
    # Look up the config generator for the loopback interface on the
    # brocade_tunnels binding, then run the result through the callback.
    gw_attr = getattr(self._brocade_tunnels,
                      'overlay_gateway_ip_interface_loopback_loopback_id')
    config = gw_attr(**ip_args)
    return self._callback(config)
Add loopback interface to the overlay-gateway Args: name (str): gateway-name int_id (int): loopback inteface id callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
codesearchnet
def audio(self, audio, sample_rate, name=None, subdir=''):
    """Summary audio to listen on web browser.

    Args:
        audio: sampled wave array (:class:`numpy.ndarray`,
            :class:`cupy.ndarray` or :class:`chainer.Variable`).
        sample_rate (int): sampling rate.
        name (str): name of audio. set as column name. when not setting,
            assigned ``'audio'`` + sequential number.
        subdir (str): sub-directory path of output.
    """
    from chainerui.report.audio_report import check_available
    if not check_available():
        # Audio reporting needs optional dependencies; skip when absent.
        return
    from chainerui.report.audio_report import report as _audio

    col_name = self.get_col_name(name, 'audio')
    out_dir, rel_out_dir = self.get_subdir(subdir)
    filename, _ = _audio(audio, sample_rate, out_dir, col_name)
    self.audios[col_name] = os.path.join(rel_out_dir, filename)
    self.count += 1
Summary audio to listen on web browser. Args: audio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \ :class:`chainer.Variable`): sampled wave array. sample_rate (int): sampling rate. name (str): name of audio. set as column name. when not setting, assigned ``'audio'`` + sequential number. subdir (str): sub-directory path of output.
juraj-google-style
def do_post(endpoint, body, access_token):
    """Do an HTTP POST request and return JSON.

    Args:
        endpoint (str): Azure Resource Manager management endpoint.
        body (str): JSON body of information to post.
        access_token (str): A valid Azure authentication token.

    Returns:
        HTTP response. JSON body.
    """
    headers = {
        'content-type': 'application/json',
        'Authorization': 'Bearer ' + access_token,
        'User-Agent': get_user_agent(),
    }
    return requests.post(endpoint, data=body, headers=headers)
Do an HTTP POST request and return JSON. Args: endpoint (str): Azure Resource Manager management endpoint. body (str): JSON body of information to post. access_token (str): A valid Azure authentication token. Returns: HTTP response. JSON body.
codesearchnet
def difference(self, *other):
    """Returns a new :class:`FrameSet` with elements in `self` but not in
    `other`.

    Args:
        other (:class:`FrameSet`): or objects that can cast to
            :class:`FrameSet`

    Returns:
        :class:`FrameSet`:
    """
    # Compute the set difference on the underlying frozenset, then build
    # a sorted FrameSet from the remainder.
    remaining = self.items.difference(*map(set, other))
    return self.from_iterable(remaining, sort=True)
Returns a new :class:`FrameSet` with elements in `self` but not in `other`. Args: other (:class:`FrameSet`): or objects that can cast to :class:`FrameSet` Returns: :class:`FrameSet`:
juraj-google-style
def onScreen(x, y=None):
    """Returns whether the given xy coordinates are on the screen or not.

    Args:
        Either the arguments are two separate values, first arg for x and
        second for y, or there is a single argument of a sequence with two
        values, the first x and the second y.
        Example: onScreen(x, y) or onScreen([x, y])

    Returns:
        bool: True if the xy coordinates are on the screen at its current
            resolution, otherwise False.
    """
    x, y = _unpackXY(x, y)
    width, height = platformModule._size()
    return 0 <= int(x) < width and 0 <= int(y) < height
Returns whether the given xy coordinates are on the screen or not. Args: Either the arguments are two separate values, first arg for x and second for y, or there is a single argument of a sequence with two values, the first x and the second y. Example: onScreen(x, y) or onScreen([x, y]) Returns: bool: True if the xy coordinates are on the screen at its current resolution, otherwise False.
juraj-google-style
def merge_variables(variables, **kwargs):
    """Concatenates Variables along row axis.

    Args:
        variables (list): List of Variables to merge. Variables can have
            different names (and all Variables that share a name will be
            concatenated together).

    Returns:
        A list of Variables.
    """
    # Group variables by name, preserving first-seen order.
    var_dict = OrderedDict()
    for v in variables:
        var_dict.setdefault(v.name, []).append(v)
    # NOTE(review): `merge_variables` in this call presumably resolves to a
    # merge helper imported from another module (not this function),
    # otherwise the call would recurse indefinitely — confirm against the
    # module's imports.
    return [merge_variables(vars_, **kwargs) for vars_ in list(var_dict.values())]
Concatenates Variables along row axis. Args: variables (list): List of Variables to merge. Variables can have different names (and all Variables that share a name will be concatenated together). Returns: A list of Variables.
juraj-google-style
def _longToBytestring(value, signed=False, numberOfRegisters=2):
    """Convert a long integer to a bytestring.

    Long integers (32 bits = 4 bytes) are stored in two consecutive 16-bit
    registers in the slave.

    Args:
        * value (int): The numerical value to be converted.
        * signed (bool): Whether large positive values should be
          interpreted as negative values.
        * numberOfRegisters (int): Should be 2. For error checking only.

    Returns:
        A bytestring (4 bytes).

    Raises:
        TypeError, ValueError
    """
    _checkInt(value, description='inputvalue')
    _checkBool(signed, description='signed parameter')
    _checkInt(numberOfRegisters, minvalue=2, maxvalue=2,
              description='number of registers')
    # Big-endian 32-bit: 'l' = signed, 'L' = unsigned.
    formatcode = '>l' if signed else '>L'
    outstring = _pack(formatcode, value)
    assert len(outstring) == 4
    return outstring
Convert a long integer to a bytestring. Long integers (32 bits = 4 bytes) are stored in two consecutive 16-bit registers in the slave. Args: * value (int): The numerical value to be converted. * signed (bool): Whether large positive values should be interpreted as negative values. * numberOfRegisters (int): Should be 2. For error checking only. Returns: A bytestring (4 bytes). Raises: TypeError, ValueError
juraj-google-style
def __eq__(self, other):
    """Determines if the date time values are equal to other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are equal to other.
    """
    # Semantic times compare equal when they share the same sort order.
    return isinstance(other, SemanticTime) and self._SORT_ORDER == other._SORT_ORDER
Determines if the date time values are equal to other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are equal to other.
juraj-google-style
def get_schema_descendant(self, route):
    """Return descendant schema node or ``None`` if not found.

    Args:
        route: Schema route to the descendant node (relative to the
            receiver). Each step is unpacked into ``get_child`` arguments.
    """
    node = self
    for step in route:
        node = node.get_child(*step)
        if node is None:
            return None
    return node
Return descendant schema node or ``None`` if not found. Args: route: Schema route to the descendant node (relative to the receiver).
juraj-google-style
def eval_rs(gains, losses):
    """Evaluates the RS variable in RSI algorithm.

    Args:
        gains: List of price gains.
        losses: List of prices losses.

    Returns:
        Float of average gains over average losses.
    """
    count = len(gains) + len(losses)
    # Empty lists default their average to 1 to keep the ratio well-defined.
    avg_gains = stats.avg(gains, count=count) if gains else 1
    avg_losses = stats.avg(losses, count=count) if losses else 1
    # Guard against division by zero when there were no losses at all.
    if avg_losses == 0:
        return avg_gains
    return avg_gains / avg_losses
Evaluates the RS variable in RSI algorithm Args: gains: List of price gains. losses: List of prices losses. Returns: Float of average gains over average losses.
codesearchnet
def ValidateSyntax(rdf_artifact):
    """Validates artifact syntax.

    This method can be used to validate individual artifacts as they are
    loaded, without needing all artifacts to be loaded first, as for
    Validate().

    Args:
        rdf_artifact: RDF object artifact.

    Raises:
        ArtifactSyntaxError: If artifact syntax is invalid.
    """
    if not rdf_artifact.doc:
        raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, 'missing doc')

    for supp_os in rdf_artifact.supported_os:
        valid_os = rdf_artifact.SUPPORTED_OS_LIST
        if supp_os not in valid_os:
            detail = "invalid `supported_os` ('%s' not in %s)" % (supp_os, valid_os)
            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)

    # Conditions must parse and compile as objectfilter expressions.
    for condition in rdf_artifact.conditions:
        try:
            of = objectfilter.Parser(condition).Parse()
            of.Compile(objectfilter.BaseFilterImplementation)
        except rdf_artifacts.ConditionError as e:
            detail = "invalid condition '%s'" % condition
            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail, e)

    for label in rdf_artifact.labels:
        if label not in rdf_artifact.ARTIFACT_LABELS:
            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact,
                                                    "invalid label '%s'" % label)

    # Any knowledge-base variable an artifact provides must actually exist.
    valid_provides = rdf_client.KnowledgeBase().GetKbFieldNames()
    for kb_var in rdf_artifact.provides:
        if kb_var not in valid_provides:
            detail = "broken `provides` ('%s' not in %s)" % (kb_var, valid_provides)
            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)

    # Same check for KB variables referenced via path interpolation.
    for dep in GetArtifactPathDependencies(rdf_artifact):
        if dep not in valid_provides:
            detail = "broken path dependencies ('%s' not in %s)" % (dep, valid_provides)
            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)

    for source in rdf_artifact.sources:
        try:
            source.Validate()
        except rdf_artifacts.ArtifactSourceSyntaxError as e:
            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, 'bad source', e)
Validates artifact syntax. This method can be used to validate individual artifacts as they are loaded, without needing all artifacts to be loaded first, as for Validate(). Args: rdf_artifact: RDF object artifact. Raises: ArtifactSyntaxError: If artifact syntax is invalid.
codesearchnet
def tuplize(nested): if isinstance(nested, str): return nested try: return tuple(map(tuplize, nested)) except TypeError: return nested
Recursively converts iterables into tuples. Args: nested: A nested structure of items and iterables. Returns: A nested structure of items and tuples.
codesearchnet
def _get_ground_truth_detections(instances_file, allowlist_file=None, num_images=None):
    """Processes the annotations JSON file and returns ground truth data
    corresponding to allowlisted image IDs.

    Args:
        instances_file: COCO instances JSON file, usually named as
            instances_val20xx.json.
        allowlist_file: File containing COCO minival image IDs to allowlist
            for evaluation, one per line.
        num_images: Number of allowlisted images to pre-process. First
            num_images are chosen based on sorted list of filenames. If
            None, all allowlisted files are preprocessed.

    Returns:
        A dict mapping image id (int) to a per-image dict that contains
        'id', 'file_name', 'height' & 'width', AND 'detections' mapped to a
        list of detection dicts, each with a dimension-normalized
        [top, left, bottom, right] 'bbox' and a 'category_id'.
    """
    with open(instances_file, 'r') as annotation_dump:
        data_dict = ast.literal_eval(annotation_dump.readline())

    image_data = collections.OrderedDict()

    # Build the set of image ids we are allowed to evaluate.
    if allowlist_file is not None:
        with open(allowlist_file, 'r') as allowlist:
            image_id_allowlist = set(int(x) for x in allowlist.readlines())
    else:
        image_id_allowlist = [image['id'] for image in data_dict['images']]

    # First pass: collect per-image metadata for allowlisted images.
    for image_dict in data_dict['images']:
        image_id = image_dict['id']
        if image_id not in image_id_allowlist:
            continue
        image_data[image_id] = {
            'id': image_dict['id'],
            'file_name': image_dict['file_name'],
            'height': image_dict['height'],
            'width': image_dict['width'],
            'detections': [],
        }

    # Keep only images that actually have annotations.
    shared_image_ids = set()
    for annotation_dict in data_dict['annotations']:
        image_id = annotation_dict['image_id']
        if image_id in image_data:
            shared_image_ids.add(image_id)

    output_image_ids = sorted(shared_image_ids)
    if num_images:
        if num_images <= 0:
            logging.warning('--num_images is %d, hence outputing all annotated images.', num_images)
        elif num_images > len(shared_image_ids):
            logging.warning('--num_images (%d) is larger than the number of annotated images.', num_images)
        else:
            output_image_ids = output_image_ids[:num_images]

    for image_id in list(image_data):
        if image_id not in output_image_ids:
            del image_data[image_id]

    # Second pass: attach dimension-normalized bounding boxes.
    for annotation_dict in data_dict['annotations']:
        image_id = annotation_dict['image_id']
        if image_id not in output_image_ids:
            continue
        image_data_dict = image_data[image_id]
        bbox = annotation_dict['bbox']
        # COCO bbox format is [left, top, width, height].
        top = bbox[1]
        left = bbox[0]
        bottom = top + bbox[3]
        right = left + bbox[2]
        # Skip boxes that fall outside the image bounds.
        if (top > image_data_dict['height'] or left > image_data_dict['width']
                or bottom > image_data_dict['height']
                or right > image_data_dict['width']):
            continue
        image_data_dict['detections'].append({
            'bbox': [top / image_data_dict['height'],
                     left / image_data_dict['width'],
                     bottom / image_data_dict['height'],
                     right / image_data_dict['width']],
            'category_id': annotation_dict['category_id'],
        })

    return image_data
Processes the annotations JSON file and returns ground truth data corresponding to allowlisted image IDs. Args: instances_file: COCO instances JSON file, usually named as instances_val20xx.json. allowlist_file: File containing COCO minival image IDs to allowlist for evaluation, one per line. num_images: Number of allowlisted images to pre-process. First num_images are chosen based on sorted list of filenames. If None, all allowlisted files are preprocessed. Returns: A dict mapping image id (int) to a per-image dict that contains: 'filename', 'image' & 'height' mapped to filename & image dimensions respectively AND 'detections' to a list of detection dicts, with each mapping: 'category_id' to COCO category id (starting with 1) & 'bbox' to a list of dimension-normalized [top, left, bottom, right] bounding-box values.
github-repos
def accept_confirm(self, text=None, wait=None):
    """Execute the wrapped code, accepting a confirm.

    Args:
        text (str | RegexObject, optional): Text to match against the text
            in the modal.
        wait (int | float, optional): Maximum time to wait for the modal to
            appear after executing the wrapped code.

    Raises:
        ModalNotFound: If a modal dialog hasn't been found.
    """
    # Yield inside the driver's modal context so the wrapped code runs
    # while the confirm handler is active.
    with self.driver.accept_modal('confirm', text=text, wait=wait):
        yield
Execute the wrapped code, accepting a confirm. Args: text (str | RegexObject, optional): Text to match against the text in the modal. wait (int | float, optional): Maximum time to wait for the modal to appear after executing the wrapped code. Raises: ModalNotFound: If a modal dialog hasn't been found.
codesearchnet
def receive(self):
    """Returns a single request.

    Takes the first request from the list of requests and returns it. If
    the list is empty, None is returned.

    Returns:
        Response: If a new request is available a Request object is
            returned, otherwise None is returned.
    """
    pickled_request = self._connection.connection.lpop(self._request_key)
    if pickled_request is None:
        return None
    return pickle.loads(pickled_request)
Returns a single request. Takes the first request from the list of requests and returns it. If the list is empty, None is returned. Returns: Response: If a new request is available a Request object is returned, otherwise None is returned.
codesearchnet
def on_item_changed(self, item, new_value, row, column):
    """Event for the item change.

    Args:
        item (TableItem): The TableItem instance.
        new_value (str): New text content.
        row (int): row index.
        column (int): column index.

    Returns:
        tuple: ``(item, new_value, row, column)`` as the event payload.
    """
    return item, new_value, row, column
Event for the item change. Args: emitter (TableWidget): The emitter of the event. item (TableItem): The TableItem instance. new_value (str): New text content. row (int): row index. column (int): column index.
codesearchnet
def bsp_new_with_size(x: int, y: int, w: int, h: int) -> tcod.bsp.BSP:
    """Create a new BSP instance with the given rectangle.

    Args:
        x (int): Rectangle left coordinate.
        y (int): Rectangle top coordinate.
        w (int): Rectangle width.
        h (int): Rectangle height.

    Returns:
        BSP: A new BSP instance.

    .. deprecated:: 2.0
        Call the :any:`BSP` class instead.
    """
    # Thin deprecated shim that just delegates to the BSP constructor.
    node = Bsp(x, y, w, h)
    return node
Create a new BSP instance with the given rectangle. Args: x (int): Rectangle left coordinate. y (int): Rectangle top coordinate. w (int): Rectangle width. h (int): Rectangle height. Returns: BSP: A new BSP instance. .. deprecated:: 2.0 Call the :any:`BSP` class instead.
juraj-google-style
async def get_action_context_and_template(chain, parent_link, decision_link):
    """Get the appropriate json-e context and template for an action task.

    Args:
        chain (ChainOfTrust): the chain of trust.
        parent_link (LinkOfTrust): the parent link to test.
        decision_link (LinkOfTrust): the parent link's decision task link.

    Returns:
        (dict, dict): the json-e context and template.

    Raises:
        CoTError: if the action definition has an unknown ``kind``.
    """
    # Look up the action definition matching this task's callback name in the
    # decision task's public/actions.json artifact.
    actions_path = decision_link.get_artifact_full_path('public/actions.json')
    all_actions = load_json_or_yaml(actions_path, is_path=True)['actions']
    action_name = get_action_callback_name(parent_link.task)
    action_defn = _get_action_from_actions_json(all_actions, action_name)
    jsone_context = await populate_jsone_context(chain, parent_link, decision_link, "action")
    # Legacy (min_cot_version <= 2) actions embed the task template directly.
    if 'task' in action_defn and chain.context.config['min_cot_version'] <= 2:
        tmpl = {'tasks': [action_defn['task']]}
    elif action_defn.get('kind') == 'hook':
        # Hook-kind actions: wrap the in-tree template in a json-e `let` and
        # replace the context with the rendered hook payload.
        in_tree_tmpl = await get_in_tree_template(decision_link)
        action_perm = _get_action_perm(action_defn)
        tmpl = _wrap_action_hook_with_let(in_tree_tmpl, action_perm)
        jsone_context = {
            'payload': _render_action_hook_payload(
                action_defn, jsone_context, parent_link
            ),
            'taskId': parent_link.task_id,
            'now': jsone_context['now'],
            'as_slugid': jsone_context['as_slugid'],
            'clientId': jsone_context.get('clientId'),
        }
    elif action_defn.get('kind') == 'task':
        # Task-kind actions: use the in-tree template and copy selected keys
        # from the action's decision hookPayload into the context.
        tmpl = await get_in_tree_template(decision_link)
        for k in ('action', 'push', 'repository'):
            jsone_context[k] = deepcopy(action_defn['hookPayload']['decision'].get(k, {}))
        jsone_context['action']['repo_scope'] = get_repo_scope(parent_link.task, parent_link.name)
    else:
        raise CoTError('Unknown action kind `{kind}` for action `{name}`.'.format(
            kind=action_defn.get('kind', '<MISSING>'),
            name=action_defn.get('name', '<MISSING>'),
        ))
    return jsone_context, tmpl
Get the appropriate json-e context and template for an action task. Args: chain (ChainOfTrust): the chain of trust. parent_link (LinkOfTrust): the parent link to test. decision_link (LinkOfTrust): the parent link's decision task link. Returns: (dict, dict): the json-e context and template. Raises: CoTError: if the action definition has an unknown kind.
juraj-google-style
def _build_js(inputs, outputs, name, implementation, support_code): input_fields = json.dumps([f[0] for f in inputs]) output_fields = [{'name': f[0], 'type': f[1]} for f in outputs] output_fields = json.dumps(output_fields, sort_keys=True) if support_code is None: support_code = '' return ('{code}\n{name}={implementation};\nbigquery.defineFunction(\'{name}\', {inputs}, ' '{outputs}, {name});').format(code=support_code, name=name, implementation=implementation, inputs=str(input_fields), outputs=str(output_fields))
Creates a BigQuery SQL UDF javascript object. Args: inputs: a list of (name, type) tuples representing the schema of input. outputs: a list of (name, type) tuples representing the schema of the output. name: the name of the function implementation: a javascript function defining the UDF logic. support_code: additional javascript code that the function can use. Returns: The javascript source string that registers the UDF with BigQuery.
juraj-google-style
def is_object_new(self, func):
    """Whether the given function is object.__new__.

    Args:
        func: A function.

    Returns:
        True if func equals either of the pytd definitions for object.__new__,
        False otherwise.
    """
    candidates = ('__new__', '__new__extra_args')
    # Force both lazy members to be materialized before comparing.
    for member in candidates:
        self.load_lazy_attribute(member)
    return any([func] == self.members[member].data for member in candidates)
Whether the given function is object.__new__. Args: func: A function. Returns: True if func equals either of the pytd definitions for object.__new__, False otherwise.
github-repos
def convert_wav(org_wav_fn: Path, tgt_wav_fn: Path) -> None:
    """Converts the wav into a 16bit mono 16000Hz wav.

    Args:
        org_wav_fn: A `Path` to the original wave file.
        tgt_wav_fn: The `Path` to output the processed wave file.

    Raises:
        FileNotFoundError: If the source wave file does not exist.
        subprocess.CalledProcessError: If the ffmpeg invocation fails.
    """
    if not org_wav_fn.exists():
        # Include the offending path in the exception instead of raising bare.
        raise FileNotFoundError(f'Source wave file not found: {org_wav_fn}')
    args = [config.FFMPEG_PATH, '-i', str(org_wav_fn),
            '-ac', '1', '-ar', '16000', str(tgt_wav_fn)]
    # check=True so a failed ffmpeg run raises instead of being silently
    # ignored (the original discarded the CompletedProcess entirely).
    subprocess.run(args, check=True)
Converts the wav into a 16bit mono 16000Hz wav. Args: org_wav_fn: A `Path` to the original wave file tgt_wav_fn: The `Path` to output the processed wave file
codesearchnet
class CustomObjectScope:
    """Exposes custom classes/functions to Keras deserialization internals.

    Under a scope `with custom_object_scope(objects_dict)`, Keras methods such
    as `keras.models.load_model()` or `keras.models.model_from_config()` will
    be able to deserialize any custom object referenced by a saved config.

    Args:
        custom_objects: Dictionary of `{str: object}` pairs, where the `str`
            key is the object name.
    """

    def __init__(self, custom_objects):
        # Normalize None to an empty mapping so __enter__ can always copy it.
        self.custom_objects = custom_objects or {}
        self.backup = None

    def __enter__(self):
        # Snapshot the current global scope dict so __exit__ can restore it.
        current = global_state.get_global_attribute('custom_objects_scope_dict', {})
        self.backup = current.copy()
        global_state.set_global_attribute(
            'custom_objects_scope_dict', self.custom_objects.copy())
        return self

    def __exit__(self, *args, **kwargs):
        # Restore the snapshot taken on entry, regardless of exceptions.
        global_state.set_global_attribute(
            'custom_objects_scope_dict', self.backup.copy())
Exposes custom classes/functions to Keras deserialization internals. Under a scope `with custom_object_scope(objects_dict)`, Keras methods such as `keras.models.load_model()` or `keras.models.model_from_config()` will be able to deserialize any custom object referenced by a saved config (e.g. a custom layer or metric). Example: Consider a custom regularizer `my_regularizer`: ```python layer = Dense(3, kernel_regularizer=my_regularizer) # Config contains a reference to `my_regularizer` config = layer.get_config() ... # Later: with custom_object_scope({'my_regularizer': my_regularizer}): layer = Dense.from_config(config) ``` Args: custom_objects: Dictionary of `{str: object}` pairs, where the `str` key is the object name.
github-repos
def _worker(self, constructor, conn):
    """The process waits for actions and sends back environment results.

    Args:
        constructor: Constructor for the OpenAI Gym environment.
        conn: Connection for communication to the main process.

    Raises:
        KeyError: When receiving a message of unknown type.
    """
    try:
        env = constructor()
        while True:
            try:
                # Poll with a timeout so the loop stays responsive to
                # KeyboardInterrupt between messages.
                if (not conn.poll(0.1)):
                    continue
                (message, payload) = conn.recv()
            except (EOFError, KeyboardInterrupt):
                break
            # _ACCESS: read an attribute of the environment by name.
            if (message == self._ACCESS):
                name = payload
                result = getattr(env, name)
                conn.send((self._RESULT, result))
                continue
            # _CALL: invoke a method of the environment with args/kwargs.
            if (message == self._CALL):
                (name, args, kwargs) = payload
                result = getattr(env, name)(*args, **kwargs)
                conn.send((self._RESULT, result))
                continue
            # _CLOSE: shut down the worker loop; carries no payload.
            if (message == self._CLOSE):
                assert (payload is None)
                break
            raise KeyError('Received message of unknown type {}'.format(message))
    except Exception:
        # Forward the full stacktrace to the parent process so the failure
        # surfaces there instead of dying silently in the child.
        stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
        tf.logging.error('Error in environment process: {}'.format(stacktrace))
        conn.send((self._EXCEPTION, stacktrace))
        conn.close()
The process waits for actions and sends back environment results. Args: constructor: Constructor for the OpenAI Gym environment. conn: Connection for communication to the main process. Raises: KeyError: When receiving a message of unknown type.
codesearchnet
def add_ref(self, timestamp: int) -> None:
    """Adds a reference to this tensor with the specified timestamp.

    Args:
        timestamp: Timestamp of object reference as an integer.
    """
    # Timestamps accumulate in arrival order on the internal list.
    self._ref_times.append(timestamp)
Adds a reference to this tensor with the specified timestamp. Args: timestamp: Timestamp of object reference as an integer.
github-repos
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
    """Checks for horizontal spacing near braces and semicolons.

    Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        nesting_state: A NestingState instance which maintains information
            about the current stack of nested blocks being parsed.
        error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # An opening '{' directly preceded by a non-space, non-'(', '{', '>'
    # character is a candidate for "missing space before {".
    match = Match(r'^(.*[^ ({>]){', line)
    if match:
        leading_text = match.group(1)
        (endline, endlinenum, endpos) = CloseExpression(
            clean_lines, linenum, len(match.group(1)))
        # Gather up to two lines of text after the matching '}' so we can
        # tell brace-initializer-like uses apart from block braces.
        trailing_text = ''
        if endpos > -1:
            trailing_text = endline[endpos:]
        # NOTE(review): xrange implies this file targets Python 2 (or ships a
        # compat shim) — confirm before modernizing.
        for offset in xrange(endlinenum + 1, min(endlinenum + 3, clean_lines.NumLines() - 1)):
            trailing_text += clean_lines.elided[offset]
        # Only flag when what follows doesn't look like an expression use of
        # braces and the leading text isn't a type name.
        if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text) and not _IsType(clean_lines, nesting_state, leading_text)):
            error(filename, linenum, 'whitespace/braces', 5, 'Missing space before {')
    # '}else' with no space between the brace and the keyword.
    if Search(r'}else', line):
        error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else')
    # Empty statements: ':;', a lone ';', or trailing space before ';'.
    if Search(r':\s*;\s*$', line):
        error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.')
    elif Search(r'^\s*;\s*$', line):
        error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, ' 'use {} instead.')
    elif (Search(r'\s+;\s*$', line) and not Search(r'\bfor\b', line)):
        error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty ' 'statement, use {} instead.')
Checks for horizontal spacing near braces and semicolons. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
juraj-google-style
def build_graph(device, input_shape, output_sizes, axis):
    """Build a graph containing a sequence of split operations.

    Args:
        device: string, the device to run on.
        input_shape: shape of the input tensor.
        output_sizes: size of each output along axis.
        axis: axis to be split along.

    Returns:
        A grouping op over all produced split outputs.
    """
    with ops.device('/%s:0' % device):
        inp = array_ops.zeros(input_shape)
        # Repeat the split 100 times so the benchmark has enough work.
        split_results = [
            tensor
            for _ in range(100)
            for tensor in array_ops.split(inp, output_sizes, axis)
        ]
        return control_flow_ops.group(*split_results)
Build a graph containing a sequence of split operations. Args: device: string, the device to run on. input_shape: shape of the input tensor. output_sizes: size of each output along axis. axis: axis to be split along. Returns: An array of tensors to run()
github-repos
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Build model inputs from a question and flattened table for question
    answering or sequence classification tasks by concatenating and adding
    special tokens.

    Args:
        token_ids_0 (`List[int]`): The ids of the question.
        token_ids_1 (`List[int]`, *optional*): The ids of the flattened table.

    Returns:
        `List[int]`: The model input with special tokens.
    """
    if token_ids_1 is None:
        raise ValueError('With TAPAS, you must provide both question IDs and table IDs.')
    # Layout: [CLS] question [SEP] table
    sequence = [self.cls_token_id]
    sequence.extend(token_ids_0)
    sequence.append(self.sep_token_id)
    sequence.extend(token_ids_1)
    return sequence
Build model inputs from a question and flattened table for question answering or sequence classification tasks by concatenating and adding special tokens. Args: token_ids_0 (`List[int]`): The ids of the question. token_ids_1 (`List[int]`, *optional*): The ids of the flattened table. Returns: `List[int]`: The model input with special tokens.
github-repos
def shell(cmd, *args, **kwargs):
    """Execute shell command and return output.

    Args:
        cmd (str): the command itself, i.e. part until the first space.
        *args: positional arguments, i.e. other space-separated parts.
        **kwargs: supported options:
            rel_path (str): directory to resolve a relative ``cmd`` against
                (default: not set).
            raise_on_status (bool): raise exception if command exited with
                non-zero status (default: ``True``).
            stderr (file-like): file-like object to collect stderr output,
                None by default.

    Returns:
        Tuple[int, str]: status, shell output.

    Raises:
        subprocess.CalledProcessError: non-zero exit status with
            ``raise_on_status=True``.
        OSError: the command could not be started with
            ``raise_on_status=True``.
    """
    if kwargs.get('rel_path') and not cmd.startswith('/'):
        cmd = os.path.join(kwargs['rel_path'], cmd)
    status = 0
    try:
        output = subprocess.check_output((cmd,) + args, stderr=kwargs.get('stderr'))
    except subprocess.CalledProcessError as e:
        if kwargs.get('raise_on_status', True):
            raise
        output = e.output
        status = e.returncode
    except OSError as e:
        if kwargs.get('raise_on_status', True):
            raise
        if 'stderr' in kwargs:
            # BUG FIX: OSError has no `.message` attribute on Python 3; the
            # original `e.message` raised AttributeError here. Use str(e).
            kwargs['stderr'].write(str(e))
        return (-1, '')
    # check_output returns bytes; normalize to str. This replaces the old
    # `six.PY3` check with a stdlib-only, version-agnostic test.
    if isinstance(output, bytes):
        output = output.decode('utf8')
    return (status, output)
Execute shell command and return output Args: cmd (str): the command itself, i.e. part until the first space *args: positional arguments, i.e. other space-separated parts rel_path (bool): execute relative to the path (default: `False`) raise_on_status(bool): bool, raise exception if command exited with non-zero status (default: `True`) stderr (file-like): file-like object to collect stderr output, None by default Returns: Tuple[int, str]: status, shell output
codesearchnet