code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def get_charge_transfer(self, atom_index): if (self.potcar is None): raise ValueError('POTCAR must be supplied in order to calculate charge transfer!') potcar_indices = [] for (i, v) in enumerate(self.natoms): potcar_indices += ([i] * v) nelect = self.potcar[potcar_indices[atom_index]].nelectrons return (self.data[atom_index]['charge'] - nelect)
Returns the charge transferred for a particular atom. Requires POTCAR to be supplied. Args: atom_index: Index of atom. Returns: Charge transfer associated with atom from the Bader analysis. Given by final charge on atom - nelectrons in POTCAR for associated atom.
codesearchnet
def visualize_reconstruction(inputs, reconstruct, num=3, name="reconstruction"): reconstruct = tf.clip_by_value(reconstruct, 0., 1.) inputs_and_reconstruct = tf.concat((inputs[:num], reconstruct[:num]), axis=0) image_summary(inputs_and_reconstruct, name)
Visualizes the reconstruction of inputs in TensorBoard. Args: inputs: A tensor of the original inputs, of shape [batch, timesteps, h, w, c]. reconstruct: A tensor of a reconstruction of inputs, of shape [batch, timesteps, h, w, c]. num: Integer for the number of examples to visualize. name: String name of this summary.
juraj-google-style
def convert_per_replica_to_dtensor(per_replica_value, mesh): values = per_replica_value.values if isinstance(values[0], (float, int)): rank = 0 else: rank = len(values[0].shape) if rank == 0: result = [] for v in values: result.append(array_ops.expand_dims_v2(v, axis=0)) rank += 1 else: result = list(values) batch_layout = layout.Layout.batch_sharded(mesh, batch_dim=DEFAULT_BATCH_MESH_DIM_NAME, rank=rank) return d_api.pack(result, batch_layout)
Convert a PerReplica result to a DTensor instance. Args: per_replica_value: A PerReplica instance whose value will be converted to DTensor. mesh: The mesh used for layout creation. Returns: A DTensor instance that packed from per_replica_value with batch sharded layout.
github-repos
def switch_types(self): if (not self.__switch_types): self.__switch_types = SwitchTypes(self.__connection) return self.__switch_types
Gets the SwitchTypes API client. Returns: SwitchTypes:
codesearchnet
def __init__(self, storage_writer, path): super(SQLiteStorageMergeReader, self).__init__(storage_writer) self._active_container_type = None self._active_cursor = None self._add_active_container_method = None self._add_container_type_methods = {} self._compression_format = definitions.COMPRESSION_FORMAT_NONE self._connection = None self._container_types = None self._cursor = None self._event_data_identifier_mappings = {} self._path = path for container_type, method_name in self._ADD_CONTAINER_TYPE_METHODS.items(): method = getattr(self, method_name, None) if not method: raise RuntimeError( 'Add method missing for container type: {0:s}'.format( container_type)) self._add_container_type_methods[container_type] = method
Initializes a storage merge reader. Args: storage_writer (StorageWriter): storage writer. path (str): path to the input file. Raises: IOError: if the input file cannot be opened. RuntimeError: if an add container method is missing.
juraj-google-style
def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget, parser_type='html'): if encoding: lxml_encoding = to_lxml_encoding(encoding) or 'latin1' else: lxml_encoding = encoding elements = [] callback_func = elements.append target = target_class(callback_func) if parser_type == 'html': parser = lxml.html.HTMLParser( encoding=lxml_encoding, target=target ) elif parser_type == 'xhtml': parser = lxml.html.XHTMLParser( encoding=lxml_encoding, target=target, recover=True ) else: parser = lxml.etree.XMLParser( encoding=lxml_encoding, target=target, recover=True ) if parser_type == 'html': for dummy in range(3): parser.feed('<html>'.encode(encoding)) while True: data = file.read(self.BUFFER_SIZE) if not data: break parser.feed(data) for element in elements: yield element del elements[:] parser.close() for element in elements: yield element
Return an iterator of elements found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. target_class: A class to be used for target parsing. parser_type (str): The type of parser to use. Accepted values: ``html``, ``xhtml``, ``xml``. Returns: iterator: Each item is an element from :mod:`.document.htmlparse.element`
juraj-google-style
def Run(self, conf, args): raise NotImplementedError('command %r not implemented' % self.__class__.__name__)
Run this command. Commands are invoked with a global configuration object and a list of arguments. Args: conf: A Config object defining global configuration of nss_cache. args: A list of strings of commandline arguments. Returns: 0 if the command was successful non-zero shell error code if not.
github-repos
def _LinearMapByteStream( self, byte_stream, byte_offset=0, context=None, **unused_kwargs): elements_data_size = self._data_type_definition.GetByteSize() self._CheckByteStreamSize(byte_stream, byte_offset, elements_data_size) try: struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:]) mapped_values = map(self._element_data_type_map.MapValue, struct_tuple) except Exception as exception: error_string = ( 'Unable to read: {0:s} from byte stream at offset: {1:d} ' 'with error: {2!s}').format( self._data_type_definition.name, byte_offset, exception) raise errors.MappingError(error_string) if context: context.byte_size = elements_data_size return tuple(mapped_values)
Maps a data type sequence on a byte stream. Args: byte_stream (bytes): byte stream. byte_offset (Optional[int]): offset into the byte stream where to start. context (Optional[DataTypeMapContext]): data type map context. Returns: tuple[object, ...]: mapped values. Raises: MappingError: if the data type definition cannot be mapped on the byte stream.
juraj-google-style
def part_studio_stl(self, did, wid, eid): req_headers = { 'Accept': 'application/vnd.onshape.v1+octet-stream' } return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/stl', headers=req_headers)
Exports STL export from a part studio Args: - did (str): Document ID - wid (str): Workspace ID - eid (str): Element ID Returns: - requests.Response: Onshape response data
juraj-google-style
def MultiDestroyFlowStates(self, session_ids, request_limit=None): subjects = [session_id.Add('state') for session_id in session_ids] to_delete = [] deleted_requests = [] for (subject, values) in self.MultiResolvePrefix(subjects, self.FLOW_REQUEST_PREFIX, limit=request_limit): for (_, serialized, _) in values: request = rdf_flow_runner.RequestState.FromSerializedString(serialized) deleted_requests.append(request) response_subject = self.GetFlowResponseSubject(request.session_id, request.id) to_delete.append(response_subject) to_delete.append(subject) self.DeleteSubjects(to_delete, sync=True) return deleted_requests
Deletes all requests and responses for the given flows. Args: session_ids: A lists of flows to destroy. request_limit: A limit on the number of requests to delete. Returns: A list of requests that were deleted.
codesearchnet
def copy_graph(subject, existing_graph): new_graph = rdflib.Graph() for predicate, object_ in existing_graph.predicate_objects(): new_graph.add((subject, predicate, object_)) return new_graph
Function takes a subject and an existing graph, returns a new graph with all predicate and objects of the existing graph copied to the new_graph with subject as the new subject Args: subject(rdflib.URIRef): A URIRef subject existing_graph(rdflib.Graph): A rdflib.Graph Returns: rdflib.Graph
juraj-google-style
def __init__(self, tensors): if not isinstance(tensors, (list, tuple)) or not tensors: raise ValueError('Unable to create a ShardedNdArray without a list of tensors.') self.tensors = tensors self.n_devices = len(tensors)
Initializes the ShardedNdArray. Note that the tensors should be ordered in the way the pmap producing these tensors is run. Args: tensors: list or tuple of eager tensors, one for each device.
github-repos
def _histogram_equalization_helper(valid_data, number_of_bins, clip_limit=None, slope_limit=None): (temp_histogram, temp_bins) = np.histogram(valid_data, number_of_bins) if (clip_limit is not None): pixels_to_clip_at = int((clip_limit * (valid_data.size / float(number_of_bins)))) mask_to_clip = (temp_histogram > clip_limit) temp_histogram[mask_to_clip] = pixels_to_clip_at cumulative_dist_function = temp_histogram.cumsum() if (slope_limit is not None): pixel_height_limit = int((slope_limit * (valid_data.size / float(number_of_bins)))) cumulative_excess_height = 0 num_clipped_pixels = 0 weight_metric = np.zeros(cumulative_dist_function.shape, dtype=float) for pixel_index in range(1, cumulative_dist_function.size): current_pixel_count = cumulative_dist_function[pixel_index] diff_from_acceptable = (((current_pixel_count - cumulative_dist_function[(pixel_index - 1)]) - pixel_height_limit) - cumulative_excess_height) if (diff_from_acceptable < 0): weight_metric[pixel_index] = abs(diff_from_acceptable) cumulative_excess_height += max(diff_from_acceptable, 0) cumulative_dist_function[pixel_index] = (current_pixel_count - cumulative_excess_height) num_clipped_pixels = (num_clipped_pixels + cumulative_excess_height) cumulative_dist_function = (((number_of_bins - 1) * cumulative_dist_function) / cumulative_dist_function[(- 1)]) return (cumulative_dist_function, temp_bins)
Calculate the simplest possible histogram equalization, using only valid data. Returns: cumulative distribution function and bin information
codesearchnet
def get_resource(self, uri: str) -> Optional[message.Message]: for collection in (self.structure_definitions, self.search_parameters, self.code_systems, self.value_sets): resource = collection.get(uri) if resource is not None: return resource return None
Retrieves a protocol buffer representation of the given resource. Args: uri: The URI of the resource to retrieve. Returns: Protocol buffer for the resource or `None` if the `uri` can not be found.
github-repos
def from_structure(cls, structure, ff_elements=None, atom_style='charge'): s = structure.get_sorted_structure() (box, symmop) = lattice_2_lmpbox(s.lattice) coords = symmop.operate_multi(s.cart_coords) site_properties = s.site_properties if ('velocities' in site_properties): velos = np.array(s.site_properties['velocities']) rot = SymmOp.from_rotation_and_translation(symmop.rotation_matrix) rot_velos = rot.operate_multi(velos) site_properties.update({'velocities': rot_velos}) boxed_s = Structure(box.to_lattice(), s.species, coords, site_properties=site_properties, coords_are_cartesian=True) symbols = list(s.symbol_set) if ff_elements: symbols.extend(ff_elements) elements = sorted((Element(el) for el in set(symbols))) mass_info = [tuple(([i.symbol] * 2)) for i in elements] ff = ForceField(mass_info) topo = Topology(boxed_s) return cls.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)
Simple constructor building LammpsData from a structure without force field parameters and topologies. Args: structure (Structure): Input structure. ff_elements ([str]): List of strings of elements that must be present due to force field settings but not necessarily in the structure. Default to None. atom_style (str): Choose between "atomic" (neutral) and "charge" (charged). Default to "charge".
codesearchnet
def deep_del(data, fn): result = {} for (k, v) in data.iteritems(): if (not fn(v)): if isinstance(v, dict): result[k] = deep_del(v, fn) else: result[k] = v return result
Create dict copy with removed items. Recursively remove items where fn(value) is True. Returns: dict: New dict with matching items removed.
codesearchnet
def get_lang(tweet): if is_original_format(tweet): lang_field = 'lang' else: lang_field = 'twitter_lang' if ((tweet[lang_field] is not None) and (tweet[lang_field] != 'und')): return tweet[lang_field] else: return None
Get the language that the Tweet is written in. Args: tweet (Tweet or dict): A Tweet object or dictionary Returns: str: 2-letter BCP 47 language code (or None if undefined) Example: >>> from tweet_parser.getter_methods.tweet_text import get_lang >>> original = {"created_at": "Wed May 24 20:17:19 +0000 2017", ... "lang": "en"} >>> get_lang(original) 'en' >>> activity = {"postedTime": "2017-05-24T20:17:19.000Z", ... "twitter_lang": "en"} >>> get_lang(activity) 'en'
codesearchnet
def add(self, distinguished_name, object_class, attributes): self.conn.add(distinguished_name, object_class, attributes)
Add object to LDAP. Args: distinguished_name: the DN of the LDAP record to be added object_class: The objectClass of the record to be added. This is a list of length >= 1. attributes: a dictionary of LDAP attributes to add See ldap_tools.api.group.API#__ldap_attr
codesearchnet
def f(x, depth1, depth2, dim='2d', first_batch_norm=True, stride=1, training=True, bottleneck=True, padding='SAME'): conv = CONFIG[dim]['conv'] with tf.variable_scope('f', reuse=tf.AUTO_REUSE): if first_batch_norm: net = tf.layers.batch_normalization(x, training=training) net = tf.nn.relu(net) else: net = x if bottleneck: net = conv(net, depth1, 1, strides=stride, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth1, 3, strides=1, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth2, 1, strides=1, padding=padding, activation=None) else: net = conv(net, depth2, 3, strides=stride, padding=padding, activation=None) net = tf.layers.batch_normalization(x, training=training) net = tf.nn.relu(net) net = conv(net, depth2, 3, strides=stride, padding=padding, activation=None) return net
Applies residual function for RevNet. Args: x: input tensor depth1: Number of output channels for the first and second conv layers. depth2: Number of output channels for the third conv layer. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the first conv filter. Note that this particular RevNet architecture only varies the stride for the first conv filter. The stride for the second conv filter is always set to 1. training: True for train phase, False for eval phase. bottleneck: If true, apply bottleneck 1x1 down/up sampling. padding: Padding for each conv layer. Returns: Output tensor after applying residual function for RevNet.
codesearchnet
def __init__(self, *args, **kwargs): self.model = kwargs.pop('model', self.model) self.queryset = kwargs.pop('queryset', self.queryset) self.search_fields = kwargs.pop('search_fields', self.search_fields) self.max_results = kwargs.pop('max_results', self.max_results) defaults = {'data_view': 'django_select2:auto-json'} defaults.update(kwargs) super(ModelSelect2Mixin, self).__init__(*args, **defaults)
Overwrite class parameters if passed as keyword arguments. Args: model (django.db.models.Model): Model to select choices from. queryset (django.db.models.query.QuerySet): QuerySet to select choices from. search_fields (list): List of model lookup strings. max_results (int): Max. JsonResponse view page size.
juraj-google-style
def get_nonconflicting_string(base_fmtstr, conflict_set, offset=0): conflict_set_ = set(conflict_set) for count in it.count(offset): base_str = (base_fmtstr % count) if (base_str not in conflict_set_): return base_str
gets a new string that wont conflict with something that already exists Args: base_fmtstr (str): conflict_set (set): CommandLine: python -m utool.util_dev --test-get_nonconflicting_string Example: >>> # ENABLE_DOCTEST >>> from utool.util_dev import * # NOQA >>> # build test data >>> base_fmtstr = 'somestring%d' >>> conflict_set = ['somestring0'] >>> # execute function >>> result = get_nonconflicting_string(base_fmtstr, conflict_set) >>> # verify results >>> print(result) somestring1
codesearchnet
def _set_details(self, content): try: self.details = str(content) except UnicodeEncodeError: logging.error('Unable to decode "%s" in Py3, encoding in utf-8.', content) self.details = content.encode('utf-8')
Sets the `details` field. Args: content: the content to extract details from.
github-repos
def _ParseKey(self, knowledge_base, registry_key, value_name): user_account = artifacts.UserAccountArtifact( identifier=registry_key.name, path_separator='\\') registry_value = registry_key.GetValueByName('ProfileImagePath') if registry_value: profile_path = registry_value.GetDataAsObject() username = self._GetUsernameFromProfilePath(profile_path) user_account.user_directory = profile_path or None user_account.username = username or None try: knowledge_base.AddUserAccount(user_account) except KeyError: pass
Parses a Windows Registry key for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. value_name (str): name of the Windows Registry value. Raises: errors.PreProcessFail: if the preprocessing fails.
juraj-google-style
def add_to_dumper(dumper: Type, classes: List[Type]) -> None: if (not isinstance(classes, list)): classes = [classes] for class_ in classes: if issubclass(class_, enum.Enum): dumper.add_representer(class_, EnumRepresenter(class_)) elif (issubclass(class_, str) or issubclass(class_, UserString)): dumper.add_representer(class_, UserStringRepresenter(class_)) else: dumper.add_representer(class_, Representer(class_))
Register user-defined classes with the Dumper. This enables the Dumper to write objects of your classes to a \ YAML file. Note that all the arguments are types, not instances! Args: dumper: Your dumper class(!), derived from yatiml.Dumper classes: One or more classes to add.
codesearchnet
def update_aliases(self): try: response = self.client.api.get_room_state(self.room_id) for chunk in response: if (('content' in chunk) and ('aliases' in chunk['content'])): if (chunk['content']['aliases'] != self.aliases): self.aliases = chunk['content']['aliases'] return True else: return False except MatrixRequestError: return False
Get aliases information from room state. Returns: boolean: True if the aliases changed, False if not
codesearchnet
def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu, name): layer_components = [ conv.Conv2D( output_channels, [3, 3], initializers=self._initializers, regularizers=self._regularizers, rate=dilation_rate, name="dilated_conv_" + name), ] if apply_relu: layer_components.append(lambda net: tf.nn.relu(net, name="relu_" + name)) return sequential.Sequential(layer_components, name=name)
Create a dilated convolution layer. Args: output_channels: int. Number of output channels for each pixel. dilation_rate: int. Represents how many pixels each stride offset will move. A value of 1 indicates a standard convolution. apply_relu: bool. If True, a ReLU non-linearlity is added. name: string. Name for layer. Returns: a sonnet Module for a dilated convolution.
juraj-google-style
def __init__(self, core, keep_probs): super(RecurrentDropoutWrapper, self).__init__( custom_getter=None, name=core.module_name + "_recdropout") self._core = core self._keep_probs = keep_probs self._dropout_state_size = [] def set_dropout_state_size(keep_prob, state_size): if keep_prob is not None: self._dropout_state_size.append(state_size) return len(self._dropout_state_size) - 1 return None self._dropout_indexes = tf.contrib.framework.nest.map_structure( set_dropout_state_size, keep_probs, core.state_size)
Builds a new wrapper around a given core. Args: core: the RNN core to be wrapped. keep_probs: the recurrent dropout keep probabilities to apply. This should have the same structure has core.init_state. No dropout is applied for leafs set to None.
juraj-google-style
def toInteger(self) -> 'Builder': return self._to_builder(_evaluation.ToIntegerFunction(self.node.context, self.node, []))
The FHIRPath toInteger() function. Casts its operand to an integer. Returns an empty collection if the operand can not be coerced to an integer. Raises a ValueError if the operand collection contains more than one element. Returns: An integer representation of its operand.
github-repos
def GetMessages(self, formatter_mediator, event): if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) event_values = event.CopyToDict() regvalue = event_values.get('regvalue', {}) string_parts = [] for key, value in sorted(regvalue.items()): string_parts.append('{0:s}: {1!s}'.format(key, value)) event_values['text'] = ' '.join(string_parts) urls = event_values.get('urls', []) if urls: event_values['urls'] = ' - '.join(urls) if 'key_path' in event_values: format_string = self.FORMAT_STRING else: format_string = self.FORMAT_STRING_ALTERNATIVE return self._FormatMessages( format_string, self.FORMAT_STRING_SHORT, event_values)
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
juraj-google-style
def _refresh(self, _): from google.appengine.api import app_identity try: token, _ = app_identity.get_access_token(self._scopes) except app_identity.Error as e: raise exceptions.CredentialsError(str(e)) self.access_token = token
Refresh self.access_token. Args: _: (ignored) A function matching httplib2.Http.request's signature.
juraj-google-style
def OpenSourcePath(self, source_path): source_path_spec = path_spec_factory.Factory.NewPathSpec( definitions.TYPE_INDICATOR_OS, location=source_path) self.AddScanNode(source_path_spec, None)
Opens the source path. Args: source_path (str): source path.
juraj-google-style
def check_beam_implementation(test: absltest.TestCase, input_data: Union[EventSet, List[EventSet]], output_node: EventSetNode, cast: Optional[DType]=None): if isinstance(input_data, EventSet): input_data = [input_data] tmp_dir = tempfile.mkdtemp() output_path = os.path.join(tmp_dir, 'output.csv') input_paths = [] for input_idx, input_evset in enumerate(input_data): input_path = os.path.join(tmp_dir, f'input_{input_idx}.csv') input_paths.append(input_path) to_tensorflow_record(input_evset, path=input_path) with TestPipeline() as p: input_pcollection = {} for input_path, input_evset in zip(input_paths, input_data): input_pcollection[input_evset.node()] = p | beam_from_tensorflow_record(input_path, input_evset.node().schema) output_pcollection = run_multi_io(inputs=input_pcollection, outputs=[output_node]) assert len(output_pcollection) == 1 output = output_pcollection[output_node] | beam_to_tensorflow_record(output_path, output_node.schema, shard_name_template='') assert_that(output, equal_to([output_path])) beam_output = from_tensorflow_record(output_path, output_node.schema) if cast: beam_output = beam_output.cast(cast) expected_output = output_node.run(input_data) assertEqualEventSet(test, beam_output, expected_output)
Checks the result of the Numpy backend against the Beam backend. Args: test: The absl's test. input_data: An event set to feed to a graph. output_node: Output of the graph. input_node: Input of the graph. If not set, uses input_data.node() instead. cast: DType to cast beam's output to after loading it from csv. Useful for comparing outputs that are expected to be int32 for example, since when written to CSV those will be loaded back up as int64.
github-repos
def update_particle(position_update, velocity_update, state, nbest_topology, idx_particle): (idx, particle) = idx_particle nbest = state.swarm[nbest_topology[idx]].best_position velocity = velocity_update(particle, nbest, state) position = position_update(particle.position, velocity) return particle._replace(position=position, velocity=velocity)
Update function for a particle. Calculates and updates the velocity and position of a particle for a single iteration of the PSO algorithm. Social best particle is determined by the state.params['topology'] function. Args: state: cipy.algorithms.pso.State: The state of the PSO algorithm. nbest_topology: dict: Containing neighbourhood best index for each particle index. idx_particle: tuple: Tuple of the index of the particle and the particle itself. Returns: cipy.algorithms.pso.Particle: A new particle with the updated position and velocity.
codesearchnet
def kick_user(self, user_id, reason=''): try: self.client.api.kick_user(self.room_id, user_id) return True except MatrixRequestError: return False
Kick a user from this room. Args: user_id (str): The matrix user id of a user. reason (str): A reason for kicking the user. Returns: boolean: Whether user was kicked.
codesearchnet
def HashBuffer(self, buf): for hasher in itervalues(self._hashers): hasher.update(buf) if self._progress: self._progress() self._bytes_read += len(buf)
Updates underlying hashers with a given buffer. Args: buf: A byte buffer (string object) that is going to be fed to the hashers.
juraj-google-style
def _get_label_encoder_and_max(self, x): label_count = x.fillna(NAN_INT).value_counts() n_uniq = label_count.shape[0] label_count = label_count[(label_count >= self.min_obs)] n_uniq_new = label_count.shape[0] offset = (0 if (n_uniq == n_uniq_new) else 1) label_encoder = pd.Series((np.arange(n_uniq_new) + offset), index=label_count.index) max_label = label_encoder.max() label_encoder = label_encoder.to_dict() return (label_encoder, max_label)
Return a mapping from values and its maximum of a column to integer labels. Args: x (pandas.Series): a categorical column to encode. Returns: label_encoder (dict): mapping from values of features to integers max_label (int): maximum label
codesearchnet
def exec_start(self, exec_id, detach=False, tty=False, stream=False, socket=False, demux=False): data = {'Tty': tty, 'Detach': detach} headers = ({} if detach else {'Connection': 'Upgrade', 'Upgrade': 'tcp'}) res = self._post_json(self._url('/exec/{0}/start', exec_id), headers=headers, data=data, stream=True) if detach: return self._result(res) if socket: return self._get_raw_response_socket(res) return self._read_from_socket(res, stream, tty=tty, demux=demux)
Start a previously set up exec instance. Args: exec_id (str): ID of the exec instance detach (bool): If true, detach from the exec command. Default: False tty (bool): Allocate a pseudo-TTY. Default: False stream (bool): Stream response data. Default: False socket (bool): Return the connection socket to allow custom read/write operations. demux (bool): Return stdout and stderr separately Returns: (generator or str or tuple): If ``stream=True``, a generator yielding response chunks. If ``socket=True``, a socket object for the connection. A string containing response data otherwise. If ``demux=True``, a tuple with two elements of type byte: stdout and stderr. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def __init__(self, name=None, options=None): compression_type = python_io.TFRecordOptions.get_compression_type_string(options) rr = gen_io_ops.tf_record_reader_v2(name=name, compression_type=compression_type) super(TFRecordReader, self).__init__(rr)
Create a TFRecordReader. Args: name: A name for the operation (optional). options: A TFRecordOptions object (optional).
github-repos
def check_done(self): raise NotImplementedError
Checks whether the restriction has been fully processed. Called by the SDK harness after iterator returned by ``DoFn.process()`` has been fully read. This method must raise a `ValueError` if there is still any unclaimed work remaining in the restriction when this method is invoked. Exception raised must have an informative error message. This API is required to be implemented in order to make sure no data loss during SDK processing. Returns: ``True`` if current restriction has been fully processed. Raises: ValueError: if there is still any unclaimed work remaining.
github-repos
def _get_create_query(partition, tablename, include=None): TYPE_MAP = {'int': 'INTEGER', 'float': 'REAL', six.binary_type.__name__: 'TEXT', six.text_type.__name__: 'TEXT', 'date': 'DATE', 'datetime': 'TIMESTAMP WITHOUT TIME ZONE'} columns_types = [] if (not include): include = [] for column in sorted(partition.datafile.reader.columns, key=(lambda x: x['pos'])): if (include and (column['name'] not in include)): continue sqlite_type = TYPE_MAP.get(column['type']) if (not sqlite_type): raise Exception('Do not know how to convert {} to sql column.'.format(column['type'])) columns_types.append(' "{}" {}'.format(column['name'], sqlite_type)) columns_types_str = ',\n'.join(columns_types) query = 'CREATE TABLE IF NOT EXISTS {}(\n{})'.format(tablename, columns_types_str) return query
Creates and returns `CREATE TABLE ...` sql statement for given mprows. Args: partition (orm.Partition): tablename (str): name of the table in the return create query. include (list of str, optional): list of columns to include to query. Returns: str: create table query.
codesearchnet
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoFormer sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def _on_disconnect(self, result): (success, _, context) = self._parse_return(result) callback = context['callback'] connection_id = context['connection_id'] handle = context['handle'] callback(connection_id, self.id, success, 'No reason given') self._remove_connection(handle)
Callback called when disconnection command finishes Args: result (dict): result returned from diconnection command
codesearchnet
def AliasMethod(func, from_constant): new_func = func.Replace(kind=MethodKind.METHOD) if func.kind == MethodKind.STATICMETHOD or (func.kind == MethodKind.METHOD and (not from_constant)): return new_func return new_func.Replace(signatures=tuple((s.Replace(params=s.params[1:]) for s in new_func.signatures)))
Returns method func with its signature modified as if it has been aliased. Args: func: A pytd.Function. from_constant: If True, func will be modified as if it has been aliased from an instance of its defining class, e.g., class Foo: def func(self): ... const = ... # type: Foo func = const.func Otherwise, it will be modified as if aliased from the class itself: class Foo: def func(self): ... func = Foo.func Returns: A pytd.Function, the aliased method.
github-repos
def transform_and_print_file(self, file_path: str, transformation: Optional[Callable[[Iterator[str]], Iterator[str]]]=None, output_stream: io.TextIOBase=cast(io.TextIOBase, sys.stdout)) -> None: if transformation is None: transformation = self.annotate_test_file if file_path == _STANDARD_IO_STREAMS: output_stream.writelines(transformation(sys.stdin)) else: with open(file_path, 'r') as file_contents: output_stream.writelines(transformation(file_contents))
Reads from `file_path`, applies a transformation, and prints to `stdout`. Args: file_path: The path to the input file. If this is equal to the constant `_STANDARD_IO_STREAMS` (i.e. the string "-"), the input will come from `stdin`. transformation: A function that takes an iterator over the lines of an HLO file and returns an iterator over the lines of the transformed file. If this is left as `None`, `self.annotate_test_file` will be used. output_stream: The stream to which the transformed file should be written. Defaults to `stdout`.
github-repos
def AddBitbucketServerConnectedRepository(self, request, global_params=None):
    """Add a Bitbucket Server repository to a config's connected repositories.

    This API is experimental.

    Args:
        request: (CloudbuildProjectsLocationsBitbucketServerConfigsAddBitbucketServerConnectedRepositoryRequest)
            input message.
        global_params: (StandardQueryParameters, default: None) global arguments.

    Returns:
        (AddBitbucketServerConnectedRepositoryResponse) The response message.
    """
    config = self.GetMethodConfig('AddBitbucketServerConnectedRepository')
    return self._RunMethod(config, request, global_params=global_params)
Add a Bitbucket Server repository to a given BitbucketServerConfig's connected repositories. This API is experimental. Args: request: (CloudbuildProjectsLocationsBitbucketServerConfigsAddBitbucketServerConnectedRepositoryRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (AddBitbucketServerConnectedRepositoryResponse) The response message.
github-repos
def decompress(ctype, unc_len, data):
    """Decompress UBIFS node data according to its compression type.

    Args:
        ctype (int): Compression type constant (UBIFS_COMPR_LZO / UBIFS_COMPR_ZLIB);
            any other value means the data is stored uncompressed.
        unc_len (int): Expected uncompressed data length in bytes.
        data (bytes): Data to be decompressed.

    Returns:
        The decompressed bytes, or the input unchanged for unknown ctypes.
        On a decompression failure a warning is reported via ``error`` and
        the function implicitly returns None.
    """
    if (ctype == UBIFS_COMPR_LZO):
        try:
            # python-lzo expects a header: magic byte 0xf0 + big-endian uncompressed length.
            return lzo.decompress(b''.join((b'\xf0', struct.pack('>I', unc_len), data)))
        except Exception as e:
            error(decompress, 'Warn', ('LZO Error: %s' % e))
    elif (ctype == UBIFS_COMPR_ZLIB):
        try:
            # wbits=-11: raw deflate stream, no zlib header/trailer.
            return zlib.decompress(data, (- 11))
        except Exception as e:
            error(decompress, 'Warn', ('ZLib Error: %s' % e))
    else:
        return data
Decompress data. Arguments: Int:ctype -- Compression type LZO, ZLIB (*currently unused*). Int:unc_len -- Uncompressed data length. Str:data -- Data to be uncompressed. Returns: Uncompressed Data.
codesearchnet
def do_searchfy(self, query, **kwargs):
    """Run a searchfy query against the Twitter wrapper API.

    For each user found, an extra ``i3visio.uri`` attribute pointing at the
    corresponding usufy profile URL is attached to the result.

    Args:
        query: The element to be searched.

    Returns:
        A list of result entities (each enriched with a usufy URI
        attribute). If the wrapper API call fails, falls back to the
        generic platform implementation.
    """
    try:
        results = self.wrapperAPI.search_users(query)
        for r in results:
            # Build an i3visio.uri attribute linking to the usufy profile.
            aux = {}
            aux["type"] = "i3visio.uri"
            alias = r["value"].split(' - ')[1]
            qURL = self.createURL(word=alias, mode="usufy")
            aux["value"] = qURL
            aux["attributes"] = []
            r["attributes"].append(aux)
        # Bug fix: the enriched results were built but never returned,
        # so the success path always produced None.
        return results
    except Exception:
        # `except Exception, e:` is Python-2-only syntax; `except Exception:`
        # is valid on both Python 2 and 3 (the bound exception was unused).
        return super(Twitter, self).do_searchfy(query, **kwargs)
Executing a searchfy query in this platform. This might be redefined in any class inheriting from Platform. Args: ----- query: The element to be searched. Return: ------- A list of elements to be appended.
juraj-google-style
def get_extra_inputs():
    """Return the input tensors captured by the function being defined.

    Returns:
        If the default graph is a function-definition graph, the list of
        tensors accessed inside the function body but defined outside it so
        far. Otherwise, an empty list.
    """
    graph = ops.get_default_graph()
    if isinstance(graph, _FuncGraph):
        return graph.extra_inputs
    return []
Returns the captured input tensors by the function. Returns: If the default graph is being used to define a function, the returned list of tensors are those accessed inside the function body but defined outside the function body so far. Otherwise, returns an empty list.
github-repos
def convert_dense_weights_data_format(dense, previous_feature_map_shape, target_data_format='channels_first'):
    """Re-order a Dense layer's kernel after a convnet data_format change.

    When a convnet with a ``Flatten``-then-``Dense`` head is ported between
    ``channels_first`` and ``channels_last``, the Dense kernel rows must be
    permuted to match the new flattening order of the preceding feature map.

    Args:
        dense: The target ``Dense`` layer (must expose get_weights/set_weights).
        previous_feature_map_shape: 3-int shape tuple of the feature map right
            before the ``Flatten`` layer, e.g. ``(512, 7, 7)``.
        target_data_format: "channels_first" or "channels_last" — the format
            being converted *to*.
    """
    assert target_data_format in {'channels_last', 'channels_first'}
    kernel, bias = dense.get_weights()
    # The permutation is the same for every output unit, so compute the
    # source layout and transpose axes once, outside the loop.
    if target_data_format == 'channels_first':
        c, h, w = previous_feature_map_shape
        fm_shape = (h, w, c)
        perm = (2, 0, 1)
    else:
        h, w, c = previous_feature_map_shape
        fm_shape = (c, h, w)
        perm = (1, 2, 0)
    flat_size = np.prod(previous_feature_map_shape)
    for unit in range(kernel.shape[1]):
        column = kernel[:, unit].reshape(fm_shape)
        kernel[:, unit] = np.transpose(column, perm).reshape(flat_size)
    dense.set_weights([kernel, bias])
Utility useful when changing a convnet's `data_format`. When porting the weights of a convnet from one data format to the other, if the convnet includes a `Flatten` layer (applied to the last convolutional feature map) followed by a `Dense` layer, the weights of that `Dense` layer should be updated to reflect the new dimension ordering. Args: dense: The target `Dense` layer. previous_feature_map_shape: A shape tuple of 3 integers, e.g. `(512, 7, 7)`. The shape of the convolutional feature map right before the `Flatten` layer that came before the target `Dense` layer. target_data_format: One of "channels_last", "channels_first". Set it "channels_last" if converting a "channels_first" model to "channels_last", or reciprocally.
github-repos
def remove_slice_from_lines(lines, clean_text, slice) -> str:
    """Expand a [start, end) slice to full section boundaries and return its text.

    The slice is widened backwards (up to 4 lines) and forwards (up to 4
    lines) while neighbouring lines remain similar to the reference line,
    then the covered lines are joined and trimmed.

    Args:
        lines (list of str): The raw lines of the document.
        clean_text (list of str): Cleaned (number-free) version of the lines.
        slice (tuple): (start, end) indices of the span to remove.

    Returns:
        str: The removed span of text as a single stripped string.
    """
    # Reference line used for all similarity comparisons.
    base = clean_text[slice[0]]
    section = list(slice)
    check_start_flag = False
    # Walk backwards from the slice start looking for the section boundary.
    for line_idx in range(max(0, slice[0] - 1), max(0, slice[0] - 5), -1):
        if not lines[line_idx]:
            continue
        # NOTE(review): the string literal below appears truncated in this
        # copy of the source (likely lost everything from a '#' onwards
        # during extraction) — recover the original comparison value.
        if lines[line_idx] == '
            section[0] = line_idx
            break
        elif ratio(base, remove_numbers(lines[line_idx])) < 0.9:
            section[0] = line_idx + 1
            potential_ref = remove_numbers(lines[max(0, line_idx - 1)].partition('* [')[-1])
            # A markdown reference entry just above may still belong to the slice.
            if len(potential_ref) >= 0.75 * len(base) and ratio(base, potential_ref) < 0.9:
                section[0] = line_idx
                check_start_flag = True
            break
    # Walk forwards from the slice end until lines stop matching the reference.
    for line_idx in range(min(len(lines), slice[1]), min(len(lines), slice[1] + 5)):
        if ratio(base, remove_numbers(lines[line_idx])) < 0.9:
            section[1] = line_idx
            break
    if len(lines) <= section[1]:
        section[1] = len(lines) - 1
    to_delete = '\n'.join(lines[section[0]:section[1] + 1])
    # Compare the last two covered lines character by character (skipping
    # digits) to find where they diverge.
    itera, iterb = (enumerate(lines[section[1] - 1]), enumerate(lines[section[1]]))
    while True:
        try:
            ia, a = next(itera)
            while a.isnumeric():
                ia, a = next(itera)
            ib, b = next(iterb)
            while b.isnumeric():
                ib, b = next(iterb)
            if a != b:
                break
        except StopIteration:
            break
    if check_start_flag and '* [' in to_delete:
        to_delete = '* [' + to_delete.partition('* [')[-1]
    try:
        # Trim the non-matching tail of the final line; `ib` may be unbound
        # if the comparison loop exhausted immediately.
        delta = len(lines[section[1]]) - ib - 1
        if delta > 0:
            to_delete = to_delete[:-delta]
    except UnboundLocalError:
        pass
    return to_delete.strip()
Remove a slice of text from the lines based on specific criteria. This function identifies a slice of text within the lines and removes it based on certain conditions. Args: lines (list of str): The list of lines containing the text. clean_text (list of str): A cleaned version of the text (without numbers). slice (tuple): A tuple representing the start and end indices of the slice to be removed. Returns: str: The removed slice of text as a single string.
github-repos
def read_struct(fstream):
    """Read a likwid STRUCT section from the given text stream.

    Args:
        fstream: Likwid's file stream, positioned at a struct header line.

    Returns:
        dict(str: list) mapping the first column of each struct line to the
        remaining columns, or None if the header is not a valid STRUCT.
    """
    header = fstream.readline().strip()
    fields = [f for f in header.split(',') if f is not None]
    if len(fields) < 3:
        return None
    partition = {
        'struct': fields[0],
        'info': fields[1],
        'num_lines': fields[2],
    }
    if partition['struct'] != 'STRUCT':
        return None
    struct = {}
    # The header announces how many data lines follow.
    for _ in range(int(partition['num_lines'].strip())):
        cols = fetch_cols(fstream)
        struct[cols[0]] = cols[1:]
    return struct
Read a likwid struct from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing all likwid's struct info as key/value pairs.
codesearchnet
def extract_variable_info(kwargs) -> Tuple[Text, Tuple[int, ...], dtypes.DType, Callable[[], Any]]:
    """Extract variable creation attributes from variable-creator kwargs.

    Supports two conventions: an ``initial_value`` given as a
    ``functools.partial`` carrying ``shape``/``dtype``, or an explicit
    ``shape`` kwarg plus a callable ``initial_value``.

    Args:
        kwargs: dict of keyword arguments passed to a variable creator scope.

    Returns:
        Tuple of (variable name, shape, dtype, initializer function).

    Raises:
        ValueError: If neither convention allows the shape and initializer
            to be recovered (needed to initialize ShardedVariable shards).
    """
    if isinstance(kwargs['initial_value'], functools.partial) and ('shape' in kwargs['initial_value'].keywords or kwargs['initial_value'].args):
        # Shape may be bound either as a keyword or as the first positional arg.
        if 'shape' in kwargs['initial_value'].keywords:
            shape = kwargs['initial_value'].keywords['shape']
        else:
            shape = kwargs['initial_value'].args[0]
        return (kwargs['name'], shape, kwargs['initial_value'].keywords.get('dtype', kwargs['dtype']), kwargs['initial_value'].func)
    elif 'shape' not in kwargs or kwargs['shape'] is None or (not callable(kwargs['initial_value'])):
        raise ValueError('Unable to extract initializer function and shape from {}. Please either pass a function that expects a shape and dtype as the initial value for your variable or functools.partial object with the shape and dtype kwargs set. This is needed so that we can initialize the shards of the ShardedVariable locally.'.format(kwargs['initial_value']))
    else:
        return (kwargs['name'], kwargs['shape'], kwargs['dtype'], kwargs['initial_value'])
Extracts the variable creation attributes from the kwargs. Args: kwargs: a dict of keyword arguments that were passed to a variable creator scope. Returns: A tuple of variable name, shape, dtype, initialization function.
github-repos
def _extract_mnist_images(filename, num_images):
    """Extract images from a gzipped MNIST file into a numpy array.

    Args:
        filename: Path to a gzipped MNIST images file.
        num_images: Number of images contained in the file.

    Returns:
        uint8 numpy array of shape [num_images, height, width, 1].
    """
    side = _MNIST_IMAGE_SIZE
    with gzip.open(filename) as bytestream:
        # Skip the 16-byte IDX header (magic number + dimensions).
        bytestream.read(16)
        raw = bytestream.read(side * side * num_images)
    images = np.frombuffer(raw, dtype=np.uint8)
    return images.reshape(num_images, side, side, 1)
Extract images from an MNIST file into a numpy array. Args: filename: The path to an MNIST images file. num_images: The number of images in the file. Returns: A numpy array of shape [number_of_images, height, width, channels].
juraj-google-style
def build(self, input_shape):
    """Create the variables of the layer (optional, for subclass implementers).

    Subclasses of `Layer`/`Model` override this to perform state creation
    between instantiation and call, typically weight creation.

    Args:
        input_shape: Instance of `TensorShape`, or list of `TensorShape`
            instances if the layer expects a list of inputs.
    """
    # Only record the shape when build() has actually been overridden;
    # the default implementation is tagged with `_is_default`.
    overridden = not hasattr(self.build, '_is_default')
    if overridden:
        self._build_input_shape = input_shape
    self.built = True
Creates the variables of the layer (optional, for subclass implementers). This is a method that implementers of subclasses of `Layer` or `Model` can override if they need a state-creation step in-between layer instantiation and layer call. This is typically used to create the weights of `Layer` subclasses. Args: input_shape: Instance of `TensorShape`, or list of instances of `TensorShape` if the layer expects a list of inputs (one instance per input).
github-repos
def get_user_data_configuration():
    """Retrieve the encrypted user-data payload and update the app configuration.

    Fetches the user-data blob over HTTP, decrypts it through KMS
    (assuming the configured instance role when not already running under
    an IAM role), decompresses the JSON payload and stores the database
    URI on ``app_config``.

    Returns:
        `None`

    Raises:
        RuntimeError: If the user-data URL does not return HTTP 200.
    """
    from cloud_inquisitor import get_local_aws_session, app_config
    kms_region = app_config.kms_region
    session = get_local_aws_session()
    if (session.get_credentials().method == 'iam-role'):
        # Already running under an instance IAM role: use it directly for KMS.
        kms = session.client('kms', region_name=kms_region)
    else:
        # Otherwise assume the configured instance role before calling KMS.
        sts = session.client('sts')
        audit_role = sts.assume_role(RoleArn=app_config.aws_api.instance_role_arn, RoleSessionName='cloud_inquisitor')
        kms = boto3.session.Session(audit_role['Credentials']['AccessKeyId'], audit_role['Credentials']['SecretAccessKey'], audit_role['Credentials']['SessionToken']).client('kms', region_name=kms_region)
    user_data_url = app_config.user_data_url
    res = requests.get(user_data_url)
    if (res.status_code == 200):
        # Payload is base64-encoded, KMS-encrypted, zlib-compressed JSON.
        data = kms.decrypt(CiphertextBlob=b64decode(res.content))
        kms_config = json.loads(zlib.decompress(data['Plaintext']).decode('utf-8'))
        app_config.database_uri = kms_config['db_uri']
    else:
        raise RuntimeError('Failed loading user-data, cannot continue: {}: {}'.format(res.status_code, res.content))
Retrieve and update the application configuration with information from the user-data Returns: `None`
codesearchnet
def dbmax50years(self, value=None):
    """Set IDD Field `dbmax50years`.

    50-year return period value for maximum extreme dry-bulb temperature.

    Args:
        value (float): Value for IDD Field `dbmax50years` (unit: C).
            If None, the field is treated as missing and stored unchecked.

    Raises:
        ValueError: If `value` cannot be converted to float.
    """
    if value is None:
        # Missing value: stored as-is, no validation.
        self._dbmax50years = None
        return
    try:
        coerced = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float for field `dbmax50years`'.format(value))
    self._dbmax50years = coerced
Corresponds to IDD Field `dbmax50years` 50-year return period values for maximum extreme dry-bulb temperature Args: value (float): value for IDD Field `dbmax50years` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def enable(self, timeout=0):
    """Enable the plugin on the daemon and refresh the local state.

    Args:
        timeout (int): Timeout in seconds. Default: 0.

    Raises:
        docker.errors.APIError: If the server returns an error.
    """
    self.client.api.enable_plugin(self.name, timeout)
    # Re-fetch attributes so this object reflects the enabled state.
    self.reload()
Enable the plugin. Args: timeout (int): Timeout in seconds. Default: 0 Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def get_operator_output_port(self):
    """Get the output port of this exported stream.

    Returns:
        OperatorOutputPort: Output port of this exported stream.
    """
    # `self.operatorOutputPort` is a REST resource URL; fetch and wrap it.
    return OperatorOutputPort(self.rest_client.make_request(self.operatorOutputPort), self.rest_client)
Get the output port of this exported stream. Returns: OperatorOutputPort: Output port of this exported stream.
codesearchnet
def potcar_spec(filename):
    """Identify the pseudopotential datasets contained in a POTCAR file.

    Args:
        filename (str): Path of the POTCAR file to process.

    Returns:
        dict: Mapping of pseudopotential label to dataset name,
        e.g. ``{'Fe_pv': 'PBE_54', 'O': 'PBE_54'}``.

    Raises:
        ValueError: If any contained POTCAR has no matching md5 hash.
    """
    with open(filename, 'r') as f:
        chunks = re.split('(End of Dataset\n)', f.read())
    # Re-attach each pseudopotential body to its trailing delimiter, then hash.
    hashes = [md5sum(body + delim) for body, delim in zip(chunks[::2], chunks[1:-1:2])]
    spec = {}
    for this_hash in hashes:
        for dataset in potcar_sets:
            for label, known_hash in potcar_md5sum_data[dataset].items():
                if this_hash == known_hash:
                    spec[label] = dataset
    if len(spec) != len(hashes):
        raise ValueError('One or more POTCARs did not have matching md5 hashes')
    return spec
Returns a dictionary specifying the pseudopotentials contained in a POTCAR file. Args: filename (Str): The name of the POTCAR file to process. Returns: (Dict): A dictionary of pseudopotential filename: dataset pairs, e.g. { 'Fe_pv': 'PBE_54', 'O': 'PBE_54' }
juraj-google-style
def __init__(self, value: Union[int, float], period: Union[int, float]):
    """Initialize the equivalence class.

    Args:
        value: Numerical value to wrap; stored reduced modulo `period`.
        period: Periodicity of the numerical value.
    """
    self.period = period
    self.value = value % period
Initializes the equivalence class. Args: value: numerical value to wrap. period: periodicity of the numerical value.
juraj-google-style
def Clear(self):
    """Clear all data from the emulator instance.

    Returns:
        True if the data was successfully cleared, None otherwise
        (a warning is logged on failure).
    """
    headers = {'Content-length': '0'}
    response, _ = self._http.request('%s/reset' % self._host, method='POST', headers=headers)
    if response.status != 200:
        logging.warning('failed to clear emulator; response was: %s', response)
        return None
    return True
Clears all data from the emulator instance. Returns: True if the data was successfully cleared, False otherwise.
codesearchnet
def bulk_create(self, *records):
    """Create and validate multiple records in the associated app.

    Validates each provided record like ``create`` would, but sends a
    single batch request and does not return the created records. Any
    validation failure aborts the whole batch. Requires Swimlane 2.15+.

    Args:
        *records (dict): One or more dicts of new record field names/values.

    Raises:
        TypeError: If no records were provided or any record is not a dict.
        swimlane.exceptions.UnknownField: If any field cannot be found.
        swimlane.exceptions.ValidationError: If any field fails validation.
    """
    if not records:
        raise TypeError('Must provide at least one record')
    if any(not isinstance(r, dict) for r in records):
        raise TypeError('New records must be provided as dicts')
    # Validate everything up front so a bad record aborts the whole batch.
    validated = []
    for data in records:
        rec = record_factory(self._app, data)
        rec.validate()
        validated.append(rec)
    self._swimlane.request(
        'post',
        'app/{}/record/batch'.format(self._app.id),
        json=[rec._raw for rec in validated],
    )
Create and validate multiple records in associated app Args: *records (dict): One or more dicts of new record field names and values Notes: Requires Swimlane 2.15+ Validates like :meth:`create`, but only sends a single request to create all provided fields, and does not return the newly created records Any validation failures on any of the records will abort the batch creation, not creating any new records Does not return the newly created records Examples: Create 3 new records with single request :: app.records.bulk_create( {'Field 1': 'value 1', ...}, {'Field 1': 'value 2', ...}, {'Field 1': 'value 3', ...} ) Raises: swimlane.exceptions.UnknownField: If any field in any new record cannot be found swimlane.exceptions.ValidationError: If any field in any new record fails validation TypeError: If no dict of fields was provided, or any provided argument is not a dict
codesearchnet
def config_pp(subs):
    """Pretty print configuration options for the given sections.

    Options that exist only as a CLI argument are tagged ``(c)``, those
    that exist only in the config file are tagged ``(f)``.

    Args:
        subs (iterable of str): Names of the conf sections to print.
    """
    print('(c|f): available only as CLI argument/in the config file', end='\n\n')
    for sub in subs:
        hlp_lst = []
        for (opt, meta) in conf[sub].defaults_():
            # XOR: flag the option when it is available in exactly one place.
            if (meta.cmd_arg ^ meta.conf_arg):
                opt += (' (c)' if meta.cmd_arg else ' (f)')
            hlp_lst.append((opt, meta.help))
        if hlp_lst:
            print('{}:'.format(sub))
            # Cap the output width at 100 columns regardless of terminal size.
            _pretty_print(hlp_lst, sep=' -- ', text_width=min(get_terminal_size().columns, 100))
            print()
Pretty print of configuration options. Args: subs (iterable of str): iterable with the list of conf sections to print.
codesearchnet
def fromkeys(cls, iterable, value=None):
    """Create a new wrapped dict from the given keys.

    Args:
        iterable: Iterable containing the keys.
        value: Value to associate with each key. If callable, each key
            ``k`` is mapped to ``value(k)`` instead.

    Returns:
        A new instance of ``cls`` with the computed key/value pairs.
    """
    if callable(value):
        # Lazily compute one value per key.
        return cls((key, value(key)) for key in iterable)
    return cls(dict.fromkeys(iterable, value))
Create a new d (dict wrapper) whose keys come from the given iterable. Args: iterable: Iterable containing keys value: value to associate with each key. If callable, will be value(key) Returns: new DictWrapper Example: >>> from ww import d >>> sorted(d.fromkeys('123', value=4).items()) [('1', 4), ('2', 4), ('3', 4)] >>> sorted(d.fromkeys(range(3), value=lambda e:e**2).items()) [(0, 0), (1, 1), (2, 4)]
codesearchnet
def _compile_fragment_ast(schema, current_schema_type, ast, location, context):
    """Compile an inline fragment AST node into a list of basic blocks.

    Args:
        schema: GraphQL schema object, obtained from the graphql library.
        current_schema_type: GraphQLType, the schema type at the current location.
        ast: GraphQL AST node for the inline fragment.
        location: Location object representing the current query location.
        context: dict of per-compilation data (declared tags, optional-block
            state, etc.). May be mutated in-place.

    Returns:
        list of basic blocks, the compiled output of the fragment node.
    """
    query_metadata_table = context['metadata']
    coerces_to_type_name = ast.type_condition.name.value
    coerces_to_type_obj = schema.get_type(coerces_to_type_name)
    basic_blocks = []
    # A coercion block is only needed when the fragment actually narrows the
    # type: skip it if the target type equals the scope type, or if the scope
    # is the union type declared equivalent to the target.
    is_same_type_as_scope = current_schema_type.is_same_type(coerces_to_type_obj)
    equivalent_union_type = context['type_equivalence_hints'].get(coerces_to_type_obj, None)
    is_base_type_of_union = (isinstance(current_schema_type, GraphQLUnionType) and current_schema_type.is_same_type(equivalent_union_type))
    if (not (is_same_type_as_scope or is_base_type_of_union)):
        query_metadata_table.record_coercion_at_location(location, coerces_to_type_obj)
        basic_blocks.append(blocks.CoerceType({coerces_to_type_name}))
    # Compile the fragment's selections against the coerced-to type.
    inner_basic_blocks = _compile_ast_node_to_ir(schema, coerces_to_type_obj, ast, location, context)
    basic_blocks.extend(inner_basic_blocks)
    return basic_blocks
Return a list of basic blocks corresponding to the inline fragment at this AST node. Args: schema: GraphQL schema object, obtained from the graphql library current_schema_type: GraphQLType, the schema type at the current location ast: GraphQL AST node, obtained from the graphql library. location: Location object representing the current location in the query context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: list of basic blocks, the compiled output of the vertex AST node
codesearchnet
def videos(self, **kwargs):
    """Get the videos added to a TV episode (teasers, clips, etc...).

    Args:
        language: (optional) ISO 639 code.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_series_id_season_number_episode_number_path('videos')
    response = self._GET(path, kwargs)
    # Mirror the response payload onto this object's attributes.
    self._set_attrs_to_values(response)
    return response
Get the videos that have been added to a TV episode (teasers, clips, etc...). Args: language: (optional) ISO 639 code. Returns: A dict respresentation of the JSON returned from the API.
juraj-google-style
def _substitute_globals(config_dict):
    """Set module-level globals to values defined in `config_dict`.

    Only keys that already name a module constant and whose values have a
    type listed in `_ALLOWED` are applied; everything else is silently
    ignored. Non-dict input is ignored entirely.

    Args:
        config_dict (dict): Mapping of global variable names to new values.
    """
    constants = _get_all_constants()

    if type(config_dict) != dict:
        return

    # Bug fix: `.iteritems()` is Python-2-only and raises AttributeError on
    # Python 3; `.items()` behaves identically on both.
    for key, val in config_dict.items():
        if key in constants and type(val) in _ALLOWED:
            globals()[key] = val
Set global variables to values defined in `config_dict`. Args: config_dict (dict): dict with data, which are used to set `globals`. Note: `config_dict` have to be dictionary, or it is ignored. Also all variables, that are not already in globals, or are not types defined in :attr:`_ALLOWED` (str, int, ..) or starts with ``_`` are silently ignored.
juraj-google-style
def mark_as_unsaveable(self, error_message):
    """Mark this FuncGraph as unsaveable.

    Any later attempt to export the graph will raise an error containing
    the accumulated messages.

    Args:
        error_message: String or list of strings describing why saving
            this FuncGraph to SavedModel must fail.
    """
    self._saveable = False
    # Normalize a single message to a one-element list before accumulating.
    messages = [error_message] if isinstance(error_message, str) else error_message
    self._saving_errors.update(messages)
Marks this FuncGraph as unsaveable. Any attempts to export this FuncGraph will raise an error with the specified message. Args: error_message: List or string containing the error message to be raised when saving this FuncGraph to SavedModel.
github-repos
def parse(cls, representation, corpus=None):
    """Create a subview from its serialized string representation.

    The representation (as produced by ``serialize``) consists of
    alternating lines: a filter-criterion name followed by that
    criterion's own serialized form.

    Args:
        representation (str): The serialized subview.
        corpus: Corpus to bind the subview to (may be None).

    Returns:
        Subview: The reconstructed subview.

    Raises:
        UnknownFilterCriteriaException: If a criterion name is not registered.
    """
    serialized_lines = representation.split('\n')
    parsed_criteria = []
    for index in range(0, len(serialized_lines), 2):
        criterion_name = serialized_lines[index]
        criterion_payload = serialized_lines[index + 1]
        if criterion_name not in available_filter_criteria():
            raise UnknownFilterCriteriaException('Unknown filter-criterion {}'.format(criterion_name))
        parsed_criteria.append(available_filter_criteria()[criterion_name].parse(criterion_payload))
    return cls(corpus, parsed_criteria)
Creates a subview from a string representation (created with ``self.serialize``). Args: representation (str): The representation. Returns: Subview: The created subview.
juraj-google-style
def _unpack(formatstring, packed):
    """Unpack a bytestring into a single value via the `struct` module.

    Args:
        formatstring (str): Packing format; see the `struct` module docs.
        packed (str): The bytestring to be unpacked.

    Returns:
        The unpacked value; its type depends on `formatstring`.

    Raises:
        ValueError: If the bytestring cannot be unpacked with the given format.
    """
    _checkString(formatstring, description='formatstring', minlength=1)
    _checkString(packed, description='packed string', minlength=1)
    if sys.version_info[0] > 2:
        # struct needs a bytes buffer on Python 3; input arrives as latin-1 text.
        packed = bytes(packed, encoding='latin1')
    try:
        value = struct.unpack(formatstring, packed)[0]
    except:
        errortext = 'The received bytestring is probably wrong, as the bytestring-to-num conversion failed.'
        errortext += ' Bytestring: {0!r} Struct format code is: {1}'
        raise ValueError(errortext.format(packed, formatstring))
    return value
Unpack a bytestring into a value. Uses the built-in :mod:`struct` Python module. Args: * formatstring (str): String for the packing. See the :mod:`struct` module for details. * packed (str): The bytestring to be unpacked. Returns: A value. The type depends on the formatstring. Raises: ValueError Note that the :mod:`struct` module wants byte buffers for Python3, but bytestrings for Python2. This is compensated for automatically.
juraj-google-style
def get_generic_distributions(generic_dists, metric_id):
    """Create a flattened list of saved generic distributions.

    A generic distribution is one that is not processed further but stored
    in its most raw version.

    Args:
        generic_dists: List of distributions to be saved.
        metric_id (uuid): Id of the current test run.

    Returns:
        List of dictionaries made from :class:`DistributionMetric`.
    """
    flattened = []
    for dist in generic_dists:
        flattened.extend(get_all_distributions_by_type(dist, metric_id))
    return flattened
Creates flatten list of distributions per its value type. A generic distribution is the one which is not processed but saved in the most raw version. Args: generic_dists: list of distributions to be saved metric_id(uuid): id of the current test run Returns: list of dictionaries made from :class:`DistributionMetric`
github-repos
def __init__(self, datastore_client, storage_client, dataset_name):
    """Initializes DatasetBatches.

    Args:
        datastore_client: instance of CompetitionDatastoreClient.
        storage_client: instance of CompetitionStorageClient.
        dataset_name: name of the dataset ('dev' or 'final').
    """
    super(DatasetBatches, self).__init__(
        datastore_client=datastore_client,
        entity_kind_batches=KIND_DATASET_BATCH,
        entity_kind_images=KIND_DATASET_IMAGE)
    self._storage_client = storage_client
    self._dataset_name = dataset_name
Initializes DatasetBatches. Args: datastore_client: instance of CompetitionDatastoreClient storage_client: instance of CompetitionStorageClient dataset_name: name of the dataset ('dev' or 'final')
juraj-google-style
def update_value(self, offset, value):
    """Write `value` into the stored binary config value at `offset`.

    Any existing data at or past `offset` is discarded; a gap before
    `offset` is zero-filled.

    Args:
        offset (int): Byte offset at which to write.
        value (bytes): Bytes to append starting at `offset`.

    Returns:
        int: 0 on success, or an opaque error code suitable for returning
        from a set_config rpc when the write would exceed `total_size`.
    """
    if offset + len(value) > self.total_size:
        return Error.INPUT_BUFFER_TOO_LONG
    current_len = len(self.current_value)
    if current_len < offset:
        # Zero-fill the gap up to the write offset.
        self.current_value += bytearray(offset - current_len)
    elif current_len > offset:
        # Truncate everything at or past the write offset.
        self.current_value = self.current_value[:offset]
    self.current_value += bytearray(value)
    return 0
Update the binary value currently stored for this config value. Returns: int: An opaque error code that can be returned from a set_config rpc
codesearchnet
def __init__(self, name=None, description=None, arguments=None):
    """Initialization method.

    Args:
        name: Optional name; only assigned when truthy.
        description: Optional description; only assigned when truthy.
        arguments (dict): Arguments later consumed by get_data.
    """
    if name:
        self.name = name
    if description:
        self.description = description
    # Falsy arguments (None, {}) collapse to a fresh empty dict.
    self.arguments = arguments if arguments else {}
    self.data = None
Initialization method. Args: arguments (dict): arguments that will be used for get_data method.
juraj-google-style
def dict_to_schema(schema_dict, required, allow_custom_keys=True, modifier=None):
    """Convert a dict of Schemas into a single `Schema`.

    NOTE(review): uses `iteritems`/`basestring`, so this is Python-2-only code.

    Args:
        schema_dict (dict): Nested dict describing the schema.
        required (bool): Whether string keys become required or optional.
        allow_custom_keys (bool, optional): If True, the resulting schema
            also accepts arbitrary string keys in dicts.
        modifier (callable): Functor applied to dict values via `Schema.Use`.

    Returns:
        A `Schema` object.
    """
    if modifier:
        modifier = Use(modifier)

    def _to(value):
        # Recursively convert nested dicts; non-dict leaves become schema terms.
        if isinstance(value, dict):
            d = {}
            for (k, v) in value.iteritems():
                if isinstance(k, basestring):
                    k = (Required(k) if required else Optional(k))
                d[k] = _to(v)
            if allow_custom_keys:
                # Extra string keys accept anything (or the modifier's result).
                d[Optional(basestring)] = (modifier or object)
            schema = Schema(d)
        elif modifier:
            schema = And(value, modifier)
        else:
            schema = value
        return schema
    return _to(schema_dict)
Convert a dict of Schemas into a Schema. Args: required (bool): Whether to make schema keys optional or required. allow_custom_keys (bool, optional): If True, creates a schema that allows custom items in dicts. modifier (callable): Functor to apply to dict values - it is applied via `Schema.Use`. Returns: A `Schema` object.
codesearchnet
def FetchSizeOfSignedBinary(binary_urn, token = None ):
    """Returns the size of the given signed binary (in bytes).

    Reads from either the legacy AFF4 datastore or the relational
    datastore, depending on the current configuration.

    Args:
        binary_urn: RDFURN that uniquely identifies the binary.
        token: ACL token to use with the legacy (non-relational) datastore.

    Raises:
        SignedBinaryNotFoundError: If no signed binary with the given URN exists.
    """
    if _ShouldUseLegacyDatastore():
        try:
            aff4_stream = aff4.FACTORY.Open(
                binary_urn, aff4_type=collects.GRRSignedBlob, mode="r", token=token)
            return aff4_stream.size
        except aff4.InstantiationError:
            raise SignedBinaryNotFoundError(binary_urn)
    else:
        try:
            references, _ = data_store.REL_DB.ReadSignedBinaryReferences(
                _SignedBinaryIDFromURN(binary_urn))
        except db.UnknownSignedBinaryError:
            raise SignedBinaryNotFoundError(binary_urn)
        # Total size = offset of the final chunk plus its length.
        last_reference = references.items[-1]
        return last_reference.offset + last_reference.size
Returns the size of the given binary (in bytes). Args: binary_urn: RDFURN that uniquely identifies the binary. token: ACL token to use with the legacy (non-relational) datastore. Raises: SignedBinaryNotFoundError: If no signed binary with the given URN exists.
juraj-google-style
def convert(self, inp):
    """Convert a textual quantity description into a quantities object.

    Args:
        inp (str): Textual representation of some quantity of units,
            e.g. "fifty kilograms".

    Returns:
        A quantities object carrying the described magnitude, expressed in
        the second extracted unit.
    """
    cleaned = self._preprocess(inp)
    magnitude = NumberService().longestNumber(cleaned)
    units = self.extractUnits(cleaned)
    # Build the quantity in the source unit, then convert to the target unit.
    result = pq.Quantity(float(magnitude), units[0])
    result.units = units[1]
    return result
Converts a string representation of some quantity of units into a quantities object. Args: inp (str): A textual representation of some quantity of units, e.g., "fifty kilograms". Returns: A quantities object representing the described quantity and its units.
juraj-google-style
def Register(self, name, constructor):
    """Register a new constructor in the factory.

    Args:
        name: Name to associate with the constructor.
        constructor: Constructor function that creates instances.

    Raises:
        ValueError: If a constructor is already registered under `name`.
    """
    precondition.AssertType(name, Text)
    if name in self._constructors:
        raise ValueError(
            "Duplicated constructors %r and %r for name '%s'"
            % (constructor, self._constructors[name], name))
    self._constructors[name] = constructor
Registers a new constructor in the factory. Args: name: A name associated with given constructor. constructor: A constructor function that creates instances. Raises: ValueError: If there already is a constructor associated with given name.
juraj-google-style
def draw_mask(im, mask, alpha=0.5, color=None):
    """Overlay a binary mask on top of an image.

    Args:
        im: a 3-channel uint8 image in BGR.
        mask: a binary 1-channel image of the same height/width.
        alpha (float): blending weight of the overlay color.
        color: BGR color triple; if None, one is chosen at random from
            the palette.

    Returns:
        A new uint8 image where the masked region is alpha-blended
        with `color`.
    """
    if color is None:
        # PALETTE_RGB entries are RGB; reverse to BGR to match `im`.
        color = PALETTE_RGB[np.random.choice(len(PALETTE_RGB))][::-1]
    # Bug fix: the subscripts were written as `[(:, :, None)]` / `[::(- 1)]`,
    # which is invalid Python syntax; restored to standard slice notation.
    # Broadcast the 1-channel mask to 3 channels and blend where it is set.
    im = np.where(np.repeat((mask > 0)[:, :, None], 3, axis=2),
                  im * (1 - alpha) + color * alpha,
                  im)
    return im.astype('uint8')
Overlay a mask on top of the image. Args: im: a 3-channel uint8 image in BGR mask: a binary 1-channel image of the same size color: if None, will choose automatically
codesearchnet
def make_descriptors(self, base_name):
    """Return the `PropertyDescriptor` list to install for this property.

    Unlike simpler property types, `UnitsSpec` yields multiple descriptors:
    those of the associated units property, plus one for the base property
    that delegates to the first units descriptor.

    Args:
        base_name (str): Name of the property these descriptors are for.

    Returns:
        list[PropertyDescriptor]: Descriptors collected by the `MetaHasProps`
        metaclass and added to `HasProps` subclasses during class creation.
    """
    units_name = base_name + '_units'
    units_descriptors = self._units_type.make_descriptors(units_name)
    spec_descriptor = UnitsSpecPropertyDescriptor(base_name, self, units_descriptors[0])
    return units_descriptors + [spec_descriptor]
Return a list of ``PropertyDescriptor`` instances to install on a class, in order to delegate attribute access to this property. Unlike simpler property types, ``UnitsSpec`` returns multiple descriptors to install. In particular, descriptors for the base property as well as the associated units property are returned. Args: base_name (str) : the name of the property these descriptors are for Returns: list[PropertyDescriptor] The descriptors returned are collected by the ``MetaHasProps`` metaclass and added to ``HasProps`` subclasses during class creation.
codesearchnet
def _start_reader_thread(self, stream, chunks):
    """Start a daemonless thread that drains FFMPEG's output stream.

    The thread reads consecutive chunks from `stream` and appends them to
    `chunks` until the stream is exhausted.

    Args:
        stream: Output stream of the FFMPEG process.
        chunks: List that receives the output chunks.

    Returns:
        threading.Thread: The started reader thread.
    """
    import io
    import threading

    def _pump():
        while True:
            data = stream.read(io.DEFAULT_BUFFER_SIZE)
            if not data:
                # Empty read means EOF.
                return
            chunks.append(data)

    reader = threading.Thread(target=_pump)
    reader.start()
    return reader
Starts a thread for reading output from FFMPEG. The thread reads consecutive chunks from the stream and saves them in the given list. Args: stream: output stream of the FFMPEG process. chunks: list to save output chunks to. Returns: Thread
juraj-google-style
def find(self, title):
    """Fetch and return the first spreadsheet with the given title.

    Args:
        title (str): Title/name of the spreadsheet to return.

    Returns:
        SpreadSheet: New SpreadSheet instance.

    Raises:
        KeyError: If no spreadsheet with the given ``title`` is found.
    """
    matches = backend.iterfiles(self._drive, name=title)
    for file_id, _ in matches:
        # Return on the first match only.
        return self[file_id]
    raise KeyError(title)
Fetch and return the first spreadsheet with the given title. Args: title(str): title/name of the spreadsheet to return Returns: SpreadSheet: new SpreadSheet instance Raises: KeyError: if no spreadsheet with the given ``title`` is found
codesearchnet
def register_dispatchable_type(cls):
    """Class decorator that registers a type for use with type-based dispatch.

    Should *not* be used with subclasses of `CompositeTensor` or
    `ExtensionType` (which are automatically registered).

    Note: this function is intended to support internal legacy use cases
    (such as RaggedTensorValue), and will probably not be exposed as a
    public API.

    Args:
        cls: The class to register.

    Returns:
        `cls`, unchanged, so this can be used as a decorator.
    """
    _api_dispatcher.register_dispatchable_type(cls)
    return cls
Class decorator that registers a type for use with type-based dispatch. Should *not* be used with subclasses of `CompositeTensor` or `ExtensionType` (which are automatically registered). Note: this function is intended to support internal legacy use cases (such as RaggedTensorValue), and will probably not be exposed as a public API. Args: cls: The class to register. Returns: `cls`.
github-repos
def prune(A, threshold):
    """Zero out coefficients whose magnitude is below a threshold.

    Args:
        A (Poly or numpy.ndarray): Input data.
        threshold (float): Coefficients with absolute value strictly below
            this are set to 0.

    Returns:
        Same type as `A`, with small coefficients zeroed.
    """
    if isinstance(A, Poly):
        coeffs = A.A.copy()
        for key in A.keys:
            vals = coeffs[key].copy()
            vals[numpy.abs(vals) < threshold] = 0.0
            coeffs[key] = vals
        return Poly(coeffs, A.dim, A.shape, A.dtype)
    pruned = A.copy()
    pruned[numpy.abs(pruned) < threshold] = 0.0
    return pruned
Remove coefficients that is not larger than a given threshold. Args: A (Poly): Input data. threshold (float): Threshold for which values to cut. Returns: (Poly): Same type as A. Examples: >>> P = chaospy.sum(chaospy.prange(3)*2**-numpy.arange(0, 6, 2, float)) >>> print(P) 0.0625q0^2+0.25q0+1.0 >>> print(chaospy.prune(P, 0.1)) 0.25q0+1.0 >>> print(chaospy.prune(P, 0.5)) 1.0 >>> print(chaospy.prune(P, 1.5)) 0.0
codesearchnet
def get_callback_url(self, **kwargs):
    """Return a relative URL for invoking this Pipeline's callback method.

    NOTE(review): `self.async` and `urllib.urlencode` make this
    Python-2-only code (`async` is a reserved keyword from Python 3.7).

    Args:
        kwargs: Mapping of keyword argument names to single values to pass
            to the callback when it is invoked.

    Raises:
        UnexpectedPipelineError: If invoked on a pipeline that is not async.
    """
    if not self.async:
        raise UnexpectedPipelineError(
            'May only call get_callback_url() method for asynchronous pipelines.')
    kwargs['pipeline_id'] = self._pipeline_key.name()
    # Sort for a deterministic query-string ordering.
    params = urllib.urlencode(sorted(kwargs.items()))
    return '%s/callback?%s' % (self.base_path, params)
Returns a relative URL for invoking this Pipeline's callback method. Args: kwargs: Dictionary mapping keyword argument names to single values that should be passed to the callback when it is invoked. Raises: UnexpectedPipelineError if this is invoked on pipeline that is not async.
juraj-google-style
def get_choices_for(self, field):
    """Get the choices for the given field.

    Args:
        field (str): Name of the field.

    Returns:
        List of (value, name) tuples. If the field's choices are declared
        as a string, they are resolved through the choices manager first.
    """
    choices = self._fields[field].choices
    if not isinstance(choices, six.string_types):
        # Already a concrete list of tuples.
        return choices
    entries = self._choices_manager.get_all(choices)
    return [(entry['value'], entry['name']) for entry in entries]
Get the choices for the given fields. Args: field (str): Name of field. Returns: List of tuples. [(name, value),...]
juraj-google-style
def __init__(self, queue_id=None):
    """Create an ActionSetQueue with the optional parameters below.

    Args:
        queue_id (int): The queue_id send packets to given queue on port.
    """
    # action_type and length (8 bytes) are fixed by the OpenFlow spec.
    super().__init__(action_type=ActionType.OFPAT_SET_QUEUE, length=8)
    self.queue_id = queue_id
Create an ActionSetQueue with the optional parameters below. Args: queue_id (int): The queue_id send packets to given queue on port.
juraj-google-style
def __init__(self, file_format=None, shape=(None,)):
    """Constructs the audio feature connector.

    Args:
        file_format: `str`, the audio file format. Can be any format ffmpeg
            understands. If `None`, will attempt to infer from the file
            extension.
        shape: `tuple`, shape of the data. Only 1-D audio is supported.

    Raises:
        TypeError: If `shape` is not 1-dimensional.
    """
    self._file_format = file_format
    if len(shape) != 1:
        raise TypeError(
            "Audio feature currently only supports 1-D values, got %s." % shape)
    self._shape = shape
    # Samples are stored as 64-bit integers.
    super(Audio, self).__init__(shape=shape, dtype=tf.int64)
Constructs the connector. Args: file_format: `str`, the audio file format. Can be any format ffmpeg understands. If `None`, will attempt to infer from the file extension. shape: `tuple`, shape of the data.
juraj-google-style
def add_logger(name, level=None, format=None):
    """Set up and return a stdout logger.

    Args:
        name (str): Name of the logger.
        level: Logging level; defaults to logging.INFO.
        format (str): Format string for logging output; defaults to
            ``%(filename)-11s %(lineno)-3d: %(message)s``.

    Returns:
        The configured logger object.
    """
    fmt = format if format else '%(filename)-11s %(lineno)-3d: %(message)s'
    logger = logging.getLogger(name)
    logger.setLevel(level if level else logging.INFO)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    return logger
Set up a stdout logger. Args: name (str): name of the logger level: defaults to logging.INFO format (str): format string for logging output. defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``. Returns: The logger object.
codesearchnet
def orient_averaged_adaptive(tm):
    """Compute orientation-averaged T-matrix amplitudes adaptively.

    Averages over scatterer orientation using the particle orientation PDF
    set on `tm` (the `alpha`/`beta` attributes are ignored). This uses a
    very slow adaptive quadrature and is mainly for reference purposes.

    Args:
        tm: TMatrix (or descendant) instance.

    Returns:
        Tuple (S, Z): the orientation-averaged amplitude (2x2 complex) and
        phase (4x4 real) matrices.
    """
    S = np.zeros((2, 2), dtype=complex)
    Z = np.zeros((4, 4))
    # Integrand for one real/imaginary component of the amplitude matrix,
    # weighted by the orientation PDF over beta.
    def Sfunc(beta, alpha, i, j, real):
        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
        s = (S_ang[(i, j)].real if real else S_ang[(i, j)].imag)
        return (s * tm.or_pdf(beta))
    ind = range(2)
    for i in ind:
        for j in ind:
            # Integrate beta over [0, 180] and alpha over [0, 360]; divide by
            # 360 to average over alpha (the PDF normalizes the beta part).
            S.real[(i, j)] = (dblquad(Sfunc, 0.0, 360.0, (lambda x: 0.0), (lambda x: 180.0), (i, j, True))[0] / 360.0)
            S.imag[(i, j)] = (dblquad(Sfunc, 0.0, 360.0, (lambda x: 0.0), (lambda x: 180.0), (i, j, False))[0] / 360.0)
    # Integrand for one element of the (real) phase matrix.
    def Zfunc(beta, alpha, i, j):
        (S_and, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
        return (Z_ang[(i, j)] * tm.or_pdf(beta))
    ind = range(4)
    for i in ind:
        for j in ind:
            Z[(i, j)] = (dblquad(Zfunc, 0.0, 360.0, (lambda x: 0.0), (lambda x: 180.0), (i, j))[0] / 360.0)
    return (S, Z)
Compute the T-matrix using variable orientation scatterers. This method uses a very slow adaptive routine and should mainly be used for reference purposes. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance Returns: The amplitude (S) and phase (Z) matrices.
codesearchnet
def usufyToTextExport(d, fPath=None):
    """Export usufy results to a .txt file, or render them as text.

    Args:
        d: Data to export (list of profile dicts).
        fPath: Output file path; None indicates on-screen display.

    Returns:
        unicode: A unicode rendering of the sheet, but only when writing to
        `fPath` fails — the success path implicitly returns None.
        (Python-2 code: uses the `unicode` builtin.)
    """
    # Short-circuit: nothing to export.
    if d == []:
        return "+------------------+\n| No data found... |\n+------------------+"
    import pyexcel as pe
    import pyexcel.ext.text as text
    # NOTE(review): `isTerminal` is computed but never used below — confirm intent.
    if fPath == None:
        isTerminal = True
    else:
        isTerminal = False
    try:
        # NOTE(review): `oldData` is loaded but never merged into the output —
        # confirm whether appending to the previous file was intended.
        oldData = get_data(fPath)
    except:
        oldData = {"OSRFramework":[]}
    tabularData = _generateTabularData(d, {"OSRFramework":[[]]}, True, canUnicode=False)
    sheet = pe.Sheet(tabularData["OSRFramework"])
    sheet.name = "Profiles recovered (" + getCurrentStrDatetime() +")."
    sheet.name_columns_by_row(0)
    text.TABLEFMT = "grid"
    try:
        with open(fPath, "w") as oF:
            oF.write(str(sheet))
    except Exception as e:
        # Fall back to returning the rendered sheet when the file write fails.
        return unicode(sheet)
Workaround to export to a .txt file or to show the information. Args: ----- d: Data to export. fPath: File path for the output file. If None was provided, it will assume that it has to print it. Returns: -------- unicode: It sometimes returns a unicode representation of the Sheet received.
juraj-google-style
def parse(self) -> Statement:
    """Parse a complete YANG module or submodule from the parser's input.

    Returns:
        The top-level ``module``/``submodule`` statement.

    Raises:
        EndOfInput: If past the end of input.
        ModuleNameMismatch: If the parsed module name doesn't match
            `self.name`.
        ModuleRevisionMismatch: If the parsed revision date doesn't match
            `self.rev`.
        UnexpectedInput: If the top-level statement isn't ``(sub)module``,
            or if input remains after it.
    """
    self.opt_separator()
    start = self.offset
    res = self.statement()
    if res.keyword not in ['module', 'submodule']:
        # Rewind so error reporting points at the offending statement.
        self.offset = start
        raise UnexpectedInput(self, "'module' or 'submodule'")
    if self.name is not None and res.argument != self.name:
        raise ModuleNameMismatch(res.argument, self.name)
    if self.rev:
        revst = res.find1('revision')
        if revst is None or revst.argument != self.rev:
            # Guard the argument access: the original raised AttributeError
            # here when no revision statement was present at all.
            found_rev = revst.argument if revst is not None else None
            raise ModuleRevisionMismatch(found_rev, self.rev)
    # Only trailing separators may follow the (sub)module statement.
    try:
        self.opt_separator()
    except EndOfInput:
        return res
    raise UnexpectedInput(self, 'end of input')
Parse a complete YANG module or submodule from the module text supplied to
the parser.

Raises:
    EndOfInput: If past the end of input.
    ModuleNameMismatch: If the parsed module name doesn't match `self.name`.
    ModuleRevisionMismatch: If the parsed revision date doesn't match
        `self.rev`.
    UnexpectedInput: If the top-level statement isn't ``(sub)module``.
codesearchnet
def GetParserPluginsInformation(cls, parser_filter_expression=None):
    """Retrieves the parser plugins information.

    Args:
        parser_filter_expression (Optional[str]): parser filter expression,
            where None represents all parsers and plugins.

    Returns:
        list[tuple[str, str]]: pairs of parser plugin names and descriptions.
    """
    plugins_information = []
    parsers = cls.GetParsers(
        parser_filter_expression=parser_filter_expression)
    for _, parser_class in parsers:
        # Parsers without plugin support contribute nothing.
        if not parser_class.SupportsPlugins():
            continue
        for plugin_name, plugin_class in parser_class.GetPlugins():
            plugins_information.append(
                (plugin_name, getattr(plugin_class, 'DESCRIPTION', '')))
    return plugins_information
Retrieves the parser plugins information. Args: parser_filter_expression (Optional[str]): parser filter expression, where None represents all parsers and plugins. Returns: list[tuple[str, str]]: pairs of parser plugin names and descriptions.
codesearchnet
def input_shape(self):
    """Retrieves the input shape(s) of a layer.

    Only applicable if the layer has exactly one input, i.e. if it is
    connected to one incoming layer, or if all inputs have the same shape.

    Returns:
        Input shape, as an integer shape tuple (or list of shape tuples,
        one tuple per input tensor).

    Raises:
        AttributeError: if the layer has never been called, or has multiple
            inbound nodes with different input shapes.
    """
    if not self._inbound_nodes:
        raise AttributeError('The layer has never been called and thus has no defined input shape.')
    # Compare shapes via their string form so unhashable shape structures
    # (lists of tuples) can be deduplicated in a set.
    all_input_shapes = {str(node.input_shapes) for node in self._inbound_nodes}
    if len(all_input_shapes) == 1:
        return self._inbound_nodes[0].input_shapes
    # Fixed: the original error message never closed the quote around the
    # layer name.
    raise AttributeError('The layer "' + str(self.name) + '" has multiple inbound nodes, with different input shapes. Hence the notion of "input shape" is ill-defined for the layer. Use `get_input_shape_at(node_index)` instead.')
Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode.
github-repos
def extract_compile_commands(parsed_aquery_output: _JSONDict) -> list[CompileCommand]:
    """Gathers compile commands to run from `bazel aquery` JSON output.

    Arguments:
        parsed_aquery_output: Parsed JSON representing the output of
            `bazel aquery --output=jsonproto`.

    Returns:
        The list of CompileCommands that should be executed.
    """
    # One CompileCommand per action, built from that action's argv list.
    return [
        CompileCommand.from_args_list(action['arguments'])
        for action in parsed_aquery_output['actions']
    ]
Gathers compile commands to run from `bazel aquery` JSON output. Arguments: parsed_aquery_output: Parsed JSON representing the output of `bazel aquery --output=jsonproto`. Returns: The list of CompileCommands that should be executed.
github-repos
def sequence_ids(self, batch_index: int=0) -> List[Optional[int]]:
    """
    Return a list mapping the tokens to the id of their original sentences:

        - `None` for special tokens added around or between sequences,
        - `0` for tokens corresponding to words in the first sequence,
        - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly
          encoded.

    Args:
        batch_index (`int`, *optional*, defaults to 0):
            The index to access in the batch.

    Returns:
        `List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added
        by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding
        sequence.
    """
    encodings = self._encodings
    if encodings:
        return encodings[batch_index].sequence_ids
    # Slow (pure-Python) tokenizers carry no backend encodings.
    raise ValueError('sequence_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast` class).')
Return a list mapping the tokens to the id of their original sentences: - `None` for special tokens added around or between sequences, - `0` for tokens corresponding to words in the first sequence, - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly encoded. Args: batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. Returns: `List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding sequence.
github-repos
def remove(self, key, name=None):
    """Remove `key`.

    Args:
        key: Scalar key to remove.
        name: A name for the operation (optional).

    Returns:
        The created Operation.

    Raises:
        TypeError: when `key` doesn't match the table data type.
    """
    scope_name = name or '%s_lookup_table_remove' % self._name
    with tf.name_scope(scope_name):
        # Coerce the key to the table's key dtype before dispatching to the
        # generated op.
        key_tensor = tf.convert_to_tensor(key, self._key_dtype, name='key')
        return gen_simple_hash_table_op.examples_simple_hash_table_remove(
            self.resource_handle, key_tensor, value_dtype=self._value_dtype)
Remove `key`. Args: key: Scalar key to remove. name: A name for the operation (optional). Returns: The created Operation. Raises: TypeError: when `key` doesn't match the table data type.
github-repos
def sign_adaptation(control: FloatNest,
                    output: FloatTensor,
                    set_point: FloatTensor,
                    adaptation_rate: FloatTensor = 0.01) -> FloatNest:
  """A function to do simple sign-based control of a variable.

  ```
  control = control * (1. + adaptation_rate) ** sign(output - set_point)
  ```

  Args:
    control: The control variable.
    output: The output variable.
    set_point: The set point for `output`. This function will adjust
      `control` so that `output` matches `set_point`.
    adaptation_rate: Adaptation rate.

  Returns:
    control: New control.
  """
  # Broadcast output/set_point to the structure of control so they can be
  # zipped element-wise below.
  output = maybe_broadcast_structure(output, control)
  set_point = maybe_broadcast_structure(set_point, control)

  def _adapt_one(ctrl, out, target):
    # Multiply when output overshoots the set point, divide otherwise.
    grown = ctrl * (1. + adaptation_rate)
    shrunk = ctrl / (1. + adaptation_rate)
    return mcmc_util.choose(out > target, grown, shrunk)

  return tf.nest.map_structure(_adapt_one, control, output, set_point)
A function to do simple sign-based control of a variable. ``` control = control * (1. + adaptation_rate) ** sign(output - set_point) ``` Args: control: The control variable. output: The output variable. set_point: The set point for `output`. This function will adjust `control` so that `output` matches `set_point`. adaptation_rate: Adaptation rate. Returns: control: New control.
juraj-google-style
def read_uint8(self, little_endian=True):
    """Read 1 byte as an unsigned integer value from the stream.

    Args:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: The unsigned 8-bit integer value read.
    """
    # Endianness is moot for a single byte, but keep the flag for interface
    # symmetry with the multi-byte readers.
    fmt = '<B' if little_endian else '>B'
    return self.unpack(fmt)
Read 1 byte as an unsigned integer value from the stream.

Args:
    little_endian (bool): Specify the endianness. Defaults to little endian.

Returns:
    int: The unsigned 8-bit integer value read from the stream.
codesearchnet
def GetValues(self):
    """Retrieves all values within the key.

    Returns:
        generator[WinRegistryValue]: Windows Registry value generator.
    """
    # Lazily resolve the key from the registry on first access.
    if self._registry and not self._registry_key:
        self._GetKeyFromRegistry()
    key = self._registry_key
    if key is None:
        # No key available: yield nothing.
        return iter([])
    return key.GetValues()
Retrieves all values within the key. Returns: generator[WinRegistryValue]: Windows Registry value generator.
codesearchnet