code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def save_plot(code, elem):
    """Resolve figure-size settings for converting a matplotlib plot to tikz.

    Reads either the combined ``plt`` attribute (format: ``plt=width,height``)
    or the individual ``width``/``height`` attributes of ``elem``, falling
    back to a width of 6cm and a height of 4cm when neither is given.

    Args:
        code: The matplotlib code.
        elem: The element carrying the size attributes.
    """
    if 'plt' in elem.attributes:
        # Combined form: plt=width,height
        figurewidth, figureheight = elem.attributes['plt'].split(',')
    else:
        try:
            figureheight = elem.attributes['height']
        except KeyError:
            figureheight = '4cm'  # default height
        try:
            figurewidth = elem.attributes['width']
        except KeyError:
            figurewidth = '6cm'  # default width
    # NOTE(review): 'return f' is truncated in this extract -- per the
    # function's documentation the original returned the code plus a
    # matplotlib2tikz invocation (presumably an f-string using figurewidth
    # and figureheight); recover the full expression from upstream.
    return f
Converts matplotlib plots to tikz code. If elem has either the plt attribute (format: plt=width,height) or the attributes width=width and/or height=height, the figurewidth and -height are set accordingly. If none are given, a height of 4cm and a width of 6cm is used as default. Args: code: The matplotlib code. elem: The element. Returns: The code and some code to invoke matplotlib2tikz.
juraj-google-style
def convert_exchange_to_compounds(model):
    """Convert exchange reactions in ``model`` to exchange compounds.

    Only single-compound reactions whose compound lives in the model's
    extracellular compartment are converted; converted reactions are removed
    from ``model.reactions`` and their flux limits moved into
    ``model.exchange``.

    Args:
        model: :class:`NativeModel`.
    """
    # First pass: collect IDs of candidate exchange reactions.
    exchanges = set()
    for reaction in model.reactions:
        equation = reaction.properties.get('equation')
        if (equation is None):
            continue
        if (len(equation.compounds) != 1):
            # Warn only when exactly one side is empty (XOR), i.e. the
            # reaction looks like an exchange but has extra compounds.
            # NOTE(review): indentation reconstructed from a flattened
            # extract -- confirm the `continue` placement against upstream.
            if ((len(equation.left) == 0) != (len(equation.right) == 0)):
                logger.warning('Exchange reaction {} has more than one compound, it was not converted to exchange compound'.format(reaction.id))
            continue
        exchanges.add(reaction.id)
    # Second pass: move each candidate into the exchange definition.
    for reaction_id in exchanges:
        equation = model.reactions[reaction_id].equation
        (compound, value) = equation.compounds[0]
        if (compound.compartment != model.extracellular_compartment):
            continue
        if (compound in model.exchange):
            logger.warning('Compound {} is already defined in the exchange definition'.format(compound))
            continue
        (lower_flux, upper_flux) = (None, None)
        if (reaction_id in model.limits):
            (_, lower, upper) = model.limits[reaction_id]
            # Scale the limits by the stoichiometric coefficient magnitude.
            if (lower is not None):
                lower_flux = (lower * abs(value))
            if (upper is not None):
                upper_flux = (upper * abs(value))
        # Irreversible reactions get an implicit zero bound on the blocked side.
        if ((lower_flux is None) and (equation.direction == Direction.Forward)):
            lower_flux = 0
        if ((upper_flux is None) and (equation.direction == Direction.Reverse)):
            upper_flux = 0
        # A positive coefficient means the compound is produced, so the flux
        # bounds flip sign and swap when expressed per-compound.
        if (value > 0):
            (lower_flux, upper_flux) = (((- upper_flux) if (upper_flux is not None) else None), ((- lower_flux) if (lower_flux is not None) else None))
        model.exchange[compound] = (compound, reaction_id, lower_flux, upper_flux)
        model.reactions.discard(reaction_id)
        model.limits.pop(reaction_id, None)
Convert exchange reactions in model to exchange compounds. Only exchange reactions in the extracellular compartment are converted. The extracellular compartment must be defined for the model. Args: model: :class:`NativeModel`.
codesearchnet
def __init__(self, size: DurationTypes, offset: TimestampTypes=0):
    """Initialize a ``FixedWindows`` function for a given size and offset.

    Windows start at ``t = N * size + offset`` where ``t=0`` is the UNIX
    epoch.

    Args:
        size: Size of the window in seconds; must be strictly positive.
        offset: Offset of this window in seconds; normalized into
            ``[0, size)``.

    Raises:
        ValueError: If ``size`` is not strictly positive.
    """
    if size <= 0:
        raise ValueError('The size parameter must be strictly positive.')
    self.size = Duration.of(size)
    # Normalize the offset into [0, size) so window boundaries are well defined.
    self.offset = Timestamp.of(offset) % self.size
Initialize a ``FixedWindows`` function for a given size and offset. Args: size (int): Size of the window in seconds. offset(int): Offset of this window as seconds. Windows start at t=N * size + offset where t=0 is the UNIX epoch. The offset must be a value in range [0, size). If it is not it will be normalized to this range.
github-repos
def seek(self, partition, offset):
    """Manually set the fetch offset for a TopicPartition.

    Overrides the fetch offset the consumer will use on the next poll.

    Args:
        partition (TopicPartition): Partition for the seek operation.
        offset (int): Message offset in the partition; must be >= 0.

    Raises:
        TypeError: If ``partition`` is not a TopicPartition namedtuple.
        AssertionError: If ``offset`` is not an int >= 0, or if the
            partition is not currently assigned to this consumer.
    """
    if (not isinstance(partition, TopicPartition)):
        raise TypeError('partition must be a TopicPartition namedtuple')
    assert (isinstance(offset, int) and (offset >= 0)), 'Offset must be >= 0'
    assert (partition in self._subscription.assigned_partitions()), 'Unassigned partition'
    log.debug('Seeking to offset %s for partition %s', offset, partition)
    self._subscription.assignment[partition].seek(offset)
Manually specify the fetch offset for a TopicPartition. Overrides the fetch offsets that the consumer will use on the next :meth:`~kafka.KafkaConsumer.poll`. If this API is invoked for the same partition more than once, the latest offset will be used on the next :meth:`~kafka.KafkaConsumer.poll`. Note: You may lose data if this API is arbitrarily used in the middle of consumption to reset the fetch offsets. Arguments: partition (TopicPartition): Partition for seek operation offset (int): Message offset in partition Raises: AssertionError: If offset is not an int >= 0; or if partition is not currently assigned.
codesearchnet
def _wait_creative_activation(self, creative_id, timeout=128):
    """Wait for a creative to become active, polling with exponential backoff.

    Args:
        creative_id: Creative identifier.
        timeout: Maximum number of seconds to wait for activation.

    Raises:
        Exception: If the creative does not activate within the timeout.
    """
    # Only poll creatives that are known to the local store.
    if store.get('CREATIVE', creative_id):
        creative = self._api_creatives().get(profileId=self.profile_id, id=str(creative_id)).execute()
        wait = 2  # initial backoff in seconds, doubled after every poll
        while not creative['active'] and timeout > 0:
            print('Waiting %s seconds for creative %s activation...' % (wait, creative_id))
            time.sleep(wait)
            timeout -= wait
            wait *= 2
            creative = self._api_creatives().get(profileId=self.profile_id, id=str(creative_id)).execute()
        if not creative['active']:
            raise Exception('Creative %s failed to activate within defined timeout' % creative['id'])
Waits for a creative to become active. This function checks whether the creative is active, in intervals that increase exponentially (exponential backoff). Args: creative_id: Creative identifier. timeout: Optional parameter, determines how many seconds to wait for the activation. Raises: Exception: In case the creative doesn't activate within the specified timeout
github-repos
def configure_and_build(self, show_progress=True, optimized=True, skip_configuration=False):
    """Configure and build the ns-3 code via waf.

    Args:
        show_progress (bool): whether to display a progress bar during
            compilation.
        optimized (bool): whether to use an optimized build profile; if
            False, a plain ``waf configure`` is used.
        skip_configuration (bool): whether to skip the configuration step
            and only perform compilation.
    """
    if (not skip_configuration):
        configuration_command = ['python', 'waf', 'configure', '--enable-examples', '--disable-gtk', '--disable-python']
        if optimized:
            configuration_command += ['--build-profile=optimized', '--out=build/optimized']
        # Output is piped (and discarded) so configuration runs silently.
        subprocess.call(configuration_command, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    build_process = subprocess.Popen(['python', 'waf', 'build'], cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if show_progress:
        # get_build_output first yields (initial, total), then (current, total)
        # pairs that drive the tqdm progress bar.
        line_iterator = self.get_build_output(build_process)
        pbar = None
        try:
            [initial, total] = next(line_iterator)
            pbar = tqdm(line_iterator, initial=initial, total=total, unit='file', desc='Building ns-3', smoothing=0)
            for (current, total) in pbar:
                pbar.n = current
        except StopIteration:
            # Build produced no progress lines; snap the bar to completion.
            if (pbar is not None):
                pbar.n = pbar.total
    else:
        # No progress display: just wait for the build to finish.
        build_process.communicate()
Configure and build the ns-3 code. Args: show_progress (bool): whether or not to display a progress bar during compilation. optimized (bool): whether to use an optimized build. If False, use a standard ./waf configure. skip_configuration (bool): whether to skip the configuration step, and only perform compilation.
codesearchnet
def parse_functions(bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors) -> Tuple[(Parsed, Errors)]:
    """Parse function names and spans from a BEL string.

    Uses the pre-computed paren locations to find each function name and its
    argument span.

    Args:
        bels: BEL string as a list of chars.
        char_locs: paren, comma, quote character locations.
        parsed: accumulator of parsed elements, keyed by span.
        errors: any error messages generated during the parse.

    Returns:
        (parsed, errors): parsed function entries and error messages.
    """
    parens = char_locs['parens']
    if (not parens):
        # No parens at all: the entire string is one top-level function name.
        bels_len = (len(bels) - 1)
        span = (0, bels_len)
        parsed[span] = {'name': ''.join(bels), 'type': 'Function', 'span': span, 'name_span': span, 'function_level': 'top'}
        return (parsed, errors)
    for sp in sorted(parens):
        (ep, function_level) = parens[sp]
        # A space right before '(' means this paren does not follow a name.
        if (bels[(sp - 1)] == ' '):
            continue
        # Scan backwards from the '(' to find where the function name starts.
        for i in range((sp - 1), 0, (- 1)):
            if (bels[i] in [' ', ',', '(']):
                if (i < (sp - 1)):
                    # ep == -1 marks an unclosed paren; span runs to the end.
                    if (ep == (- 1)):
                        span = ((i + 1), (len(bels) - 1))
                    else:
                        span = ((i + 1), ep)
                    parsed[span] = {'name': ''.join(bels[(i + 1):sp]), 'type': 'Function', 'span': span, 'name_span': ((i + 1), (sp - 1)), 'parens_span': (sp, ep), 'function_level': function_level}
                break
        else:
            # Loop ran off the front: the name starts at position 0.
            if (ep == (- 1)):
                span = (0, (len(bels) - 1))
            else:
                span = (0, ep)
            parsed[span] = {'name': ''.join(bels[0:sp]), 'type': 'Function', 'span': span, 'name_span': (0, (sp - 1)), 'parens_span': (sp, ep), 'function_level': function_level}
    return (parsed, errors)
Parse functions from BEL using paren, comma, quote character locations Args: bels: BEL string as list of chars char_locs: paren, comma, quote character locations errors: Any error messages generated during the parse Returns: (functions, errors): function names and locations and error messages
codesearchnet
def extract_bundle(self, resource, timeout=-1):
    """Extract the existing artifact bundle on the appliance.

    Args:
        resource (dict): Artifact Bundle to extract.
        timeout: Timeout in seconds. Waits for task completion by default;
            the timeout does not abort the operation in OneView, it only
            stops waiting for its completion.

    Returns:
        dict: The Artifact Bundle.
    """
    return self._client.update(resource, timeout=timeout, custom_headers={"Content-Type": "text/plain"})
Extracts the existing bundle on the appliance and creates all the artifacts. Args: resource (dict): Artifact Bundle to extract. timeout: Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation in OneView, it just stops waiting for its completion. Returns: dict: The Artifact Bundle.
juraj-google-style
def has_key(cls, *args):
    """Report whether a flyweight object with the given key already exists.

    A single positional argument is used directly as the key; multiple
    arguments are combined into a tuple key.

    Returns:
        bool: True if already created, False if not.
    """
    if len(args) > 1:
        lookup = args
    else:
        lookup = args[0]
    return lookup in cls._instances
Check whether flyweight object with specified key has already been created. Returns: bool: True if already created, False if not
codesearchnet
def emit(self, signal, message, analysis_id):
    """Emit a signal to main over the ZeroMQ publish socket.

    Args:
        signal: Name of the signal to be emitted.
        message: Message payload to be sent.
        analysis_id: Identifies the instance of this analysis.
    """
    log.debug('kernel {} zmq send ({}): {}'.format(analysis_id, signal, message))
    # Frames are JSON-encoded as UTF-8 bytes before publishing.
    self.zmq_publish.send(json.dumps({'analysis_id': analysis_id, 'frame': {'signal': signal, 'load': message}}, default=json_encoder_default).encode('utf-8'))
Emit signal to main. Args: signal: Name of the signal to be emitted. message: Message to be sent. analysis_id: Identifies the instance of this analysis.
codesearchnet
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_2_0):
    """Write the Attributes structure encoding to the data stream.

    Args:
        output_stream (stream): A data stream supporting a write method in
            which to encode the Attributes structure.
        kmip_version (enum): The KMIPVersion with which to encode.
            Optional, defaults to KMIP 2.0.

    Raises:
        VersionNotSupported: If ``kmip_version`` predates KMIP 2.0, which
            introduced the Attributes structure.
        AttributeNotSupported: If an attribute in the list is not supported
            by the requested KMIP version.
    """
    if (kmip_version < enums.KMIPVersion.KMIP_2_0):
        raise exceptions.VersionNotSupported('KMIP {} does not support the Attributes object.'.format(kmip_version.value))
    # Encode attributes into a scratch buffer first so the total length is
    # known before the enclosing header is written.
    local_stream = BytearrayStream()
    for attribute in self._attributes:
        tag = attribute.tag
        if (not enums.is_attribute(tag, kmip_version=kmip_version)):
            raise exceptions.AttributeNotSupported('Attribute {} is not supported by KMIP {}.'.format(tag.name, kmip_version.value))
        attribute.write(local_stream, kmip_version=kmip_version)
    self.length = local_stream.length()
    # Parent writes the tag/type/length header, then the payload follows.
    super(Attributes, self).write(output_stream, kmip_version=kmip_version)
    output_stream.write(local_stream.buffer)
Write the Attributes structure encoding to the data stream. Args: output_stream (stream): A data stream in which to encode Attributes structure data, supporting a write method. kmip_version (enum): A KMIPVersion enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 2.0. Raises: AttributeNotSupported: Raised if an unsupported attribute is found in the attribute list while encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the Attributes object.
codesearchnet
def list_projects(self, entity=None):
    """List projects in W&B scoped by entity.

    Args:
        entity (str, optional): The entity to scope this query to; defaults
            to the entity from settings.

    Returns:
        list: Flattened project edges (dicts with id/name/description).
    """
    # NOTE(review): gql() is called with no query string here -- the GraphQL
    # query text appears to have been stripped from this extract; restore the
    # original query document from upstream before use.
    query = gql()
    return self._flatten_edges(self.gql(query, variable_values={
        'entity': entity or self.settings('entity')})['models'])
Lists projects in W&B scoped by entity. Args: entity (str, optional): The entity to scope this project to. Returns: [{"id","name","description"}]
juraj-google-style
def get_mapping(self):
    """Convert the instance's attributes to a dict, dropping falsy values.

    Returns:
        dict: Copy of ``self.__dict__`` without empty/zero/None entries.
    """
    # Fixed: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; .items() is correct on both.
    return {key: val for key, val in self.__dict__.items() if val}
Convert the class to dict. Returns: dict: Copy of ``self.__dict__``.
codesearchnet
def _check_required_fields(self, fields=None, either_fields=None):
    """Check that required fields have values.

    Every entry in ``fields`` must have a truthy value. For each dict in
    ``either_fields``, at least one of its values must be truthy.

    Args:
        fields (dict): field name -> value pairs that are all required.
        either_fields (list of dict): groups where at least one value per
            group is required.

    Raises:
        HSException: If a required field is missing a value, or no value is
            found in one of the ``either_fields`` groups.

    Returns:
        None
    """
    # Fixed: the default fields=None previously crashed with AttributeError
    # on fields.items(); treat None as "nothing required".
    if fields is None:
        fields = {}
    for key, value in fields.items():
        if not value:
            raise HSException("Field '%s' is required." % key)
    if either_fields is not None:
        for field in either_fields:
            if not any(field.values()):
                raise HSException('One of the following fields is required: %s' % ', '.join(field.keys()))
Check the values of the fields If no value found in `fields`, an exception will be raised. `either_fields` are the fields where at least one of them must have a value Raises: HSException: If no value found in at least one item of `fields`, or no value found in one of the items of `either_fields` Returns: None
codesearchnet
def zip_fit_params(data):
    """Fit zero-inflated Poisson (ZIP) parameters to a cluster's data.

    Uses a method-of-moments estimate from the per-gene mean and variance.

    Args:
        data (array): 2d array of genes x cells belonging to a given cluster.

    Returns:
        tuple: (L, M) where L is a 1d array of means and M is a 1d array of
        zero-inflation parameters, both clamped to valid ranges.
    """
    n_genes, n_cells = data.shape
    means = data.mean(1)
    variances = data.var(1)
    # Zero-inflation estimate, clamped elementwise into [0, 1].
    zero_frac = (variances - means) / (means ** 2 + variances - means)
    zero_frac = np.array([min(1.0, max(0.0, value)) for value in zero_frac])
    # Poisson mean estimate; NaNs (from zero means) become 0, then clamp >= 0.
    lam = means + variances / means - 1.0
    lam[np.isnan(lam)] = 0.0
    lam = np.array([max(0.0, value) for value in lam])
    return lam, zero_frac
Returns the ZIP parameters that best fit a given data set. Args: data (array): 2d array of genes x cells belonging to a given cluster Returns: L (array): 1d array of means M (array): 1d array of zero-inflation parameter
juraj-google-style
def _GetRequestClass(self, method_descriptor):
    """Return the class of the request protocol message for a method.

    Args:
        method_descriptor: Descriptor of the method for which to return the
            request protocol message class.

    Returns:
        The concrete class representing the input message of the method.

    Raises:
        RuntimeError: If the method does not belong to this service.
    """
    if method_descriptor.containing_service != self.descriptor:
        raise RuntimeError(
            'GetRequestClass() given method descriptor for wrong service type.')
    return method_descriptor.input_type._concrete_class
Returns the class of the request protocol message. Args: method_descriptor: Descriptor of the method for which to return the request protocol message class. Returns: A class that represents the input protocol message of the specified method.
juraj-google-style
def diff_cleanupEfficiency(self, diffs):
    """Reduce edits by eliminating operationally trivial equalities in place.

    Short equalities surrounded by insertions/deletions cost more to keep
    than to fold into the surrounding edits (relative to Diff_EditCost).

    Args:
        diffs: Array of (op, text) diff tuples; modified in place.
    """
    changes = False
    equalities = []          # indices of candidate equalities
    lastEquality = None      # text of the most recent candidate equality
    pointer = 0
    # Is there an insertion/deletion before/after the last equality?
    pre_ins = False
    pre_del = False
    post_ins = False
    post_del = False
    while (pointer < len(diffs)):
        if (diffs[pointer][0] == self.DIFF_EQUAL):
            if ((len(diffs[pointer][1]) < self.Diff_EditCost) and (post_ins or post_del)):
                # Short equality preceded by an edit: candidate for removal.
                equalities.append(pointer)
                pre_ins = post_ins
                pre_del = post_del
                lastEquality = diffs[pointer][1]
            else:
                # Long (or isolated) equality: not worth folding.
                equalities = []
                lastEquality = None
            post_ins = post_del = False
        else:
            if (diffs[pointer][0] == self.DIFF_DELETE):
                post_del = True
            else:
                post_ins = True
            # Fold the equality when it is surrounded on all four sides, or
            # when it is very short and surrounded on three of four sides.
            if (lastEquality and ((pre_ins and pre_del and post_ins and post_del) or ((len(lastEquality) < (self.Diff_EditCost / 2)) and ((((pre_ins + pre_del) + post_ins) + post_del) == 3)))):
                # Replace the equality with a delete + re-insert pair.
                diffs.insert(equalities[(- 1)], (self.DIFF_DELETE, lastEquality))
                diffs[(equalities[(- 1)] + 1)] = (self.DIFF_INSERT, diffs[(equalities[(- 1)] + 1)][1])
                equalities.pop()
                lastEquality = None
                if (pre_ins and pre_del):
                    # Changes made which could affect previous entry; keep going.
                    post_ins = post_del = True
                    equalities = []
                else:
                    # Back up to the previous equality and re-examine it.
                    if len(equalities):
                        equalities.pop()
                    if len(equalities):
                        pointer = equalities[(- 1)]
                    else:
                        pointer = (- 1)
                    post_ins = post_del = False
                changes = True
        pointer += 1
    if changes:
        self.diff_cleanupMerge(diffs)
Reduce the number of edits by eliminating operationally trivial equalities. Args: diffs: Array of diff tuples.
codesearchnet
def create_hammersley_samples(order, dim=1, burnin=(- 1), primes=()):
    """Create samples from the Hammersley set.

    For ``dim == 1`` the sequence falls back to the Van Der Corput sequence.

    Args:
        order (int): Number of samples to create.
        dim (int): Number of dimensions in the Hammersley sequence.
        burnin (int): Skip the first ``burnin`` samples; if negative, the
            maximum of ``primes`` is used.
        primes (tuple): The (non-)prime bases to use along each axis; if
            empty, growing prime values starting from 2 are used.

    Returns:
        (numpy.ndarray): Hammersley set with ``shape == (dim, order)``.
    """
    if dim == 1:
        # One dimension degenerates to a Halton / Van Der Corput sequence.
        return create_halton_samples(order=order, dim=1, burnin=burnin, primes=primes)
    samples = numpy.empty((dim, order), dtype=float)
    # The final axis is an evenly spaced open grid on (0, 1); the remaining
    # axes come from a Halton sequence of one fewer dimension.
    samples[dim - 1] = numpy.linspace(0, 1, order + 2)[1:-1]
    samples[:dim - 1] = create_halton_samples(order=order, dim=dim - 1, burnin=burnin, primes=primes)
    return samples
Create samples from the Hammersley set. For ``dim == 1`` the sequence falls back to Van Der Corput sequence. Args: order (int): The order of the Hammersley sequence. Defines the number of samples. dim (int): The number of dimensions in the Hammersley sequence. burnin (int): Skip the first ``burnin`` samples. If negative, the maximum of ``primes`` is used. primes (tuple): The (non-)prime base to calculate values along each axis. If empty, growing prime values starting from 2 will be used. Returns: (numpy.ndarray): Hammersley set with ``shape == (dim, order)``.
codesearchnet
def get_keys(keyfiles, signature_type):
    """Get public keys for the given keyfiles.

    Args:
        keyfiles: List of filenames with public keys, or ``:mozilla-``
            prefixed builtin key names.
        signature_type: one of 'sha1' or 'sha384'.

    Returns:
        List of public keys as strings.

    Raises:
        ValueError: If a ``:mozilla-`` key name is not a known builtin for
            the given signature type.
    """
    builtin_keys = {
        ('release', 'sha1'): [mardor.mozilla.release1_sha1, mardor.mozilla.release2_sha1],
        ('release', 'sha384'): [mardor.mozilla.release1_sha384, mardor.mozilla.release2_sha384],
        ('nightly', 'sha1'): [mardor.mozilla.nightly1_sha1, mardor.mozilla.nightly2_sha1],
        ('nightly', 'sha384'): [mardor.mozilla.nightly1_sha384, mardor.mozilla.nightly2_sha384],
        ('dep', 'sha1'): [mardor.mozilla.dep1_sha1, mardor.mozilla.dep2_sha1],
        ('dep', 'sha384'): [mardor.mozilla.dep1_sha384, mardor.mozilla.dep2_sha384],
        ('autograph-stage', 'sha384'): [mardor.mozilla.autograph_stage_sha384],
    }
    keys = []
    for keyfile in keyfiles:
        if keyfile.startswith(':mozilla-'):
            name = keyfile.split(':mozilla-')[1]
            try:
                keys.extend(builtin_keys[name, signature_type])
            except KeyError:
                raise ValueError('Invalid internal key name: {}'
                                 .format(keyfile))
        else:
            # Fixed: use a context manager so the file handle is closed
            # deterministically (the original leaked the handle).
            with open(keyfile, 'rb') as key_fd:
                keys.append(key_fd.read())
    return keys
Get public keys for the given keyfiles. Args: keyfiles: List of filenames with public keys, or :mozilla- prefixed key names signature_type: one of 'sha1' or 'sha384' Returns: List of public keys as strings
juraj-google-style
def convert_concat(params, w_name, scope_name, inputs, layers, weights, names):
    """Convert a concatenation node to a Keras Concatenate layer.

    Args:
        params: dictionary with layer parameters (uses 'axis').
        w_name: name prefix in state_dict.
        scope_name: pytorch scope name; result is stored under this key.
        inputs: pytorch node inputs.
        layers: dictionary with keras tensors.
        weights: pytorch state_dict (unused here).
        names: 'short' for generated short names, 'keep' to reuse w_name,
            anything else appends a random suffix to w_name.
    """
    print('Converting concat ...')
    concat_nodes = [layers[i] for i in inputs]
    if len(concat_nodes) == 1:
        # Single input: concatenation is a no-op, pass the tensor through.
        layers[scope_name] = concat_nodes[0]
        return
    if names == 'short':
        tf_name = 'CAT' + random_string(5)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())
    cat = keras.layers.Concatenate(name=tf_name, axis=params['axis'])
    layers[scope_name] = cat(concat_nodes)
Convert concatenation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def _create(self, monomer, mon_vector):
    """Create the polymer by repeatedly attaching monomer units.

    Args:
        monomer (Molecule): monomer unit to append on each step.
        mon_vector (numpy.array): vector from the monomer's start atom index
            to its end atom index.
    """
    # Keep adding monomers until the chain reaches n_units - 1 links.
    while (self.length != (self.n_units - 1)):
        if self.linear_chain:
            # Linear chains always extend along the normalized monomer vector.
            move_direction = (np.array(mon_vector) / np.linalg.norm(mon_vector))
        else:
            # Otherwise pick a fresh direction each step.
            move_direction = self._next_move_direction()
        self._add_monomer(monomer.copy(), mon_vector, move_direction)
create the polymer from the monomer Args: monomer (Molecule) mon_vector (numpy.array): molecule vector that starts from the start atom index to the end atom index
codesearchnet
def _ova_to_spec(self, filename):
    """Extract an OVA archive and derive a disk spec, memory and vcpu count.

    Extracts the archive next to the file, parses the single OVF inside it,
    and reads the CPU, memory and disk resource items.

    Args:
        filename (str): path to the OVA archive.

    Returns:
        tuple: (disk_spec, memory, vcpus) where disk_spec is a list of dicts,
        memory is the VM memory in MegaBytes (None if undefined) and vcpus is
        the virtual cpu count (None if undefined).

    Raises:
        RuntimeError: If the OVA contains more than one VM or machine
            description.
        TypeError: If the memory units are not MegaBytes.
    """
    ova_extracted_dir = os.path.splitext(filename)[0]
    if (not os.path.exists(ova_extracted_dir)):
        os.makedirs(ova_extracted_dir)
    subprocess.check_output(['tar', '-xvf', filename, '-C', ova_extracted_dir], stderr=subprocess.STDOUT)
    ovf = glob.glob((ova_extracted_dir + '/master/vms/*/*.ovf'))
    if (len(ovf) != 1):
        raise RuntimeError('We support only one vm in ova')
    image_file = None
    memory = None
    vcpus = None
    with open(ovf[0]) as fd:
        obj = xmltodict.parse(fd.read())
    hardware_items = [section for section in obj['ovf:Envelope']['Content']['Section'] if (section['@xsi:type'] == 'ovf:VirtualHardwareSection_Type')]
    if (len(hardware_items) != 1):
        raise RuntimeError('We support only one machine desc in ova')
    hardware_items = hardware_items[0]
    for item in hardware_items['Item']:
        # OVF ResourceType codes for the items we care about.
        CPU_RESOURCE = 3
        MEMORY_RESOURCE = 4
        DISK_RESOURCE = 17
        resource_type = int(item['rasd:ResourceType'])
        if (resource_type == CPU_RESOURCE):
            vcpus = (int(item['rasd:cpu_per_socket']) * int(item['rasd:num_of_sockets']))
        elif (resource_type == MEMORY_RESOURCE):
            memory = int(item['rasd:VirtualQuantity'])
            if (item['rasd:AllocationUnits'] != 'MegaBytes'):
                raise TypeError('Fix me : we need to suport other units too')
        elif (resource_type == DISK_RESOURCE):
            image_file = item['rasd:HostResource']
    if (image_file is not None):
        disk_meta = {'root-partition': '/dev/sda1'}
        disk_spec = [{'type': 'template', 'template_type': 'qcow2', 'format': 'qcow2', 'dev': 'vda', 'name': os.path.basename(image_file), 'path': ((ova_extracted_dir + '/images/') + image_file), 'metadata': disk_meta}]
    # NOTE(review): if no disk item is present, disk_spec is unbound here and
    # this return raises NameError -- confirm intended behavior upstream.
    return (disk_spec, memory, vcpus)
Retrieve the given ova and make a template of it. Creates a disk from a network-provided ova. Calculates the needed memory from the ovf. The disk will be cached in the template repo Args: filename(str): the url to retrieve the data from TODO: * Add hash checking against the server for faster download and latest version * Add config script running on host - other place * Add cloud init support - by using cdroms in other place * Handle cpu in some way - some other place needs to pick it up * Handle the memory units properly - we just assume MegaBytes Returns: list of dict: list with the disk specification int: VM memory, None if none defined int: Number of virtual cpus, None if none defined Raises: RuntimeError: If the ova format is not supported TypeError: If the memory units in the ova are not supported (currently only 'MegaBytes')
codesearchnet
def get_tensors_from_tensor_names(graph, tensor_names):
    """Look up the Tensors named by ``tensor_names`` in ``graph``.

    Args:
        graph: TensorFlow Graph.
        tensor_names: List of strings naming tensors in the graph.

    Returns:
        A list of Tensor objects in the same order the names are provided.

    Raises:
        ValueError: If a name is not a string, or names do not resolve to
            tensors in the graph.
    """
    # Index every tensor produced by every op, keyed by its canonical name.
    name_to_tensor = {}
    for op in graph.get_operations():
        for candidate in op.values():
            name_to_tensor[get_tensor_name(candidate)] = candidate
    resolved = []
    missing = []
    for name in tensor_names:
        if not isinstance(name, str):
            raise ValueError("Invalid type for a tensor name in the provided graph. Expected type for a tensor name is 'str', instead got type '{}' for tensor name '{}'".format(type(name), name))
        match = name_to_tensor.get(name)
        if match is None:
            missing.append(name)
        else:
            resolved.append(match)
    if missing:
        raise ValueError("Invalid tensors '{}' were found.".format(','.join(missing)))
    return resolved
Gets the Tensors associated with the `tensor_names` in the provided graph. Args: graph: TensorFlow Graph. tensor_names: List of strings that represent names of tensors in the graph. Returns: A list of Tensor objects in the same order the names are provided. Raises: ValueError: tensor_names contains an invalid tensor name.
github-repos
def mod(self, other, axis="columns", level=None, fill_value=None):
    """Mod this DataFrame against another DataFrame/Series/scalar.

    Args:
        other: The object to mod this DataFrame by.
        axis: The axis to mod over.
        level: The MultiIndex level to apply mod over.
        fill_value: The value to fill NaNs with.

    Returns:
        A new DataFrame with the mod applied.
    """
    return self._binary_op(
        "mod", other, axis=axis, level=level, fill_value=fill_value
    )
Mods this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the mod against this. axis: The axis to mod over. level: The Multilevel index level to apply mod over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Mod applied.
juraj-google-style
def save(self, config=None):
    """Save config to config files.

    Args:
        config (configobj.ConfigObj): optional single config object to save;
            if omitted, all known config levels are written.

    Raises:
        dvc.config.ConfigError: if writing a config file fails.
    """
    if config is not None:
        clist = [config]
    else:
        # Write every config level, from system-wide down to local.
        clist = [
            self._system_config,
            self._global_config,
            self._repo_config,
            self._local_config,
        ]
    for conf in clist:
        # Configs without a backing file cannot be persisted.
        if conf.filename is None:
            continue
        try:
            logger.debug("Writing '{}'.".format(conf.filename))
            dname = os.path.dirname(os.path.abspath(conf.filename))
            try:
                os.makedirs(dname)
            except OSError as exc:
                # The directory already existing is fine; anything else is not.
                if exc.errno != errno.EEXIST:
                    raise
            conf.write()
        except Exception as exc:
            msg = "failed to write config '{}'".format(conf.filename)
            raise ConfigError(msg, exc)
Saves config to config files. Args: config (configobj.ConfigObj): optional config object to save. Raises: dvc.config.ConfigError: thrown if failed to write config file.
juraj-google-style
def slice_inputs(self, indices_dataset, inputs):
    """Slice inputs into a Dataset of batches.

    Given a Dataset of batch indices and the unsliced inputs, slices the
    inputs in a parallelized fashion and produces a dataset of input batches.

    Args:
        indices_dataset: A Dataset of batched indices.
        inputs: A python data structure containing the inputs, targets, and
            possibly sample weights.

    Returns:
        A Dataset of input batches matching the batch indices.
    """
    # Pair each batch of indices with the (repeated) full inputs so gather
    # can slice out the batch.
    dataset = dataset_ops.DatasetV2.zip((indices_dataset, dataset_ops.DatasetV2.from_tensors(inputs).repeat()))

    def grab_batch(i, data):
        # Gather the i-th batch rows from every tensor in the structure.
        return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)
    dataset = dataset.map(grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
    # Default optimizations are disabled so the slicing behavior is preserved.
    options = options_lib.Options()
    options.experimental_optimization.apply_default_optimizations = False
    if self._shuffle:
        # External state policy must be ignored when shuffling, since the
        # shuffle order is regenerated each epoch.
        options.experimental_external_state_policy = options_lib.ExternalStatePolicy.IGNORE
    dataset = dataset.with_options(options)
    return dataset
Slice inputs into a Dataset of batches. Given a Dataset of batch indices and the unsliced inputs, this step slices the inputs in a parallelized fashion and produces a dataset of input batches. Args: indices_dataset: A Dataset of batched indices inputs: A python data structure that contains the inputs, targets, and possibly sample weights. Returns: A Dataset of input batches matching the batch indices.
github-repos
def __init__(self, group, tipe, kind, name, version):
    """Create an instance of a component descriptor.

    Any field given as the wildcard string "*" is stored as None.

    Args:
        group: logical group, e.g. 'pip-services-runtime'.
        tipe: external type, e.g. 'cache', 'services' or 'controllers'.
        kind: implementation kind, e.g. 'memory', 'file' or 'memcached'.
        name: internal content name.
        version: compatibility version, e.g. '1.0', '1.5' or '10.4'.
    """
    def _unwildcard(value):
        # "*" is a match-anything marker and is normalized to None.
        return None if value == "*" else value

    self._group = _unwildcard(group)
    self._type = _unwildcard(tipe)
    self._kind = _unwildcard(kind)
    self._name = _unwildcard(name)
    self._version = _unwildcard(version)
Creates instance of a component descriptor Args: group: logical group: 'pip-services-runtime', 'pip-services-logging' type: external type: 'cache', 'services' or 'controllers' kind - implementation: 'memory', 'file' or 'memcached' name - internal content version: compatibility version: '1.0'. '1.5' or '10.4'
juraj-google-style
def _tf_core_packed_nest_with_indices(structure, flat, index, is_nested_fn, sequence_fn=None):
    """Helper for pack_sequence_as: rebuild ``structure`` from ``flat``.

    Args:
        structure: structure to mimic.
        flat: flattened values to output substructures for.
        index: index at which to start reading from ``flat``.
        is_nested_fn: function used to test whether a value should be
            treated as a nested structure.
        sequence_fn: function used to generate a new structure instance;
            defaults to ``sequence_like``.

    Returns:
        The tuple (new_index, packed) where ``new_index`` is the updated
        index into ``flat`` and ``packed`` is the subset of ``flat``
        corresponding to ``structure``, packed into the same nested format.
    """
    packed = []
    sequence_fn = sequence_fn or sequence_like
    for s in _tf_core_yield_value(structure):
        if is_nested_fn(s):
            # Recurse into the substructure, consuming a slice of `flat`.
            new_index, child = _tf_core_packed_nest_with_indices(s, flat, index, is_nested_fn, sequence_fn)
            packed.append(sequence_fn(s, child))
            index = new_index
        else:
            # Atom: consume exactly one flat value.
            packed.append(flat[index])
            index += 1
    return (index, packed)
Helper function for pack_sequence_as. Args: structure: structure to mimic. flat: Flattened values to output substructure for. index: Index at which to start reading from flat. is_nested_fn: Function used to test if a value should be treated as a nested structure. sequence_fn: Function used to generate a new structure instance. Returns: The tuple (new_index, child), where: * new_index - the updated index into `flat` having processed `structure`. * packed - the subset of `flat` corresponding to `structure`, having started at `index`, and packed into the same nested format. Raises: ValueError: if `structure` contains more atoms than `flat` (assuming indexing starts from `index`).
github-repos
def decode_response(data):
    """Decode the headers of an SSDP response.

    Args:
        data (bytes): The encoded response.

    Returns:
        Case-insensitive dictionary of header name to header value pairs
        extracted from the response (the status line is skipped).
    """
    headers = CaseInsensitiveDict()
    # First line is the HTTP status line; the rest are "Name: value" headers.
    for raw_line in data.decode('utf-8').splitlines()[1:]:
        raw_line = raw_line.strip()
        if not raw_line:
            continue
        header_name, _, header_value = raw_line.partition(':')
        headers[header_name.strip()] = header_value.strip()
    return headers
Decodes the data from a SSDP response. Args: data (bytes): The encoded response. Returns: dict of string -> string: Case-insensitive dictionary of header name to header value pairs extracted from the response.
codesearchnet
def describe(self, **kwargs):
    """Generate descriptive statistics.

    Returns:
        DataFrame object containing the descriptive statistics of the
        DataFrame.
    """
    # Determine the output columns by describing an empty frame with the
    # same schema: pandas drops non-applicable columns from describe().
    new_columns = pandas.DataFrame(columns=self.columns).astype(self.dtypes).describe(**kwargs).columns

    def describe_builder(df, internal_indices=[], **kwargs):
        # Describe only the requested column positions of this partition.
        # Fixed: the original `df.iloc[(:, internal_indices)]` was a syntax
        # error; positional column selection is `df.iloc[:, ...]`.
        return df.iloc[:, internal_indices].describe(**kwargs)

    func = self._prepare_method(describe_builder, **kwargs)
    new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns)
    new_index = self.compute_index(0, new_data, False)
    return self.__constructor__(new_data, new_index, new_columns)
Generates descriptive statistics. Returns: DataFrame object containing the descriptive statistics of the DataFrame.
codesearchnet
def handle_message(self, msg, host):
    """Process a message delivered by the transport protocol.

    Args:
        msg (string): The raw packet data delivered from the transport
            protocol.
        host (tuple): (address, port) combination of the message's origin.

    Returns:
        A serialized response to send back, or None if no response is
        required.
    """
    logger.debug('Executing handle_message method.')
    response = None
    # Decrypt only when a server key has been negotiated.
    if (self.encryption and self.server_key):
        msg_data = unserialize_data(msg, self.compression, self.encryption)
    else:
        msg_data = unserialize_data(msg, self.compression)
    logger.debug(('Packet received: ' + pformat(msg_data)))
    if (not msg_data):
        return response
    if ('method' in msg_data):
        if (msg_data['method'] == 'OHAI Client'):
            # Autodiscover reply: record the server, optionally auto-register.
            logger.debug(('<%s> Autodiscover response from server received from: %s' % (self.cuuid, host[0])))
            self.discovered_servers[host] = [msg_data['version'], msg_data['server_name']]
            if self.autoregistering:
                self.register(host)
                self.autoregistering = False
        elif (msg_data['method'] == 'NOTIFY'):
            # Buffer the notify event and acknowledge it.
            self.event_notifies[msg_data['euuid']] = msg_data['event_data']
            logger.debug(('<%s> Notify received' % self.cuuid))
            logger.debug(('<%s> Notify event buffer: %s' % (self.cuuid, pformat(self.event_notifies))))
            response = serialize_data({'cuuid': str(self.cuuid), 'method': 'OK NOTIFY', 'euuid': msg_data['euuid']}, self.compression, self.encryption, self.server_key)
        elif (msg_data['method'] == 'OK REGISTER'):
            # Registration confirmed; adopt the server and its public key.
            logger.debug(('<%s> Ok register received' % self.cuuid))
            self.registered = True
            self.server = host
            if (('encryption' in msg_data) and self.encryption):
                self.server_key = PublicKey(msg_data['encryption'][0], msg_data['encryption'][1])
        elif ((msg_data['method'] == 'LEGAL') or (msg_data['method'] == 'ILLEGAL')):
            # Event legality verdict from the server; acknowledge it.
            logger.debug(('<%s> Legality message received' % str(self.cuuid)))
            self.legal_check(msg_data)
            response = serialize_data({'cuuid': str(self.cuuid), 'method': 'OK EVENT', 'euuid': msg_data['euuid']}, self.compression, self.encryption, self.server_key)
    logger.debug('Packet processing completed')
    return response
Processes messages that have been delivered from the transport protocol Args: msg (string): The raw packet data delivered from the transport protocol. host (tuple): A tuple containing the (address, port) combination of the message's origin. Returns: A formatted response to the client with the results of the processed message. Examples: >>> msg {"method": "OHAI Client", "version": "1.0"} >>> host ('192.168.0.20', 36545)
codesearchnet
def isparent(path1, path2):
    """Check whether ``path1`` is a parent directory of ``path2``.

    Trailing slashes on ``path1`` are ignored, and a path counts as a parent
    of itself.

    Arguments:
        path1 (str): A PyFilesystem path.
        path2 (str): A PyFilesystem path.

    Returns:
        bool: True if ``path1`` is a parent directory of ``path2``.

    Example:
        >>> isparent("foo/bar", "foo/bar/spam.txt")
        True
        >>> isparent("foo/barry", "foo/baz/bar")
        False
    """
    parent_parts = path1.split('/')
    child_parts = path2.split('/')
    # Drop empty components produced by trailing slashes on the parent.
    while parent_parts and parent_parts[-1] == '':
        parent_parts.pop()
    if len(parent_parts) > len(child_parts):
        return False
    # Every component of the parent must match the child's prefix.
    return all(a == b for a, b in zip(parent_parts, child_parts))
Check if ``path1`` is a parent directory of ``path2``. Arguments: path1 (str): A PyFilesytem path. path2 (str): A PyFilesytem path. Returns: bool: `True` if ``path1`` is a parent directory of ``path2`` Example: >>> isparent("foo/bar", "foo/bar/spam.txt") True >>> isparent("foo/bar/", "foo/bar") True >>> isparent("foo/barry", "foo/baz/bar") False >>> isparent("foo/bar/baz/", "foo/baz/bar") False
codesearchnet
def doc_replace(match, sphinx_docs):
    """Convert a Sphinx ``:doc:`` match into a plain reST link.

    Args:
        match (_sre.SRE_Match): Regex match with ``value`` and ``path``
            named groups.
        sphinx_docs (list): Accumulator tracking the document paths that
            have been encountered; the matched path is appended to it.

    Returns:
        str: The matched value rendered as a reST link.
    """
    doc_path = match.group("path")
    sphinx_docs.append(doc_path)
    link_text = match.group("value")
    return "`{}`_".format(link_text)
Convert Sphinx ``:doc:`` to plain reST link. Args: match (_sre.SRE_Match): A match (from ``re``) to be used in substitution. sphinx_docs (list): List to be track the documents that have been encountered. Returns: str: The ``match`` converted to a link.
juraj-google-style
def run_scratch(self, path_to_scratch, num_cores=1, outname=None, outdir=None, force_rerun=False):
    """Run SCRATCH on the sequence file loaded into the class.

    Args:
        path_to_scratch: Path to the SCRATCH executable,
            run_SCRATCH-1D_predictors.sh.
        num_cores: Number of cores to pass to SCRATCH.
        outname: Prefix for the output files; defaults to the project name.
        outdir: Directory to store the output files.
        force_rerun: Force rerunning SCRATCH even if output files exist.
    """
    if (not outname):
        outname = self.project_name
    if (not outdir):
        outdir = ''
    outname = op.join(outdir, outname)
    # Record the expected SCRATCH output paths on the instance.
    self.out_sspro = '{}.ss'.format(outname)
    self.out_sspro8 = '{}.ss8'.format(outname)
    self.out_accpro = '{}.acc'.format(outname)
    self.out_accpro20 = '{}.acc20'.format(outname)
    # The .ss file is used as the sentinel to decide whether to rerun.
    ssbio.utils.command_runner(shell_command='{} {} {} {}'.format(path_to_scratch, self.seq_file, outname, num_cores), force_rerun_flag=force_rerun, outfile_checker='{}.ss'.format(outname))
Run SCRATCH on the sequence_file that was loaded into the class. Args: path_to_scratch: Path to the SCRATCH executable, run_SCRATCH-1D_predictors.sh outname: Prefix to name the output files outdir: Directory to store the output files force_rerun: Flag to force rerunning of SCRATCH even if the output files exist Returns:
codesearchnet
def get_sid_from_name(name):
    """Look up the SID for a user or group name.

    Args:
        name (str): Name of the user or group; None is treated as
            'NULL SID'.

    Returns:
        str: The corresponding SID as a string.

    Raises:
        CommandExecutionError: If the account cannot be found.
    """
    if name is None:
        name = 'NULL SID'
    try:
        sid = win32security.LookupAccountName(None, name)[0]
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'User {0} not found: {1}'.format(name, exc))
    return win32security.ConvertSidToStringSid(sid)
This is a tool for getting a sid from a name. The name can be any object. Usually a user or a group Args: name (str): The name of the user or group for which to get the sid Returns: str: The corresponding SID
juraj-google-style
def execute_by_options(args):
    """Dispatch to a documentation or packaging action by parsed arguments.

    Args:
        args (dict): command line argument dictionary with a 'subcommand'
            key ('sphinx' or 'offline_dist') plus action flags.
    """
    if args['subcommand'] == 'sphinx':
        s = Sphinx(proj_info)
        if args['quickstart']:
            s.quickstart()
        elif args['gen_code_api']:
            s.gen_code_api()
        elif args['rst2html']:
            s.rst2html()
        pass  # no-op; retained from original
    elif args['subcommand'] == 'offline_dist':
        pod = PyOfflineDist()
        if args['freeze_deps']:
            pod.freeze_deps()
        elif args['download_deps']:
            pod.download_deps()
        elif args['install_deps']:
            pod.install_deps()
        elif args['clean_deps']:
            pod.clean_deps()
        elif args['mkbinary']:
            pod.pyinstaller_mkbinary(args['mkbinary'])
        elif args['clean_binary']:
            pod.clean_binary()
        pass  # no-op; retained from original
execute by argument dictionary Args: args (dict): command line argument dictionary
juraj-google-style
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model built from a pretrained config.

    Args:
        config_name: which pretrained config to use.
        save_dir: where to save the resulting model and tokenizer.
        config_kwargs: passed to AutoConfig.

    Returns:
        The randomly initialized model.

    Usage::
        save_randomly_initialized_version("facebook/bart-large-cnn",
            "distilbart_random_cnn_6_3", encoder_layers=6,
            decoder_layers=3, num_beams=3)
    """
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    # from_config (unlike from_pretrained) leaves weights randomly initialized.
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    # Reuse the pretrained tokenizer so the saved directory is self-contained.
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
Save a randomly initialized version of a model using a pretrained config. Args: config_name: which config to use save_dir: where to save the resulting model and tokenizer config_kwargs: Passed to AutoConfig Usage:: save_randomly_initialized_version("facebook/bart-large-cnn", "distilbart_random_cnn_6_3", encoder_layers=6, decoder_layers=3, num_beams=3)
github-repos
def rr_history(self, ips):
    """Get the domains related to the input ips via OpenDNS rr history.

    Args:
        ips: an enumerable of strings as ips.

    Returns:
        An enumerable of resource records and features.
    """
    api_name = 'opendns-rr_history'
    # Each ip is substituted into the dnsdb A-record endpoint.
    fmt_url_path = u'dnsdb/ip/a/{0}.json'
    return self._multi_get(api_name, fmt_url_path, ips)
Get the domains related to input ips. Args: ips: an enumerable of strings as ips Returns: An enumerable of resource records and features
codesearchnet
def set_webconfiguration_settings(name, settings, location=''):
    """Set IIS webconfiguration property values under the given PSPath.

    Args:
        name (str): The PSPath of the IIS webconfiguration settings.
        settings (list): Dicts with ``name``, ``filter`` and ``value`` keys.
        location (str): Optional location of the settings.

    Returns:
        bool: True if all settings were applied, otherwise False.

    Raises:
        CommandExecutionError: If the PowerShell invocation fails.
    """
    ps_cmd = []
    if (not settings):
        log.warning('No settings provided')
        return False
    settings = _prepare_settings(name, settings)
    # Normalize scalar values to text so they compare against current state.
    for (idx, setting) in enumerate(settings):
        if (setting['name'].split('.')[(- 1)] != 'Collection'):
            settings[idx]['value'] = six.text_type(setting['value'])
    current_settings = get_webconfiguration_settings(name=name, settings=settings, location=location)
    # Short-circuit when every requested value already matches.
    if (settings == current_settings):
        log.debug('Settings already contain the provided values.')
        return True
    for setting in settings:
        if (setting['name'].split('.')[(- 1)] != 'Collection'):
            # Numeric values are passed bare; everything else is quoted.
            try:
                complex(setting['value'])
                value = setting['value']
            except ValueError:
                value = "'{0}'".format(setting['value'])
        else:
            # Collections are encoded as PowerShell hashtables: @{k='v';...}
            configelement_list = []
            for value_item in setting['value']:
                configelement_construct = []
                for (key, value) in value_item.items():
                    configelement_construct.append("{0}='{1}'".format(key, value))
                configelement_list.append((('@{' + ';'.join(configelement_construct)) + '}'))
            value = ','.join(configelement_list)
        ps_cmd.extend(['Set-WebConfigurationProperty', '-PSPath', "'{0}'".format(name), '-Filter', "'{0}'".format(setting['filter']), '-Name', "'{0}'".format(setting['name']), '-Location', "'{0}'".format(location), '-Value', '{0};'.format(value)])
    cmd_ret = _srvmgr(ps_cmd)
    if (cmd_ret['retcode'] != 0):
        msg = 'Unable to set settings for {0}'.format(name)
        raise CommandExecutionError(msg)
    # Re-read the settings to verify each one actually took effect.
    new_settings = get_webconfiguration_settings(name=name, settings=settings, location=location)
    failed_settings = []
    for (idx, setting) in enumerate(settings):
        is_collection = (setting['name'].split('.')[(- 1)] == 'Collection')
        if (((not is_collection) and (six.text_type(setting['value']) != six.text_type(new_settings[idx]['value']))) or (is_collection and (list(map(dict, setting['value'])) != list(map(dict, new_settings[idx]['value']))))):
            failed_settings.append(setting)
    if failed_settings:
        log.error('Failed to change settings: %s', failed_settings)
        return False
    log.debug('Settings configured successfully: %s', settings)
    return True
r''' Set the value of the setting for an IIS container. Args: name (str): The PSPath of the IIS webconfiguration settings. settings (list): A list of dictionaries containing setting name, filter and value. location (str): The location of the settings (optional) Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.set_webconfiguration_settings name='IIS:\' settings="[{'name': 'enabled', 'filter': 'system.webServer/security/authentication/anonymousAuthentication', 'value': False}]"
codesearchnet
def consult_hook(self, item_session: ItemSession, verdict: bool, reason: str, test_info: dict):
    """Consult the scripting hook, letting a plugin override the verdict.

    Returns:
        tuple: (bool, str) -- possibly updated verdict and reason.
    """
    try:
        hook_reasons = {'filters': test_info['map'], 'reason': reason}
        new_verdict = self.hook_dispatcher.call(
            PluginFunctions.accept_url, item_session, verdict, hook_reasons)
        return (new_verdict, 'callback_hook')
    except HookDisconnected:
        # No hook attached: keep the original verdict and reason.
        return (verdict, reason)
Consult the scripting hook. Returns: tuple: (bool, str)
codesearchnet
def create_histogram(df):
    """Create a metricsgraphics histogram of the 'value' column.

    Args:
        df (pandas.DataFrame): Data to plot.
    """
    figure = Figure("/mg/histogram/", "mg_histogram")
    figure.layout.set_size(width=450, height=200)
    figure.layout.set_margin(left=40, right=40)
    figure.graphics.animate_on_load()
    return Histogram(df, figure, "value", 20, init_params={"Data": "Steps"})
create a mg line plot Args: df (pandas.DataFrame): data to plot
juraj-google-style
def _on_channel_close(self, channel, reply_code_or_reason, reply_text=None):
    """Callback invoked when the channel is closed.

    Handles both newer pika (exception argument) and older pika
    (integer AMQP code plus reply text).

    Args:
        channel (pika.channel.Channel): The channel that got closed.
        reply_code_or_reason (int|Exception): Close reason or AMQP code.
        reply_text (str): Human-readable reason (older pika only).
    """
    if isinstance(reply_code_or_reason, pika_errs.ChannelClosed):
        code = reply_code_or_reason.reply_code
        text = reply_code_or_reason.reply_text
    elif isinstance(reply_code_or_reason, int):
        code, text = reply_code_or_reason, reply_text
    else:
        code, text = 0, str(reply_code_or_reason)
    _log.info('Channel %r closed (%d): %s', channel, code, text)
    self._channel = None
Callback invoked when the channel is closed. Args: channel (pika.channel.Channel): The channel that got closed. reply_code_or_reason (int|Exception): The reason why the channel was closed. In older versions of pika, this is the AMQP code. reply_text (str): The human-readable reason for the channel's closure (only in older versions of pika).
codesearchnet
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Create a token-type-id mask for one or two sequences.

    Longformer does not use token type ids, so the result is all zeros,
    sized to the encoded sequence (with special tokens).

    Args:
        token_ids_0 (`List[int]`): List of IDs.
        token_ids_1 (`List[int]`, *optional*): Second list for pairs.

    Returns:
        `List[int]`: List of zeros.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return [0] * len(cls + token_ids_0 + sep)
    return [0] * len(cls + token_ids_0 + sep + sep + token_ids_1 + sep)
Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):
    """Sign an Ed25519 fulfillment for the given input.

    Args:
        input_ (Input): The input to be signed.
        message (str): The message to be signed.
        key_pairs (dict): Mapping of public key to private key.

    Returns:
        Input: A signed copy of the input.

    Raises:
        KeypairMismatchException: If the input's public key has no
            matching private key in ``key_pairs``.
    """
    # Work on a copy so the caller's input is left untouched.
    input_ = deepcopy(input_)
    public_key = input_.owners_before[0]
    message = sha3_256(message.encode())
    if input_.fulfills:
        # Bind the signature to the specific transaction output being spent.
        message.update('{}{}'.format(
            input_.fulfills.txid, input_.fulfills.output).encode())
    try:
        input_.fulfillment.sign(
            message.digest(),
            base58.b58decode(key_pairs[public_key].encode()))
    except KeyError:
        raise KeypairMismatchException('Public key {} is not a pair to '
                                       'any of the private keys'
                                       .format(public_key))
    return input_
Signs a Ed25519Fulfillment. Args: input_ (:class:`~bigchaindb.common.transaction. Input`) The input to be signed. message (str): The message to be signed key_pairs (dict): The keys to sign the Transaction with.
juraj-google-style
def plot_internal_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
    """Plot the vibrational internal energy over a temperature range.

    Args:
        tmin: Minimum temperature.
        tmax: Maximum temperature.
        ntemp: Number of temperature steps.
        ylim: Optional y-axis limits tuple.
        **kwargs: Passed to matplotlib ``plot``.

    Returns:
        matplotlib figure
    """
    temps = np.linspace(tmin, tmax, ntemp)
    # Per-mole-of-cell label when no structure is attached.
    if self.structure:
        ylabel = '$\\Delta E$ (kJ/mol)'
    else:
        ylabel = '$\\Delta E$ (kJ/mol-c)'
    return self._plot_thermo(self.dos.internal_energy, temps,
                             ylabel=ylabel, ylim=ylim, factor=0.001, **kwargs)
Plots the vibrational internal energy in a temperature range. Args: tmin: minimum temperature tmax: maximum temperature ntemp: number of steps ylim: tuple specifying the y-axis limits. kwargs: kwargs passed to the matplotlib function 'plot'. Returns: matplotlib figure
codesearchnet
def export(self, composite=False):
    """Export this name as a token importable via the constructor.

    Args:
        composite (bool): Whether to use a composite token
            -- :requires-ext:`rfc6680`

    Returns:
        bytes: The exported name in token form.
    """
    if not composite:
        return rname.export_name(self)
    if rname_rfc6680 is None:
        raise NotImplementedError('Your GSSAPI implementation does not support RFC 6680 (the GSSAPI naming extensions)')
    return rname_rfc6680.export_name_composite(self)
Export this name as a token. This method exports the name into a byte string which can then be imported by using the `token` argument of the constructor. Args: composite (bool): whether or not use to a composite token -- :requires-ext:`rfc6680` Returns: bytes: the exported name in token form Raises: MechanismNameRequiredError BadNameTypeError BadNameError
codesearchnet
async def send_script(self, conn_id, data):
    """Send a script to this IOTile device.

    Args:
        conn_id (int): Unique identifier referring to this connection.
        data (bytes): The script to send to the device.
    """
    self._ensure_connection(conn_id, True)
    connection_string = self._get_property(conn_id, 'connection_string')
    msg = {
        'connection_string': connection_string,
        'fragment_count': 1,
        'fragment_index': 0,
        'script': base64.b64encode(data),
    }
    await self._send_command(OPERATIONS.SEND_SCRIPT, msg, COMMANDS.SendScriptResponse)
Send a script to this IOTile device Args: conn_id (int): A unique identifier that will refer to this connection data (bytes): the script to send to the device
codesearchnet
def init(name, *args):
    """Instantiate a matcher by name, forwarding constructor arguments.

    Args:
        name (str): Matcher class name or alias.
        *args (mixed): Variadic constructor arguments.

    Returns:
        matcher: Matcher instance.

    Raises:
        ValueError: If no matcher with that name exists.
    """
    matcher_cls = get(name)
    if not matcher_cls:
        raise ValueError('Cannot find matcher: {}'.format(name))
    return matcher_cls(*args)
Initializes a matcher instance passing variadic arguments to its constructor. Acts as a delegator proxy. Arguments: name (str): matcher class name or alias to execute. *args (mixed): variadic argument Returns: matcher: matcher instance. Raises: ValueError: if matcher was not found.
juraj-google-style
def list_vmss(access_token, subscription_id, resource_group):
    """List VM scale sets in a resource group.

    Args:
        access_token (str): Valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.

    Returns:
        HTTP response with a JSON list of scale set model views.
    """
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/resourceGroups/' + resource_group
        + '/providers/Microsoft.Compute/virtualMachineScaleSets'
        + '?api-version=' + COMP_API
    )
    return do_get_next(endpoint, access_token)
List VM Scale Sets in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON body of a list of scale set model views.
codesearchnet
def __init__(self, dfk, max_threads=10):
    """Initialize the DataManager.

    Args:
        dfk (DataFlowKernel): Kernel this DataManager manages data for.
        max_threads (int): Number of threads. Default is 10.
    """
    self.dfk = dfk
    self.max_threads = max_threads
    self.label = 'data_manager'
    self.globus = None
    self.managed = True
    self._scaling_enabled = False
Initialize the DataManager. Args: - dfk (DataFlowKernel): The DataFlowKernel that this DataManager is managing data for. Kwargs: - max_threads (int): Number of threads. Default is 10. - executors (list of Executors): Executors for which data transfer will be managed.
juraj-google-style
def get_dimension(self, key, value, **kwargs):
    """Get a dimension by key and value.

    Args:
        key (string): Key of the dimension.
        value (string): Value of the dimension.

    Returns:
        dict: Response dictionary.
    """
    name = '{0}/{1}'.format(key, value)
    return self._get_object_by_name(self._DIMENSION_ENDPOINT_SUFFIX, name, **kwargs)
get a dimension by key and value Args: key (string): key of the dimension value (string): value of the dimension Returns: dictionary of response
juraj-google-style
def in_template_path(fn):
    """Return the path of `fn` inside the template directory.

    Saves callers from writing the absolute template-dir path by hand.

    Args:
        fn (str): Name of the file in the template dir.

    Returns:
        str: Absolute path to the file.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(here, "../templates", fn)
Return `fn` in template context, or in other words add `fn` to template path, so you don't need to write absolute path of `fn` in template directory manually. Args: fn (str): Name of the file in template dir. Return: str: Absolute path to the file.
juraj-google-style
def _expand_args(argv):
    """Return argv with @flagfile arguments recursively expanded.

    An argument starting with ``@`` names a file containing one argument
    per line; flagfiles may reference further flagfiles.

    Args:
        argv: Command line arguments.
    """
    expanded = []

    def _expand(arg):
        if not arg.startswith('@'):
            expanded.append(arg)
            return
        with open(arg[1:]) as flagfile:
            for line in flagfile.read().splitlines():
                _expand(line)

    for arg in argv:
        _expand(arg)
    return expanded
Returns argv with flagfiles expanded. A flagfile is an argument starting with "@". The remainder of the argument is interpreted as the path to a file containing a list of arguments, one per line. Flagfiles may contain references to other flagfiles. Args: argv: Command line arguments.
github-repos
def find_runner(program):
    """Return a command that will run program.

    Args:
        program: The string name of the program to try to run.

    Returns:
        list: Commandline strings to run the program (e.g. with
        ``subprocess.call()``), or None if no runner is needed or none
        could be determined.
    """
    # Fix: reconstructed the shebang check that was corrupted/unterminated
    # in the original ("startswith('" with no closing quote).
    if os.path.isfile(program) and not os.access(program, os.X_OK):
        try:
            opened = open(program)
        except PermissionError:
            return None
        first_line = opened.readline().strip()
        # A shebang line names the interpreter that can run the file.
        if first_line.startswith('#!'):
            return shlex.split(first_line[2:])
    if program.endswith('.py'):
        return [sys.executable]
    return None
Return a command that will run program. Args: program: The string name of the program to try to run. Returns: commandline list of strings to run the program (eg. with subprocess.call()) or None
juraj-google-style
def set_xlim(self, xlims, dx, xscale, reverse=False):
    """Set x-axis limits for this plot.

    Args:
        xlims (len-2 list of floats): The limits for the axis.
        dx (float): Amount to increment by between the limits.
        xscale (str): Scale of the axis, `log` or `lin`.
        reverse (bool, optional): Reverse the axis tick marks if True.
    """
    self._set_axis_limits('x', xlims, dx, xscale, reverse)
Set x limits for plot. This will set the limits for the x axis for the specific plot. Args: xlims (len-2 list of floats): The limits for the axis. dx (float): Amount to increment by between the limits. xscale (str): Scale of the axis. Either `log` or `lin`. reverse (bool, optional): If True, reverse the axis tick marks. Default is False.
codesearchnet
def Print(self, output_writer):
    """Print a human readable version of the filter.

    Args:
        output_writer (CLIOutputWriter): Output writer.
    """
    if not self._file_scanner:
        return
    identifiers = ', '.join(self._signature_identifiers)
    output_writer.Write('\tsignature identifiers: {0:s}\n'.format(identifiers))
Prints a human readable version of the filter. Args: output_writer (CLIOutputWriter): output writer.
juraj-google-style
def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, labels: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, bbox: tf.Tensor | None=None, pixel_values: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]]:
    """Run LayoutLMv3 sequence classification.

    Encodes text/layout/image inputs with the backbone, classifies from the
    first token's hidden state, and optionally computes a loss from
    ``labels``. Returns a ``TFSequenceClassifierOutput`` or, when
    ``return_dict`` is falsy, a plain tuple (loss prepended if computed).
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
    outputs = self.layoutlmv3(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values, training=training)
    # Classify from the first token of the sequence output.
    sequence_output = outputs[0][:, 0, :]
    logits = self.classifier(sequence_output, training=training)
    loss = None if labels is None else self.hf_compute_loss(labels, logits)
    if not return_dict:
        output = (logits,) + outputs[1:]
        return (loss,) + output if loss is not None else output
    return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
Returns: Examples: ```python >>> from transformers import AutoProcessor, TFAutoModelForSequenceClassification >>> from datasets import load_dataset >>> import tensorflow as tf >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = TFAutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True) >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf") >>> sequence_label = tf.convert_to_tensor([1]) >>> outputs = model(**encoding, labels=sequence_label) >>> loss = outputs.loss >>> logits = outputs.logits ```
github-repos
def get_instance(cls, device):
    """Return the cached AndroidUiautomationPoco for ``device``.

    Creates and stores a new instance on first use so a device is not
    re-initialized on every call.

    Args:
        device: ``airtest`` device instance.

    Returns:
        AndroidUiautomationPoco: The poco instance for the device.
    """
    instance = cls._nuis.get(device)
    if instance is None:
        instance = AndroidUiautomationPoco(device)
        cls._nuis[device] = instance
    return instance
This is only a slot to store and get already initialized poco instance rather than initializing again. You can simply pass the ``current device instance`` provided by ``airtest`` to get the AndroidUiautomationPoco instance. If no such AndroidUiautomationPoco instance, a new instance will be created and stored. Args: device (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc`` Returns: poco instance
codesearchnet
def _save_to_database(url, property_name, data):
    """Store `data` under `property_name` for the `url` key in the REST DB.

    Args:
        url (obj): URL of the resource to which `property_name` is stored.
        property_name (str): Name under which the data is stored.
        data (obj): Iterable of items; items with a ``to_dict`` method are
            serialized via that method.
    """
    # Serialize to JSON, preferring each item's own dict representation.
    data = json.dumps([
        d.to_dict() if hasattr(d, "to_dict") else d
        for d in data
    ])
    logger.debug("_save_to_database() data: %s" % repr(data))
    requests.post(
        _WEB_URL + _REQUEST_DB_SAVE,
        timeout=REQUEST_TIMEOUT,
        allow_redirects=True,
        verify=False,  # NOTE(review): TLS verification is disabled here -- confirm this is intended.
        data={
            "url": url,
            "value": data,
            "property_name": property_name,
        }
    )
    logger.info(
        "`%s` for `%s` sent to REST DB." % (
            property_name,
            url,
        )
    )
Store `data` under `property_name` in the `url` key in REST API DB. Args: url (obj): URL of the resource to which `property_name` will be stored. property_name (str): Name of the property under which the `data` will be stored. data (obj): Any object.
juraj-google-style
def dict_from_items_with_values(*dictionaries, **items):
    """Merge dicts and keyword items, pruning any `None` values.

    Args:
        *dictionaries (dict): Dictionaries of items to prune and include.
        **items: Items to be pruned and included.

    Returns:
        dict: All items with a non-None value; later sources win.
    """
    merged = {}
    for source in dictionaries + (items,):
        merged.update((k, v) for k, v in source.items() if v is not None)
    return merged
Creates a dict with the inputted items; pruning any that are `None`. Args: *dictionaries(dict): Dictionaries of items to be pruned and included. **items: Items to be pruned and included. Returns: dict: A dictionary containing all of the items with a 'non-None' value.
juraj-google-style
def eager_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int=0, mask_function: Callable=causal_mask_function, attention_mask: Optional[torch.Tensor]=None, dtype: torch.dtype=torch.float32, **kwargs) -> torch.Tensor:
    """Create a 4D float mask of shape `(batch_size, 1, query_length, kv_length)`.

    A value of 0 means the element takes part in attention; the dtype's
    minimum value (-inf-like) means it is masked out.

    Args:
        batch_size (`int`): Batch size of the input sequence.
        cache_position (`torch.Tensor`): Current indices of the inputs.
        kv_length (`int`): Size of key/value states during attention.
        kv_offset (`int`, optional): First position the kv states refer to.
        mask_function (`Callable`): Mask factory describing the pattern.
        attention_mask (`torch.Tensor`, optional): 2D padding mask.
        dtype (`torch.dtype`, optional): Output dtype (default float32).
    """
    # Eager attention can never skip masking, so drop any request to do so
    # and force the boolean mask path below.
    kwargs.pop('allow_is_causal_skip', None)
    bool_mask = sdpa_mask(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=False,
        allow_torch_fix=False,
        **kwargs,
    )
    min_dtype = torch.finfo(dtype).min
    zero = torch.tensor(0.0, device=bool_mask.device, dtype=dtype)
    return torch.where(bool_mask, zero, min_dtype)
Create a 4D float mask of shape `(batch_size, 1, query_length, kv_length)` where a value of 0 indicates that the element should take part in the attention computation, and -inf (minimum value for the given `dtype`) that it should not. Args: batch_size (`int`): The batch size of the input sequence. cache_position (`torch.Tensor`): A tensor of shape (query_length,) indicating the current indices of the input sequence elements. kv_length (`int`): The size that the key and value states will have during the attention computation. kv_offset (`int`, optional): An optional offset to indicate at which first position the key and values states will refer to. mask_function (`Callable`): The mask factory function describing the mask pattern. attention_mask (`torch.Tensor`, optional): The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length) dtype (`torch.dtype`, optional): The dtype to use for the mask. By default, `torch.float32`.
github-repos
def run(self, dag):
    """Expand every occurrence of the configured gate into its definition.

    Args:
        dag (DAGCircuit): Input dag.

    Returns:
        DAGCircuit: Output dag where the gate was expanded.
    """
    for node in dag.op_nodes(self.gate):
        # Gates without a definition (e.g. opaque gates) are left as-is.
        if not node.op.definition:
            continue
        rule = node.op.definition
        decomposition = DAGCircuit()
        # The first instruction's qargs/cargs carry the registers to copy.
        decomposition.add_qreg(rule[0][1][0][0])
        if rule[0][2]:
            decomposition.add_creg(rule[0][2][0][0])
        for inst in rule:
            decomposition.apply_operation_back(*inst)
        dag.substitute_node_with_dag(node, decomposition)
    return dag
Expand a given gate into its decomposition. Args: dag(DAGCircuit): input dag Returns: DAGCircuit: output dag where gate was expanded.
juraj-google-style
def inspect_service(self, service, insert_defaults=None):
    """Return server-side information about a service.

    Args:
        service (str): Service name or ID.
        insert_defaults (boolean): If true, merge default values into the
            inspect output (API >= 1.29 only).

    Returns:
        (dict): Server-side representation of the service.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    url = self._url('/services/{0}', service)
    params = {}
    if insert_defaults is not None:
        if utils.version_lt(self._version, '1.29'):
            raise errors.InvalidVersion(
                'insert_defaults is not supported in API version < 1.29'
            )
        params['insertDefaults'] = insert_defaults
    response = self._get(url, params=params)
    return self._result(response, True)
Return information about a service. Args: service (str): Service name or ID. insert_defaults (boolean): If true, default values will be merged into the service inspect output. Returns: (dict): A dictionary of the server-side representation of the service, including all relevant properties. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def data_group_type(self, group_data):
    """Return dict representation of group data, capturing any file content.

    File contents are moved out of the group data and into ``self._files``,
    keyed by the group's xid.

    Args:
        group_data (dict|obj): The group data dict or object.

    Returns:
        dict: The group data in dict format.
    """
    if isinstance(group_data, dict):
        # Already a dict: pull any inline file content into self._files.
        file_content = group_data.pop('fileContent', None)
        if (file_content is not None):
            self._files[group_data.get('xid')] = {'fileContent': file_content, 'type': group_data.get('type')}
    else:
        # Object form: only these group types carry file data.
        GROUPS_STRINGS_WITH_FILE_CONTENTS = ['Document', 'Report']
        if (group_data.data.get('type') in GROUPS_STRINGS_WITH_FILE_CONTENTS):
            self._files[group_data.data.get('xid')] = group_data.file_data
        group_data = group_data.data
    return group_data
Return dict representation of group data. Args: group_data (dict|obj): The group data dict or object. Returns: dict: The group data in dict format.
codesearchnet
def export_saved_model(sess, export_dir, tag_set, signatures):
    """Export a saved_model from ``sess`` using a simplified signature dict.

    Args:
        sess: A tf.Session instance.
        export_dir: Path to save the exported saved_model.
        tag_set: Comma-separated string tag_set identifying the graph.
        signatures: Dict mapping signature keys to
            ``{'inputs': {...}, 'outputs': {...}, 'method_name': ...}``
            with tensor aliases mapped to tensors.
    """
    import tensorflow as tf
    g = sess.graph
    # The graph may already be finalized; temporarily allow modification.
    g._unsafe_unfinalize()
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    logging.info('===== signatures: {}'.format(signatures))
    signature_def_map = {}
    # Build a full signature_def for each simplified signature entry;
    # method_name falls back to the signature key when not given.
    for (key, sig) in signatures.items():
        signature_def_map[key] = tf.saved_model.signature_def_utils.build_signature_def(inputs={name: tf.saved_model.utils.build_tensor_info(tensor) for (name, tensor) in sig['inputs'].items()}, outputs={name: tf.saved_model.utils.build_tensor_info(tensor) for (name, tensor) in sig['outputs'].items()}, method_name=(sig['method_name'] if ('method_name' in sig) else key))
    logging.info('===== signature_def_map: {}'.format(signature_def_map))
    builder.add_meta_graph_and_variables(sess, tag_set.split(','), signature_def_map=signature_def_map, clear_devices=True)
    # Re-finalize the graph before saving.
    g.finalize()
    builder.save()
Convenience function to export a saved_model using provided arguments The caller specifies the saved_model signatures in a simplified python dictionary form, as follows:: signatures = { 'signature_def_key': { 'inputs': { 'input_tensor_alias': input_tensor_name }, 'outputs': { 'output_tensor_alias': output_tensor_name }, 'method_name': 'method' } } And this function will generate the `signature_def_map` and export the saved_model. Args: :sess: a tf.Session instance :export_dir: path to save exported saved_model :tag_set: string tag_set to identify the exported graph :signatures: simplified dictionary representation of a TensorFlow signature_def_map Returns: A saved_model exported to disk at ``export_dir``.
codesearchnet
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the data encoding the Authentication struct to a stream.

    Args:
        output_stream (stream): Data stream supporting a ``write`` method.
        kmip_version (KMIPVersion): KMIP version to encode with.
            Optional, defaults to KMIP 1.0.

    Raises:
        ValueError: If no credentials are set.
    """
    buffer = utils.BytearrayStream()
    if not self._credentials:
        raise ValueError('Authentication struct missing credentials.')
    for credential in self._credentials:
        credential.write(buffer, kmip_version=kmip_version)
    self.length = buffer.length()
    super(Authentication, self).write(output_stream, kmip_version=kmip_version)
    output_stream.write(buffer.buffer)
Write the data encoding the Authentication struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
codesearchnet
def single_gate_params(gate, params=None):
    """Return U-gate parameters (theta, phi, lam) for a single-qubit gate.

    Args:
        gate (str): The single qubit gate name.
        params (list): The operation parameters ``op['params']``.

    Returns:
        tuple: U gate parameters (theta, phi, lam).

    Raises:
        QiskitError: If the gate name is not valid.
    """
    if gate in ('U', 'u3'):
        return (params[0], params[1], params[2])
    if gate == 'u2':
        return (np.pi / 2, params[0], params[1])
    if gate == 'u1':
        return (0, 0, params[0])
    if gate == 'id':
        return (0, 0, 0)
    raise QiskitError('Gate is not among the valid types: %s' % gate)
Apply a single qubit gate to the qubit. Args: gate(str): the single qubit gate name params(list): the operation parameters op['params'] Returns: tuple: a tuple of U gate parameters (theta, phi, lam) Raises: QiskitError: if the gate name is not valid
juraj-google-style
def split_folder_and_path(filepath):
    """Split a file path into its folder, filename, and extension.

    Args:
        filepath (str): Path to a file.

    Returns:
        tuple: (folder, filename without extension, extension).
    """
    folder = op.dirname(filepath)
    stem, extension = op.splitext(op.basename(filepath))
    return folder, stem, extension
Split a file path into its folder, filename, and extension Args: path (str): Path to a file Returns: tuple: of (folder, filename (without extension), extension)
juraj-google-style
def add(self, payload=None):
    """Add a new document to the data store and return its id.

    Args:
        payload (dict): Initial data stored in the new document's meta
            section; non-dict payloads are ignored.

    Raises:
        DataStoreNotConnected: If not connected to the server.

    Returns:
        str: The id of the newly created document.
    """
    meta = payload if isinstance(payload, dict) else {}
    try:
        collection = self._client[self.database][WORKFLOW_DATA_COLLECTION_NAME]
        result = collection.insert_one({
            DataStoreDocumentSection.Meta: meta,
            DataStoreDocumentSection.Data: {},
        })
        return str(result.inserted_id)
    except ConnectionFailure:
        raise DataStoreNotConnected()
Adds a new document to the data store and returns its id. Args: payload (dict): Dictionary of initial data that should be stored in the new document in the meta section. Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: str: The id of the newly created document.
codesearchnet
def NewPathSpec(cls, type_indicator, **kwargs):
    """Create a new path specification for the given type indicator.

    Args:
        type_indicator (str): Type indicator.
        kwargs (dict): Keyword arguments for the path specification.

    Returns:
        PathSpec: Path specification.

    Raises:
        KeyError: If the type indicator is not registered.
    """
    if type_indicator not in cls._path_spec_types:
        raise KeyError('Path specification type: {0:s} not set.'.format(type_indicator))
    # An explicit parent of None means "no parent"; drop it so the
    # type's own default applies.
    if 'parent' in kwargs and kwargs['parent'] is None:
        del kwargs['parent']
    return cls._path_spec_types[type_indicator](**kwargs)
Creates a new path specification for the specific type indicator. Args: type_indicator (str): type indicator. kwargs (dict): keyword arguments depending on the path specification. Returns: PathSpec: path specification. Raises: KeyError: if path specification is not registered.
codesearchnet
def get_typecast_value(self, value, type):
    """Cast a datafile string value to the feature variable's type.

    Args:
        value: Value in string form as parsed from the datafile.
        type: Feature flag variable type.

    Returns:
        The value cast according to ``type``; other types pass through.
    """
    if type == entities.Variable.Type.BOOLEAN:
        return value == 'true'
    if type == entities.Variable.Type.INTEGER:
        return int(value)
    if type == entities.Variable.Type.DOUBLE:
        return float(value)
    return value
Helper method to determine actual value based on type of feature variable. Args: value: Value in string form as it was parsed from datafile. type: Type denoting the feature flag type. Return: Value type-casted based on type of feature variable.
juraj-google-style
def get_trace(self, trace_id, project_id=None):
    """Get a single trace by its ID.

    Args:
        trace_id (str): ID of the trace to return.
        project_id (str): Cloud project where the trace data is stored;
            defaults to this client's project.

    Returns:
        A Trace dict.
    """
    effective_project = self.project if project_id is None else project_id
    return self.trace_api.get_trace(project_id=effective_project, trace_id=trace_id)
Gets a single trace by its ID. Args: trace_id (str): ID of the trace to return. project_id (str): Required. ID of the Cloud project where the trace data is stored. Returns: A Trace dict.
juraj-google-style
def set_external_captures(self, captures):
    """Update the function's captured input values.

    New values must have tensor types and shapes consistent with the
    original captures; a value capture may be swapped for a deferred
    (call-time) closure and vice versa.

    Args:
        captures: List of tensors or closures (call-time deferred values).
    """
    self._captured_inputs = captures
Updates the function capture values. The new values must have tensor types and shapes consistent with the original captures of the concrete function, but it is allowed to change a value captured with a deferred one and vice-versa. Args: captures: A list of tensors or closures. Tensors are value captures, and closures are call-time (deferred captures).
github-repos
def __truediv__(self, other):
    """Use `__floordiv__` via `x // y` instead.

    This method exists only to produce a clearer error message than the
    default `TypeError: unsupported operand type(s) for /`, explicitly
    directing callers to use `//`.

    Args:
        other: Another `Dimension`.

    Raises:
        TypeError: Always.
    """
    # Fix: the error-message string literal was truncated/unterminated;
    # reconstructed from the documented Dimension API message.
    raise TypeError("unsupported operand type(s) for /: 'Dimension' and "
                    "'{}', please use // instead".format(type(other).__name__))
Use `__floordiv__` via `x // y` instead. This function exists only to have a better error message. Instead of: `TypeError: unsupported operand type(s) for /: 'Dimension' and 'int'`, this function will explicitly call for usage of `//` instead. Args: other: Another `Dimension`. Raises: TypeError.
github-repos
def _collect_feature_info(self, candidate_feature_diffs):
    """Yield import info for each candidate feature file change.

    Args:
        candidate_feature_diffs (List[git.diff.Diff]): Diffs of admissible
            file changes compared to the comparison ref.

    Yields:
        Tuple: (importer, module name, module path), where ``importer`` is
        a callable that imports and returns the module.
    """
    project_root = self.project.path
    for diff in candidate_feature_diffs:
        path = diff.b_path
        modname = relpath_to_modname(path)
        modpath = project_root.joinpath(path)
        # Defer the actual import until the caller invokes the partial.
        importer = partial(import_module_at_path, modname, modpath)
        yield importer, modname, modpath
Collect feature info Args: candidate_feature_diffs (List[git.diff.Diff]): list of Diffs corresponding to admissible file changes compared to comparison ref Returns: List[Tuple]: list of tuple of importer, module name, and module path. The "importer" is a callable that returns a module
juraj-google-style
def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS):
    """Ensure each named flag is not None during program execution.

    Args:
        flag_names: Sequence[str], names of the flags.
        flag_values: flags.FlagValues instance where the flags are defined.

    Raises:
        AttributeError: If any flag has not already been defined.
    """
    for name in flag_names:
        mark_flag_as_required(name, flag_values)
Ensures that flags are not None during program execution. Recommended usage: if __name__ == '__main__': flags.mark_flags_as_required(['flag1', 'flag2', 'flag3']) app.run() Args: flag_names: Sequence[str], names of the flags. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined. Raises: AttributeError: If any of flag name has not already been defined as a flag.
juraj-google-style
def __init__(self, state=None):
    """Init the job instance representing the job held in ``state``.

    Do not call this directly; use class methods to construct instances.

    Args:
        state: model.MapreduceState for the job.
    """
    self._state = state
    # Derive the public job config from the low-level mapreduce spec.
    self.job_config = map_job_config.JobConfig._to_map_job_config(
        state.mapreduce_spec,
        queue_name=state.mapreduce_spec.params.get("queue_name"))
Init the job instance representing the job with id job_id. Do not directly call this method. Use class methods to construct new instances. Args: state: model.MapreduceState.
juraj-google-style
def measure_topology(script):
    """Compute a set of topological measures over a mesh.

    Args:
        script: The mlx.FilterScript object or script filename to write
            the filter to.
    """
    util.write_filter(script, ' <xmlfilter name="Compute Topological Measures"/>\n')
    if isinstance(script, mlx.FilterScript):
        # Remember that topology output will need parsing later.
        script.parse_topology = True
    return None
Compute a set of topological measures over a mesh Args: script: the mlx.FilterScript object or script filename to write the filter to. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
codesearchnet
def get_history_by_flight_number(self, flight_number, page=1, limit=100):
    """Fetch the history of a flight route by its flight number.

    Args:
        flight_number (str): The flight number, e.g. AI101.
        page (int): Optional page number (plan-dependent on flightradar24).
        limit (int): Optional cap on the number of records returned.

    Returns:
        A list of dicts, one per row of data from flightradar24.
    """
    return self._fr24.get_data(
        FLT_BASE.format(flight_number, str(self.AUTH_TOKEN), page, limit))
Fetch the history of a flight by its number. This method can be used to get the history of a flight route by the number. It checks the user authentication and returns the data accordingly. Args: flight_number (str): The flight number, e.g. AI101 page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_history_by_flight_number('AI101') f.get_history_by_flight_number('AI101',page=1,limit=10)
codesearchnet
def reload(self, napps=None):
    """Reload a NApp or all NApps.

    Args:
        napps (list): NApp list to be reloaded.

    Raises:
        requests.HTTPError: When there's a server error.
    """
    NAppsClient(self._config).reload_napps(napps)
Reload a NApp or all NApps. Args: napps (list): NApp list to be reloaded. Raises: requests.HTTPError: When there's a server error.
codesearchnet
def softplus(x):
    """Softplus activation function, ``softplus(x) = log(exp(x) + 1)``.

    Args:
        x: Input tensor.

    Returns:
        The softplus activation of ``x``.
    """
    return math_ops.softplus(x)
Softplus activation function, `softplus(x) = log(exp(x) + 1)`. Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.softplus(a) >>> b.numpy() array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00, 2.0000000e+01], dtype=float32) Args: x: Input tensor. Returns: The softplus activation: `log(exp(x) + 1)`.
github-repos
def extract(self, destination):
    """Extract the archive's contents to the specified directory.

    Args:
        destination (str): Path to an empty (not-yet-existing) directory
            to extract the files to.

    Raises:
        OSError: If ``destination`` already exists.
            NOTE(review): errno 20 is ENOTDIR; EEXIST (17) may have been
            intended -- confirm.
    """
    if os.path.exists(destination):
        raise OSError(20, 'Destination exists', destination)
    self.__extract_directory('.', self.files['files'], destination)
Extracts the contents of the archive to the specifed directory. Args: destination (str): Path to an empty directory to extract the files to.
codesearchnet
def recall_at_precision(y_true, y_pred, precision):
    """Compute recall at the first point reaching the desired precision.

    Args:
        y_true: True labels.
        y_pred: Predicted scores/labels.
        precision: Desired precision level at which to compute the recall.
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    # Fix: the curve's precision values previously shadowed the `precision`
    # argument, making the searchsorted target `precision - precision`
    # identically zero. Use distinct names for the curve arrays.
    prec_curve, recall_curve, _ = skm.precision_recall_curve(y_true, y_pred)
    idx = np.searchsorted(prec_curve - precision, 0)
    return recall_curve[idx]
Recall at a certain precision threshold Args: y_true: true labels y_pred: predicted labels precision: desired precision level at which to compute the recall
juraj-google-style
def parse_pair_args(labels, argclass):
    """Parse 'key' / 'key=value' strings into a set of argclass objects.

    For pair variables we split each input into name=value pairs (value
    optional) and instantiate the container class.

    Args:
        labels: List of 'key' or 'key=value' strings.
        argclass: Container class, instantiated as ``argclass(name, value)``.

    Returns:
        set: argclass objects for each parsed pair.
    """
    pairs = set()
    for label in labels:
        name, value = split_pair(label, '=', nullable_idx=1)
        pairs.add(argclass(name, value))
    return pairs
Parse flags of key=value pairs and return a set of argclass. For pair variables, we need to: * split the input into name=value pairs (value optional) * Create the EnvParam object Args: labels: list of 'key' or 'key=value' strings. argclass: Container class for args, must instantiate with argclass(k, v). Returns: set of argclass objects.
juraj-google-style
def end_day_to_datetime(end_day, config):
    """Convert an end day to the datetime marking the end of that workday.

    Because a workday may start after midnight (variable ``day_start``),
    the end of ``end_day`` can reach into the following calendar day.

    Args:
        end_day (datetime.date): Raw end date to be adjusted.
        config: Controller config with the workday start time.

    Returns:
        datetime.datetime: The end day as an adjusted datetime object.
    """
    day_end_time = get_day_end(config)
    end = datetime.datetime.combine(end_day, day_end_time)
    if config['day_start'] != datetime.time(0, 0, 0):
        # Workday spills over midnight: the day ends on the next date.
        end += datetime.timedelta(days=1)
    return end
Convert a given end day to its proper datetime. This is non trivial because of variable ``day_start``. We want to make sure that even if an 'end day' is specified the actual point in time may reach into the following day. Args: end (datetime.date): Raw end date that is to be adjusted. config: Controller config containing information on when a workday starts. Returns: datetime.datetime: The endday as a adjusted datetime object. Example: Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to consider even points in time up to ``2015-04-02 5:29``. That is to represent that a *work day* does not match *calendar days*. Note: An alternative implementation for the similar problem in legacy hamster: ``hamster.storage.db.Storage.__get_todays_facts``.
codesearchnet
def remove_section(self, name):
    """Remove a file section.

    Args:
        name: Name of the section.

    Returns:
        bool: Whether the section was actually removed.
    """
    if not self.has_section(name):
        return False
    del self._structure[self._get_section_idx(name)]
    return True
Remove a file section. Args: name: name of the section Returns: bool: whether the section was actually removed
juraj-google-style
def SetEnvironmentVariable(self, name, value):
    """Sets an environment variable in the Windows path helper.

    String values have the path prefix stripped first; a value that ends up
    as None is not stored.

    Args:
        name (str): name of the environment variable without enclosing
            %-characters, e.g. SystemRoot as in %SystemRoot%.
        value (str): value of the environment variable.
    """
    normalized = value
    if isinstance(normalized, py2to3.STRING_TYPES):
        normalized = self._PathStripPrefix(normalized)
    if normalized is None:
        return
    # Environment variable names are treated case-insensitively.
    self._environment_variables[name.upper()] = normalized
Sets an environment variable in the Windows path helper. Args: name (str): name of the environment variable without enclosing %-characters, e.g. SystemRoot as in %SystemRoot%. value (str): value of the environment variable.
codesearchnet
def download_and_uncompress(self, fileobj, dst_path):
    """Streams the content of 'fileobj' and extracts it under dst_path.

    Args:
        fileobj: file handle pointing to .tar/.tar.gz content.
        dst_path: absolute path where uncompressed data from 'fileobj' is
            stored.

    Raises:
        ValueError: unknown object type encountered inside the TAR file.
        IOError: the stream does not appear to be a valid TAR archive.
    """
    try:
        # "r|*" reads the (possibly compressed) archive as a forward-only
        # stream, so members must be processed in order.
        with tarfile.open(mode="r|*", fileobj=fileobj) as archive:
            for member in archive:
                target = _merge_relative_path(dst_path, member.name)
                if member.isfile():
                    self._extract_file(archive, member, target)
                elif member.isdir():
                    tf_v1.gfile.MakeDirs(target)
                else:
                    # Symlinks, devices etc. are rejected outright.
                    raise ValueError(
                        "Unexpected object type in tar archive: %s" % member.type)
        readable_total = tf_utils.bytes_to_readable_str(
            self._total_bytes_downloaded, True)
        self._print_download_progress_msg(
            "Downloaded %s, Total size: %s" % (self._url, readable_total),
            flush=True)
    except tarfile.ReadError:
        raise IOError("%s does not appear to be a valid module." % self._url)
Streams the content for the 'fileobj' and stores the result in dst_path. Args: fileobj: File handle pointing to .tar/.tar.gz content. dst_path: Absolute path where to store uncompressed data from 'fileobj'. Raises: ValueError: Unknown object encountered inside the TAR file.
juraj-google-style
def request_and_check(self, url, method='get', expected_content_type=None,
                      **kwargs):
    """Performs a request and validates status and content type.

    Args:
        url: URL to request.
        method: either 'get' or 'post'.
        expected_content_type: regex prefix matched against the response's
            content-type header; None skips the check.
        **kwargs: passed to the driver's request method directly.

    Returns:
        The response object.

    Raises:
        RuntimeError: if the status is not OK or the content type does not
            match expectations.
    """
    assert method in ['get', 'post']
    response = self.driver.request(method, url, **kwargs)
    if response.status_code != requests.codes.ok:
        raise RuntimeError('Error requesting %r, status = %d' %
                           (url, response.status_code))
    if expected_content_type is not None:
        actual_type = response.headers.get('content-type', '')
        if not re.match(expected_content_type, actual_type):
            raise RuntimeError(
                'Error requesting %r, content type %r does not match %r' %
                (url, actual_type, expected_content_type))
    return response
Performs a request, and checks that the status is OK, and that the content-type matches expectations. Args: url: URL to request method: either 'get' or 'post' expected_content_type: prefix to match response content-type against **kwargs: passed to the request method directly. Raises: RuntimeError if status_code does not match.
juraj-google-style
def parse_exception(line):
    """Parse the first line of a Cartouche exception description.

    Args:
        line (str): a single line Cartouche exception description.

    Returns:
        tuple: (exception type, first line of the description).

    Raises:
        CartoucheSyntaxError: if the line does not match the Raises syntax.
    """
    match = RAISES_REGEX.match(line)
    if match is None:
        message = 'Cartouche: Invalid argument syntax "{line}" for Raises block'.format(line=line)
        raise CartoucheSyntaxError(message)
    return match.group(2), match.group(1)
Parse the first line of a Cartouche exception description. Args: line (str): A single line Cartouche exception description. Returns: A 2-tuple containing the exception type and the first line of the description.
juraj-google-style
def from_json(cls, json):
    """Create a new MapreduceSpec from json encoded by to_json.

    Args:
        json: json representation of a MapreduceSpec.

    Returns:
        an instance of MapreduceSpec with all data deserialized from json.
    """
    return cls(json['name'],
               json['mapreduce_id'],
               json['mapper_spec'],
               json.get('params'),
               json.get('hooks_class_name'))
Create new MapreduceSpec from the json, encoded by to_json. Args: json: json representation of MapreduceSpec. Returns: an instance of MapreduceSpec with all data deserialized from json.
codesearchnet
def _init_volume_service(self, version):
    """Initialize the Volume Service from the config data.

    Args:
        version (string): version of the Boss API to use.

    Raises:
        KeyError: if a required volume config entry is missing.
    """
    cfg = self._load_config_section(CONFIG_VOLUME_SECTION)
    self._token_volume = cfg[CONFIG_TOKEN]
    service = VolumeService(cfg[CONFIG_HOST], version)
    service.base_protocol = cfg[CONFIG_PROTOCOL]
    service.set_auth(self._token_volume)
    self._volume = service
Method to initialize the Volume Service from the config data Args: version (string): Version of Boss API to use. Returns: None Raises: (KeyError): if given invalid version.
juraj-google-style
def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor initialized as specified by the initializer.

    Args:
        shape: shape of the tensor.
        dtype: optional dtype; only floating point and integer types are
            supported. When unset, the backend default float type is used.
        **kwargs: additional keyword arguments (may carry a partition shape
            that overrides `shape`).
    """
    _validate_kwargs(self.__class__.__name__, kwargs)
    resolved_dtype = _get_dtype(dtype)
    if not (resolved_dtype.is_floating or resolved_dtype.is_integer):
        raise ValueError('Expected float or integer dtype, got %s.' % resolved_dtype)
    # A partition shape in kwargs takes precedence over the requested shape.
    actual_shape = kwargs.get(_PARTITION_SHAPE, shape)
    return self._random_generator.random_uniform(
        actual_shape, self.minval, self.maxval, resolved_dtype)
Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point and integer types are supported. If not specified, `tf.keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `tf.keras.backend.set_floatx(float_dtype)`). **kwargs: Additional keyword arguments.
github-repos
def _CreateDictReader(self, line_reader):
    """Returns a reader that processes each row and yields dictionaries.

    csv.DictReader does this job well for single-character delimiters;
    parsers that need multi-character delimiters override this method.

    Args:
        line_reader (iter): yields lines from a file-like object.

    Returns:
        iter: a reader of dictionaries, as returned by csv.DictReader().
    """
    delimiter = self.DELIMITER
    quotechar = self.QUOTE_CHAR
    magic = self._MAGIC_TEST_STRING
    # On Python 3 the csv module expects text, not bytes.
    if py2to3.PY_3:
        delimiter, quotechar, magic = (
            delimiter.decode(self._encoding),
            quotechar.decode(self._encoding),
            magic.decode(self._encoding))
    return csv.DictReader(
        line_reader, delimiter=delimiter, fieldnames=self.COLUMNS,
        quotechar=quotechar, restkey=magic, restval=magic)
Returns a reader that processes each row and yields dictionaries. csv.DictReader does this job well for single-character delimiters; parsers that need multi-character delimiters need to override this method. Args: line_reader (iter): yields lines from a file-like object. Returns: iter: a reader of dictionaries, as returned by csv.DictReader().
codesearchnet
def run_numerical_analysis(table, schema_list, args):
  """Finds min/max/avg for the numerical columns and writes a json file.

  Does nothing when the schema contains no numerical (integer/float) columns.

  Args:
    table: Reference to FederatedTable (if args.bigquery_table is false) or a
        regular Table (otherwise).
    schema_list: Bigquery schema json object (list of column dicts).
    args: the command line args; uses args.bigquery_table and args.output_dir.
  """
  import google.datalab.bigquery as bq

  # Membership test instead of chained == comparisons; comprehension
  # instead of a manual append loop.
  numerical_columns = [
      col_schema['name'] for col_schema in schema_list
      if col_schema['type'].lower() in ('integer', 'float')]
  if not numerical_columns:
    return  # Nothing to analyze.

  sys.stdout.write('Running numerical analysis...')
  max_min = [
      ('max({name}) as max_{name}, '
       'min({name}) as min_{name}, '
       'avg({name}) as avg_{name} ').format(name=name)
      for name in numerical_columns]
  if args.bigquery_table:
    sql = 'SELECT %s from `%s`' % (', '.join(max_min),
                                   parse_table_name(args.bigquery_table))
    numerical_results = bq.Query(sql).execute().result().to_dataframe()
  else:
    sql = 'SELECT %s from csv_table' % ', '.join(max_min)
    query = bq.Query(sql, data_sources={'csv_table': table})
    numerical_results = query.execute().result().to_dataframe()

  # The single result row carries every aggregate value.
  results_dict = {
      name: {'max': numerical_results.iloc[0]['max_%s' % name],
             'min': numerical_results.iloc[0]['min_%s' % name],
             'mean': numerical_results.iloc[0]['avg_%s' % name]}
      for name in numerical_columns}
  file_io.write_string_to_file(
      os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
      json.dumps(results_dict, indent=2, separators=(',', ': ')))
  sys.stdout.write('done.\n')
Find min/max values for the numerical columns and writes a json file. Args: table: Reference to FederatedTable (if bigquery_table is false) or a regular Table (otherwise) schema_list: Bigquery schema json object args: the command line args
juraj-google-style
def save_hdf5(X, y, path):
    """Save data as a HDF5 file.

    Sparse matrices are converted to CSR and stored as their component
    arrays (shape/data/indices/indptr); dense matrices are stored directly.

    Args:
        X (numpy or scipy sparse matrix): data matrix.
        y (numpy array): target vector.
        path (str): path to the HDF5 file to save data.
    """
    with h5py.File(path, 'w') as f:
        sparse_input = sparse.issparse(X)
        f['issparse'] = 1 if sparse_input else 0
        f['target'] = y
        if sparse_input:
            matrix = X if sparse.isspmatrix_csr(X) else X.tocsr()
            f['shape'] = np.array(matrix.shape)
            f['data'] = matrix.data
            f['indices'] = matrix.indices
            f['indptr'] = matrix.indptr
        else:
            f['data'] = X
Save data as a HDF5 file. Args: X (numpy or scipy sparse matrix): Data matrix y (numpy array): Target vector. path (str): Path to the HDF5 file to save data.
codesearchnet
def create_from(cls, has_display_data):
    """Creates DisplayData from a HasDisplayData instance.

    Args:
        has_display_data: the HasDisplayData instance to read from.

    Returns:
        A DisplayData instance with populated items.

    Raises:
        ValueError: if the argument is not an instance of HasDisplayData.
    """
    if not isinstance(has_display_data, HasDisplayData):
        raise ValueError('Element of class {}.{} does not subclass HasDisplayData'.format(has_display_data.__module__, has_display_data.__class__.__name__))
    namespace = has_display_data._get_display_data_namespace()
    items = has_display_data.display_data()
    return cls(namespace, items)
Creates :class:`~apache_beam.transforms.display.DisplayData` from a :class:`HasDisplayData` instance. Returns: ~apache_beam.transforms.display.DisplayData: A :class:`~apache_beam.transforms.display.DisplayData` instance with populated items. Raises: ValueError: If the **has_display_data** argument is not an instance of :class:`HasDisplayData`.
github-repos
def require_config(config_model):
    """View decorator that enables/disables a view based on configuration.

    Arguments:
        config_model (ConfigurationModel subclass): the class of the
            configuration model to check.

    Returns:
        HttpResponse: 404 if the configuration model is disabled, otherwise
        the response from the decorated view.
    """
    def decorator(view_func):
        @wraps(view_func)
        def guarded(*args, **kwargs):
            if config_model.current().enabled:
                return view_func(*args, **kwargs)
            return HttpResponseNotFound()
        return guarded
    return decorator
View decorator that enables/disables a view based on configuration. Arguments: config_model (ConfigurationModel subclass): The class of the configuration model to check. Returns: HttpResponse: 404 if the configuration model is disabled, otherwise returns the response from the decorated view.
juraj-google-style
def filter_curated_references(root, head, update):
    """Remove references from either ``head`` or ``update`` based on curation.

    If references have been curated, all references are removed from the
    update to keep the existing ones. Otherwise, all references are removed
    from the head to force replacement with the update ones. Root references
    are removed in either case.

    Args:
        root (pmap): the root record.
        head (pmap): the head record.
        update (pmap): the update record.

    Returns:
        tuple: ``(root, head, update)`` with ``references`` removed from
        ``root`` and either ``head`` or ``update``.
    """
    # Only act when both sides actually carry references.
    if 'references' not in head or 'references' not in update:
        return root, head, update
    curated = are_references_curated(root.get('references', []),
                                     head.get('references', []))
    if 'references' in root:
        root = root.remove('references')
    if curated:
        update = update.remove('references')
    else:
        head = head.remove('references')
    return root, head, update
Remove references from either ``head`` or ``update`` depending on curation. If references have been curated, then it removes all references from the update to keep the existing ones. Otherwise, it removes all references from the head to force replacement with the update ones. Args: root (pmap): the root record. head (pmap): the head record. update (pmap): the update record. Returns: tuple: ``(root, head, update)`` with ``references`` removed from ``root`` and either ``head`` or ``update``.
codesearchnet
def tokenize(self, text: str, customize=True, disable=None) -> List[Token]:
    """Tokenize the given text, returning a list of tokens.

    Each token is of type spacy.tokens.Token (possibly wrapped by
    self.custom_token when `customize` is True).

    Args:
        text (str): the text to tokenize.
        customize: when True, pass each token through self.custom_token.
        disable: optional list of spaCy pipeline components to disable;
            defaults to no disabled components.

    Returns:
        List[Token]: the tokens.
    """
    # Fix: avoid a mutable default argument ([]); use a None sentinel.
    if disable is None:
        disable = []
    if not self.keep_multi_space:
        # Collapse runs of spaces into a single space.
        text = re.sub(' +', ' ', text)
    tokens = self.nlp(text, disable=disable)
    if customize:
        tokens = [self.custom_token(a_token) for a_token in tokens]
    return tokens
Tokenize the given text, returning a list of tokens. Type token: class spacy.tokens.Token Args: text (string): Returns: [tokens]
juraj-google-style