code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def recommendations(self, **kwargs):
    """Get a list of recommended movies for a movie.

    Args:
        language: (optional) ISO 639-1 code.
        page: (optional) Minimum value of 1. Expected value is an integer.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_id_path('recommendations')
    payload = self._GET(endpoint, kwargs)
    # Mirror the API response onto this object's attributes as well.
    self._set_attrs_to_values(payload)
    return payload
Get a list of recommended movies for a movie. Args: language: (optional) ISO 639-1 code. page: (optional) Minimum value of 1. Expected value is an integer. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def calculate_stress(self, strain):
    """Calculate's a given elastic tensor's contribution to the stress
    using Einstein summation.

    Args:
        strain (3x3 array-like): matrix corresponding to strain
    """
    strain_tensor = np.array(strain)
    # Voigt-notation (length-6) strains are expanded to full 3x3 form.
    if strain_tensor.shape == (6,):
        strain_tensor = Strain.from_voigt(strain_tensor)
    assert strain_tensor.shape == (3, 3), 'Strain must be 3x3 or voigt-notation'
    repeats = self.order - 1
    stress_matrix = self.einsum_sequence([strain_tensor] * repeats) / factorial(repeats)
    return Stress(stress_matrix)
Calculate's a given elastic tensor's contribution to the stress using Einstein summation Args: strain (3x3 array-like): matrix corresponding to strain
codesearchnet
def _get_music_services_data_xml(soco=None):
    """Fetch the music services data xml from a Sonos device.

    Args:
        soco (SoCo): a SoCo instance to query. If none is specified, a
            random device will be used. Defaults to `None`.

    Returns:
        str: a string containing the music services data xml
    """
    target = soco or discovery.any_soco()
    log.debug('Fetching music services data from %s', target)
    services = target.musicServices.ListAvailableServices()
    descriptors_xml = services['AvailableServiceDescriptorList']
    log.debug('Services descriptor list: %s', descriptors_xml)
    return descriptors_xml
Fetch the music services data xml from a Sonos device. Args: soco (SoCo): a SoCo instance to query. If none is specified, a random device will be used. Defaults to `None`. Returns: str: a string containing the music services data xml
codesearchnet
def update_defaults(self, new_defaults, respect_none=False):
    """Update items' defaults to the values in the new_defaults dict.

    Args:
        new_defaults (dict): A key-value pair of new defaults to be applied.
        respect_none (bool): Flag to indicate if ``None`` values should
            constitute an update to the default.

    Raises:
        YapconfItemNotFound: If a key has no corresponding config item.
    """
    for key, value in six.iteritems(new_defaults):
        item = self.get_item(key)
        if item is not None:
            item.update_default(value, respect_none)
            continue
        raise YapconfItemNotFound(
            'Cannot update default for {0}, there is no config '
            'item by the name of {1}'.format(key, key), None)
Update items defaults to the values in the new_defaults dict. Args: new_defaults (dict): A key-value pair of new defaults to be applied. respect_none (bool): Flag to indicate if ``None`` values should constitute an update to the default.
codesearchnet
def _convert_to_wakat_format(seeder_struct):
    """Convert Seeder's structure to the internal structure used at frontend.

    Args:
        seeder_struct (dict): Dictionary with Seeder data.

    Returns:
        obj: :class:`Model` mapping, or ``None`` when no usable seed exists.
    """
    def pick_active(seeder_struct, what):
        """
        From the list of dicts, choose only first of such, that contains
        ``"active": True`` item.

        If not found, just pick the first.

        Args:
            seeder_struct (dict): Dict with bunch of data.
            what (str): What key to use in `seeder_struct` to identify the
                list of dicts.

        Returns:
            dict: Active or first dict.
        """
        items = seeder_struct.get(what)
        if (not items):
            return None
        # Normalize a bare dict/other scalar to a one-element list.
        if (not (isinstance(items, list) or isinstance(items, tuple))):
            items = [items]
        active_items = [item for item in items if item.get('active')]
        if (not active_items):
            return items[0]
        return active_items[0]
    if (not seeder_struct):
        return None
    active_seed = pick_active(seeder_struct, 'seeds')
    publisher_contact = pick_active(seeder_struct.get('publisher', {}), 'contacts')
    # Some Seeder payloads use the singular 'seed' key instead of 'seeds'.
    if (not active_seed):
        active_seed = pick_active(seeder_struct, 'seed')
    if (not active_seed):
        return None
    model = Model()
    model.url = active_seed['url']
    model.issn = seeder_struct.get('issn')
    model.title_tags = seeder_struct.get('name')
    model.publisher_tags = seeder_struct.get('publisher', {}).get('name')
    model.annotation_tags = seeder_struct.get('comment')
    if publisher_contact:
        model.place_tags = publisher_contact.get('address')
    # Harvesting rules: only set keys whose values are actually present.
    rules = {}
    rules['frequency'] = str(seeder_struct.get('frequency'))
    _add_if_set(rules, 'budget', active_seed.get('budget'))
    _add_if_set(rules, 'youtube', active_seed.get('youtube'))
    _add_if_set(rules, 'calendars', active_seed.get('calendars'))
    _add_if_set(rules, 'javascript', active_seed.get('javascript'))
    _add_if_set(rules, 'local_traps', active_seed.get('local_traps'))
    _add_if_set(rules, 'gentle_fetch', active_seed.get('gentle_fetch'))
    _add_if_set(rules, 'global_reject', active_seed.get('global_reject'))
    model.rules = rules
    # Wrap every non-empty *_tags attribute with its provenance marker.
    for key in model.keys():
        val = getattr(model, key)
        if (val and ('tags' in key)):
            setattr(model, key, [{'val': val, 'source': 'Seeder'}])
    return model.get_mapping()
Convert Seeder's structure to the internal structure used at frontend. Args: seeder_struct (dict): Dictionary with Seeder data. Returns: obj: :class:`Model`.
codesearchnet
def mutate_list(self, dna_list: List[pg.DNA], global_state: pg.geno.AttributeDict, step: int=0) -> List[pg.DNA]:
    """Mutate the DNA in the input one by one and concatenate their outputs.

    User should override this method instead of `mutate` if mutation depends
    on the list-wise information. Keyword arguments `global_state` and `step`
    are optional when override.

    Args:
        dna_list: a list of DNA to mutate.
        global_state: An `AttributeDict` object as the container of global
            states.
        step: Number of examples historically proposed, which can be used for
            determining a mutation schedule.

    Returns:
        a list of DNA as the result of the mutation.
    """
    def _as_list(output):
        # A single mutation may fan out into several DNA; normalize to a list.
        return output if isinstance(output, list) else [output]

    mutated = []
    for dna in dna_list:
        mutated += _as_list(self._mutate(dna, global_state=global_state, step=step))
    return mutated
Mutate the DNA in the input one by one and concatenate their outputs. User should override this method instead of `mutate` if mutation depends on the list-wise information. Keyword arguments `global_state` and `step` are optional when override. Args: dna_list: a list of DNA to mutate. global_state: An `AttributeDict` object as the container of global states. step: Number of examples historically proposed, which can be used for determining a mutation schedule. Returns: a list of DNA as the result of the mutation.
github-repos
def _modeIsValid(self, mode): try: return mode in self.modes.keys() except AttributeError as e: if mode in self.isValidMode.keys(): if mode in self.isValidMode.keys(): return True return False
Verification of whether the mode is a correct option to be used. Args: ----- mode: Mode to be executed. Return: ------- True if the mode exists in the three main folders.
juraj-google-style
class NougatProcessor(ProcessorMixin):
    """Constructs a Nougat processor which wraps a Nougat image processor and a
    Nougat tokenizer into a single processor.

    [`NougatProcessor`] offers all the functionalities of
    [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the
    [`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more
    information.

    Args:
        image_processor ([`NougatImageProcessor`]):
            An instance of [`NougatImageProcessor`]. The image processor is a
            required input.
        tokenizer ([`NougatTokenizerFast`]):
            An instance of [`NougatTokenizerFast`]. The tokenizer is a
            required input.
    """
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # Kept for backward compatibility with code that reads
        # `current_processor` directly.
        self.current_processor = self.image_processor

    def __call__(self, images=None, text=None, do_crop_margin: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: 'PILImageResampling'=None, do_thumbnail: Optional[bool]=None, do_align_long_axis: Optional[bool]=None, do_pad: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, data_format: Optional['ChannelDimension']='channels_first', input_data_format: Optional[Union[str, 'ChannelDimension']]=None, text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True):
        """Prepare images and/or text for the model.

        Image-processing kwargs are forwarded to the image processor,
        tokenization kwargs to the tokenizer. When both `images` and `text`
        are given, the tokenized `input_ids` are attached to the image
        features under the 'labels' key (the training target for Nougat).
        """
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images, do_crop_margin=do_crop_margin, do_resize=do_resize, size=size, resample=resample, do_thumbnail=do_thumbnail, do_align_long_axis=do_align_long_axis, do_pad=do_pad, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, return_tensors=return_tensors, data_format=data_format, input_data_format=input_data_format)
        if text is not None:
            encodings = self.tokenizer(text, text_pair=text_pair, text_target=text_target, text_pair_target=text_pair_target, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Both modalities: image features plus tokenized text as labels.
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    def post_process_generation(self, *args, **kwargs):
        """Forward to the tokenizer's `post_process_generation`."""
        return self.tokenizer.post_process_generation(*args, **kwargs)
Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor. [`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the [`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information. Args: image_processor ([`NougatImageProcessor`]): An instance of [`NougatImageProcessor`]. The image processor is a required input. tokenizer ([`NougatTokenizerFast`]): An instance of [`NougatTokenizerFast`]. The tokenizer is a required input.
github-repos
def sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor, num_masks: int) -> torch.Tensor:
    """Compute the sigmoid (binary) cross-entropy loss, averaged per mask.

    Args:
        inputs (`torch.Tensor`):
            A float tensor of arbitrary shape (logits).
        labels (`torch.Tensor`):
            A tensor with the same shape as inputs. Stores the binary
            classification labels for each element in inputs (0 for the
            negative class and 1 for the positive class).
        num_masks (`int`):
            Number of masks to normalize by.

    Returns:
        loss (`torch.Tensor`): The computed loss.
    """
    # Per-element BCE-with-logits, then mean over dim 1 and sum over masks.
    per_element = nn.functional.binary_cross_entropy_with_logits(inputs, labels, reduction='none')
    return per_element.mean(1).sum() / num_masks
Args: inputs (`torch.Tensor`): A float tensor of arbitrary shape. labels (`torch.Tensor`): A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs (0 for the negative class and 1 for the positive class). Returns: loss (`torch.Tensor`): The computed loss.
github-repos
def stage_tc_indicator_entity(self, indicator_data):
    """Convert JSON indicator data to a TCEntity.

    Args:
        indicator_data (str): The indicator JSON data to project.

    Returns:
        The result of applying the JMESPath projection via ``path_data``.
    """
    # JMESPath projection mapping indicator fields to TCEntity fields;
    # confidence/rating default to `0` when absent.
    jmespath_expr = (
        '@.{value: summary, '
        'type: type, '
        'ownerName: ownerName, '
        'confidence: confidence || `0`, '
        'rating: rating || `0`}'
    )
    return self.path_data(indicator_data, jmespath_expr)
Convert JSON data to TCEntity. Args: indicator_data (str): [description] Returns: [type]: [description]
juraj-google-style
def _transform_col(self, x, i):
    """Encode one categorical column into average target values.

    Args:
        x (pandas.Series): a categorical column to encode
        i (int): column index

    Returns:
        pandas.Series: the column mapped to per-category mean targets;
        unseen/missing categories fall back to the global target mean.
    """
    encoded = x.fillna(NAN_INT).map(self.target_encoders[i])
    return encoded.fillna(self.target_mean)
Encode one categorical column into average target values. Args: x (pandas.Series): a categorical column to encode i (int): column index Returns: x (pandas.Series): the column encoded with average target values.
juraj-google-style
def allows_latest(self, version_key_name):
    """Does this version key allow 'latest' as an option (e.g. "latest AMI"
    makes sense and is allowed).

    Args:
        version_key_name: the version key to check for "allow_latest"

    Returns:
        True if the version key allows latest, False if it does not

    Raises:
        RuntimeError: if the key was not found, or has no "allow_latest"
            value.
    """
    # `dict.has_key` was removed in Python 3 — use the `in` operator.
    # Call version_keys() once instead of four times.
    keys = self.version_keys()
    if version_key_name not in keys:
        raise RuntimeError("service registry doesn't have a version key entry for: {}".format(version_key_name))
    entry = keys[version_key_name]
    if "allow_latest" not in entry:
        raise RuntimeError("service registry key {} doesn't have an 'allow_latest' value".format(
            version_key_name))
    return entry["allow_latest"]
Does this version key allow 'latest' as an option (e.g. "latest AMI" makes sense and is allowed) Args: version_key_name: the version key to check for "allow_latest" Returns: True if the version key allows latest, False if it does not Raises: RuntimeError if the key was not found
juraj-google-style
def bounds(self, thr=0, lower_index=0, upper_index=(- 1)):
    """Computes the bounds of the segment, or part of it.

    Args:
        thr (float, optional): Margin subtracted from the minima and added
            to the maxima. Defaults to 0
        lower_index (int, optional): Start index. Defaults to 0
        upper_index (int, optional): End index of the slice. Defaults to -1.
            NOTE(review): the default slice ``[0:-1]`` excludes the final
            point of the segment — confirm this is intended.

    Returns:
        :obj:`tuple` of :obj:`float`: Bounds of the (sub)segment, such that
        (min_lat, min_lon, max_lat, max_lon)
    """
    selection = self.points[lower_index:upper_index]
    # Fold with +/- infinity so an empty selection yields (inf, inf, -inf, -inf)
    # exactly as before.
    min_lat = min_lon = float('inf')
    max_lat = max_lon = -float('inf')
    for pt in selection:
        if pt.lat < min_lat:
            min_lat = pt.lat
        if pt.lon < min_lon:
            min_lon = pt.lon
        if pt.lat > max_lat:
            max_lat = pt.lat
        if pt.lon > max_lon:
            max_lon = pt.lon
    return (min_lat - thr, min_lon - thr, max_lat + thr, max_lon + thr)
Computes the bounds of the segment, or part of it Args: thr (float, optional): Margin applied to each bound. Defaults to 0 lower_index (int, optional): Start index. Defaults to 0 upper_index (int, optional): End index. Defaults to -1 Returns: :obj:`tuple` of :obj:`float`: Bounds of the (sub)segment, such that (min_lat, min_lon, max_lat, max_lon)
codesearchnet
def create_indexes(names, settings=None):
    """Create Elasticsearch indexes.

    Args:
        names (list): A list of index names
        settings (dict): Index settings; when ``None`` a single-shard,
            single-replica configuration is applied.

    Raises:
        ElasticsearchError: if index creation fails.
    """
    for index_name in names:
        index = Index(index_name)
        try:
            if index.exists():
                continue
            logger.debug("Creating Elasticsearch index: {0}".format(index_name))
            if settings is not None:
                index.settings(**settings)
            else:
                index.settings(number_of_shards=1, number_of_replicas=1)
            index.create()
        except Exception as e:
            raise ElasticsearchError(
                "Elasticsearch error: {0}".format(e.__str__()))
Create Elasticsearch indexes Args: names (list): A list of index names settings (dict): Index settings
juraj-google-style
def install_json_output_variables(self, ij=None):
    """Return install.json output variables in a dict with name param as key.

    Args:
        ij (dict, optional): Defaults to None. The install.json contents.

    Returns:
        dict: install.json output variables keyed by variable name.
    """
    # Rebuild the cache when empty, or when explicit contents are supplied.
    needs_rebuild = self._install_json_output_variables is None or ij is not None
    if needs_rebuild:
        contents = ij if ij is not None else self.install_json
        grouped = {}
        for variable in contents.get('playbook', {}).get('outputVariables') or []:
            grouped.setdefault(variable.get('name'), []).append(variable)
        self._install_json_output_variables = grouped
    return self._install_json_output_variables
Return install.json output variables in a dict with name param as key. Args: ij (dict, optional): Defaults to None. The install.json contents. Returns: dict: A dictionary containing the install.json output variables with name as key.
juraj-google-style
def save_image(byteio, imgfmt):
    """Saves the specified image to disk.

    Args:
        byteio (bytes): image bytes to save to disk.
        imgfmt (str): used as the extension of the saved file.

    Returns:
        str: a uuid for the saved image that can be added to the database
        entry.
    """
    from os import path, mkdir
    # NOTE(review): `project`, `task`, `dbdir` and `uuid4` are free names
    # resolved at module level — confirm they are defined before calling.
    image_dir = path.join(dbdir, "{}.{}".format(project, task))
    if not path.isdir(image_dir):
        mkdir(image_dir)
    image_id = str(uuid4())
    destination = path.join(image_dir, "{}.{}".format(image_id, imgfmt))
    with open(destination, 'wb') as handle:
        handle.write(byteio)
    return image_id
Saves the specified image to disk. Args: byteio (bytes): image bytes to save to disk. imgfmt (str): used as the extension of the saved file. Returns: str: a uuid for the saved image that can be added to the database entry.
juraj-google-style
def mean_squared_error(true, pred):
    """L2 distance between tensors true and pred.

    Args:
        true: the ground truth image.
        pred: the predicted image.

    Returns:
        mean squared error between ground truth and predicted image.
    """
    squared_sum = tf.reduce_sum(tf.squared_difference(true, pred))
    element_count = tf.to_float(tf.size(pred))
    return squared_sum / element_count
L2 distance between tensors true and pred. Args: true: the ground truth image. pred: the predicted image. Returns: mean squared error between ground truth and predicted image.
juraj-google-style
def check_oversized_pickle(pickled, name, obj_type, worker):
    """Send a warning message if the pickled object is too large.

    Args:
        pickled: the pickled object.
        name: name of the pickled object.
        obj_type: type of the pickled object, can be 'function',
            'remote function', 'actor', or 'object'.
        worker: the worker used to send warning message.
    """
    if len(pickled) <= ray_constants.PICKLE_OBJECT_WARNING_SIZE:
        return
    message = (
        'Warning: The {} {} has size {} when pickled. It will be stored in '
        'Redis, which could cause memory issues. This may mean that its '
        'definition uses a large array or other object.'
    ).format(obj_type, name, len(pickled))
    push_error_to_driver(worker, ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, message, driver_id=worker.task_driver_id)
Send a warning message if the pickled object is too large. Args: pickled: the pickled object. name: name of the pickled object. obj_type: type of the pickled object, can be 'function', 'remote function', 'actor', or 'object'. worker: the worker used to send warning message.
codesearchnet
def _detect(self):
    """Detect incorrect erc20 interface.

    Returns:
        list: JSON results, one entry per contract that declares ERC20
        functions with an incorrect interface.
    """
    results = []
    for contract in self.contracts:
        bad_functions = IncorrectERC20InterfaceDetection.detect_incorrect_erc20_interface(contract)
        if not bad_functions:
            continue
        info = '{} ({}) has incorrect ERC20 function interface(s):\n'.format(contract.name, contract.source_mapping_str)
        for function in bad_functions:
            info += '\t-{} ({})\n'.format(function.name, function.source_mapping_str)
        json = self.generate_json_result(info)
        self.add_functions_to_json(bad_functions, json)
        results.append(json)
    return results
Detect incorrect erc20 interface Returns: dict: [contrat name] = set(str) events
codesearchnet
def parse_args(test: ArgList=None) -> argparse.Namespace:
    """Parses commandline arguments.

    Args:
        test (typing.Optional[typing.List[str]], optional): Commandline args
            for testing. Defaults to None.

    Returns:
        argparse.Namespace: Parsed data of args.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('source_data', help='File path of the source training data to extract features.')
    parser.add_argument('-o', '--outfile', default=DEFAULT_OUTPUT_FILENAME, help='Output file path for the encoded training data.\n (default: encoded_data.txt)')
    parser.add_argument('--processes', type=int, default=None, help='Number of processes to use.\n (default: the number of CPUs in the system)')
    parser.add_argument('--scale', type=int, default=1, help='Weight scale for the entries. The value should be a unsigned\n integer. (default: 1)')
    # When `test` is given, parse it instead of sys.argv.
    return parser.parse_args() if test is None else parser.parse_args(test)
Parses commandline arguments. Args: test (typing.Optional[typing.List[str]], optional): Commandline args for testing. Defaults to None. Returns: argparse.Namespace: Parsed data of args.
github-repos
def set_np_doc_form(value):
    """Selects the form of the original numpy docstrings.

    This function sets a global variable that controls how a tf-numpy
    symbol's docstring should refer to the original numpy docstring. If
    `value` is `'inlined'`, the numpy docstring will be verbatim copied into
    the tf-numpy docstring. Otherwise, a link to the original numpy
    docstring will be added. Which numpy version the link points to depends
    on `value`:

    * `'stable'`: the current stable version;
    * `'dev'`: the current development version;
    * pattern `\\d+(\\.\\d+(\\.\\d+)?)?`: `value` will be treated as a version
      number, e.g. '1.16'.

    Args:
        value: the value to set the global variable to.
    """
    global _np_doc_form
    _np_doc_form = value
Selects the form of the original numpy docstrings. This function sets a global variable that controls how a tf-numpy symbol's docstring should refer to the original numpy docstring. If `value` is `'inlined'`, the numpy docstring will be verbatim copied into the tf-numpy docstring. Otherwise, a link to the original numpy docstring will be added. Which numpy version the link points to depends on `value`: * `'stable'`: the current stable version; * `'dev'`: the current development version; * pattern `\d+(\.\d+(\.\d+)?)?`: `value` will be treated as a version number, e.g. '1.16'. Args: value: the value to set the global variable to.
github-repos
async def _get_socket_url(self): data = (await self.api.execute_method(self.RTM_START_ENDPOINT, simple_latest=True, no_unreads=True)) return data['url']
Get the WebSocket URL for the RTM session. Warning: The URL expires if the session is not joined within 30 seconds of the API call to the start endpoint. Returns: :py:class:`str`: The socket URL.
codesearchnet
def put(self, dash_id=0):
    """Update a dash meta and content, return updated dash content.

    Args:
        dash_id: dashboard id.

    Returns:
        A dict containing the updated content of that dashboard, not
        include the meta info.
    """
    payload = request.get_json()
    updated = self._update_dash(dash_id, payload)
    return build_response(dict(data=updated, code=200))
Update a dash meta and content, return updated dash content. Args: dash_id: dashboard id. Returns: A dict containing the updated content of that dashboard, not include the meta info.
juraj-google-style
def apply(self, *args, **kwargs):
    """Applies a transform or callable to a PValue.

    Args:
        *args: positional arguments.
        **kwargs: keyword arguments.

    The method will insert the pvalue as the next argument following an
    optional first label and a transform/callable object. It will call the
    pipeline.apply() method with this modified argument list.
    """
    # Splice `self` in as the second positional argument (or the only one
    # when no args are given — list.insert(1, ...) appends in that case).
    augmented = [*args[:1], self, *args[1:]]
    return self.pipeline.apply(*augmented, **kwargs)
Applies a transform or callable to a PValue. Args: *args: positional arguments. **kwargs: keyword arguments. The method will insert the pvalue as the next argument following an optional first label and a transform/callable object. It will call the pipeline.apply() method with this modified argument list.
github-repos
def ListThreads(self):
    """List the currently running python threads.

    Returns:
        A list of the inferior's thread idents, or an empty list if the
        debugger is not attached to any process.
    """
    if not self.inferior.is_running:
        logging.error('Not attached to any process.')
        return []
    return self.inferior.threads
List the currently running python threads. Returns: A list of the inferior's thread idents, or None if the debugger is not attached to any process.
codesearchnet
def elmo_loss2ppl(losses: List[np.ndarray]) -> float:
    """Calculates perplexity by loss.

    Args:
        losses: list of numpy arrays of model losses

    Returns:
        perplexity: float, exp of the mean loss
    """
    return float(np.exp(np.mean(losses)))
Calculates perplexity by loss Args: losses: list of numpy arrays of model losses Returns: perplexity : float
juraj-google-style
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
    """Adds a queueable item to a Sonos' playlist.

    Args:
        queueable_item (DidlObject): the item to add to the Sonos' playlist
        sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to which
            the item should be added
    """
    # A fresh update id for the saved queue is required by AddURIToSavedQueue.
    response, _ = self.music_library._music_lib_search(sonos_playlist.item_id, 0, 1)
    update_id = response['UpdateID']
    self.avTransport.AddURIToSavedQueue([
        ('InstanceID', 0),
        ('UpdateID', update_id),
        ('ObjectID', sonos_playlist.item_id),
        ('EnqueuedURI', queueable_item.resources[0].uri),
        ('EnqueuedURIMetaData', to_didl_string(queueable_item)),
        ('AddAtIndex', 4294967295),  # max uint32: append at the end
    ])
Adds a queueable item to a Sonos' playlist. Args: queueable_item (DidlObject): the item to add to the Sonos' playlist sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to which the item should be added
codesearchnet
def map_structprop_resnums_to_seqprop_resnums(self, resnums, structprop=None, chain_id=None, seqprop=None, use_representatives=False):
    """Map a residue number in any StructProp + chain ID to any SeqProp's residue number.

    Args:
        resnums (int, list): Residue numbers in the structure
        structprop (StructProp): StructProp object
        chain_id (str): Chain ID to map from
        seqprop (SeqProp): SeqProp object
        use_representatives (bool): If the representative sequence and
            structure should be used. If True, seqprop, structprop, and
            chain_id do not need to be defined.

    Returns:
        dict: Mapping of structure residue numbers to sequence residue numbers

    Raises:
        ValueError: If the required sequence/structure/chain inputs are missing.
        KeyError: If the sequence has no stored alignment to this structure chain.
    """
    resnums = ssbio.utils.force_list(resnums)
    if use_representatives:
        seqprop = self.representative_sequence
        structprop = self.representative_structure
        chain_id = self.representative_chain
    if (not structprop):
        raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
    elif ((not seqprop) or (not structprop) or (not chain_id)):
        raise ValueError('Please specify sequence, structure, and chain ID')
    # The representative structure's id carries a 'REP-' prefix that is not
    # part of the stored alignment id.
    if (structprop.id == self.representative_structure.id):
        full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '')
    else:
        full_structure_id = '{}-{}'.format(structprop.id, chain_id)
    aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
    # Letter annotation produced by a previously parsed alignment.
    access_key = '{}_chain_index'.format(aln_id)
    if (access_key not in seqprop.letter_annotations):
        raise KeyError('{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id))
    chain = structprop.chains.get_by_id(chain_id)
    # Per-position structure residue numbers for this chain.
    chain_structure_resnum_mapping = chain.seq_record.letter_annotations['structure_resnums']
    final_mapping = {}
    for resnum in resnums:
        resnum = int(resnum)
        # Position of this structure residue within the chain sequence.
        resnum_index = chain_structure_resnum_mapping.index(resnum)
        struct_res_singleaa = structprop.chains.get_by_id(chain_id).seq_record[resnum_index]
        # Position in the SeqProp whose chain index matches (1-based).
        what = seqprop.letter_annotations[access_key].index((resnum_index + 1))
        seq_res_singleaa = seqprop[what]
        sp_resnum = (what + 1)
        final_mapping[resnum] = sp_resnum
        format_data = {'seqprop_id': seqprop.id, 'seqprop_resid': seq_res_singleaa, 'seqprop_resnum': sp_resnum, 'structprop_id': structprop.id, 'structprop_chid': chain_id, 'structprop_resid': struct_res_singleaa, 'structprop_resnum': resnum}
        # Warn when the mapped residues disagree (e.g. structural mutations).
        if (struct_res_singleaa != seq_res_singleaa):
            log.warning('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} does not match to structure {structprop_id}-{structprop_chid} residue {structprop_resid}{structprop_resnum}. NOTE: this may be due to structural differences'.format(**format_data))
        else:
            log.debug('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} is mapped to structure {structprop_id}-{structprop_chid} residue {structprop_resid}{structprop_resnum}'.format(**format_data))
    return final_mapping
Map a residue number in any StructProp + chain ID to any SeqProp's residue number. Args: resnums (int, list): Residue numbers in the structure structprop (StructProp): StructProp object chain_id (str): Chain ID to map from seqprop (SeqProp): SeqProp object use_representatives (bool): If the representative sequence and structure should be used. If True, seqprop, structprop, and chain_id do not need to be defined. Returns: dict: Mapping of structure residue numbers to sequence residue numbers
codesearchnet
def sg_summary_loss(tensor, prefix='losses', name=None):
    r"""Register `tensor` to summary report as `loss`

    Args:
      tensor: A `Tensor` to log as loss
      prefix: A `string`. A prefix to display in the tensor board web UI.
      name: A `string`. A name to display in the tensor board web UI.

    Returns:
      None
    """
    # The original source carried a stray `r` token — the orphaned raw-string
    # prefix of the docstring shown above; reattaching it makes the function
    # valid again.
    prefix = '' if prefix is None else prefix + '/'
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    _scalar(name, tf.reduce_mean(tensor))
    _histogram(name + '-h', tensor)
r"""Register `tensor` to summary report as `loss` Args: tensor: A `Tensor` to log as loss prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
juraj-google-style
def send_client_cmd(self, data, cmd=None, via_queue=None):
    """Send arbitrary cmd and data to client.

    If a queue name is passed via the ``via_queue`` parameter, that queue
    will be used instead of the user's private exchange.

    Args:
        data (dict): Payload to send.
        cmd (str): Optional command name, merged into ``data`` under 'cmd'.
        via_queue (str): Optional queue name.
    """
    channel = self._connect_mq()
    if cmd:
        data['cmd'] = cmd
    body = json.dumps(data)
    if via_queue:
        channel.basic_publish(exchange='', routing_key=via_queue, body=body)
    else:
        channel.basic_publish(exchange=self.prv_exchange, routing_key='', body=body)
Send arbitrary cmd and data to client if queue name passed by "via_queue" parameter, that queue will be used instead of users private exchange. Args: data: dict cmd: string via_queue: queue name,
codesearchnet
def extract_pivots(self, assignments):
    """Find values for every variable that appears in this term.

    This finds all variables that appear in this term and limits them to the
    values they appear together with. For example, consider the equation
    t = v1 | (t = v2 & (t = v2 | t = v3)). Here, t can be limited to
    [v1, v2]. (v3 is impossible.)

    Args:
        assignments: The current "upper bound", i.e. all values that are
            still possible for variables. Used for extracting pivots out of
            Eq(var, var).

    Returns:
        A dictionary mapping strings (variable names) to sets of strings
        (value or variable names).

    Raises:
        NotImplementedError: Always — subclasses must override this method.
    """
    raise NotImplementedError()
Find values for every variable that appears in this term. This finds all variables that appear in this term and limits them to the values they appear together with. For example, consider the equation t = v1 | (t = v2 & (t = v2 | t = v3)) Here, t can be limited to [v1, v2]. (v3 is impossible.) Args: assignments: The current "upper bound", i.e. all values that are still possible for variables. Used for extracting pivots out of Eq(var, var). Returns: A dictionary mapping strings (variable names) to sets of strings (value or variable names).
github-repos
def from_scf_input(cls, workdir, scf_input, ph_ngqpt, with_becs=True, manager=None, allocate=True):
    """Create a `PhononFlow` for phonon calculations from an `AbinitInput`
    defining a ground-state run.

    Args:
        workdir: Working directory of the flow.
        scf_input: :class:`AbinitInput` object with the parameters for the
            GS-SCF run.
        ph_ngqpt: q-mesh for phonons. Must be a sub-mesh of the k-mesh used
            for electrons. e.g if ngkpt = (8, 8, 8), ph_ngqpt = (4, 4, 4) is
            a valid choice whereas ph_ngqpt = (3, 3, 3) is not!
        with_becs: True if Born effective charges are wanted.
        manager: :class:`TaskManager` object. Read from `manager.yml` if None.
        allocate: True if the flow should be allocated before returning.

    Returns:
        :class:`PhononFlow` object.
    """
    flow = cls(workdir, manager=manager)
    flow.register_scf_task(scf_input)
    scf_task = flow[0][0]
    # The phonon q-mesh must divide the electron k-mesh exactly.
    (scf_ngkpt, ph_ngqpt) = (np.array(scf_input['ngkpt']), np.array(ph_ngqpt))
    if any(((scf_ngkpt % ph_ngqpt) != 0)):
        raise ValueError(('ph_ngqpt %s should be a sub-mesh of scf_ngkpt %s' % (ph_ngqpt, scf_ngkpt)))
    # q-points in the irreducible Brillouin zone for the chosen q-mesh.
    qpoints = scf_input.abiget_ibz(ngkpt=ph_ngqpt, shiftk=(0, 0, 0), kptopt=1).points
    for qpt in qpoints:
        # Gamma with Born effective charges needs a dedicated work type.
        if (np.allclose(qpt, 0) and with_becs):
            ph_work = BecWork.from_scf_task(scf_task)
        else:
            ph_work = PhononWork.from_scf_task(scf_task, qpoints=qpt)
        flow.register_work(ph_work)
    if allocate:
        flow.allocate()
    return flow
Create a `PhononFlow` for phonon calculations from an `AbinitInput` defining a ground-state run. Args: workdir: Working directory of the flow. scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run. ph_ngqpt: q-mesh for phonons. Must be a sub-mesh of the k-mesh used for electrons. e.g if ngkpt = (8, 8, 8). ph_ngqpt = (4, 4, 4) is a valid choice whereas ph_ngqpt = (3, 3, 3) is not! with_becs: True if Born effective charges are wanted. manager: :class:`TaskManager` object. Read from `manager.yml` if None. allocate: True if the flow should be allocated before returning. Return: :class:`PhononFlow` object.
codesearchnet
def get_lowest_decomposition(self, composition):
    """Get the decomposition leading to lowest cost.

    Args:
        composition: Composition as a pymatgen.core.structure.Composition

    Returns:
        Decomposition as a dict of {Entry: amount}

    Raises:
        ValueError: if the phase diagram cannot be built from the cost data.
    """
    element_symbols = [el.symbol for el in composition.elements]
    candidate_entries = []
    # Gather cost entries for every non-empty chemical subsystem.
    for size in range(1, len(element_symbols) + 1):
        for subset in itertools.combinations(element_symbols, size):
            chemsys = [Element(sym) for sym in subset]
            candidate_entries.extend(self.costdb.get_entries(chemsys))
    try:
        return PhaseDiagram(candidate_entries).get_decomposition(composition)
    except IndexError:
        raise ValueError("Error during PD building; most likely, "
                         "cost data does not exist!")
Get the decomposition leading to lowest cost Args: composition: Composition as a pymatgen.core.structure.Composition Returns: Decomposition as a dict of {Entry: amount}
juraj-google-style
def _GetResponseClass(self, method_descriptor): if (method_descriptor.containing_service != self.descriptor): raise RuntimeError('GetResponseClass() given method descriptor for wrong service type.') return method_descriptor.output_type._concrete_class
Returns the class of the response protocol message. Args: method_descriptor: Descriptor of the method for which to return the response protocol message class. Returns: A class that represents the output protocol message of the specified method.
codesearchnet
def linkToChannelInputFile(self, session, channelInputFile, force=False):
    """Create database relationships between the link node dataset and the
    channel input file.

    The link node dataset only stores references to the links and nodes --
    not the geometry. The link and node geometries are stored in the channel
    input file. The two files must be linked with database relationships to
    allow the creation of link node dataset visualizations.

    This process is not performed automatically during reading, because it
    can be very costly in terms of read time. This operation can only be
    performed after both files have been read into the database.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
            object bound to PostGIS enabled database
        channelInputFile (:class:`gsshapy.orm.ChannelInputFile`): Channel
            input file object to be associated with this link node dataset
            file.
        force (bool, optional): Force channel input file reassignment. When
            false (default), channel input file assignment is skipped if it
            has already been performed.
    """
    # Skip when already linked, unless the caller forces reassignment.
    if ((self.channelInputFile is not None) and (not force)):
        return
    self.channelInputFile = channelInputFile
    orderedLinks = channelInputFile.getOrderedLinks(session)
    timeSteps = self.timeSteps
    for timeStep in timeSteps:
        linkDatasets = timeStep.linkDatasets
        for (l, linkDataset) in enumerate(linkDatasets):
            # Link datasets are stored in the same order as the stream links.
            streamLink = orderedLinks[l]
            streamNodes = streamLink.nodes
            linkDataset.link = streamLink
            nodeDatasets = linkDataset.nodeDatasets
            # Associate node datasets positionally with stream nodes.
            if ((len(nodeDatasets) > 0) and (len(streamNodes) > 0)):
                for (n, nodeDataset) in enumerate(nodeDatasets):
                    nodeDataset.node = streamNodes[n]
    session.add(self)
    session.commit()
Create database relationships between the link node dataset and the channel input file. The link node dataset only stores references to the links and nodes--not the geometry. The link and node geometries are stored in the channel input file. The two files must be linked with database relationships to allow the creation of link node dataset visualizations. This process is not performed automatically during reading, because it can be very costly in terms of read time. This operation can only be performed after both files have been read into the database. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database channelInputFile (:class:`gsshapy.orm.ChannelInputFile`): Channel input file object to be associated with this link node dataset file. force (bool, optional): Force channel input file reassignment. When false (default), channel input file assignment is skipped if it has already been performed.
codesearchnet
def parse(path):
    """Parse an ``.ensime`` config file from S-expressions.

    Args:
        path (str): Path of an ``.ensime`` file to parse.

    Returns:
        dict: Configuration values with string keys.
    """
    def pairwise(iterable):
        # Yield (key, value) pairs from a flat property list.
        cursor = iter(iterable)
        return zip(cursor, cursor)

    def plain(datum):
        # Unwrap sexpdata symbols to their underlying string value.
        return datum.value() if isinstance(datum, sexpdata.Symbol) else datum

    def to_dict(sexps):
        result = {}
        for raw_key, value in pairwise(sexps):
            key = str(plain(raw_key)).lstrip(':')
            if isinstance(value, list) and value:
                head = value[0]
                if isinstance(head, list):
                    # List of property lists -> list of dicts.
                    result[key] = [to_dict(item) for item in value]
                elif isinstance(head, sexpdata.Symbol):
                    # Nested property list -> nested dict.
                    result[key] = to_dict(value)
                else:
                    result[key] = value
            else:
                result[key] = value
        return result

    return to_dict(sexpdata.loads(Util.read_file(path)))
Parse an ``.ensime`` config file from S-expressions. Args: path (str): Path of an ``.ensime`` file to parse. Returns: dict: Configuration values with string keys.
juraj-google-style
def load(fin, dtype=np.float32, max_vocab=None):
    """Load word embedding file.

    Args:
        fin (File): File object to read. File should be open for reading
            ascii.
        dtype (numpy.dtype): Element data type to use for the array.
        max_vocab (int): Number of vocabulary to read.

    Returns:
        numpy.ndarray: Word embedding representation vectors
        dict: Mapping from words to vector indices.

    Raises:
        ParseError: On a malformed line or an inconsistent vector size.
    """
    vocab = {}
    # Collect rows and build the array once at the end: the previous
    # implementation called np.append per line, which copies the whole
    # array each time (O(n^2) overall).
    vectors = []
    dim = None
    for line in fin:
        if max_vocab is not None and len(vocab) >= max_vocab:
            break
        try:
            token, v = _parse_line(line, dtype)
        except (ValueError, IndexError):
            raise ParseError(b'Parsing error in line: ' + line)
        if token in vocab:
            parse_warn(b'Duplicated vocabulary ' + token)
            continue
        if dim is None:
            dim = len(v)
        elif len(v) != dim:
            raise ParseError(b'Vector size did not match in line: ' + line)
        vocab[token] = len(vectors)
        vectors.append(v)
    arr = np.array(vectors, dtype=dtype) if vectors else None
    return arr, vocab
Load word embedding file. Args: fin (File): File object to read. File should be open for reading ascii. dtype (numpy.dtype): Element data type to use for the array. max_vocab (int): Number of vocabulary to read. Returns: numpy.ndarray: Word embedding representation vectors dict: Mapping from words to vector indices.
juraj-google-style
def _get_upload_cmd(self, mirror=False): if mirror: dest_uri = self.s3_mirror_uri else: dest_uri = self.s3_version_uri cmd = 'aws s3 sync {} {} --delete --exact-timestamps --profile {}'.format(self.artifact_path, dest_uri, self.env) return cmd
Generate the S3 CLI upload command Args: mirror (bool): If true, uses a flat directory structure instead of nesting under a version. Returns: str: The full CLI command to run.
juraj-google-style
def transformer_decoder_attention_unit(x, hparams, encoder_output,
                                       decoder_self_attention_bias,
                                       encoder_decoder_attention_bias,
                                       attention_dropout_broadcast_dims,
                                       save_weights_to=None,
                                       make_image_summary=True):
    """Applies multihead attention parametrised for decoding.

    Runs decoder self-attention and, when ``encoder_output`` is given,
    encoder-decoder attention, each wrapped in the layer pre/post
    processing configured by ``hparams``.

    Args:
        x: input (decoder input) tensor.
        hparams: model hyper-parameters.
        encoder_output: encoder representation
            [batch_size, input_length, hidden_dim], or None to skip
            encoder-decoder attention.
        decoder_self_attention_bias: bias and mask weights for decoder
            self-attention [batch_size, decoder_length].
        encoder_decoder_attention_bias: bias and mask weights for
            encoder-decoder attention [batch_size, input_length].
        attention_dropout_broadcast_dims: dims for noise broadcasting in
            the dropout layers, to save memory during training.
        save_weights_to: optional dict capturing attention weights for
            visualization, keyed by variable-scope name.
        make_image_summary: whether to make an attention image summary.

    Returns:
        The output tensor.
    """
    with tf.variable_scope('self_attention'):
        # Self-attention: no memory antecedent (None), so queries, keys
        # and values all come from the preprocessed decoder input.
        y = common_attention.multihead_attention(
            common_layers.layer_preprocess(x, hparams),
            None,
            decoder_self_attention_bias,
            (hparams.attention_key_channels or hparams.hidden_size),
            (hparams.attention_value_channels or hparams.hidden_size),
            hparams.hidden_size,
            hparams.num_heads,
            hparams.attention_dropout,
            attention_type=hparams.self_attention_type,
            save_weights_to=save_weights_to,
            max_relative_position=hparams.max_relative_position,
            cache=None,
            make_image_summary=make_image_summary,
            dropout_broadcast_dims=attention_dropout_broadcast_dims,
            hard_attention_k=hparams.hard_attention_k)
        x = common_layers.layer_postprocess(x, y, hparams)
    if (encoder_output is not None):
        with tf.variable_scope('encdec_attention'):
            # Encoder-decoder attention: attend over the encoder output.
            y = common_attention.multihead_attention(
                common_layers.layer_preprocess(x, hparams),
                encoder_output,
                encoder_decoder_attention_bias,
                (hparams.attention_key_channels or hparams.hidden_size),
                (hparams.attention_value_channels or hparams.hidden_size),
                hparams.hidden_size,
                hparams.num_heads,
                hparams.attention_dropout,
                save_weights_to=save_weights_to,
                make_image_summary=make_image_summary,
                dropout_broadcast_dims=attention_dropout_broadcast_dims,
                hard_attention_k=hparams.hard_attention_k)
            x = common_layers.layer_postprocess(x, y, hparams)
    return x
Applies multihead attention function which is parametrised for decoding. Args: x: input (decoder input) hparams: model hyper-parameters encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] decoder_self_attention_bias: Bias and mask weights for decoder self-attention. [batch_size, decoder_length] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length] attention_dropout_broadcast_dims: Fpr noise broadcasting in the dropout layers to save memory during training save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: The output tensor
codesearchnet
def _get_proxy_info(self, _=None):
    """Create an SSHTunnelProxyInfo from the connected SSH transport.

    Args:
        _ (None): Ignored; present only because the ProxyInfo spec
            requires the argument.

    Returns:
        SSHTunnelProxyInfo: Proxy info wrapping a socket tunneled via SSH.
    """
    host, port, path = self._endpoint_to_target(self._endpoint)
    if path:
        # Unix-domain endpoint: forward over a local socket path.
        sock = self._ssh_tunnel.forward_unix(path=path)
    else:
        sock = self._ssh_tunnel.forward_tcp(host, port=port)
    return SSHTunnelProxyInfo(sock=sock)
Generate a ProxyInfo class from a connected SSH transport Args: _ (None): Ignored. This is just here as the ProxyInfo spec requires it. Returns: SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH
juraj-google-style
def setupArgparse():
    """Build the faradayio-cli argument parser and parse sys.argv.

    Returns:
        argparse.Namespace: Populated namespace of arguments.
    """
    parser = argparse.ArgumentParser()
    # Radio identity (required positionals).
    parser.add_argument('callsign', help='Callsign of radio')
    parser.add_argument('id', type=int, help='ID number radio')
    # Serial-port configuration (optional).
    parser.add_argument('-l', '--loopback',
                        action='store_true',
                        help='Use software loopback serial port')
    parser.add_argument('-p', '--port',
                        default='/dev/ttyUSB0',
                        help='Physical serial port of radio')
    return parser.parse_args()
Sets up argparse module to create command line options and parse them. Uses the argparse module to add arguments to the command line for faradayio-cli. Once the arguments are added and parsed the arguments are returned Returns: argparse.Namespace: Populated namespace of arguments
codesearchnet
def __call__(self, request: beam.Row, *args, **kwargs):
    """Run a KNN vector search against the Redis index for one row.

    Args:
        request: the input `beam.Row` to enrich; its 'text' field is
            assumed to already hold an embedding vector (sequence of
            floats) — TODO(review): confirm with the upstream transform.

    Returns:
        Tuple of (request row, response row carrying the matched docs).
    """
    embedded_query = request['text']
    # RediSearch KNN query: filter by hybrid_fields, score the k nearest
    # vectors in vector_field as `vector_score`.
    base_query = f'{self.hybrid_fields}=>[KNN {self.k} @{self.vector_field} $vector AS vector_score]'
    query = Query(base_query).return_fields(*self.return_fields).paging(0, self.k).dialect(2)
    # Redis expects the query vector as raw float32 bytes.
    params_dict = {'vector': np.array(embedded_query).astype(dtype=np.float32).tobytes()}
    results = self.client.ft(self.index_name).search(query, params_dict)
    return (beam.Row(text=embedded_query), beam.Row(docs=results.docs))
Reads a row from the redis Vector DB and returns a `Tuple` of request and response. Args: request: the input `beam.Row` to enrich.
github-repos
def merge_collections(collections, force_dense=False, sampling_rate='auto'):
    """Merge two or more collections at the same level of analysis.

    Args:
        collections (list): List of Collections to merge.
        force_dense (bool): Currently unused here — TODO(review): confirm
            whether dense conversion should be forced before merging.
        sampling_rate (int, str): Sampling rate to use if it becomes
            necessary to resample DenseRunVariables. Either an integer or
            'auto' (see merge_variables docstring).

    Returns:
        A BIDSVariableCollection or BIDSRunVariableCollection, depending
        on the type of the input collections.

    Raises:
        ValueError: If the collections are at different analysis levels.
    """
    # A single collection (possibly passed bare) needs no merging.
    if len(listify(collections)) == 1:
        return collections
    levels = set([c.level for c in collections])
    if len(levels) > 1:
        raise ValueError("At the moment, it's only possible to merge "
                         "Collections at the same level of analysis. You "
                         "passed collections at levels: %s." % levels)
    # Pool every variable across collections; same-named variables are
    # merged by the collection class.
    variables = list(chain(*[c.variables.values() for c in collections]))
    cls = collections[0].__class__
    variables = cls.merge_variables(variables, sampling_rate=sampling_rate)
    if isinstance(collections[0], BIDSRunVariableCollection):
        if sampling_rate == 'auto':
            # Borrow the rate from the first dense variable, if any exists.
            rates = [var.sampling_rate for var in variables
                     if isinstance(var, DenseRunVariable)]
            sampling_rate = rates[0] if rates else None
        return cls(variables, sampling_rate)
    return cls(variables)
Merge two or more collections at the same level of analysis. Args: collections (list): List of Collections to merge. sampling_rate (int, str): Sampling rate to use if it becomes necessary to resample DenseRunVariables. Either an integer or 'auto' (see merge_variables docstring for further explanation). Returns: A BIDSVariableCollection or BIDSRunVariableCollection, depending on the type of the input collections.
juraj-google-style
def get_block_sysfee(self, height, id=None, endpoint=None):
    """Get the system fee of a block by height (used for gas claims).

    Args:
        height (int): height of the block to look up.
        id (int, optional): id to use for response tracking.
        endpoint (RPCEndpoint, optional): endpoint to use.

    Returns:
        JSON object of the result, or the error encountered in the
        RPC call.
    """
    params = [height]
    return self._call_endpoint(GET_BLOCK_SYS_FEE, params=params,
                               id=id, endpoint=endpoint)
Get the system fee of a block by height. This is used in calculating gas claims Args: height: (int) height of the block to lookup id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
juraj-google-style
def Mean(self):
    """Computes the mean of a PMF.

    Returns:
        float: the expectation, sum of x * p over all outcomes.
    """
    mu = 0.0
    # Bug fix: dict.iteritems() exists only on Python 2; items() works on
    # both Python 2 and 3.
    for x, p in self.d.items():
        mu += p * x
    return mu
Computes the mean of a PMF. Returns: float mean
codesearchnet
def process_data(key, data_list, result_info_key, identifier_keys):
    """Flatten one endpoint's API results into excel-friendly rows.

    Args:
        key: the endpoint name of the data to process
        data_list: the main data list to take the data from
        result_info_key: the key in api_data dicts that contains the data
            results
        identifier_keys: the list of keys used as requested identifiers
            (address, zipcode, block_id, etc)

    Returns:
        A list of dicts (rows) to be written to a worksheet
    """
    master_data = []
    for item_data in data_list:
        data = item_data[key]
        # Each endpoint nests its payload differently; normalize per key.
        if (data is None):
            current_item_data = {}
        elif (key == 'property/value'):
            current_item_data = data['value']
        elif (key == 'property/details'):
            top_level_keys = ['property', 'assessment']
            current_item_data = flatten_top_level_keys(data, top_level_keys)
        elif (key == 'property/school'):
            current_item_data = data['school']
            school_list = []
            for school_type_key in current_item_data:
                schools = current_item_data[school_type_key]
                for school in schools:
                    # Tag each school row with its type and copy the
                    # address fields under school-specific names.
                    school['school_type'] = school_type_key
                    school['school_address'] = school['address']
                    school['school_zipcode'] = school['zipcode']
                    school_list.append(school)
            current_item_data = school_list
        elif (key == 'property/value_forecast'):
            # One column per forecast month, holding just the value.
            current_item_data = {}
            for month_key in data:
                current_item_data[month_key] = data[month_key]['value']
        elif (key in ['property/value_within_block', 'property/rental_value_within_block']):
            current_item_data = flatten_top_level_keys(data, ['housecanary_value_percentile_range', 'housecanary_value_sqft_percentile_range', 'client_value_percentile_range', 'client_value_sqft_percentile_range'])
        elif (key in ['property/zip_details', 'zip/details']):
            top_level_keys = ['multi_family', 'single_family']
            current_item_data = flatten_top_level_keys(data, top_level_keys)
        else:
            # Unknown endpoints pass through unchanged.
            current_item_data = data
        # Dict payloads become one row; list payloads become many rows,
        # each annotated with the requested identifier fields.
        if isinstance(current_item_data, dict):
            _set_identifier_fields(current_item_data, item_data, result_info_key, identifier_keys)
            master_data.append(current_item_data)
        else:
            for item in current_item_data:
                _set_identifier_fields(item, item_data, result_info_key, identifier_keys)
            master_data.extend(current_item_data)
    return master_data
Given a key as the endpoint name, pulls the data for that endpoint out of the data_list for each address, processes the data into a more excel-friendly format and returns that data. Args: key: the endpoint name of the data to process data_list: the main data list to take the data from result_info_key: the key in api_data dicts that contains the data results identifier_keys: the list of keys used as requested identifiers (address, zipcode, block_id, etc) Returns: A list of dicts (rows) to be written to a worksheet
codesearchnet
def heightmap_get_slope(hm: np.ndarray, x: int, y: int) -> float:
    """Return the slope at the given coordinates, from 0 to (pi / 2).

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap
            functions.
        x (int): The x coordinate.
        y (int): The y coordinate.

    Returns:
        float: The steepness at ``x``, ``y``, from 0 to (pi / 2).
    """
    cdata = _heightmap_cdata(hm)
    slope = lib.TCOD_heightmap_get_slope(cdata, x, y)
    return float(slope)
Return the slope between 0 and (pi / 2) at given coordinates. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. x (int): The x coordinate. y (int): The y coordinate. Returns: float: The steepness at ``x``, ``y``. From 0 to (pi / 2)
juraj-google-style
def run(self, source, **kwargs):
    """Transform a JSON source into Lean BIBFRAME 2.0 triples.

    Args:
        source (str or dict): JSON string or already-parsed mapping.
        **kwargs: Passed through to the base processor; an ``output``
            graph is injected before delegation.

    Returns:
        The output graph produced by the run.
    """
    kwargs['output'] = self.__graph__()
    if isinstance(source, str):
        import json
        source = json.loads(source)
    self.source = source
    super(JSONProcessor, self).run(**kwargs)
    self.output = kwargs['output']
    # Bug fix: the original ended with `return output`, referencing an
    # undefined local name (NameError); return the stored result instead.
    return self.output
Method takes a JSON source and any keywords and transforms from JSON to Lean BIBFRAME 2.0 triples Args: ---- source: str, dict
juraj-google-style
def lint(self, targets):
    """Run all configured linters in parallel and sort their results.

    Args:
        targets (list): List of files and folders to lint.

    Returns:
        tuple: (sorted stdout lines, stderr lines iterator).
    """
    LinterRunner.targets = targets
    linter_classes = self._config.get_linter_classes()
    with Pool() as pool:
        results = pool.map(LinterRunner.run, linter_classes)
    # Drop linters that produced nothing, then split into two streams.
    pairs = [result for result in results if result is not None]
    stdout_streams, stderr_streams = zip(*pairs)
    return (sorted(chain.from_iterable(stdout_streams)),
            chain.from_iterable(stderr_streams))
Run linters in parallel and sort all results. Args: targets (list): List of files and folders to lint.
codesearchnet
def save_cache(cache):
    """Persist the duplicate-filter cache to disk as a JSON list.

    Args:
        cache (set): Set with cached data.
    """
    serialized = json.dumps(list(cache))
    with open(settings.DUP_FILTER_FILE, 'w') as out:
        out.write(serialized)
Save cahce to the disk. Args: cache (set): Set with cached data.
codesearchnet
def Scalars(self, run, tag):
    """Retrieve the scalar events associated with a run and tag.

    Args:
        run: A string name of the run for which values are retrieved.
        tag: A string name of the tag for which values are retrieved.

    Raises:
        KeyError: If the run is not found, or the tag is not available
            for the given run.

    Returns:
        An array of `event_accumulator.ScalarEvents`.
    """
    return self.GetAccumulator(run).Scalars(tag)
Retrieve the scalar events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.ScalarEvents`.
codesearchnet
def _update_workflow_stages(stage_data: dict, workflow_stage: WorkflowStage,
                            docker: DockerSwarmClient):
    """Check and update the status of a workflow stage.

    For a stage not yet marked complete, queries the Docker Swarm API for
    the state of each of the stage's services; services in state
    'shutdown' are deleted and flagged complete. When *all* services are
    complete, the stage itself is marked 'complete'. ``stage_data`` is
    mutated in place.

    TODO(BMo): refactor once workflow state metadata storage in the
    configuration database is updated; stage_data works around a badly
    specified WorkflowStage object.

    Args:
        stage_data (dict): Dictionary holding workflow stage metadata.
        workflow_stage (WorkflowStage): Workflow stage data object.
        docker (DockerSwarmClient): Docker Swarm Client object.
    """
    service_status_complete = []
    if (stage_data['status'] != 'complete'):
        for (service_id, service_dict) in stage_data['services'].items():
            service_state = docker.get_service_state(service_id)
            if (service_state == 'shutdown'):
                # Finished services are removed from the swarm.
                docker.delete_service(service_id)
            service_dict['status'] = service_state
            service_dict['complete'] = (service_state == 'shutdown')
            service_status_complete.append(service_dict['complete'])
        if all(service_status_complete):
            LOG.info('Workflow stage service %s complete!', workflow_stage.id)
            stage_data['status'] = 'complete'
Check and update the status of a workflow stage. This function checks and updates the status of a workflow stage specified by the parameters in the specified stage_data dictionary. If the workflow stage is not marked as complete, this function will check with the Docker Swarm API on the status of Docker services defined for the stage. If **all** services are found to be complete (based on their service state being reported as 'shutdown', the workflow stage is marked complete. This function is used by `execute_processing_block`. TODO(BMo) This function will need refactoring at some point as part of an update to the way workflow state metadata is stored in the configuration database. Currently the stage_data dictionary is a bit of a hack for a badly specified Configuration Database backed WorkflowStage object. Args: stage_data (dict): Dictionary holding workflow stage metadata. workflow_stage (WorkflowStage): Workflow stage data object. docker (DockerClient): Docker Swarm Client object.
codesearchnet
def predict(self, features, verbose=False):
    """Probability estimates of each feature.

    See sklearn's SGDClassifier predict and predict_proba methods.

    Args:
        features (:obj:`list` of :obj:`list` of :obj:`float`)
        verbose: Boolean, optional. If True, return a list where each
            element is a dict mapping each label to its probability.
            Defaults to False.

    Returns:
        Array of arrays of numbers, or a list of dicts if verbose is
        True.
    """
    probs = self.clf.predict_proba(features)
    if not verbose:
        return probs
    labels = self.labels.classes_
    return [
        {labels[i]: p for i, p in enumerate(row)}
        for row in probs
    ]
Probability estimates of each feature See sklearn's SGDClassifier predict and predict_proba methods. Args: features (:obj:`list` of :obj:`list` of :obj:`float`) verbose: Boolean, optional. If true returns an array where each element is a dictionary, where keys are labels and values are the respective probabilities. Defaults to False. Returns: Array of array of numbers, or array of dictionaries if verbose i True
codesearchnet
def run(self, tasklet, **kwds):
    """Run a tasklet with retry.

    The retry should be transparent to the caller: if no attempts are
    successful, the exception or result from the last retry is returned
    to the caller.

    Args:
        tasklet: the tasklet to run.
        **kwds: keyword arguments to run the tasklet with.

    Raises:
        The exception from running the tasklet.

    Returns:
        The result from running the tasklet (raised via ndb.Return).
    """
    start_time = time.time()
    n = 1  # attempt counter
    while True:
        e = None
        result = None
        got_result = False
        try:
            result = (yield tasklet(**kwds))
            got_result = True
            if not self.should_retry(result):
                # Success (or a non-retriable result): hand it back.
                raise ndb.Return(result)
        except runtime.DeadlineExceededError:
            # Request deadline hit: never retry, propagate immediately.
            logging.debug('Tasklet has exceeded request deadline after %s seconds total', time.time() - start_time)
            raise
        except self.retriable_exceptions as err:
            # Bug fix: Python 3 unbinds the `except ... as` target at
            # block exit, so the original `except ... as e: pass` left
            # `e` undefined for the checks below; keep it alive here.
            e = err
        if n == 1:
            logging.debug('Tasklet is %r', tasklet)
        delay = self.retry_params.delay(n, start_time)
        if delay <= 0:
            # Retry budget exhausted: surface the last result/exception.
            logging.debug('Tasklet failed after %s attempts and %s seconds in total', n, time.time() - start_time)
            if got_result:
                raise ndb.Return(result)
            elif e is not None:
                raise e
            else:
                assert False, 'Should never reach here.'
        if got_result:
            logging.debug('Got result %r from tasklet.', result)
        else:
            logging.debug('Got exception "%r" from tasklet.', e)
        logging.debug('Retry in %s seconds.', delay)
        n += 1
        yield tasklets.sleep(delay)
Run a tasklet with retry. The retry should be transparent to the caller: if no results are successful, the exception or result from the last retry is returned to the caller. Args: tasklet: the tasklet to run. **kwds: keywords arguments to run the tasklet. Raises: The exception from running the tasklet. Returns: The result from running the tasklet.
codesearchnet
def get_idiomatic_name_in_language(cls, name, language):
    """Convert a name into the idiomatic form for the given language.

    Resolution order: per-language cache, then an external language
    plugin, then the bundled ``monolithe.generators.lang.<language>``
    module. The resolved converter (or None meaning "no conversion") is
    cached per language.

    Args:
        name (str): the name to convert
        language (str): the language to use

    Returns:
        a name in the given language

    Example:
        get_idiomatic_name_in_language("EnterpriseNetwork", "python")
        >>> enterprise_network
    """
    if language in cls.idiomatic_methods_cache:
        m = cls.idiomatic_methods_cache[language]
        if not m:
            # Cached "no converter": return the name unchanged.
            return name
        return m(name)
    # Prefer an externally registered plugin if one exists.
    found, method = load_language_plugins(language, 'get_idiomatic_name')
    if found:
        cls.idiomatic_methods_cache[language] = method
        if method:
            return method(name)
        else:
            return name
    # Fall back to the bundled per-language generator module.
    module = importlib.import_module('.lang.%s' % language, package="monolithe.generators")
    if not hasattr(module, 'get_idiomatic_name'):
        cls.idiomatic_methods_cache[language] = None
        return name
    method = getattr(module, 'get_idiomatic_name')
    cls.idiomatic_methods_cache[language] = method
    return method(name)
Get the name for the given language Args: name (str): the name to convert language (str): the language to use Returns: a name in the given language Example: get_idiomatic_name_in_language("EnterpriseNetwork", "python") >>> enterprise_network
juraj-google-style
def signed_to_twos_comp(val: int, n_bits: int) -> int:
    """Convert a signed integer to its unsigned "two's complement" value.

    Args:
        val: signed integer
        n_bits: number of bits (must be a whole number of bytes)

    Returns:
        int: unsigned integer, the two's complement version of ``val``
            relative to ``n_bits`` bits.

    Raises:
        ValueError: if ``n_bits`` is not a multiple of 8.
        OverflowError: if ``val`` does not fit in ``n_bits`` signed bits.
    """
    # Use an exception rather than assert: asserts vanish under `-O`.
    if n_bits % 8 != 0:
        raise ValueError("Must specify a whole number of bytes")
    # Bug fix: the original set n_bytes = n_bits, i.e. 8x too many bytes,
    # so e.g. signed_to_twos_comp(-1, 8) returned 2**64 - 1 instead of
    # 255 (the complement relative to n_bits*8 bits, not n_bits).
    n_bytes = n_bits // 8
    b = val.to_bytes(n_bytes, byteorder=sys.byteorder, signed=True)
    return int.from_bytes(b, byteorder=sys.byteorder, signed=False)
Convert a signed integer to its "two's complement" representation. Args: val: signed integer n_bits: number of bits (which must reflect a whole number of bytes) Returns: unsigned integer: two's complement version
juraj-google-style
def _tensor_product(self, other, reverse=False):
    """Return the tensor product channel.

    Args:
        other (QuantumChannel): a quantum channel.
        reverse (bool): If False return self ⊗ other; if True return
            other ⊗ self. [Default: False]

    Returns:
        Choi: the tensor product channel as a Choi object.

    Raises:
        QiskitError: if other is not a QuantumChannel subclass (raised
            by the Choi conversion).
    """
    if not isinstance(other, Choi):
        other = Choi(other)
    if reverse:
        input_dims = self.input_dims() + other.input_dims()
        output_dims = self.output_dims() + other.output_dims()
        # Choi matrices interleave input/output subsystems, so a plain
        # Kronecker product is wrong; use the bipartite-aware tensor.
        data = _bipartite_tensor(
            other.data, self._data,
            shape1=other._bipartite_shape,
            shape2=self._bipartite_shape)
    else:
        input_dims = other.input_dims() + self.input_dims()
        output_dims = other.output_dims() + self.output_dims()
        data = _bipartite_tensor(
            self._data, other.data,
            shape1=self._bipartite_shape,
            shape2=other._bipartite_shape)
    return Choi(data, input_dims, output_dims)
Return the tensor product channel. Args: other (QuantumChannel): a quantum channel. reverse (bool): If False return self ⊗ other, if True return if True return (other ⊗ self) [Default: False Returns: Choi: the tensor product channel as a Choi object. Raises: QiskitError: if other is not a QuantumChannel subclass.
juraj-google-style
def _cancel_batch(batch_fn, cancel_fn, ops):
    """Cancel a batch of operations.

    Args:
        batch_fn: API-specific batch function.
        cancel_fn: API-specific cancel function.
        ops: A list of operations to cancel.

    Returns:
        A tuple of (operations canceled, error message strings).
    """
    canceled = []
    failed = []

    def handle_cancel_response(request_id, response, exception):
        'Callback for the cancel response.'
        del response
        if exception:
            msg = ('error %s: %s' % (exception.resp.status, exception.resp.reason))
            if (exception.resp.status == FAILED_PRECONDITION_CODE):
                # A failed precondition typically means the operation has
                # already finished; report it as "Not running".
                detail = json.loads(exception.content)
                status = detail.get('error', {}).get('status')
                if (status == FAILED_PRECONDITION_STATUS):
                    msg = 'Not running'
            failed.append({'name': request_id, 'msg': msg})
        else:
            canceled.append({'name': request_id})
        return

    # Queue one cancel call per operation, keyed by its internal id so the
    # callback can correlate responses back to operations.
    batch = batch_fn(callback=handle_cancel_response)
    ops_by_name = {}
    for op in ops:
        op_name = op.get_field('internal-id')
        ops_by_name[op_name] = op
        batch.add(cancel_fn(name=op_name, body={}), request_id=op_name)
    batch.execute()
    canceled_ops = [ops_by_name[op['name']] for op in canceled]
    error_messages = []
    for fail in failed:
        op = ops_by_name[fail['name']]
        error_messages.append(("Error canceling '%s': %s" % (get_operation_full_job_id(op), fail['msg'])))
    return (canceled_ops, error_messages)
Cancel a batch of operations. Args: batch_fn: API-specific batch function. cancel_fn: API-specific cancel function. ops: A list of operations to cancel. Returns: A list of operations canceled and a list of error messages.
codesearchnet
def _to_enos_roles(roles):
    """Transform provider roles to use enoslib.host.Host objects.

    Args:
        roles (dict): roles returned by
            :py:func:`enoslib.infra.provider.Provider.init`
    """
    def _convert(h):
        # Map every NIC role name to its NIC identifier.
        extra = {}
        for nic, nic_roles in h["nics"]:
            for nic_role in nic_roles:
                extra[nic_role] = nic
        return Host(h["host"], user="root", extra=extra)

    converted = {role: [_convert(h) for h in hosts]
                 for role, hosts in roles.items()}
    logger.debug(converted)
    return converted
Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init`
juraj-google-style
def MakeParser(prog):
    """Create an argument parser.

    Args:
        prog: The name of the program to use when outputting help text.

    Returns:
        An argparse.ArgumentParser built to specification.
    """

    def AddStandardOptions(parser, *args):
        # Attach only the options named in *args to the given subparser.
        if 'application' in args:
            parser.add_argument('-a', '--application', default='.',
                                help='The path to the Python App Engine App')
        if 'format' in args:
            parser.add_argument('-f', '--format', default='rest',
                                choices=['rest'],
                                help='The requested API protocol type (ignored)')
        if 'hostname' in args:
            help_text = ('Default application hostname, if none is specified '
                         'for API service.')
            parser.add_argument('--hostname', help=help_text)
        if 'output' in args:
            parser.add_argument('-o', '--output', default='.',
                                help='The directory to store output files')
        if 'language' in args:
            parser.add_argument('language',
                                help='The target output programming language')
        if 'service' in args:
            parser.add_argument('service', nargs='+',
                                help='Fully qualified service class name')
        if 'discovery_doc' in args:
            parser.add_argument('discovery_doc', nargs=1,
                                help='Path to the discovery document')
        if 'build_system' in args:
            parser.add_argument('-bs', '--build_system', default='default',
                                help='The target build system')

    parser = _EndpointsParser(prog=prog)
    # Only _VISIBLE_COMMANDS appear in the top-level help; the legacy
    # gen_* subcommands further below stay callable but undocumented.
    subparsers = parser.add_subparsers(
        title='subcommands', metavar='{%s}' % ', '.join(_VISIBLE_COMMANDS))

    get_client_lib = subparsers.add_parser(
        'get_client_lib', help=('Generates discovery documents and client '
                                'libraries from service classes'))
    get_client_lib.set_defaults(callback=_GetClientLibCallback)
    AddStandardOptions(get_client_lib, 'application', 'hostname', 'output',
                       'language', 'service', 'build_system')

    get_discovery_doc = subparsers.add_parser(
        'get_discovery_doc',
        help='Generates discovery documents from service classes')
    get_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
    AddStandardOptions(get_discovery_doc, 'application', 'format', 'hostname',
                       'output', 'service')

    get_openapi_spec = subparsers.add_parser(
        'get_openapi_spec',
        help='Generates OpenAPI (Swagger) specs from service classes')
    get_openapi_spec.set_defaults(callback=_GenOpenApiSpecCallback)
    AddStandardOptions(get_openapi_spec, 'application', 'hostname', 'output',
                       'service')
    get_openapi_spec.add_argument('--x-google-api-name', action='store_true',
                                  help="Add the 'x-google-api-name' field to the generated spec")

    # 'get_swagger_spec' is an alias of 'get_openapi_spec'.
    get_swagger_spec = subparsers.add_parser(
        'get_swagger_spec',
        help='Generates OpenAPI (Swagger) specs from service classes')
    get_swagger_spec.set_defaults(callback=_GenOpenApiSpecCallback)
    AddStandardOptions(get_swagger_spec, 'application', 'hostname', 'output',
                       'service')

    # Deprecated, hidden subcommands kept for backwards compatibility.
    gen_api_config = subparsers.add_parser('gen_api_config')
    gen_api_config.set_defaults(callback=_GenApiConfigCallback)
    AddStandardOptions(gen_api_config, 'application', 'hostname', 'output',
                       'service')

    gen_discovery_doc = subparsers.add_parser('gen_discovery_doc')
    gen_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
    AddStandardOptions(gen_discovery_doc, 'application', 'format', 'hostname',
                       'output', 'service')

    gen_client_lib = subparsers.add_parser('gen_client_lib')
    gen_client_lib.set_defaults(callback=_GenClientLibCallback)
    AddStandardOptions(gen_client_lib, 'output', 'language', 'discovery_doc',
                       'build_system')

    return parser
Create an argument parser. Args: prog: The name of the program to use when outputting help text. Returns: An argparse.ArgumentParser built to specification.
juraj-google-style
def tanh_shrink(x):
    """Applies the tanh shrink function element-wise: ``f(x) = x - tanh(x)``.

    Args:
        x: Input tensor.

    Returns:
        Output tensor of the same shape as `x`, where each element is
        transformed according to the tanh shrink operation.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_tanh_shrink = keras.ops.tanh_shrink(x)
    >>> print(x_tanh_shrink)
    array([-0.23840584 0. 0.23840584], shape=(3,), dtype=float64)
    """
    # Symbolic (graph) inputs go through the Operation class; eager
    # inputs dispatch straight to the backend kernel.
    if any_symbolic_tensors((x,)):
        return TanhShrink().symbolic_call(x)
    return backend.nn.tanh_shrink(x)
Applies the tanh shrink function element-wise. It is defined as: `f(x) = x - tanh(x)`. Args: x: Input tensor. Returns: Output tensor of the same shape as `x`, where each element is transformed according to the tanh shrink operation. Example: >>> x = np.array([ -1., 0., 1.]) >>> x_tanh_shrink = keras.ops.tanh_shrink(x) >>> print(x_tanh_shrink) array([-0.23840584 0. 0.23840584], shape=(3,), dtype=float64)
github-repos
def get_matcher(patterns, case_sensitive):
    """Get a callable that matches names against the given patterns.

    Arguments:
        patterns (list): A list of wildcard patterns,
            e.g. ``["*.py", "*.pyc"]``.
        case_sensitive (bool): If ``True`` the matcher is case sensitive,
            otherwise it is case insensitive.

    Returns:
        callable: a matcher returning `True` if the name given as an
        argument matches any of the given patterns.

    Example:
        >>> from fs import wildcard
        >>> is_python = wildcard.get_matcher(['*.py'], True)
        >>> is_python('__init__.py')
        True
        >>> is_python('foo.txt')
        False
    """
    if not patterns:
        # With no patterns, everything matches.
        return lambda name: True
    matcher = match_any if case_sensitive else imatch_any
    return partial(matcher, patterns)
Get a callable that matches names against the given patterns. Arguments: patterns (list): A list of wildcard pattern. e.g. ``["*.py", "*.pyc"]`` case_sensitive (bool): If ``True``, then the callable will be case sensitive, otherwise it will be case insensitive. Returns: callable: a matcher that will return `True` if the name given as an argument matches any of the given patterns. Example: >>> from fs import wildcard >>> is_python = wildcard.get_matcher(['*.py'], True) >>> is_python('__init__.py') True >>> is_python('foo.txt') False
codesearchnet
def plot_conductivity_mu(self, temp=600, output='eig', relaxation_time=1e-14, xlim=None):
    """Plot the conductivity as a function of Fermi level (semi-log).

    Args:
        temp: the temperature (K) whose conductivity data is plotted.
        output: 'eig' plots the three eigenvalues of the conductivity
            tensor — TODO(review): confirm other accepted values from
            BoltztrapAnalyzer.get_conductivity.
        relaxation_time: relaxation time in s used to scale the results.
        xlim: (min, max) Fermi-energy window; defaults to
            (-0.5, band gap + 0.5).

    Returns:
        a matplotlib object
    """
    import matplotlib.pyplot as plt
    cond = self._bz.get_conductivity(relaxation_time=relaxation_time, output=output, doping_levels=False)[temp]
    plt.figure(figsize=(9, 7))
    plt.semilogy(self._bz.mu_steps, cond, linewidth=3.0)
    # Overlay band-gap limits and doping reference lines.
    self._plot_bg_limits()
    self._plot_doping(temp)
    if (output == 'eig'):
        plt.legend(['$\\Sigma_1$', '$\\Sigma_2$', '$\\Sigma_3$'])
    if (xlim is None):
        plt.xlim((- 0.5), (self._bz.gap + 0.5))
    else:
        plt.xlim(xlim)
    # The displayed y-range scales linearly with the relaxation time.
    plt.ylim([(10000000000000.0 * relaxation_time), (1e+20 * relaxation_time)])
    plt.ylabel('conductivity,\n $\\Sigma$ (1/($\\Omega$ m))', fontsize=30.0)
    plt.xlabel('E-E$_f$ (eV)', fontsize=30.0)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    plt.tight_layout()
    return plt
Plot the conductivity as a function of Fermi level. Semi-log plot Args: temp: the temperature output: 'eig' to plot the eigenvalues of the conductivity tensor relaxation_time: a relaxation time in s used to scale the conductivity xlim: a list of min and max Fermi energy; by default (-0.5, band gap + 0.5) Returns: a matplotlib object
codesearchnet
def _get_task_with_policy(queue_name, task_id, owner):
    """Fetches the specified task and enforces ownership policy.

    Args:
        queue_name: Name of the queue the work item is on.
        task_id: ID of the task that is finished.
        owner: Who or what has the current lease on the task.

    Returns:
        The valid WorkQueue task that is currently owned.

    Raises:
        TaskDoesNotExistError: if the task does not exist.
        LeaseExpiredError: if the lease is no longer active.
        NotOwnerError: if the specified owner no longer owns the task.
    """
    now = datetime.datetime.utcnow()
    # SELECT ... FOR UPDATE: keep the row locked while validating it.
    task = WorkQueue.query.filter_by(queue_name=queue_name, task_id=task_id).with_lockmode('update').first()
    if (not task):
        raise TaskDoesNotExistError(('task_id=%r' % task_id))
    lease_delta = (now - task.eta)
    if (lease_delta > datetime.timedelta(0)):
        # Lease expired: release the row lock before raising.
        db.session.rollback()
        raise LeaseExpiredError(('queue=%r, task_id=%r expired %s' % (task.queue_name, task_id, lease_delta)))
    if (task.last_owner != owner):
        db.session.rollback()
        raise NotOwnerError(('queue=%r, task_id=%r, owner=%r' % (task.queue_name, task_id, task.last_owner)))
    return task
Fetches the specified task and enforces ownership policy. Args: queue_name: Name of the queue the work item is on. task_id: ID of the task that is finished. owner: Who or what has the current lease on the task. Returns: The valid WorkQueue task that is currently owned. Raises: TaskDoesNotExistError if the task does not exist. LeaseExpiredError if the lease is no longer active. NotOwnerError if the specified owner no longer owns the task.
codesearchnet
def update(self, resource, timeout=-1):
    """Update only the name of the Artifact Bundle.

    Args:
        resource (dict): Object to update.
        timeout: Timeout in seconds. Waits for task completion by
            default. The timeout does not abort the operation in OneView,
            it just stops waiting for its completion.

    Returns:
        dict: Updated resource.
    """
    return self._client.update(resource,
                               timeout=timeout,
                               default_values=self.DEFAULT_VALUES)
Updates only name for the Artifact Bundle. Args: resource (dict): Object to update. timeout: Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation in OneView, it just stops waiting for its completion. Returns: dict: Updated resource.
juraj-google-style
def _CountStoredAttributeContainers(self, container_type):
    """Counts the number of attribute containers of the given type.

    Args:
        container_type (str): attribute container type.

    Returns:
        int: number of attribute containers of the given type.

    Raises:
        ValueError: if an unsupported container_type is provided.
    """
    if (not (container_type in self._CONTAINER_TYPES)):
        raise ValueError('Attribute container type {0:s} is not supported'.format(container_type))
    if (not self._HasTable(container_type)):
        return 0
    # MAX(_ROWID_) avoids a full COUNT(*) scan; this presumes rows are
    # never deleted so the maximum rowid equals the row count —
    # TODO(review): confirm the store is append-only.
    query = 'SELECT MAX(_ROWID_) FROM {0:s} LIMIT 1'.format(container_type)
    self._cursor.execute(query)
    row = self._cursor.fetchone()
    if (not row):
        return 0
    # MAX() over an empty table yields NULL (None): treat as zero.
    return (row[0] or 0)
Counts the number of attribute containers of the given type. Args: container_type (str): attribute container type. Returns: int: number of attribute containers of the given type. Raises: ValueError: if an unsupported container_type is provided.
codesearchnet
def _autopacking_helper(list_or_tuple, dtype, name):
    """Converts the given list or tuple to a tensor by packing.

    Args:
        list_or_tuple: A (possibly nested) list or tuple containing a tensor.
        dtype: The element type of the returned tensor.
        name: A name for the returned tensor.

    Returns:
        A `tf.Tensor` with value equivalent to `list_or_tuple` (or the
        converted Python structure when no tensor was found).
    """
    if context.executing_eagerly():
        # Eager fast path: every element is already a tensor, pack directly.
        if all((isinstance(elem, core.Tensor) for elem in list_or_tuple)):
            return gen_array_ops.pack(list_or_tuple, name=name)
    must_pack = False
    converted_elems = []
    with ops.name_scope(name) as scope:
        for i, elem in enumerate(list_or_tuple):
            if isinstance(elem, core.Tensor):
                if dtype is not None and elem.dtype.base_dtype != dtype:
                    raise TypeError(f'Cannot convert a list containing a tensor of dtype {elem.dtype} to {dtype} (Tensor is: {elem!r})')
                converted_elems.append(elem)
                must_pack = True
            elif isinstance(elem, (list, tuple)):
                # Recurse into nested sequences; a tensor anywhere below
                # forces packing at this level too.
                converted_elem = _autopacking_helper(elem, dtype, str(i))
                if isinstance(converted_elem, core.Tensor):
                    must_pack = True
                converted_elems.append(converted_elem)
            else:
                converted_elems.append(elem)
        if must_pack:
            # Promote any remaining non-tensor elements to constants so
            # the whole list can be packed into one tensor.
            elems_as_tensors = []
            for i, elem in enumerate(converted_elems):
                if isinstance(elem, core.Tensor):
                    elems_as_tensors.append(elem)
                else:
                    elems_as_tensors.append(constant_op.constant(elem, dtype=dtype, name=str(i)))
            return gen_array_ops.pack(elems_as_tensors, name=scope)
        else:
            # No tensors found anywhere: return the plain Python structure.
            return converted_elems
Converts the given list or tuple to a tensor by packing. Args: list_or_tuple: A (possibly nested) list or tuple containing a tensor. dtype: The element type of the returned tensor. name: A name for the returned tensor. Returns: A `tf.Tensor` with value equivalent to `list_or_tuple`.
github-repos
def _call_rpc(self, header):
    """Call an RPC given a header and a previously received payload.

    Executed in the baBLE working thread: should not be blocking.

    Args:
        header (bytearray): The 5-byte RPC header
            (length, reserved, cmd, feature, address).
    """
    length, _, cmd, feature, address = struct.unpack("<BBBBB", bytes(header))
    # The 16-bit RPC id is feature (high byte) | command (low byte).
    rpc_id = (feature << 8) | cmd
    payload = self.rpc_payload[:length]
    status = (1 << 6)  # base success flag — TODO(review): confirm bit meaning
    try:
        response = self.device.call_rpc(address, rpc_id, bytes(payload))
        if len(response) > 0:
            # High bit signals that response data follows.
            status |= (1 << 7)
    except (RPCInvalidIDError, RPCNotFoundError):
        status = 2
        response = b''
    except TileNotFoundError:
        status = 0xFF
        response = b''
    except Exception:
        # Unknown failure: report a generic error code but keep the
        # worker thread alive, logging for diagnosis.
        status = 3
        response = b''
        self._logger.exception("Exception raise while calling rpc, header=%s, payload=%s", header, payload)
    self._audit(
        "RPCReceived",
        rpc_id=rpc_id,
        address=address,
        payload=binascii.hexlify(payload),
        status=status,
        response=binascii.hexlify(response)
    )
    resp_header = struct.pack("<BBBB", status, 0, 0, len(response))
    if len(response) > 0:
        self._send_rpc_response(
            (ReceiveHeaderChar.value_handle, resp_header),
            (ReceivePayloadChar.value_handle, response)
        )
    else:
        self._send_rpc_response((ReceiveHeaderChar.value_handle, resp_header))
Call an RPC given a header and possibly a previously sent payload It is executed in the baBLE working thread: should not be blocking. Args: header (bytearray): The RPC header we should call
juraj-google-style
def repeat(coro, times=1, step=1, limit=1, loop=None):
    """Execute a coroutine function repeatedly, accumulating results.

    Results accumulate in order as with ``map``; execution concurrency is
    bounded by ``limit``. This function is a coroutine.

    Args:
        coro (coroutinefunction): coroutine function to schedule.
        times (int): number of times to execute the coroutine.
        step (int): increment iteration step, as with ``range()``.
        limit (int): concurrency execution limit. Defaults to 1.
        loop (asyncio.BaseEventLoop): optional event loop to use.

    Raises:
        TypeError: if coro is not a coroutine function.

    Returns:
        list: accumulated yielded values returned by coroutine.
    """
    assert_corofunction(coro=coro)
    # Guard against zero or negative repetition counts.
    times = max(int(times), 1)
    # The coroutine receives the 1-based iteration number as argument.
    iterable = range(1, (times + 1), step)
    # NOTE: `map` here is the package's coroutine-aware map, not the builtin.
    return (yield from map(coro, iterable, limit=limit, loop=loop))
Executes the coroutine function ``x`` number of times, and accumulates results in order as you would use with ``map``. Execution concurrency is configurable using ``limit`` param. This function is a coroutine. Arguments: coro (coroutinefunction): coroutine function to schedule. times (int): number of times to execute the coroutine. step (int): increment iteration step, as with ``range()``. limit (int): concurrency execution limit. Defaults to 10. loop (asyncio.BaseEventLoop): optional event loop to use. Raises: TypeError: if coro is not a coroutine function. Returns: list: accumulated yielded values returned by coroutine. Usage:: async def mul_2(num): return num * 2 await paco.repeat(mul_2, times=5) # => [2, 4, 6, 8, 10]
codesearchnet
def handle_response(self, item_session: ItemSession) -> Actions:
    """Generic handler for a response.

    Consults the response hook and records the resulting status on the
    session.

    Returns:
        A value from :class:`.hook.Actions`.

    Raises:
        HookStop: if the script requested an immediate stop.
    """
    action = self.consult_response_hook(item_session)
    if action == Actions.STOP:
        raise HookStop('Script requested immediate stop.')
    if action == Actions.RETRY:
        item_session.set_status(Status.error)
    elif action == Actions.FINISH:
        item_session.set_status(Status.done)
    return action
Generic handler for a response. Returns: A value from :class:`.hook.Actions`.
codesearchnet
def on_train_batch_end(self, batch, logs=None):
    """Called at the end of a training batch in `fit` methods.

    Subclasses should override for any actions to run. Note that if the
    `steps_per_execution` argument to `compile` in `tf.keras.Model` is
    set to `N`, this method will only be called every `N` batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """
    # For backwards compatibility, delegate to the legacy hook.
    self.on_batch_end(batch, logs=logs)
Called at the end of a training batch in `fit` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `tf.keras.Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.
github-repos
def setOption(self, name, value):
    """Set an AMPL option to a specified value.

    Args:
        name: Name of the option to be set (alphanumeric without spaces).
        value: The value the option must be set to (bool, int, float or
            string).

    Raises:
        InvalidArgumet: if the option name is not valid.
        TypeError: if the value has an invalid type.
    """
    # NOTE: bool must be tested before int, since bool is an int subclass.
    if isinstance(value, bool):
        lock_and_call((lambda: self._impl.setBoolOption(name, value)), self._lock)
    elif isinstance(value, int):
        lock_and_call((lambda: self._impl.setIntOption(name, value)), self._lock)
    elif isinstance(value, float):
        lock_and_call((lambda: self._impl.setDblOption(name, value)), self._lock)
    elif isinstance(value, basestring):
        lock_and_call((lambda: self._impl.setOption(name, value)), self._lock)
    else:
        # Improvement: raise with context instead of a bare TypeError.
        raise TypeError(
            'Unsupported option value type: {}'.format(type(value).__name__))
Set an AMPL option to a specified value. Args: name: Name of the option to be set (alphanumeric without spaces). value: The value the option must be set to. Raises: InvalidArgumet: if the option name is not valid. TypeError: if the value has an invalid type.
codesearchnet
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
    """To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.

    Args:
        test_case (`unittest.TestCase`):
            The test that will run `target_func`.
        target_func (`Callable`):
            The function implementing the actual testing logic; it
            receives `(input_queue, output_queue, timeout)`.
        inputs (`dict`, *optional*, defaults to `None`):
            The inputs that will be passed to `target_func` through an
            (input) queue.
        timeout (`int`, *optional*, defaults to `None`):
            The timeout (in seconds) that will be passed to the input and
            output queues. If not specified, the env. variable
            `PYTEST_TIMEOUT` will be checked. If still `None`, its value
            will be set to `600`.
    """
    if timeout is None:
        timeout = int(os.environ.get('PYTEST_TIMEOUT', 600))
    # 'spawn' gives the child a fresh interpreter (required e.g. for CUDA).
    start_method = 'spawn'  # bug fix: variable was misspelled `start_methohd`
    ctx = multiprocessing.get_context(start_method)
    input_queue = ctx.Queue(1)
    output_queue = ctx.JoinableQueue(1)
    input_queue.put(inputs, timeout=timeout)
    process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
    process.start()
    try:
        results = output_queue.get(timeout=timeout)
        output_queue.task_done()
    except Exception as e:
        process.terminate()
        test_case.fail(e)
    process.join(timeout=timeout)
    if results['error'] is not None:
        # Bug fix: the original f-string nested the same quote character
        # (f'{results['error']}'), a SyntaxError before Python 3.12.
        test_case.fail(f"{results['error']}")
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue. Args: test_case (`unittest.TestCase`): The test that will run `target_func`. target_func (`Callable`): The function implementing the actual testing logic. inputs (`dict`, *optional*, defaults to `None`): The inputs that will be passed to `target_func` through an (input) queue. timeout (`int`, *optional*, defaults to `None`): The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env. variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
github-repos
def trace_format(self):
    """Retrieves the current format the trace buffer is using.

    Args:
        self (JLink): the ``JLink`` instance.

    Returns:
        The current format the trace buffer is using.  This is one of the
        attributes of ``JLinkTraceFormat``.

    Raises:
        JLinkException: if querying the DLL for the format fails.
    """
    fmt = ctypes.c_uint32(0)
    res = self._dll.JLINKARM_TRACE_Control(
        enums.JLinkTraceCommand.GET_FORMAT, ctypes.byref(fmt))
    # The DLL signals failure with a return value of 1.
    if res == 1:
        raise errors.JLinkException('Failed to get trace format.')
    return fmt.value
Retrieves the current format the trace buffer is using. Args: self (JLink): the ``JLink`` instance. Returns: The current format the trace buffer is using. This is one of the attributes of ``JLinkTraceFormat``.
codesearchnet
def dumps(xs, model=None, properties=False, indent=True, **kwargs):
    """Serialize Xmrs (or subclass) objects to PENMAN notation.

    Args:
        xs: iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to serialize
        model: Xmrs subclass used to get triples; defaults to the class of
            the first item in *xs*
        properties: if `True`, encode variable properties
        indent: if `True`, adaptively indent; if `False` or `None`, don't
            indent; if a non-negative integer N, indent N spaces per level

    Returns:
        the PENMAN serialization of *xs*
    """
    xs = list(xs)
    if not xs:
        return ''
    given_class = xs[0].__class__
    if model is None:
        model = given_class
    if not hasattr(model, 'to_triples'):
        raise TypeError(
            '{} class does not implement to_triples()'.format(model.__name__)
        )
    # Full MRS objects must be converted to the target model first; EDS can
    # only be serialized as EDS.
    class_name = given_class.__name__
    if class_name in ('Mrs', 'Xmrs'):
        xs = [model.from_xmrs(x, **kwargs) for x in xs]
    elif class_name == 'Eds' and model.__name__ != 'Eds':
        raise ValueError('Cannot convert EDS to non-EDS')
    codec = XMRSCodec()
    graphs = []
    for x in xs:
        triples = model.to_triples(x, properties=properties)
        graphs.append(codec.triples_to_graph(triples))
    # Legacy keyword 'pretty_print' overrides the indent argument.
    if 'pretty_print' in kwargs:
        indent = kwargs['pretty_print']
    return penman.dumps(graphs, cls=XMRSCodec, indent=indent)
Serialize Xmrs (or subclass) objects to PENMAN notation Args: xs: iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to serialize model: Xmrs subclass used to get triples properties: if `True`, encode variable properties indent: if `True`, adaptively indent; if `False` or `None`, don't indent; if a non-negative integer N, indent N spaces per level Returns: the PENMAN serialization of *xs*
juraj-google-style
def dispatch_callback(self, items):
    """Group request items by type and forward each batch to its handler.

    Incoming items (instances of the ``requests.*Request`` namedtuple
    classes) are grouped by class and dispatched in bulk to the matching
    handler method.  Nothing is dispatched when the manager is inactive.

    Args:
        items (Sequence): request objects such as ``requests.AckRequest``,
            ``requests.LeaseRequest``, ``requests.ModAckRequest``,
            ``requests.NackRequest`` or ``requests.DropRequest``.
    """
    if not self._manager.is_active:
        return
    batched_commands = collections.defaultdict(list)
    for item in items:
        batched_commands[item.__class__].append(item)
    _LOGGER.debug('Handling %d batched requests', len(items))
    # Preserve the original dispatch order: lease, modack, ack, nack, drop.
    handlers = (
        (requests.LeaseRequest, self.lease),
        (requests.ModAckRequest, self.modify_ack_deadline),
        (requests.AckRequest, self.ack),
        (requests.NackRequest, self.nack),
        (requests.DropRequest, self.drop),
    )
    for request_class, handler in handlers:
        if batched_commands[request_class]:
            handler(batched_commands.pop(request_class))
Map the callback request to the appropriate gRPC request. Args: action (str): The method to be invoked. kwargs (Dict[str, Any]): The keyword arguments for the method specified by ``action``. Raises: ValueError: If ``action`` isn't one of the expected actions "ack", "drop", "lease", "modify_ack_deadline" or "nack".
codesearchnet
def from_hoy(cls, hoy, leap_year=False):
    """Create a Ladybug DateTime from an hour of the year.

    Args:
        hoy: A float value for the hour of the year (0 <= hoy < 8760).
        leap_year: Boolean to indicate whether the datetime is for a leap
            year (default: False).
    """
    # Convert fractional hours to the nearest whole minute of the year.
    minute_of_year = round(hoy * 60)
    return cls.from_moy(minute_of_year, leap_year)
Create Ladybug Datetime from an hour of the year. Args: hoy: A float value for the hour of the year, where 0 <= hoy < 8760. leap_year: A boolean to indicate whether the value is for a leap year (default: False).
juraj-google-style
def log(self, level, msg, *args, **kwargs):
    """Logs a message at a certain level, substituting in the supplied arguments.

    For FATAL (and higher) records, a marker is added to the record's
    ``extra`` dict so downstream handling can tell real aborts apart.

    Args:
        level: int, the standard logging level at which to log the message.
        msg: str, the text of the message to log.
        *args: The arguments to substitute in the message.
        **kwargs: The keyword arguments to substitute in the message.
    """
    if level >= logging.FATAL:
        extra = kwargs.setdefault('extra', {})
        extra[_ABSL_LOG_FATAL] = True
    super(ABSLLogger, self).log(level, msg, *args, **kwargs)
Logs a message at a certain level, substituting in the supplied arguments. This method behaves differently in python and c++ modes. Args: level: int, the standard logging level at which to log the message. msg: str, the text of the message to log. *args: The arguments to substitute in the message. **kwargs: The keyword arguments to substitute in the message.
codesearchnet
class PatchTSMixerEncoderOutput(ModelOutput):
    """
    Base class for `PatchTSMixerEncoderOutput`, with potential hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`):
            Hidden-state at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Hidden-states of the model at the output of each layer.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
Base class for `PatchTSMixerEncoderOutput`, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`): Hidden-state at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer.
github-repos
def _from_compatible_tensor_list(self, tensor_list: List['core_types.Symbol']) -> Any:
    """Reconstructs a value from a compatible flat list of `tf.Tensor`.

    Args:
        tensor_list: A flat list of `tf.Tensor`, compatible with
            `self._flat_tensor_specs`.  (Caller is responsible for ensuring
            compatibility.)

    Returns:
        A value that is compatible with this `TypeSpec`.
    """
    # Re-nest the flat tensors according to the component spec structure,
    # then rebuild the value from its components.
    components = nest.pack_sequence_as(
        self._component_specs, tensor_list, expand_composites=True)
    return self._from_components(components)
Reconstructs a value from a compatible flat list of `tf.Tensor`. Args: tensor_list: A flat list of `tf.Tensor`, compatible with `self._flat_tensor_specs`. (Caller is responsible for ensuring compatibility.) Returns: A value that is compatible with this `TypeSpec`.
github-repos
def median(series):
    """Returns the median value of a series.

    Non-numeric series have no meaningful median, so NaN is returned for
    them.

    Args:
        series (pandas.Series): column to summarize.
    """
    if not np.issubdtype(series.dtype, np.number):
        return np.nan
    return series.median()
Returns the median value of a series. Args: series (pandas.Series): column to summarize.
juraj-google-style
def _AddAttribute(self, attribute): if attribute.identifier in self._attributes: raise KeyError(( 'Volume attribute object already set for volume attribute ' 'identifier: {0:s}.').format(attribute.identifier)) self._attributes[attribute.identifier] = attribute
Adds an attribute. Args: attribute (VolumeAttribute): a volume attribute. Raises: KeyError: if volume attribute is already set for the corresponding volume attribute identifier.
juraj-google-style
def Close(self):
    """Closes the database file object and removes the temporary file.

    Raises:
        IOError: if the close failed.
        OSError: if the close failed.
    """
    connection = self._connection
    if connection:
        self._cursor = None
        connection.close()
        self._connection = None
    # Best effort: the temporary file may already be gone.
    try:
        os.remove(self._temp_file_path)
    except (IOError, OSError):
        pass
    self._temp_file_path = ''
Closes the database file object. Raises: IOError: if the close failed. OSError: if the close failed.
codesearchnet
def __init__(self, lang='en', lower=True, charset=None):
    """Initializes a character-level tokenizer that encodes text into
    `(samples, characters)`.

    Args:
        lang: The spacy language to use. (Default value: 'en')
        lower: Lower cases the tokens if True. (Default value: True)
        charset: The character set to use. For example `charset = 'abc123'`.
            If None, all characters will be used. (Default value: None)
    """
    super(CharTokenizer, self).__init__(lang, lower)
    self.charset = charset
Encodes text into `(samples, characters)` Args: lang: The spacy language to use. (Default value: 'en') lower: Lower cases the tokens if True. (Default value: True) charset: The character set to use. For example `charset = 'abc123'`. If None, all characters will be used. (Default value: None)
juraj-google-style
def libravatar_url(email=None, openid=None, size=64, default='retro'):
    """Get the URL to an avatar from libravatar.

    Either the user's email or openid must be provided.

    If you want to use Libravatar federation (through DNS), you should
    install and use the ``libravatar`` library instead.

    Args:
        email (str): The user's email
        openid (str): The user's OpenID
        size (int): Size of the avatar in pixels (it's a square).
        default (str): Default avatar to return if not found.

    Returns:
        str: The URL to the avatar image.

    Raises:
        ValueError: If neither email nor openid are provided.
    """
    params = collections.OrderedDict([('s', size), ('d', default)])
    query = parse.urlencode(params)
    if email:
        value = email
    elif openid:
        value = openid
    else:
        raise ValueError('You must provide either the email or the openid.')
    idhash = sha256(value.encode('utf-8')).hexdigest()
    # The return statement was truncated in the original; this is the
    # canonical libravatar CDN avatar URL.
    return 'https://seccdn.libravatar.org/avatar/{idhash}?{query}'.format(
        idhash=idhash, query=query)
Get the URL to an avatar from libravatar. Either the user's email or openid must be provided. If you want to use Libravatar federation (through DNS), you should install and use the ``libravatar`` library instead. Check out the ``libravatar.libravatar_url()`` function. Args: email (str): The user's email openid (str): The user's OpenID size (int): Size of the avatar in pixels (it's a square). default (str): Default avatar to return if not found. Returns: str: The URL to the avatar image. Raises: ValueError: If neither email nor openid are provided.
codesearchnet
def os_deployment_servers(self):
    """Gets the Os Deployment Servers API client.

    The client is created lazily on first access and cached afterwards.

    Returns:
        OsDeploymentServers: the cached API client.
    """
    # Lazily instantiate and memoize the client.
    if not self.__os_deployment_servers:
        self.__os_deployment_servers = OsDeploymentServers(self.__connection)
    return self.__os_deployment_servers
Gets the Os Deployment Servers API client. Returns: OsDeploymentServers:
codesearchnet
class StandardInputStep(Step):
    """Step with a standard implementation of input handling.

    Args:
        dataset_fn: a function that returns a tf.data Dataset that produces
            the input for the model.
        distribution: the distribution object used to build the input
            iterator (presumably a tf.distribute strategy -- TODO confirm).
    """

    def __init__(self, dataset_fn, distribution):
        super(StandardInputStep, self).__init__(distribution)
        # The lambda discards the input-context argument that
        # make_input_fn_iterator passes to the input function.
        self._iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())

    def initialize(self):
        # Returns the iterator's initializer so callers can run it first.
        return self._iterator.initializer
Step with a standard implementation of input handling. Args: dataset_fn: a function that returns a tf.data Dataset that produces the input for the model.
github-repos
def create_webdriver(self, testname=None):
    """Creates an instance of Selenium webdriver based on config settings.

    This should only be called by a shutdown hook. Do not call directly
    within a test.

    Kwargs:
        testname: Optional test name to pass; this gets appended to the
            test name sent to selenium grid.

    Returns:
        WebDriver - Selenium Webdriver instance.
    """
    try:
        driver_type = self._config_reader.get(self.DRIVER_TYPE_CONFIG)
    except Exception:
        # Missing/invalid config entry: fall back to a local driver.
        driver_type = self.DRIVER_TYPE_LOCAL
        _wtflog.warn('%s setting is missing from config. Using default setting, %s',
                     self.DRIVER_TYPE_CONFIG, driver_type)
    if driver_type == self.DRIVER_TYPE_REMOTE:
        self.webdriver = self.__create_remote_webdriver_from_config(
            testname=testname)
    else:
        self.webdriver = self.__create_driver_from_browser_config()
    # Try to maximize the window; retry once after a brief pause since some
    # drivers are not immediately ready.
    try:
        self.webdriver.maximize_window()
    except Exception:
        time.sleep(self._timeout_mgr.BRIEF)
        try:
            self.webdriver.maximize_window()
        except Exception as e:
            if isinstance(e, WebDriverException) and 'implemented' in e.msg.lower():
                # Some drivers do not implement window maximize; ignore.
                pass
            else:
                _wtflog.warn(('Unable to maximize browser window. '
                              'It may be possible the browser did not '
                              'instantiate correctly. %s'), e)
    return self.webdriver
Creates an instance of Selenium webdriver based on config settings. This should only be called by a shutdown hook. Do not call directly within a test. Kwargs: testname: Optional test name to pass, this gets appended to the test name sent to selenium grid. Returns: WebDriver - Selenium Webdriver instance.
codesearchnet
def remove_empty_text(utterances: List[Utterance]) -> List[Utterance]:
    """Remove utterances whose text is empty or whitespace-only.

    Args:
        utterances: The list of utterances we are processing.

    Returns:
        A new list containing only the utterances with non-blank text.
    """
    kept = []
    for utterance in utterances:
        if utterance.text.strip():
            kept.append(utterance)
    return kept
Remove empty utterances from a list of utterances Args: utterances: The list of utterance we are processing
juraj-google-style
def enable_inheritance(path, objectType, clear=False):
    """Enable inheritance on an object.

    Args:
        path: The path to the object.
        objectType: The type of object (FILE, DIRECTORY, REGISTRY).
        clear: True will remove non-Inherited ACEs from the ACL.

    Returns (dict): A dictionary containing the results.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' win_dacl.enable_inheritance c:\\temp directory
    """
    constants = daclConstants()
    object_type_bit = constants.getObjectTypeBit(objectType)
    normalized_path = constants.processPath(path, object_type_bit)
    return _set_dacl_inheritance(normalized_path, object_type_bit, True, None, clear)
Enable inheritance on an object. Args: path: The path to the object. objectType: The type of object (FILE, DIRECTORY, REGISTRY). clear: True will remove non-Inherited ACEs from the ACL. Returns (dict): A dictionary containing the results. CLI Example: .. code-block:: bash salt 'minion-id' win_dacl.enable_inheritance c:\temp directory
juraj-google-style
def add_signature(name=None, inputs=None, outputs=None):
    """Adds a signature to the module definition.

    NOTE: This must be called within a `module_fn` that is defining a Module.

    Args:
        name: Signature name as a string. If omitted, it is interpreted as
            'default' and is the signature used when `Module.__call__`
            `signature` is not specified.
        inputs: A dict from input name to Tensor or SparseTensor to feed when
            applying the signature. If a single tensor is passed, it is
            interpreted as a dict with a single 'default' entry.
        outputs: A dict from output name to Tensor or SparseTensor to return
            from applying the signature. If a single tensor is passed, it is
            interpreted as a dict with a single 'default' entry.

    Raises:
        ValueError: if the arguments are invalid.
    """
    name = name or 'default'
    inputs = {} if inputs is None else inputs
    outputs = {} if outputs is None else outputs
    # Single tensors are shorthand for a one-entry dict.
    if not isinstance(inputs, dict):
        inputs = {'default': inputs}
    if not isinstance(outputs, dict):
        outputs = {'default': outputs}
    # Multi-valued op inputs are only logged; colocation errors are fatal.
    message = find_signature_inputs_from_multivalued_ops(inputs)
    if message:
        logging.error(message)
    message = find_signature_input_colocation_error(name, inputs)
    if message:
        raise ValueError(message)
    saved_model_lib.add_signature(name, inputs, outputs)
Adds a signature to the module definition. NOTE: This must be called within a `module_fn` that is defining a Module. Args: name: Signature name as a string. If omitted, it is interpreted as 'default' and is the signature used when `Module.__call__` `signature` is not specified. inputs: A dict from input name to Tensor or SparseTensor to feed when applying the signature. If a single tensor is passed, it is interpreted as a dict with a single 'default' entry. outputs: A dict from output name to Tensor or SparseTensor to return from applying the signature. If a single tensor is passed, it is interpreted as a dict with a single 'default' entry. Raises: ValueError: if the arguments are invalid.
codesearchnet
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """
    [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts
    generating using `begin_index` tokens. This should ensure that the tokens defined by
    `begin_suppress_tokens` are not sampled at the beginning of the generation.

    Args:
        begin_suppress_tokens (`List[int]`): Tokens to not sample.
        begin_index (`int`): Index where the tokens are suppressed.
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        # apply_penalty is 1 only when cur_len == begin_index:
        # (cur_len - begin_index) == 0 -> jnp.bool_(0) is False -> 1 - 0 == 1.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        # At the begin step, force the suppressed tokens' scores to -inf;
        # otherwise leave scores unchanged.
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float('inf')), scores)
        return scores
[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the beginning of the generation. Args: begin_suppress_tokens (`List[int]`): Tokens to not sample. begin_index (`int`): Index where the tokens are suppressed.
github-repos
def Scripts(unicode_dir=_UNICODE_DIR):
    """Returns dict mapping script names to code lists.

    Args:
        unicode_dir: Unicode data directory.

    Returns:
        dict mapping script names to code lists.
    """
    script_map = {}

    def HandleLine(codes, fields):
        # Each line carries (code range, script name); accumulate codes
        # under the script name.
        _, script_name = fields
        script_map.setdefault(script_name, []).extend(codes)

    ReadUnicodeTable(unicode_dir + "/Scripts.txt", 2, HandleLine)
    return script_map
Returns dict mapping script names to code lists. Args: unicode_dir: Unicode data directory Returns: dict mapping script names to code lists
juraj-google-style
def with_accounted_types(self, account_type_regexes):
    """Selectively counting statistics based on node types.

    Here, 'types' means the profiler nodes' properties. The profiler by
    default considers device name (e.g. /job:xx/.../device:GPU:0) and
    operation type (e.g. MatMul) as profiler nodes' properties. User can
    also associate customized 'types' to profiler nodes through OpLogProto
    proto.

    For example, user can select profiler nodes placed on gpu:0 with:
    `account_type_regexes=['.*gpu:0.*']`

    If none of a node's properties match the specified regexes, the node is
    not displayed nor accounted.

    Args:
        account_type_regexes: A list of regexes specifying the types.

    Returns:
        self.
    """
    # Store a shallow copy so later mutation of the caller's list does not
    # change the recorded option.
    regexes_copy = copy.copy(account_type_regexes)
    self._options['account_type_regexes'] = regexes_copy
    return self
Selectively counting statistics based on node types. Here, 'types' means the profiler nodes' properties. Profiler by default consider device name (e.g. /job:xx/.../device:GPU:0) and operation type (e.g. MatMul) as profiler nodes' properties. User can also associate customized 'types' to profiler nodes through OpLogProto proto. For example, user can select profiler nodes placed on gpu:0 with: `account_type_regexes=['.*gpu:0.*']` If none of a node's properties match the specified regexes, the node is not displayed nor accounted. Args: account_type_regexes: A list of regexes specifying the types. Returns: self.
github-repos
def list_container_instance_groups(access_token, subscription_id, resource_group):
    """List the container groups in a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.

    Returns:
        HTTP response. JSON list of container groups and their properties.
    """
    endpoint = (
        '{base}/subscriptions/{sub}/resourcegroups/{rg}'
        '/providers/Microsoft.ContainerInstance/ContainerGroups'
        '?api-version={api}'
    ).format(base=get_rm_endpoint(), sub=subscription_id,
             rg=resource_group, api=CONTAINER_API)
    return do_get(endpoint, access_token)
List the container groups in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON list of container groups and their properties.
codesearchnet
def data_vectors(self):
    """The per-sample data in a vector.

    Returns:
        dict: A dict where the keys are the fields in the record (except
        'sample') and the values are the corresponding arrays.
    """
    # Every structured-array field except the samples themselves.
    field_names = self.record.dtype.names
    return {name: self.record[name]
            for name in field_names if name != 'sample'}
The per-sample data in a vector. Returns: dict: A dict where the keys are the fields in the record and the values are the corresponding arrays. Examples: >>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN, energy=[-1, 1]) >>> sampleset.data_vectors['energy'] array([-1, 1]) Note that this is equivalent to, and less performant than: >>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN, energy=[-1, 1]) >>> sampleset.record['energy'] array([-1, 1])
codesearchnet
def download_data_impl(self, run, tag, response_format):
    """Provides a response for downloading scalars data for a data series.

    Args:
        run: The run.
        tag: The specific tag.
        response_format: A string. One of the values of the OutputFormat
            enum of the scalar plugin.

    Raises:
        ValueError: If the scalars plugin is not registered.

    Returns:
        2 entities:
            - A JSON object response body.
            - A mime type (string) for the response.
    """
    scalars_plugin_instance = self._get_scalars_plugin()
    if not scalars_plugin_instance:
        raise ValueError(('Failed to respond to request for /download_data. '
                          'The scalars plugin is oddly not registered.'))
    # scalars_impl already returns the (body, mime_type) pair.
    return scalars_plugin_instance.scalars_impl(tag, run, None, response_format)
Provides a response for downloading scalars data for a data series. Args: run: The run. tag: The specific tag. response_format: A string. One of the values of the OutputFormat enum of the scalar plugin. Raises: ValueError: If the scalars plugin is not registered. Returns: 2 entities: - A JSON object response body. - A mime type (string) for the response.
juraj-google-style
def log(msg, level=0):
    """Logs a message to the console, with optional level parameter.

    Args:
        msg (str): message to send to console.
        level (int): log level; 0 for info, 1 for error (default = 0).
    """
    red = '\033[91m'
    endc = '\033[0m'
    base_fmt = '[%(levelname)s]: %(asctime)s - %(message)s'
    date_fmt = '%x %X'
    cfg = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'stdout': {'format': base_fmt, 'datefmt': date_fmt},
            # Errors are wrapped in ANSI red.
            'stderr': {'format': red + base_fmt + endc, 'datefmt': date_fmt},
        },
        'handlers': {
            'stdout': {
                'class': 'logging.StreamHandler',
                'level': 'DEBUG',
                'formatter': 'stdout',
            },
            'stderr': {
                'class': 'logging.StreamHandler',
                'level': 'ERROR',
                'formatter': 'stderr',
            },
        },
        'loggers': {
            'info': {'handlers': ['stdout'], 'level': 'INFO', 'propagate': True},
            'error': {'handlers': ['stderr'], 'level': 'ERROR', 'propagate': False},
        },
    }
    dictConfig(cfg)
    if level == 0:
        logger_name, numeric_level = 'info', logging.INFO
    else:
        logger_name, numeric_level = 'error', logging.ERROR
    logging.getLogger(logger_name).log(numeric_level, msg)
Logs a message to the console, with optional level paramater Args: - msg (str): message to send to console - level (int): log level; 0 for info, 1 for error (default = 0)
juraj-google-style
def _get_ngrams(segment, max_order): ngram_counts = collections.Counter() for order in range(1, max_order + 1): for i in range(0, len(segment) - order + 1): ngram = tuple(segment[i:i + order]) ngram_counts[ngram] += 1 return ngram_counts
Extracts all n-grams up to a given maximum order from an input segment. Args: segment: text segment from which n-grams will be extracted. max_order: maximum length in tokens of the n-grams returned by this methods. Returns: The Counter containing all n-grams up to max_order in segment with a count of how many times each n-gram occurred.
juraj-google-style
def append(self, species, coords, validate_proximity=True, properties=None):
    """Appends a site to the molecule.

    Args:
        species: Species of inserted site.
        coords: Coordinates of inserted site.
        validate_proximity (bool): Whether to check if inserted site is too
            close to an existing site. Defaults to True.
        properties (dict): A dict of properties for the Site.

    Returns:
        New molecule with inserted site.
    """
    # Appending is just inserting at the end.
    insertion_index = len(self)
    return self.insert(insertion_index, species, coords,
                       validate_proximity=validate_proximity,
                       properties=properties)
Appends a site to the molecule. Args: species: Species of inserted site coords: Coordinates of inserted site validate_proximity (bool): Whether to check if inserted site is too close to an existing site. Defaults to True. properties (dict): A dict of properties for the Site. Returns: New molecule with inserted site.
codesearchnet