code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def dump(self, conf_file=None):
    """Serialize the merged configuration to a JSON file.

    CLI-provided path options are rewritten relative to the output
    file's directory so the dumped config stays relocatable.

    Args:
        conf_file: str, the destination, or None to overwrite the
            existing configuration (falling back to 'hotdoc.json').
    """
    if conf_file:
        conf_dir = os.path.dirname(conf_file)
        if not conf_dir:
            # Bare filename: resolve relative to the invocation directory.
            conf_dir = self.__invoke_dir
        elif not os.path.exists(conf_dir):
            os.makedirs(conf_dir)
    else:
        conf_dir = self.__conf_dir
    final_conf = {}
    # Config-file values are kept only when not overridden on the CLI.
    for key, value in list(self.__config.items()):
        if key in self.__cli:
            continue
        final_conf[key] = value
    for key, value in list(self.__cli.items()):
        if key.endswith('index') or key in ['sitemap', 'output']:
            # Single-path options: store relative to the dump location.
            path = self.__abspath(value, from_conf=False)
            if path:
                relpath = os.path.relpath(path, conf_dir)
                final_conf[key] = relpath
        elif key.endswith('sources') or key.endswith('source_filters'):
            # List-of-paths options: relativize each entry.
            new_list = []
            for path in value:
                path = self.__abspath(path, from_conf=False)
                if path:
                    relpath = os.path.relpath(path, conf_dir)
                    new_list.append(relpath)
            final_conf[key] = new_list
        elif key not in ['command', 'output_conf_file']:
            # Transient CLI keys are never persisted.
            final_conf[key] = value
    with open(conf_file or self.conf_file or 'hotdoc.json', 'w') as _:
        _.write(json.dumps(final_conf, sort_keys=True, indent=4))
Dump the possibly updated config to a file. Args: conf_file: str, the destination, or None to overwrite the existing configuration.
juraj-google-style
def update_version_in_examples(version: str):
    """Rewrite the version string in every example script.

    Walks ``PATH_TO_EXAMPLES`` recursively, skipping any ``legacy``
    subtree, and updates each ``.py`` file in place.

    Args:
        version (`str`): The new version to set in the examples.
    """
    for root, subdirs, filenames in os.walk(PATH_TO_EXAMPLES):
        # Pruning the list in place stops os.walk from descending into it.
        if 'legacy' in subdirs:
            subdirs.remove('legacy')
        for name in (f for f in filenames if f.endswith('.py')):
            update_version_in_file(os.path.join(root, name), version, file_type='examples')
Update the version in all examples files. Args: version (`str`): The new version to set in the examples.
github-repos
def scope(self):
    """Returns a context manager selecting this Strategy as current.

    Inside a ``with strategy.scope():`` block this thread uses the
    strategy's variable creator and enters its cross-replica context.

    Returns:
      A context manager to use for creating variables with this strategy.
    """
    # Delegates entirely to the base Strategy implementation.
    return super(OneDeviceStrategy, self).scope()
Returns a context manager selecting this Strategy as current. Inside a `with strategy.scope():` code block, this thread will use a variable creator set by `strategy`, and will enter its "cross-replica context". In `OneDeviceStrategy`, all variables created inside `strategy.scope()` will be on `device` specified at strategy construction time. See example in the docs for this class. Returns: A context manager to use for creating variables with this strategy.
github-repos
def to_binary(self, copy=False):
    """Return an equivalent polynomial over binary `{0, 1}` variables.

    Args:
        copy (optional, default=False): If True, always return a copy.
            Otherwise, a polynomial that is already binary-valued is
            returned as-is.

    Returns:
        :obj:`.BinaryPolynomial`
    """
    if self.vartype is Vartype.BINARY:
        return self.copy() if copy else self
    converted = BinaryPolynomial({}, Vartype.BINARY)
    for term, bias in self.items():
        # Expand each spin term over every subset of its variables,
        # with inclusion-exclusion signs and powers of two as weights.
        for subset in map(frozenset, powerset(term)):
            contribution = bias * 2**len(subset) * (-1)**(len(term) - len(subset))
            if subset in converted:
                converted[subset] += contribution
            else:
                converted[subset] = contribution
    return converted
Return a binary polynomial over `{0, 1}` variables. Args: copy (optional, default=False): If True, the returned polynomial is always a copy. Otherwise, if the polynomial is binary-valued already it returns itself. Returns: :obj:`.BinaryPolynomial`
juraj-google-style
def deserialize(name, custom_objects=None):
    """Deserializes a serialized loss class/function instance.

    Args:
        name: Loss configuration.
        custom_objects: Optional dictionary mapping names (strings) to
            custom objects (classes and functions) to be considered
            during deserialization.

    Returns:
        A Keras `Loss` instance or a loss function.
    """
    # Resolve against this module's globals so built-in losses are found.
    module_objects = globals()
    return deserialize_keras_object(
        name,
        module_objects=module_objects,
        custom_objects=custom_objects,
        printable_module_name='loss function',
    )
Deserializes a serialized loss class/function instance. Args: name: Loss configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras `Loss` instance or a loss function.
github-repos
def connections(self, origin, destination, dt=None, only_direct=False):
    """Find connections between two stations.

    Args:
        origin (str): origin station
        destination (str): destination station
        dt (datetime): date and time for the query; defaults to the
            current time when None. (BUG FIX: the original used
            ``dt=datetime.now()`` as the default, which is evaluated
            once at import time, not per call.)
        only_direct (bool): only direct connections

    Returns:
        The parsed connections from the response body.
    """
    if dt is None:
        dt = datetime.now()
    query = {
        'S': origin,
        'Z': destination,
        'date': dt.strftime("%d.%m.%y"),
        'time': dt.strftime("%H:%M"),
        'start': 1,
        'REQ0JourneyProduct_opt0': 1 if only_direct else 0,
    }
    # BUG FIX: the original line had a truncated URL literal and never
    # sent the query parameters. The endpoint below is the bahn.de mobile
    # query service; TODO(review): confirm the URL against the upstream API.
    rsp = requests.get('http://mobile.bahn.de/bin/mobil/query.exe/dox',
                       params=query)
    return parse_connections(rsp.text)
Find connections between two stations Args: origin (str): origin station destination (str): destination station dt (datetime): date and time for query only_direct (bool): only direct connections
juraj-google-style
def join(input_layer, others, include_self=True, join_function=None):
    """Joins the provided PrettyTensors with this one using the join function.

    Args:
        input_layer: The input layer for this op.
        others: Sequence of PrettyTensor objects.
        include_self: Whether the result includes this layer, or is derived
            only from `others`.
        join_function: The function to use for joining; must accept a list
            of tensors. Use None for concat on the final dimension.

    Returns:
        self.
    """
    tensors = [input_layer, *others] if include_self else others
    return prettytensor.join_pretty_tensors(tensors, input_layer, join_function)
Joins the provided PrettyTensors with this using the join function. Args: input_layer: The input layer for this op. others: Sequence of PrettyTensor objects. include_self: Whether or not this includes itself or if the value is only derived from others. join_function: The function to use for joining, must accept a list of tensors. Use None for concat on the final dimension. Returns: self.
juraj-google-style
def to_representation(self, instance):
    """Serialize the EnterpriseCustomerCatalog object.

    Arguments:
        instance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize.

    Returns:
        dict: The EnterpriseCustomerCatalog converted to a dict, with
        paginated search results, enrollment URLs and pagination links
        attached.
    """
    request = self.context['request']
    enterprise_customer = instance.enterprise_customer

    representation = super(EnterpriseCustomerCatalogDetailSerializer, self).to_representation(instance)

    # Retrieve the requested page of catalog content.
    paginated_content = instance.get_paginated_content(request.GET)
    count = paginated_content['count']
    search_results = paginated_content['results']

    for item in search_results:
        content_type = item['content_type']
        marketing_url = item.get('marketing_url')
        if marketing_url:
            # Tag outbound marketing links with enterprise UTM context.
            item['marketing_url'] = utils.update_query_parameters(marketing_url, utils.get_enterprise_utm_context(enterprise_customer))
        # Attach a content-type-appropriate enrollment URL to each result.
        if (content_type == 'course'):
            item['enrollment_url'] = instance.get_course_enrollment_url(item['key'])
        if (content_type == 'courserun'):
            item['enrollment_url'] = instance.get_course_run_enrollment_url(item['key'])
        if (content_type == 'program'):
            item['enrollment_url'] = instance.get_program_enrollment_url(item['uuid'])

    # Build previous/next page links from the current request URI.
    previous_url = None
    next_url = None
    page = int(request.GET.get('page', '1'))
    request_uri = request.build_absolute_uri()
    if paginated_content['previous']:
        previous_url = utils.update_query_parameters(request_uri, {'page': (page - 1)})
    if paginated_content['next']:
        next_url = utils.update_query_parameters(request_uri, {'page': (page + 1)})

    representation['count'] = count
    representation['previous'] = previous_url
    representation['next'] = next_url
    representation['results'] = search_results

    return representation
Serialize the EnterpriseCustomerCatalog object. Arguments: instance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize. Returns: dict: The EnterpriseCustomerCatalog converted to a dict.
codesearchnet
def __init__(self, executable):
    """Create object describing an executable program.

    Args:
        executable: Program to execute (string).
    """
    base = os.path.basename(executable)
    root, extension = os.path.splitext(base)
    self.long_name = executable
    self.name = base
    self.short_name = root
    self.ext = extension
    # Resolve symlinks etc. to the real executable path.
    self.executable = GetRealPath(executable)
    self.output = []
    self.desc = []
    self.modules = {}
    self.module_list = []
    # Record when this object was created.
    self.date = time.localtime(time.time())
Create object with executable. Args: executable (string): Program to execute.
juraj-google-style
def IsCppString(line):
    """Does line terminate so that the next symbol is in a string constant?

    This function does not consider single-line nor multi-line comments.

    Args:
        line: is a partial line of code starting from the 0..n.

    Returns:
        True, if the next character appended to 'line' would be inside a
        string constant.
    """
    # Collapse escaped backslashes first so that \\" is not mistaken for
    # an escaped quote.
    sanitized = line.replace('\\\\', 'XX')
    quotes = sanitized.count('"')
    escaped_quotes = sanitized.count('\\"')
    char_literal_quotes = sanitized.count('\'"\'')
    # An odd number of unescaped, non-char-literal quotes means we end
    # inside a string.
    return ((quotes - escaped_quotes - char_literal_quotes) & 1) == 1
Does line terminate so, that the next symbol is in string constant. This function does not consider single-line nor multi-line comments. Args: line: is a partial line of code starting from the 0..n. Returns: True, if next character appended to 'line' is inside a string constant.
codesearchnet
def set_reboot_required_witnessed():
    """Remember that an event indicating a required reboot was witnessed.

    Writes a witness file to a temporary filesystem so the marker is
    cleared automatically upon reboot.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    .. code-block:: bash

        salt '*' system.set_reboot_required_witnessed
    """
    witness_dir = os.path.dirname(NILRT_REBOOT_WITNESS_PATH)
    if not os.path.exists(witness_dir):
        try:
            os.makedirs(witness_dir)
        except OSError as ex:
            raise SaltInvocationError('Error creating {0} (-{1}): {2}'.format(witness_dir, ex.errno, ex.strerror))
    # Touch the witness file via the salt cmd module so ownership/context
    # follow the minion's execution environment.
    result = __salt__['cmd.run_all']('touch {0}'.format(NILRT_REBOOT_WITNESS_PATH))
    return result['retcode'] == 0
This function is used to remember that an event indicating that a reboot is required was witnessed. This function writes to a temporary filesystem so the event gets cleared upon reboot. Returns: bool: ``True`` if successful, otherwise ``False`` .. code-block:: bash salt '*' system.set_reboot_required_witnessed
codesearchnet
def _parse_signed_int_components(buf):
    """Parse the remainder of a file-like object as a signed magnitude value.

    Args:
        buf: A file-like object positioned at the first octet.

    Returns:
        A pair of the sign bit and the unsigned magnitude.
    """
    sign_bit = 0
    value = 0
    is_first_octet = True
    while True:
        data = buf.read(1)
        if data == b'':
            break
        octet = ord(data)
        if is_first_octet:
            # The most significant bit of the first octet carries the sign.
            if octet & _SIGNED_INT_SIGN_MASK:
                sign_bit = 1
            value = octet & _SIGNED_INT_SIGN_VALUE_MASK
            is_first_octet = False
        else:
            value = (value << 8) | octet
    return sign_bit, value
Parses the remainder of a file-like object as a signed magnitude value. Returns: Returns a pair of the sign bit and the unsigned magnitude.
codesearchnet
def app(environ, start_response):
    """The WSGI Application Server.

    Arguments:
        environ {dict} -- The WSGI environ dictionary
        start_response {WSGI callable}

    Returns:
        WSGI Response (an iterable of bytes)
    """
    from wsgi import container

    # Make the raw WSGI environ available to the IoC container.
    container.bind('Environ', environ)

    try:
        # Boot every registered WSGI provider for this request.
        for provider in container.make('WSGIProviders'):
            container.resolve(provider.boot)
    except Exception as e:
        # Delegate boot failures to the framework's exception handler so a
        # proper error response is rendered instead of crashing the worker.
        container.make('ExceptionHandler').load_exception(e)

    start_response(container.make('Request').get_status_code(),
                   container.make('Request').get_and_reset_headers())

    return iter([bytes(container.make('Response'), 'utf-8')])
The WSGI Application Server. Arguments: environ {dict} -- The WSGI environ dictionary start_response {WSGI callable} Returns: WSGI Response
juraj-google-style
def apply_channel_shift(x, intensity, channel_axis=0):
    """Performs a channel shift.

    DEPRECATED.

    Args:
        x: Input tensor. Must be 3D.
        intensity: Transformation intensity.
        channel_axis: Index of axis for channels in the input tensor.

    Returns:
        Numpy image tensor.
    """
    # Move the channel axis to the front so channels can be iterated.
    x = np.rollaxis(x, channel_axis, 0)
    lo, hi = np.min(x), np.max(x)
    # Shift every channel, clipping back into the original value range.
    shifted_channels = [np.clip(channel + intensity, lo, hi) for channel in x]
    x = np.stack(shifted_channels, axis=0)
    # Restore the original axis layout.
    return np.rollaxis(x, 0, channel_axis + 1)
Performs a channel shift. DEPRECATED. Args: x: Input tensor. Must be 3D. intensity: Transformation intensity. channel_axis: Index of axis for channels in the input tensor. Returns: Numpy image tensor.
github-repos
async def addNodes(self, nodedefs):
    """Quickly add/modify a list of nodes from node definition tuples.

    This API is the simplest/fastest way to add nodes, set node props,
    and add tags to nodes remotely.

    Args:
        nodedefs (list): A list of node definition tuples of the form
            ``((form, valu), {'props': {}, 'tags': {}})``; the "props"
            and "tags" keys may be omitted.

    Yields:
        The nodes added or modified by the snap.
    """
    async with await self.snap() as snap:
        # Non-strict mode: individual bad nodedefs are skipped rather
        # than aborting the whole batch.
        snap.strict = False
        async for node in snap.addNodes(nodedefs):
            yield node
Quickly add/modify a list of nodes from node definition tuples. This API is the simplest/fastest way to add nodes, set node props, and add tags to nodes remotely. Args: nodedefs (list): A list of node definition tuples. See below. A node definition tuple is defined as: ((form, valu), {'props': {}, 'tags': {}}) The "props" or "tags" keys may be omitted.
juraj-google-style
def __getitem__(self, key: Union[Tuple[int, int], Tuple[str, str], Tuple[Node, Node]]) -> Optional[Edge]:
    """Returns the edge corresponding to the given key.

    The key is a pair of nodes, node indexes or node names; the edge
    connecting the two identified nodes is returned if such an edge
    exists.

    Arguments:
        key (Union[Tuple[int, int], Tuple[str, str], Tuple[Node, Node]]):
            The key identifying the edge to return.

    Raises:
        ValueError: If the key is not a supported pair of identifiers.
    """
    first, second = key[0], key[1]
    if isinstance(first, Node) and isinstance(second, Node):
        return self.get_edge(first, second)
    if isinstance(first, int) and isinstance(second, int):
        return self.get_edge_by_index(first, second)
    if isinstance(first, str) and isinstance(second, str):
        return self.get_edge_by_name(first, second)
    raise ValueError("Invalid edge key: {}".format(key))
Returns the edge corresponding to the given key. If the given key is a tuple of nodes or node indexes, then the edge connecting the two nodes will be returned if such an edge exists. If the given key is a tuple of node names, then the edge connecting the corresponding nodes will be returned if such an edge exists. Arguments: key (Union[Tuple[int, int], Tuple[str, str], Tuple[Node, Node]]): The key identifying the edge to return.
juraj-google-style
def epoch_to_human_time(epoch_time):
    """Converts an epoch timestamp to human readable time.

    This essentially converts an output of get_current_epoch_time to an
    output of get_current_human_time.

    Args:
        epoch_time: An integer representing an epoch timestamp in
            milliseconds.

    Returns:
        A time string representing the input time. None if the input
        param is invalid.
    """
    if not isinstance(epoch_time, int):
        # Documented contract: invalid input yields None.
        return None
    try:
        moment = datetime.datetime.fromtimestamp(epoch_time / 1000)
    except ValueError:
        return None
    return moment.strftime('%m-%d-%Y %H:%M:%S ')
Converts an epoch timestamp to human readable time. This essentially converts an output of get_current_epoch_time to an output of get_current_human_time Args: epoch_time: An integer representing an epoch timestamp in milliseconds. Returns: A time string representing the input time. None if input param is invalid.
codesearchnet
def pad(boxes, top, left, height=None, width=None, bounding_box_format='xyxy'):
    """Pads bounding boxes by adding top and left offsets.

    Args:
        boxes: Bounding boxes to pad. Shape `(N, 4)` or `(batch, N, 4)`.
        top: Vertical padding to add.
        left: Horizontal padding to add.
        height: Image height. Defaults to None; unused by the current
            implementation.
        width: Image width. Defaults to None; unused by the current
            implementation.
        bounding_box_format: The format of the input bounding boxes; only
            `"xyxy"` is supported.

    Returns:
        Padded bounding boxes in the original format.

    Raises:
        NotImplementedError: If `bounding_box_format` is not `"xyxy"`.
    """
    if bounding_box_format != 'xyxy':
        raise NotImplementedError
    box_utils = BoundingBox()
    # When tracing inside a tf.Graph, force the TF backend so the ops are
    # graph-compatible, then restore the default backend afterwards.
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend('tensorflow')
    outputs = box_utils.pad(boxes, top, left)
    box_utils.backend.reset()
    return outputs
Pads bounding boxes by adding top and left offsets. This function adds padding to the bounding boxes by increasing the 'top' and 'left' coordinates by the specified amounts. The method assume the input bounding_box_format is `xyxy`. Args: boxes: Bounding boxes to pad. Shape `(N, 4)` or `(batch, N, 4)`. top: Vertical padding to add. left: Horizontal padding to add. height: Image height. Defaults to None. width: Image width. Defaults to None. bounding_box_format: The format of the input bounding boxes. Defaults to `"xyxy"`. Returns: Padded bounding boxes in the original format.
github-repos
def SetPermissions(path, mode=None, uid=None, gid=None, mkdir=False):
    """Set the permissions and ownership of a path.

    Args:
        path: string, the path for which owner ID and group ID needs to be setup.
        mode: octal string, the permissions to set on the path.
        uid: int, the owner ID to be set for the path.
        gid: int, the group ID to be set for the path.
        mkdir: bool, True if the directory needs to be created.
    """
    if mkdir and not os.path.exists(path):
        # Default to world-accessible (0o777, same value as the original
        # literal 511) when no mode is given.
        os.mkdir(path, mode or 0o777)
    elif mode:
        os.chmod(path, mode)
    # BUG FIX: the original truthiness test (`if uid and gid`) silently
    # skipped chown when either ID was 0 (root); compare against None.
    if uid is not None and gid is not None:
        os.chown(path, uid, gid)
    _SetSELinuxContext(path)
Set the permissions and ownership of a path. Args: path: string, the path for which owner ID and group ID needs to be setup. mode: octal string, the permissions to set on the path. uid: int, the owner ID to be set for the path. gid: int, the group ID to be set for the path. mkdir: bool, True if the directory needs to be created.
codesearchnet
def __closely_associated_score(self, normalized_sentences, top_n_words):
    """Score each sentence by clusters of closely associated keywords.

    For every sentence, the positions of significant words are grouped
    into clusters (gaps smaller than ``self.cluster_threshold``), and the
    sentence score is the best cluster's
    ``significant_words**2 / cluster_span`` ratio.

    Args:
        normalized_sentences: The list of sentences.
        top_n_words: Important (significant) words.

    Returns:
        The list of ``(sentence_index, score)`` tuples.
    """
    scores_list = []
    sentence_idx = -1
    for sentence in normalized_sentences:
        self.tokenize(sentence)
        sentence = self.token
        sentence_idx += 1
        # Positions of significant words within this sentence.
        word_idx = []
        for w in top_n_words:
            try:
                word_idx.append(sentence.index(w))
            except ValueError:
                pass
        word_idx.sort()
        if len(word_idx) == 0:
            continue
        # Group the positions into clusters separated by gaps that are at
        # least cluster_threshold wide.
        clusters = []
        cluster = [word_idx[0]]
        i = 1
        while i < len(word_idx):
            if word_idx[i] - word_idx[i - 1] < self.cluster_threshold:
                cluster.append(word_idx[i])
            else:
                clusters.append(cluster[:])
                cluster = [word_idx[i]]
            i += 1
        clusters.append(cluster)
        max_cluster_score = 0
        for c in clusters:
            significant_words_in_cluster = len(c)
            total_words_in_cluster = c[-1] - c[0] + 1
            score = 1.0 * significant_words_in_cluster * significant_words_in_cluster / total_words_in_cluster
            if score > max_cluster_score:
                max_cluster_score = score
        # BUG FIX: record the best cluster score; the original appended
        # the *last* cluster's score and never used max_cluster_score.
        scores_list.append((sentence_idx, max_cluster_score))
    return scores_list
Scoring the sentence with closely associations. Args: normalized_sentences: The list of sentences. top_n_words: Important sentences. Returns: The list of scores.
codesearchnet
def quality(self, tests, alias=None):
    """Run a series of tests and return the corresponding results.

    Based on curve testing for ``welly``.

    Args:
        tests (dict): maps mnemonics/aliases to lists of test functions.
        alias (dict): optional alias mapping; only the 'striplog' entry
            is consulted.

    Returns:
        dict. The results keyed by test-function name. Stick to booleans
        (True = pass) or ints.
    """
    alias = alias or {}
    alias = alias.get('striplog', alias.get('Striplog', []))
    # Gather tests that apply to everything, plus those targeting
    # striplogs (any capitalization) or one of the given aliases.
    this_tests =\
        tests.get('all', [])+tests.get('All', [])+tests.get('ALL', [])\
        + tests.get('striplog', tests.get('Striplog', []))\
        + utils.flatten_list([tests.get(a) for a in alias])
    # Drop falsy entries (e.g. None from missing alias keys).
    this_tests = filter(None, this_tests)

    # An explicit falsy 'striplog' entry suppresses all tests.
    if not tests.get('striplog', tests.get('Striplog', 1)):
        this_tests = []

    return {test.__name__: test(self) for test in this_tests}
Run a series of tests and return the corresponding results. Based on curve testing for ``welly``. Args: tests (dict): maps mnemonics to lists of test functions. alias (dict, optional): alias mapping; only the 'striplog' entry is consulted. Returns: dict. The results keyed by test-function name. Stick to booleans (True = pass) or ints.
juraj-google-style
def pre_finalize(self, init_result, writer_results):
    """Pre-finalization stage for sink.

    Called after all bundle writes are complete and before
    finalize_write. Used to set up and verify filesystem and sink states.

    Args:
        init_result: the result of ``initialize_write()`` invocation.
        writer_results: an iterable containing results of
            ``Writer.close()`` invocations. This will only contain
            results of successful writes, and only the result of a
            single successful write for a given bundle.

    Returns:
        An object that contains any sink-specific state generated; it
        will be passed to finalize_write().
    """
    # Abstract hook: concrete sinks must override this method.
    raise NotImplementedError
Pre-finalization stage for sink. Called after all bundle writes are complete and before finalize_write. Used to setup and verify filesystem and sink states. Args: init_result: the result of ``initialize_write()`` invocation. writer_results: an iterable containing results of ``Writer.close()`` invocations. This will only contain results of successful writes, and will only contain the result of a single successful write for a given bundle. Returns: An object that contains any sink specific state generated. This object will be passed to finalize_write().
github-repos
def _GetInstanceAndProjectAttributes(self, metadata_dict):
    """Get dictionaries for instance and project attributes.

    Args:
        metadata_dict: json, the deserialized contents of the metadata server.

    Returns:
        tuple, two dictionaries for instance and project attributes.
    """
    metadata_dict = metadata_dict or {}

    def _section_attributes(section, warning):
        # Missing sections are logged and treated as empty.
        try:
            return metadata_dict[section]['attributes']
        except KeyError:
            self.logger.warning(warning)
            return {}

    instance_data = _section_attributes('instance', 'Instance attributes were not found.')
    project_data = _section_attributes('project', 'Project attributes were not found.')
    return instance_data, project_data
Get dictionaries for instance and project attributes. Args: metadata_dict: json, the deserialized contents of the metadata server. Returns: tuple, two dictionaries for instance and project attributes.
juraj-google-style
def _process_has_substring_filter_directive(filter_operation_info, location, context, parameters):
    """Return a Filter basic block checking that the directive arg is a substring of the field.

    Args:
        filter_operation_info: FilterOperationInfo object, containing the
            directive and field info of the field where the filter is to
            be applied.
        location: Location where this filter is used.
        context: dict, various per-compilation data (e.g. declared tags,
            whether the current block is optional, etc.). May be mutated
            in-place in this function!
        parameters: list of 1 element, the substring argument; if the
            argument is optional and missing, the check will return True.

    Returns:
        a Filter basic block that performs the substring check

    Raises:
        GraphQLCompilationError: if the filtered field is not a string type.
    """
    filtered_field_type = filter_operation_info.field_type
    filtered_field_name = filter_operation_info.field_name
    # "has_substring" only makes sense for (possibly non-null) String fields.
    if (not strip_non_null_from_type(filtered_field_type).is_same_type(GraphQLString)):
        raise GraphQLCompilationError(u'Cannot apply "has_substring" to non-string type {}'.format(filtered_field_type))
    argument_inferred_type = GraphQLString
    (argument_expression, non_existence_expression) = _represent_argument(location, context, parameters[0], argument_inferred_type)
    filter_predicate = expressions.BinaryComposition(u'has_substring', expressions.LocalField(filtered_field_name), argument_expression)
    if (non_existence_expression is not None):
        # Optional, missing arguments make the filter pass vacuously.
        filter_predicate = expressions.BinaryComposition(u'||', non_existence_expression, filter_predicate)
    return blocks.Filter(filter_predicate)
Return a Filter basic block that checks if the directive arg is a substring of the field. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! parameters: list of 1 element, specifying the substring that must appear in the field's value; if the argument is optional and missing, the check will return True Returns: a Filter basic block that performs the substring check
codesearchnet
def load_ipython_extension(shell):
    """Called when the extension is loaded.

    Monkeypatches HTTP clients to send a Datalab user agent, adjusts
    IPython magic dispatch so line/cell magics fall back to each other,
    and seeds the user namespace with project-id helpers.

    Args:
        shell - (NotebookWebApplication): handle to the Notebook interactive shell instance.
    """
    # Force a Datalab user agent on all httplib2 requests.
    def _request(self, uri, method="GET", body=None, headers=None,
                 redirections=_httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
        if headers is None:
            headers = {}
        headers['user-agent'] = 'GoogleCloudDataLab/1.0'
        return _orig_request(self, uri, method=method, body=body, headers=headers,
                             redirections=redirections, connection_type=connection_type)
    _httplib2.Http.request = _request

    # Same user agent for every requests.Session.
    def _init_session(self):
        _orig_init(self)
        self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0'
    _requests.Session.__init__ = _init_session

    # Unknown line magics fall back to the cell magic of the same name.
    def _run_line_magic(self, magic_name, line):
        fn = self.find_line_magic(magic_name)
        if fn is None:
            cm = self.find_cell_magic(magic_name)
            if cm:
                return _run_cell_magic(self, magic_name, line, None)
        return _orig_run_line_magic(self, magic_name, line)

    # Cell magics invoked with an empty body fall back to the line magic.
    def _run_cell_magic(self, magic_name, line, cell):
        if cell is None or len(cell) == 0 or cell.isspace():
            fn = self.find_line_magic(magic_name)
            if fn:
                return _orig_run_line_magic(self, magic_name, line)
            cell = None
        return _orig_run_cell_magic(self, magic_name, line, cell)

    _shell.InteractiveShell.run_cell_magic = _run_cell_magic
    _shell.InteractiveShell.run_line_magic = _run_line_magic

    def _get_project_id():
        try:
            return google.datalab.Context.default().project_id
        except Exception:
            return None

    def _set_project_id(project_id):
        context = google.datalab.Context.default()
        context.set_project_id(project_id)
        try:
            # Keep the legacy datalab context in sync when available.
            from datalab.context import Context as _old_context
            _old_context.default().set_project_id(project_id)
        except ImportError:
            pass

    try:
        # Seed the notebook namespace once; get_ipython() may return None
        # outside a real IPython session (hence the TypeError guard).
        if 'datalab_project_id' not in _IPython.get_ipython().user_ns:
            _IPython.get_ipython().user_ns['datalab_project_id'] = _get_project_id
            _IPython.get_ipython().user_ns['set_datalab_project_id'] = _set_project_id
    except TypeError:
        pass
Called when the extension is loaded. Args: shell - (NotebookWebApplication): handle to the Notebook interactive shell instance.
juraj-google-style
def copy_numbered_block(self):
    """Copy the block (as delimited by start and end) into a new table.

    Additionally inserts the original table column indices as the first
    row of the copy.

    Returns:
        A copy of the block with no block transformations, prefixed with
        an index row.
    """
    block = self.copy_raw_block()
    # Prepend the original column indices spanned by this block.
    index_row = range(self.start[1], self.end[1])
    block.insert(0, index_row)
    return block
Copies the block as it was originally specified by start and end into a new table. Additionally inserts the original table indices in the first row of the block. Returns: A copy of the block with no block transformations.
codesearchnet
def run_step(context):
    """Write payload out to json file.

    Args:
        context: pypyr.context.Context. Mandatory. The following context
            keys are expected:
            - fileWriteJson
              - path. mandatory. path-like. Write output file here;
                missing directories in the path are created.
              - payload. optional. Write this key to the output file. If
                not specified, output the entire context.

    Returns:
        None.

    Raises:
        pypyr.errors.KeyNotInContextError: fileWriteJson or
            fileWriteJson['path'] missing in context.
        pypyr.errors.KeyInContextHasNoValueError: fileWriteJson or
            fileWriteJson['path'] exists but is None.
    """
    logger.debug('started')
    context.assert_child_key_has_value('fileWriteJson', 'path', __name__)
    out_path = context.get_formatted_string(context['fileWriteJson']['path'])
    is_payload_specified = ('payload' in context['fileWriteJson'])
    logger.debug(f'opening destination file for writing: {out_path}')
    # Create any missing parent directories for the destination file.
    os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)
    with open(out_path, 'w') as outfile:
        if is_payload_specified:
            payload = context['fileWriteJson']['payload']
            formatted_iterable = context.get_formatted_iterable(payload)
        else:
            # No explicit payload: serialize the entire context.
            formatted_iterable = context.get_formatted_iterable(context)
        json.dump(formatted_iterable, outfile, indent=2, ensure_ascii=False)
    logger.info(f'formatted context content and wrote to {out_path}')
    logger.debug('done')
Write payload out to json file. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileWriteJson - path. mandatory. path-like. Write output file to here. Will create directories in path for you. - payload. optional. Write this key to output file. If not specified, output entire context. Returns: None. Raises: pypyr.errors.KeyNotInContextError: fileWriteJson or fileWriteJson['path'] missing in context. pypyr.errors.KeyInContextHasNoValueError: fileWriteJson or fileWriteJson['path'] exists but is None.
codesearchnet
def convert_predict_response(pred, serving_bundle):
    """Converts a PredictResponse to ClassificationResponse or RegressionResponse.

    Args:
        pred: PredictResponse to convert.
        serving_bundle: A `ServingBundle` object that contains the information
            about the serving request that the response was generated by.

    Returns:
        A ClassificationResponse or RegressionResponse.
    """
    output = pred.outputs[serving_bundle.predict_output_tensor]
    raw_output = output.float_val
    if (serving_bundle.model_type == 'classification'):
        # Slice the flat float list into one row of class scores per
        # example, using the tensor's (num_examples, num_classes) shape.
        values = []
        for example_index in range(output.tensor_shape.dim[0].size):
            start = (example_index * output.tensor_shape.dim[1].size)
            values.append(raw_output[start:(start + output.tensor_shape.dim[1].size)])
    else:
        # Regression: the flat list is already one value per example.
        values = raw_output
    return convert_prediction_values(values, serving_bundle, pred.model_spec)
Converts a PredictResponse to ClassificationResponse or RegressionResponse. Args: pred: PredictResponse to convert. serving_bundle: A `ServingBundle` object that contains the information about the serving request that the response was generated by. Returns: A ClassificationResponse or RegressionResponse.
codesearchnet
def update(self, identity, params=None, headers=None):
    """Update a payment.

    Updates a payment object. This accepts only the metadata parameter.

    Args:
        identity (string): Unique identifier, beginning with "PM".
        params (dict, optional): Request body.
        headers (dict, optional): Extra request headers.

    Returns:
        The resource built from the API response.
    """
    path = self._sub_url_params('/payments/:identity', {
        'identity': identity,
    })

    if params is not None:
        # The API expects the body nested under the resource envelope key.
        params = {self._envelope_key(): params}

    response = self._perform_request('PUT', path, params, headers,
                                     retry_failures=True)
    return self._resource_for(response)
Update a payment. Updates a payment object. This accepts only the metadata parameter. Args: identity (string): Unique identifier, beginning with "PM". params (dict, optional): Request body. Returns: ListResponse of Payment instances
juraj-google-style
def as_text(content: ProcessorContentTypes, *, strict: bool=False, substream_name: str | None=None) -> str:
    """Returns a text representation of the content.

    The returned text is a concatenation of all text parts in the content.

    Args:
        content: The content to process. This can be of various types as
            defined by `ProcessorContentTypes`.
        strict: If True, unsupported content types raise a ValueError.
            Otherwise, they are ignored.
        substream_name: If set, only text parts with the given substream
            name are included.
    """
    pieces: list[str] = []
    for mime, part in ProcessorContent(content).items():
        # Optionally restrict to a single named substream.
        if substream_name is not None and part.substream_name != substream_name:
            continue
        if is_text(mime):
            pieces.append(part.text)
        elif strict:
            raise ValueError(f'Unsupported content type {mime}.')
    return ''.join(pieces)
Returns a text representation of the content. The returned text is a concatenation of all text parts in the content. Args: content: The content to process. This can be of various types as defined by `ProcessorContentTypes`. strict: If True, unsupported content types will raise a ValueError. Otherwise, they will be ignored. substream_name: If set, only text parts with the given substream name will be returned.
github-repos
def check_num(self, checks, radl):
    """Check types, operators and units in features with numbers.

    Args:
        checks (dict of dict of str:tuples): keys are property-name
            prefixes; values map property-name suffixes to iterables as
            in ``_check_feature``.
        radl: passed to ``_check_feature``.

    Returns:
        dict mapping each checked prefix to the set of indices seen.

    Raises:
        RADLParseException: on a malformed property index or on
            non-contiguous index values.
    """
    prefixes = {}
    for f in self.features:
        if not isinstance(f, Feature):
            continue
        prefix, sep, tail = f.prop.partition('.')
        if not sep or prefix not in checks:
            continue
        checks0 = checks[prefix]
        num, sep, suffix = tail.partition('.')
        # BUG FIX: catch only ValueError; the original bare `except`
        # also swallowed KeyboardInterrupt/SystemExit.
        try:
            num = int(num)
        except ValueError:
            raise RADLParseException('Invalid property name; expected an index.', line=f.line)
        if not sep or suffix not in checks0:
            continue
        f._check(checks0[suffix], radl)
        prefixes.setdefault(prefix, set()).add(num)
    # Indices for each prefix must form a contiguous 0..n-1 range.
    for prefix, nums in prefixes.items():
        if min(nums) != 0 or max(nums) != len(nums) - 1:
            raise RADLParseException("Invalid indices values in properties '%s'" % prefix)
    return prefixes
Check types, operators and units in features with numbers. Args: - checks(dict of dict of str:tuples): keys are property name prefixes, and the values are dict with keys are property name suffixes and values are iterable as in ``_check_feature``. - radl: passed to ``_check_feature``.
codesearchnet
def _to_backend_mesh(device_mesh):
    """Convert the DeviceMesh to a JAX backend specific Mesh.

    Args:
        device_mesh: DeviceMesh instance to convert.

    Returns:
        A `jax.sharding.Mesh` instance.
    """
    mesh_shape = device_mesh.devices.shape
    # Convert each device, then restore the original mesh layout.
    flat_devices = [_to_backend_device(d) for d in device_mesh.devices.flatten()]
    device_array = np.array(flat_devices).reshape(mesh_shape)
    return jax.sharding.Mesh(device_array, device_mesh.axis_names)
Convert the DeviceMesh to JAX backend specific Mesh. Args: device_mesh: DeviceMesh instance to convert. Returns: A `jax.sharding.Mesh` instance.
github-repos
def convert_predict_response(pred, serving_bundle):
    """Converts a PredictResponse to ClassificationResponse or RegressionResponse.

    Args:
        pred: PredictResponse to convert.
        serving_bundle: A `ServingBundle` object that contains the information
            about the serving request that the response was generated by.

    Returns:
        A ClassificationResponse or RegressionResponse.
    """
    output = pred.outputs[serving_bundle.predict_output_tensor]
    raw_output = output.float_val
    if serving_bundle.model_type != 'classification':
        # Regression: the flat list is already one value per example.
        values = raw_output
    else:
        # Reshape the flat float list into per-example rows of class scores.
        num_examples = output.tensor_shape.dim[0].size
        num_classes = output.tensor_shape.dim[1].size
        values = [raw_output[row * num_classes:(row + 1) * num_classes]
                  for row in range(num_examples)]
    return convert_prediction_values(values, serving_bundle, pred.model_spec)
Converts a PredictResponse to ClassificationResponse or RegressionResponse. Args: pred: PredictResponse to convert. serving_bundle: A `ServingBundle` object that contains the information about the serving request that the response was generated by. Returns: A ClassificationResponse or RegressionResponse.
juraj-google-style
def append(parent: ScheduleComponent, child: ScheduleComponent, name: str = None) -> Schedule:
    r"""Return a new schedule with `child` appended to `parent`.

    The insertion time is the latest stop time of `parent` over the
    channels shared by both schedules:

    $t = \textrm{max}({x.stop\_time |x \in parent.channels \cap child.channels})$

    Args:
        parent: The schedule to be inserted into
        child: The schedule to insert
        name: Name of the new schedule. Defaults to name of parent
    """
    shared_channels = set(parent.channels).intersection(child.channels)
    insertion_time = parent.ch_stop_time(*shared_channels)
    return insert(parent, insertion_time, child, name=name)
r"""Return a new schedule with by appending `child` to `parent` at the last time of the `parent` schedule's channels over the intersection of the parent and child schedule's channels. $t = \textrm{max}({x.stop\_time |x \in parent.channels \cap child.channels})$ Args: parent: The schedule to be inserted into child: The schedule to insert name: Name of the new schedule. Defaults to name of parent
juraj-google-style
def __init__(self, filename, ionicstep_start=1, ionicstep_end=None, comment=None):
    """Init a Xdatcar.

    Args:
        filename (str): Filename of input XDATCAR file.
        ionicstep_start (int): Starting number of ionic step (1-based).
        ionicstep_end (int): Ending number of ionic step (exclusive), or
            None to read to the end of the file.
        comment (str): Optional comment; defaults to the formula of the
            first structure.

    Raises:
        ValueError: If an ionic step bound is less than 1.
    """
    preamble = None
    coords_str = []
    structures = []
    preamble_done = False
    if ionicstep_start < 1:
        raise ValueError('Start ionic step cannot be less than 1')
    # BUG FIX: the original re-tested ionicstep_start here, so an invalid
    # ionicstep_end was never rejected. (Also raising ValueError instead
    # of bare Exception; callers catching Exception still work.)
    if ionicstep_end is not None and ionicstep_end < 1:
        raise ValueError('End ionic step cannot be less than 1')
    ionicstep_cnt = 1
    with zopen(filename, "rt") as f:
        for l in f:
            l = l.strip()
            if preamble is None:
                preamble = [l]
            elif not preamble_done:
                if l == "" or "Direct configuration=" in l:
                    preamble_done = True
                    # Drop a repeated copy of the first preamble line.
                    tmp_preamble = [preamble[0]]
                    for i in range(1, len(preamble)):
                        if preamble[0] != preamble[i]:
                            tmp_preamble.append(preamble[i])
                        else:
                            break
                    preamble = tmp_preamble
                else:
                    preamble.append(l)
            elif l == "" or "Direct configuration=" in l:
                # End of one ionic step: parse the accumulated coordinates.
                p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
                if ionicstep_end is None:
                    if ionicstep_cnt >= ionicstep_start:
                        structures.append(p.structure)
                else:
                    if ionicstep_start <= ionicstep_cnt < ionicstep_end:
                        structures.append(p.structure)
                ionicstep_cnt += 1
                coords_str = []
            else:
                coords_str.append(l)
        # Flush the final ionic step.
        p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
        if ionicstep_end is None:
            if ionicstep_cnt >= ionicstep_start:
                structures.append(p.structure)
        else:
            if ionicstep_start <= ionicstep_cnt < ionicstep_end:
                structures.append(p.structure)
    self.structures = structures
    self.comment = comment or self.structures[0].formula
Init a Xdatcar. Args: filename (str): Filename of input XDATCAR file. ionicstep_start (int): Starting number of ionic step. ionicstep_end (int): Ending number of ionic step.
juraj-google-style
def monkhorst(cls, ngkpt, shiftk=(0.5, 0.5, 0.5), chksymbreak=None, use_symmetries=True, use_time_reversal=True, comment=None):
    """Convenient static constructor for a Monkhorst-Pack mesh.

    Args:
        ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
        shiftk: Shift to be applied to the kpoints.
        chksymbreak: Abinit chksymbreak variable (None leaves it unset).
        use_symmetries: Use spatial symmetries to reduce the number of k-points.
        use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.
        comment: Optional comment describing the sampling.

    Returns:
        :class:`KSampling` object.
    """
    if not comment:
        comment = "Monkhorst-Pack scheme with user-specified shiftk"
    return cls(kpts=[ngkpt],
               kpt_shifts=shiftk,
               use_symmetries=use_symmetries,
               use_time_reversal=use_time_reversal,
               chksymbreak=chksymbreak,
               comment=comment)
Convenient static constructor for a Monkhorst-Pack mesh. Args: ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors. shiftk: Shift to be applied to the kpoints. use_symmetries: Use spatial symmetries to reduce the number of k-points. use_time_reversal: Use time-reversal symmetry to reduce the number of k-points. Returns: :class:`KSampling` object.
juraj-google-style
def backup(filenames, prefix="error"):
    """Backup files to a numbered tar.gz archive.

    Used, for example, in backing up the files of an errored run before
    performing corrections. Successive calls generate error.1.tar.gz,
    error.2.tar.gz, ...

    Args:
        filenames ([str]): List of files to backup. Supports wildcards,
            e.g., *.*.
        prefix (str): prefix to the files. Defaults to "error".
    """
    # Continue the numbering from any existing archives with this prefix.
    existing = [int(f.split(".")[1]) for f in glob("{}.*.tar.gz".format(prefix))]
    sequence = max(existing, default=0) + 1
    filename = "{}.{}.tar.gz".format(prefix, sequence)
    logging.info("Backing up run to {}.".format(filename))
    with tarfile.open(filename, "w:gz") as tar:
        for pattern in filenames:
            for match in glob(pattern):
                tar.add(match)
Backup files to a tar.gz file. Used, for example, in backing up the files of an errored run before performing corrections. Args: filenames ([str]): List of files to backup. Supports wildcards, e.g., *.*. prefix (str): prefix to the files. Defaults to error, which means a series of error.1.tar.gz, error.2.tar.gz, ... will be generated.
juraj-google-style
def output_refs(self, transitive: bool=True) -> List['SymbolReference']:
    """Returns the references to the symbols that this code outputs.

    Args:
        transitive: If True, transitive symbol references will be
            included. Otherwise, only the direct dependencies will be
            included.

    Returns:
        A list of ``SymbolReference`` in their definition order that
        consume the outputs of the current instruction.
    """
    parent_func = self.parent_func()
    references: List[SymbolReference] = []
    if parent_func is not None:
        output_vars = self.output_vars()

        def find_references(code: Code):
            # Collect references in `code` to any currently-tracked
            # output variable.
            refs = []

            def identify_reference(k, v, p):
                del k, p
                if isinstance(v, SymbolReference):
                    if v.name in output_vars:
                        refs.append(v)
            pg.traverse(code, identify_reference)
            return refs
        for line in self.succeeding_lines():
            ins_refs = find_references(line)
            references.extend(ins_refs)
            new_assigned = line.output_vars()
            if ins_refs and transitive:
                # A consuming line's own outputs become transitively
                # tracked as well.
                output_vars.update(new_assigned)
            else:
                # Reassignment without consumption shadows the tracked vars.
                output_vars -= new_assigned
    return references
Returns the references to the symbols that this code outputs. Args: transitive: If True, transitive symbol references will be included. Otherwise, only the direct dependencies will be included. Returns: A list of ``Var` or ``FunctionCall`` in their definition order that consume the outputs of current instruction. Users can use :meth:`parent_instruction` or :meth:`line` to get their context.
github-repos
def parameterize(self, country: Optional[str]='South Sudan', state: Optional[str]=None, year: Optional[int]=None, month: Optional[int]=None, unit: Optional[str]=None, fallback_aggaxes: List[str]=('year', 'month'), aggfunc: Callable=np.mean):
    """Parameterize the analysis graph.

    Args:
        country: Country to parameterize indicators for.
        state: Optional state within the country.
        year: Year of interest.
        month: Month of interest.
        unit: Desired indicator unit.
        fallback_aggaxes: An iterable of strings denoting the axes upon
            which to perform fallback aggregation if the desired
            constraints cannot be met.
            (BUG FIX: the default is now an immutable tuple; the
            original mutable-list default was shared across calls.)
        aggfunc: The function that will be called to perform the
            aggregation if there are multiple matches.

    Raises:
        ValueError: If fallback_aggaxes contains an unknown axis.
    """
    valid_axes = ('country', 'state', 'year', 'month')
    if any(axis not in valid_axes for axis in fallback_aggaxes):
        raise ValueError(f'All elements of the fallback_aggaxes set must be one of the following: {valid_axes}')
    for n in self.nodes(data=True):
        for indicator in n[1]['indicators'].values():
            indicator.mean, indicator.unit = get_indicator_value(indicator, country, state, year, month, unit, fallback_aggaxes, aggfunc)
            # Assume a 10% relative standard deviation around the mean.
            indicator.stdev = 0.1 * abs(indicator.mean)
Parameterize the analysis graph. Args: country year month fallback_aggaxes: An iterable of strings denoting the axes upon which to perform fallback aggregation if the desired constraints cannot be met. aggfunc: The function that will be called to perform the aggregation if there are multiple matches.
codesearchnet
def set_available(self, show=None):
    """Sets the agent availability to True.

    Args:
        show (aioxmpp.PresenceShow, optional): the show state of the
            presence (Default value = None, meaning keep the current
            show state)
    """
    # Preserve the current show state unless an explicit one is given.
    if show is None:
        show = self.state.show
    self.set_presence(PresenceState(available=True, show=show))
Sets the agent availability to True. Args: show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None)
codesearchnet
def block_reducible(cm, nodes1, nodes2):
    """Return whether connections from ``nodes1`` to ``nodes2`` are reducible.

    Args:
        cm (np.ndarray): The network's connectivity matrix.
        nodes1 (tuple[int]): Source nodes.
        nodes2 (tuple[int]): Sink nodes.

    Returns:
        bool: True if the connections are reducible.
    """
    # An empty source or sink set is trivially reducible.
    if not (nodes1 and nodes2):
        return True
    submatrix = cm[np.ix_(nodes1, nodes2)]
    # A sink with no incoming edge (zero column sum) or a source with no
    # outgoing edge (zero row sum) makes the mapping reducible.
    if not (submatrix.sum(0).all() and submatrix.sum(1).all()):
        return True
    if min(len(nodes1), len(nodes2)) > 1:
        return block_cm(submatrix)
    return False
Return whether connections from ``nodes1`` to ``nodes2`` are reducible. Args: cm (np.ndarray): The network's connectivity matrix. nodes1 (tuple[int]): Source nodes nodes2 (tuple[int]): Sink nodes
juraj-google-style
def write(self, __text: str) -> None:
    """Write text to the debug stream, prefixed with the caller's location.

    Args:
        __text: Text to write. A bare line separator is passed through
            without a location prefix.
    """
    if __text == os.linesep:
        self.handle.write(__text)
        return
    frame = inspect.currentframe()
    if frame is None:
        # Some interpreters do not expose frames.
        filename, lineno = 'unknown', 0
    else:
        caller = frame.f_back
        filename = caller.f_code.co_filename.split(os.sep)[-1]
        lineno = caller.f_lineno
    # Right-align the (truncated) file name in a fixed-width column.
    self.handle.write('[{:>15s}:{:03d}] {}'.format(filename[-15:], lineno, __text))
Write text to the debug stream. Args: __text: Text to write
codesearchnet
def exclude(self, scheduled_operation: ScheduledOperation) -> bool:
    """Omits a scheduled operation from the schedule, if present.

    Args:
        scheduled_operation: The operation to try to remove.

    Returns:
        True if the operation was present and is now removed, False if it
        was already not present.
    """
    if scheduled_operation in self.scheduled_operations:
        self.scheduled_operations.remove(scheduled_operation)
        return True
    return False
Omits a scheduled operation from the schedule, if present. Args: scheduled_operation: The operation to try to remove. Returns: True if the operation was present and is now removed, False if it was already not present.
juraj-google-style
def filter_segs(self, segs):
    """Given a list of strings, return only those which are valid segments.

    A value is a valid segment when ``self.seg_regex`` matches it in full.

    Args:
        segs (list): List of unicode values.

    Returns:
        list: Values in ``segs`` that are valid segments.
    """
    valid = []
    for seg in segs:
        match = self.seg_regex.match(seg)
        # Keep the value only when the match covers the whole string.
        if match and match.group(0) == seg:
            valid.append(seg)
    return valid
Given a list of strings, return only those which are valid segments.

Args:
    segs (list): list of unicode values

Returns:
    list: values in `segs` that are valid segments (according to the
    definitions of bases and diacritics/modifiers known to the object)
codesearchnet
def on_message(self, fragment):
    """Process an individual wire protocol fragment.

    Tornado delivers either a text or binary string depending on the
    websocket opcode; ``_receive`` is expected to handle both.

    Args:
        fragment (unicode or bytes): Wire fragment to process.
    """
    try:
        message = (yield self._receive(fragment))
    except Exception as e:
        # NOTE(review): if this branch runs, `message` is unbound below —
        # presumably _internal_error terminates the connection; confirm.
        log.error('Unhandled exception receiving a message: %r: %r', e, fragment, exc_info=True)
        self._internal_error('server failed to parse a message')
    try:
        if message:
            if (_message_test_port is not None):
                # Test hook: record every received message.
                _message_test_port.received.append(message)
            work = (yield self._handle(message))
            if work:
                (yield self._schedule(work))
    except Exception as e:
        log.error('Handler or its work threw an exception: %r: %r', e, message, exc_info=True)
        self._internal_error('server failed to handle a message')
    # Tornado-coroutine style explicit return.
    raise gen.Return(None)
Process an individual wire protocol fragment. The websocket RFC specifies opcodes for distinguishing text frames from binary frames. Tornado passes us either a text or binary string depending on that opcode, we have to look at the type of the fragment to see what we got. Args: fragment (unicode or bytes) : wire fragment to process
codesearchnet
def _get_string_match(self, key):
    """Gets a MatchObject for the given key, assuming a string value.

    Searches ``self._content`` for a line of the form
    ``define('<key>', '<value>');`` (whitespace between tokens optional).

    Args:
        key (str): Key of the property to look up.

    Returns:
        The match object (group 1 holds the value), or ``None`` if the
        property is not present.
    """
    expression = r'(?:\s*)'.join([
        '^',
        'define',
        r'\(',
        # Fix: escape the key so keys containing regex metacharacters
        # (e.g. '.') are matched literally instead of as patterns.
        '\'{}\''.format(re.escape(key)),
        ',',
        r'\'(.*)\'',
        r'\)',
        ';'
    ])
    pattern = re.compile(expression, re.MULTILINE)
    return pattern.search(self._content)
Gets a MatchObject for the given key, assuming a string value. Args: key (str): Key of the property to look-up. Return: MatchObject: The discovered match.
juraj-google-style
def _verify_request(self, signature_chain_url: str, signature: str, request_body: bytes) -> bool:
    """Conducts Alexa request verifications against Amazon requirements.

    Validates (and caches) the signing certificate for the given chain URL,
    then verifies the request signature with it.

    Args:
        signature_chain_url: Signature certificate URL from the
            SignatureCertChainUrl HTTP header.
        signature: Base64 decoded Alexa request signature from the
            Signature HTTP header.
        request_body: Full HTTPS request body.

    Returns:
        True if verification was successful, False otherwise.
    """
    if (signature_chain_url not in self.valid_certificates.keys()):
        amazon_cert: X509 = verify_cert(signature_chain_url)
        if amazon_cert:
            # Cache the validated certificate with an expiration timestamp.
            amazon_cert_lifetime: timedelta = self.config['amazon_cert_lifetime']
            expiration_timestamp = (datetime.utcnow() + amazon_cert_lifetime)
            validated_cert = ValidatedCert(cert=amazon_cert, expiration_timestamp=expiration_timestamp)
            self.valid_certificates[signature_chain_url] = validated_cert
            log.info(f'Certificate {signature_chain_url} validated')
        else:
            log.error(f'Certificate {signature_chain_url} validation failed')
            return False
    else:
        # Reuse the previously validated certificate.
        validated_cert: ValidatedCert = self.valid_certificates[signature_chain_url]
        amazon_cert: X509 = validated_cert.cert
    if verify_signature(amazon_cert, signature, request_body):
        result = True
    else:
        log.error(f"Failed signature verification for request: {request_body.decode('utf-8', 'replace')}")
        result = False
    return result
Conducts series of Alexa request verifications against Amazon Alexa requirements. Args: signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header. signature: Base64 decoded Alexa request signature from Signature HTTP header. request_body: full HTTPS request body Returns: result: True if verification was successful, False if not.
codesearchnet
def __init__(self, credentials=None):
    """Construct an Authentication struct.

    Args:
        credentials (list): A list of Credential structs to be used for
            authentication. Optional, defaults to None.
    """
    super(Authentication, self).__init__(enums.Tags.AUTHENTICATION)
    # Backing storage; populated via the `credentials` assignment below
    # (presumably a validating property setter — confirm in class body).
    self._credentials = []
    self.credentials = credentials
Construct an Authentication struct. Args: credentials (list): A list of Credential structs to be used for authentication. Optional, defaults to None.
juraj-google-style
def view_packgets_list(self, option: str='-e', keyword: str='') -> list:
    """Show all packages installed on the device.

    Args:
        option: One of the `pm list packages` flags:
            -f (associated file), -d (disabled only), -e (enabled only),
            -s (system only), -3 (third party only), -i (installer),
            -u (include uninstalled).
        keyword: Optionally restrict to packages whose name contains it.

    Returns:
        list: Package names, with the ``package:`` prefix stripped.

    Raises:
        ValueError: If ``option`` is not a supported flag.
    """
    valid_options = ('-f', '-d', '-e', '-s', '-3', '-i', '-u')
    if option not in valid_options:
        raise ValueError(f'There is no option called {option!r}.')
    output, _ = self._execute('-s', self.device_sn, 'shell', 'pm', 'list',
                              'packages', option, keyword)
    # Each line looks like ``package:<name>``; drop the 8-character prefix.
    return [line[8:] for line in output.splitlines()]
Show all packages. Args: option: -f see their associated file -d filter to only show disabled packages -e filter to only show enabled packages -s filter to only show system packages -3 filter to only show third party packages -i see the installer for the packages -u also include uninstalled packages -keyword: optionally only those whose name contains the text in keyword
codesearchnet
def _generate_flush_cache_op(self, num_replicas, on_tpu, tensor_trace_order, graph):
    """Generates an Op that flushes the tensor-tracer cache to file.

    Args:
        num_replicas: Total number of replicas.
        on_tpu: Whether the graph is executed on TPU.
        tensor_trace_order: TensorTraceOrder object holding the
            tensor-name-to-id map.
        graph: TensorFlow graph.

    Returns:
        The Op that flushes the cache (and, for persistent caches, resets
        it afterwards).
    """

    def _flush_fun(cache, replica_id, step_num):
        # Builds a `case` over the replica id so each replica prints into
        # its own output stream.

        def _f(file_index):

            def _print_cache():
                replica_str = '%d' % file_index
                if self._parameters.trace_dir:
                    output_path = os.path.join(self._parameters.trace_dir, _COMPACT_TRACE_FILE_PREFIX) + replica_str + self._get_outfile_suffix()
                    output_stream = _OUTPUT_STREAM_ESCAPE + output_path
                else:
                    output_stream = sys.stderr
                new_step_line = _REPLICA_ID_TAG + replica_str
                print_ops = []
                if self._parameters.inspect_trace:
                    if self._num_signature_dimensions() > 1:
                        raise ValueError('Inspecting multi signatures are not supported.')
                    if self._parameters.trace_mode in tensor_tracer_flags.TRACE_MODE_HISTORY:
                        print_ops.append(self._inspect_history_cache(cache=cache, replica_id=replica_id, step_num=step_num, tensor_trace_order=tensor_trace_order))
                    else:
                        print_ops.append(self._inspect_summary_cache(cache=cache, replica_id=replica_id, step_num=step_num, output_stream=output_stream, tensor_trace_order=tensor_trace_order))
                else:
                    # One print op per signature dimension (column) of the cache.
                    for i in range(self._num_signature_dimensions()):
                        print_ops.append(logging_ops.print_v2(new_step_line, '\n', cache[:, i], '\n', summarize=-1, output_stream=output_stream))
                with ops.control_dependencies(print_ops):
                    return constant_op.constant(0).op
            return _print_cache

        def _eq(file_index):
            return math_ops.equal(replica_id, file_index)
        flush_op_cases = {}
        flush_op_cases[_eq(0)] = _f(0)
        for i in range(1, num_replicas):
            if on_tpu and (not self._parameters.collect_summary_per_core):
                # Caches were already merged onto replica 0; others do nothing.
                flush_op_cases[_eq(i)] = control_flow_ops.no_op
            else:
                flush_op_cases[_eq(i)] = _f(i)
        return control_flow_case.case(flush_op_cases, exclusive=True)
    cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG, graph)
    if self._use_temp_cache():
        cache_val = cache
    else:
        cache_val = cache.value()
    if on_tpu:
        if not self._parameters.collect_summary_per_core:
            cache_val = self.merge_caches_on_tpu(cache_val)
            cache_val = self.aggregate_global_cache(cache_val)[0]
        # Printing must happen outside the TPU computation.
        flush_op = tpu_replication.outside_compilation(_flush_fun, cache_val, self._replica_id, array_ops.identity(training_util.get_or_create_global_step()))
    else:
        global_step = training_util.get_or_create_global_step()
        flush_op = _flush_fun(cache_val, self._replica_id, global_step)
    if self._use_temp_cache():
        with ops.control_dependencies([flush_op]):
            return constant_op.constant(0).op
    else:
        # Persistent cache: reset it to the initial sentinel after flushing.
        with ops.control_dependencies([flush_op]):
            reset_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE, dtype=cache.dtype, shape=cache.shape)
            assign_op = state_ops.assign(cache, reset_value).op
            with ops.control_dependencies([assign_op]):
                return constant_op.constant(0).op
Generates an Op that will flush the cache to file. Args: num_replicas: total number of replicas. on_tpu: if the graph is executed on TPU. tensor_trace_order: TensorTraceOrder object holding tensorname to id map. graph: TensorFlow graph. Returns: The Op to flush the cache to file.
github-repos
def send_course_refund_email(self, email, refund_id, amount, course_name, order_number, order_url, site_code=None):
    """Sends the course refund email via Sailthru.

    Args:
        self: The bound task (used for retry scheduling).
        email (str): Recipient's email address.
        refund_id (int): ID of the refund that initiated this task.
        amount (str): Formatted amount of the refund.
        course_name (str): Name of the course for which payment was refunded.
        order_number (str): Order number of the refunded order.
        order_url (str): Receipt URL of the refunded order.
        site_code (str): Identifier of the site sending the email.
    """
    config = get_sailthru_configuration(site_code)
    try:
        sailthru_client = get_sailthru_client(site_code)
    except SailthruError:
        # No usable client; nothing to send.
        return
    email_vars = {'amount': amount, 'course_name': course_name, 'order_number': order_number, 'order_url': order_url}
    try:
        response = sailthru_client.send(template=config['templates']['course_refund'], email=email, _vars=email_vars)
    except SailthruClientError:
        logger.exception('A client error occurred while attempting to send a course refund notification for refund [%d].', refund_id)
        return
    if response.is_ok():
        logger.info('Course refund notification sent for refund %d.', refund_id)
    else:
        error = response.get_error()
        logger.error('An error occurred while attempting to send a course refund notification for refund [%d]: %d - %s', refund_id, error.get_error_code(), error.get_message())
        if can_retry_sailthru_request(error):
            # Transient Sailthru error: schedule a task retry.
            logger.info('An attempt will be made again to send a course refund notification for refund [%d].', refund_id)
            schedule_retry(self, config)
        else:
            logger.warning('No further attempts will be made to send a course refund notification for refund [%d].', refund_id)
Sends the course refund email. Args: self: Ignore. email (str): Recipient's email address. refund_id (int): ID of the refund that initiated this task. amount (str): Formatted amount of the refund. course_name (str): Name of the course for which payment was refunded. order_number (str): Order number of the order that was refunded. order_url (str): Receipt URL of the refunded order. site_code (str): Identifier of the site sending the email.
codesearchnet
def is_compatible_with(self, other):
    """Returns true if `other` is compatible with this Dimension.

    Two known Dimensions are compatible if they have the same value. An
    unknown Dimension is compatible with all other Dimensions.

    Args:
        other: Another Dimension.

    Returns:
        True if this Dimension and `other` are compatible.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
        # An unknown dimension is compatible with everything.
        return True
    return self._value == other.value
Returns true if `other` is compatible with this Dimension. Two known Dimensions are compatible if they have the same value. An unknown Dimension is compatible with all other Dimensions. Args: other: Another Dimension. Returns: True if this Dimension and `other` are compatible.
github-repos
def get_tensor_shape(self, tensor_name):
    """The tf.TensorShape of a tensor.

    Args:
        tensor_name: string, the name of a tensor in the graph.

    Returns:
        a tf.TensorShape
    """
    tensor = self._name_to_tensor(tensor_name)
    if isinstance(tensor, mtf.Tensor):
        # Mesh-TensorFlow tensors carry their own shape type; convert it.
        return tf.TensorShape(tensor.shape.to_integer_list)
    return tensor.shape
The tf.TensorShape of a tensor. Args: tensor_name: string, the name of a tensor in the graph. Returns: a tf.TensorShape
codesearchnet
def set_tensor(self, tensor_index, value):
    """Sets the value of the input tensor (copies the data in `value`).

    To avoid the copy, use the `tensor()` function to get a numpy buffer
    pointing at the interpreter's input buffer instead.

    Args:
        tensor_index: Tensor index of the tensor to set; obtainable from
            the 'index' field in get_input_details.
        value: Value of tensor to set.

    Raises:
        ValueError: If the interpreter could not set the tensor.
    """
    self._interpreter.SetTensor(tensor_index, value)
Sets the value of the input tensor. Note this copies data in `value`. If you want to avoid copying, you can use the `tensor()` function to get a numpy buffer pointing to the input buffer in the tflite interpreter. Args: tensor_index: Tensor index of tensor to set. This value can be gotten from the 'index' field in get_input_details. value: Value of tensor to set. Raises: ValueError: If the interpreter could not set the tensor.
github-repos
def Check(self, error, filename, linenum):
    """Report if too many lines in function body.

    Args:
        error: The function to call with any errors found.
        filename: The name of the current file.
        linenum: The number of the line to check.
    """
    # Test functions get a more lenient base trigger.
    if Match(r'T(EST|est)', self.current_function):
        base_trigger = self._TEST_TRIGGER
    else:
        base_trigger = self._NORMAL_TRIGGER
    # Each verbosity level doubles the allowed size.
    trigger = base_trigger * 2**_VerboseLevel()
    if self.lines_in_function <= trigger:
        return
    error_level = min(5, int(math.log(self.lines_in_function / base_trigger, 2)))
    error(filename, linenum, 'readability/fn_size', error_level,
          'Small and focused functions are preferred:'
          ' %s has %d non-comment lines'
          ' (error triggered by exceeding %d lines).' % (
              self.current_function, self.lines_in_function, trigger))
Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check.
juraj-google-style
def __is_bound_method(method):
    """Return ``True`` if *method* is a method bound to a class instance.

    Args:
        method: A method or function type object.
    """
    looks_like_method = hasattr(method, "__func__") and hasattr(method, "__self__")
    if not looks_like_method:
        return False
    # Unbound (class-level) methods have no instance attached.
    return six.get_method_self(method) is not None
Return ``True`` if the `method` is a bound method (attached to an class instance. Args: method: A method or function type object.
juraj-google-style
def __init__(self, input_reader=None, output_writer=None):
    """Initializes the CLI tool object.

    Args:
        input_reader (Optional[InputReader]): input reader, where None
            indicates that the stdin input reader should be used.
        output_writer (Optional[OutputWriter]): output writer, where None
            indicates that the stdout output writer should be used.
    """
    super(StorageMediaTool, self).__init__(
        input_reader=input_reader, output_writer=output_writer)
    # Artifact definition configuration.
    self._custom_artifacts_path = None
    self._artifact_definitions_path = None
    self._artifact_filters = None
    # Credentials used to unlock encrypted volumes.
    self._credentials = []
    self._credential_configurations = []
    self._filter_file = None
    self._partitions = None
    self._process_vss = False
    self._source_scanner = source_scanner.SourceScanner()
    self._source_path = None
    self._source_path_specs = []
    self._textwrapper = textwrap.TextWrapper()
    # Volume Shadow Snapshot (VSS) selection state.
    self._user_selected_vss_stores = False
    self._volumes = None
    self._vss_only = False
    self._vss_stores = None
Initializes the CLI tool object. Args: input_reader (Optional[InputReader]): input reader, where None indicates that the stdin input reader should be used. output_writer (Optional[OutputWriter]): output writer, where None indicates that the stdout output writer should be used.
juraj-google-style
def get_shifted_center_blocks(x, indices):
    """Get right shifted blocks for masked local attention 2d.

    Args:
        x: A tensor with shape [batch, heads, height, width, depth].
        indices: The indices to gather blocks.

    Returns:
        A tensor of extracted blocks, each block right shifted along length.
    """
    gathered = gather_blocks_2d(x, indices)
    # Pad one step on the left of the second-to-last axis and drop the last
    # step: this shifts that axis right by one.
    padding = [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]]
    return tf.pad(gathered, padding)[:, :, :, :-1, :]
Get right shifted blocks for masked local attention 2d. Args: x: A tensor with shape [batch, heads, height, width, depth] indices: The indices to gather blocks Returns: x_shifted: a tensor of extracted blocks, each block right shifted along length.
codesearchnet
def __init__(self, callback):
    """VCS init method.

    Args:
        callback: Callback function that will be called for each action.
    """
    self._callback = callback
    # brocade_vcs request builder; return_xml presumably renders generated
    # requests as XML — confirm against pynos.utilities.
    self._vcs = brocade_vcs(
        callback=pynos.utilities.return_xml
    )
VCS init method. Args: callback: Callback function that will be called for each action. Returns: VCS Object Raises: None
juraj-google-style
def get_from(input_file, property_names):
    """Reads a geojson and returns a list of value tuples, each value
    corresponding to a property in property_names.

    Args:
        input_file (str): File name.
        property_names: List of strings; each string is a property name.

    Returns:
        List of value tuples (missing properties yield None).
    """
    with open(input_file) as f:
        feature_collection = geojson.load(f)
    values = []
    for feature in feature_collection['features']:
        props = feature['properties']
        values.append(tuple(props.get(name) for name in property_names))
    return values
Reads a geojson and returns a list of value tuples, each value corresponding to a property in property_names. Args: input_file (str): File name. property_names: List of strings; each string is a property name. Returns: List of value tuples.
codesearchnet
def ekm_log(logstr, priority=3):
    """Send string to the module level log.

    Args:
        logstr (str): String to log.
        priority (int): Priority; the message is emitted only when it does
            not exceed the module log level (3 is the default, 4 is used
            for special verbose output).
    """
    # Fix: removed an unused alias (`dt = datetime.datetime`) and a dead
    # trailing `pass`; behavior is unchanged.
    if priority <= ekmmeters_log_level:
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M.%f")
        ekmmeters_log_func("[EKM Meter Debug Message: " + stamp + "] -> " + logstr)
Send string to module level log Args: logstr (str): string to print. priority (int): priority, supports 3 (default) and 4 (special).
juraj-google-style
def get_image_size_fit_to_canvas(image_height: int, image_width: int, canvas_height: int, canvas_width: int, tile_size: int) -> Tuple[int, int]:
    """Calculates the new size of an image to fit within a canvas while
    maintaining aspect ratio.

    Each target side is first clamped to [tile_size, canvas side]; the image
    is then scaled by the smaller of the two resulting ratios so it fits
    within the canvas, preserving aspect ratio as much as possible.

    Args:
        image_height (`int`): The height of the original image.
        image_width (`int`): The width of the original image.
        canvas_height (`int`): The height of the canvas.
        canvas_width (`int`): The width of the canvas.
        tile_size (`int`): The tile size.

    Returns:
        `Tuple[int, int]`: The new (height, width) of the image.
    """
    bounded_w = np.clip(image_width, tile_size, canvas_width)
    bounded_h = np.clip(image_height, tile_size, canvas_height)
    height_ratio = bounded_h / image_height
    width_ratio = bounded_w / image_width
    if width_ratio < height_ratio:
        # Width is the limiting side; derive height from the width ratio.
        out_w = bounded_w
        out_h = min(math.floor(image_height * width_ratio) or 1, bounded_h)
    else:
        out_h = bounded_h
        out_w = min(math.floor(image_width * height_ratio) or 1, bounded_w)
    return (out_h, out_w)
Calculates the new size of an image to fit within a canvas while maintaining aspect ratio. This function calculates the optimal size for an image to fit within a canvas defined by canvas_height and canvas_width, while ensuring that the image dimensions are not smaller than tile_size. If the image is larger than the canvas, the returned size will fit within the canvas. If the image already fits within the canvas, the size remains unchanged. The aspect ratio of the original image is preserved as much as possible. Args: image_height (`int`): The height of the original image. image_width (`int`): The width of the original image. canvas_height (`int`): The height of the canvas. canvas_width (`int`): The width of the canvas. tile_size (`int`): The tile size. Returns: `Tuple[int, int]`: A tuple containing the new height and width of the image.
github-repos
def _workflow_complete(workflow_stage_dict: dict):
    """Check if the workflow is complete.

    Args:
        workflow_stage_dict (dict): Workflow metadata dictionary.

    Returns:
        bool, True if every stage's status is 'complete', otherwise False.
    """
    statuses = [stage_config['status'] == 'complete'
                for stage_config in workflow_stage_dict.values()]
    if all(statuses):
        LOG.info('PB workflow complete!')
        return True
    return False
Check if the workflow is complete. This function checks if the entire workflow is complete. This function is used by `execute_processing_block`. Args: workflow_stage_dict (dict): Workflow metadata dictionary. Returns: bool, True if the workflow is complete, otherwise False.
juraj-google-style
def get(self):
    """Calculates and returns the stdev of the current sliding window.

    Returns:
        float: The sample standard deviation (ddof=1) of the values in the
        current window; NaN if the window holds fewer than 2 elements.
    """
    with warnings.catch_warnings(record=False):
        # nanvar warns for windows with < 2 samples; the resulting NaN is
        # the documented return value, so silence the warning.
        warnings.simplefilter('ignore')
        variance = np.nanvar(self._queue, ddof=1)
    return math.sqrt(variance)
Calculates and returns the stdev of the current sliding window. Returns: float: The standard deviation of the values in the current sliding window. Returns NaN if the window contains fewer than 2 elements.
github-repos
def get_model_files(model_type: str, frameworks: Optional[List[str]]=None) -> Dict[str, Union[Path, List[Path]]]:
    """Retrieves all the files associated to a model.

    Args:
        model_type (`str`): A valid model type (like "bert" or "gpt2").
        frameworks (`List[str]`, *optional*): If passed, only keep the model
            files corresponding to the passed frameworks.

    Returns:
        `Dict[str, Union[Path, List[Path]]]`: A dictionary with keys
        **doc_file**, **model_files**, **module_name** and **test_files**.
    """
    module_name = model_type_to_module_name(model_type)
    model_module = TRANSFORMERS_PATH / 'models' / module_name
    model_files = filter_framework_files(list(model_module.glob('*.py')), frameworks=frameworks)
    doc_file = REPO_PATH / 'docs' / 'source' / 'en' / 'model_doc' / f'{model_type}.md'
    candidate_tests = [f'test_modeling_{module_name}.py', f'test_modeling_tf_{module_name}.py', f'test_modeling_flax_{module_name}.py', f'test_tokenization_{module_name}.py', f'test_image_processing_{module_name}.py', f'test_feature_extraction_{module_name}.py', f'test_processor_{module_name}.py']
    candidate_tests = filter_framework_files(candidate_tests, frameworks=frameworks)
    tests_dir = REPO_PATH / 'tests' / 'models' / module_name
    # Keep only the test files that actually exist on disk.
    test_files = [path for path in (tests_dir / name for name in candidate_tests) if path.exists()]
    return {'doc_file': doc_file, 'model_files': model_files, 'module_name': module_name, 'test_files': test_files}
Retrieves all the files associated to a model. Args: model_type (`str`): A valid model type (like "bert" or "gpt2") frameworks (`List[str]`, *optional*): If passed, will only keep the model files corresponding to the passed frameworks. Returns: `Dict[str, Union[Path, List[Path]]]`: A dictionary with the following keys: - **doc_file** -- The documentation file for the model. - **model_files** -- All the files in the model module. - **test_files** -- The test files for the model.
github-repos
def set(self, name, value):
    """Set a device model property.

    Args:
        name (str): The name of the property to set.
        value (int, bool): The value of the property to set.

    Raises:
        ArgumentError: If the property is unknown to this DeviceModel.
    """
    key = str(name)
    if key in self._properties:
        self._properties[key] = value
    else:
        raise ArgumentError('Unknown property in DeviceModel', name=key)
Set a device model property. Args: name (str): The name of the property to set value (int, bool): The value of the property to set
codesearchnet
def CallNtpdate(logger):
    """Sync the system clock using ntpdate.

    Stops ntpd (if running), runs ntpdate against the servers listed in
    /etc/ntp.conf, then restarts ntpd.

    Args:
        logger: logger object, used to write to SysLog and serial port.
    """
    # `service ... status` exits 0 when the service is running.
    ntpd_inactive = subprocess.call(['service', 'ntpd', 'status'])
    try:
        if not ntpd_inactive:
            # ntpdate cannot run while ntpd holds the NTP port.
            subprocess.check_call(['service', 'ntpd', 'stop'])
        # shell=True is needed for the backtick substitution; the command
        # string is fixed, so no untrusted input reaches the shell.
        subprocess.check_call(
            'ntpdate `awk \'$1=="server" {print $2}\' /etc/ntp.conf`', shell=True)
        if not ntpd_inactive:
            subprocess.check_call(['service', 'ntpd', 'start'])
    except subprocess.CalledProcessError:
        logger.warning('Failed to sync system time with ntp server.')
    else:
        logger.info('Synced system time with ntp server.')
Sync clock using ntpdate. Args: logger: logger object, used to write to SysLog and serial port.
juraj-google-style
def crack(ciphertext, *fitness_functions, min_key=0, max_key=26, shift_function=shift_case_english):
    """Break ``ciphertext`` by enumerating keys between ``min_key`` and
    ``max_key`` (exclusive).

    Args:
        ciphertext (iterable): The symbols to decrypt.
        *fitness_functions: Functions to score decryptions with.

    Keyword Args:
        min_key (int): Key to start with.
        max_key (int): Key to stop at (exclusive).
        shift_function: Shift function to use.

    Returns:
        Sorted (best first) list of decryptions.

    Raises:
        ValueError: If min_key exceeds max_key.
    """
    if min_key >= max_key:
        raise ValueError('min_key cannot exceed max_key')

    def _attempt(key):
        plaintext = decrypt(key, ciphertext, shift_function=shift_function)
        return Decryption(plaintext, key, score(plaintext, *fitness_functions))
    return sorted((_attempt(key) for key in range(min_key, max_key)), reverse=True)
Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``. Example: >>> decryptions = crack("KHOOR", fitness.english.quadgrams) >>> print(''.join(decryptions[0].plaintext)) HELLO Args: ciphertext (iterable): The symbols to decrypt *fitness_functions (variable length argument list): Functions to score decryption with Keyword Args: min_key (int): Key to start with max_key (int): Key to stop at (exclusive) shift_function (function(shift, symbol)): Shift function to use Returns: Sorted list of decryptions Raises: ValueError: If min_key exceeds max_key ValueError: If no fitness_functions are given
codesearchnet
def fit_arrhenius(temps, diffusivities):
    """Fit D = c * exp(-Ea/kT) and return Ea, c, and the standard error of Ea.

    A least-squares line is fit to ln(D) versus 1/T; the slope gives the
    activation energy and the intercept the prefactor.

    Args:
        temps ([float]): A sequence of temperatures (K).
        diffusivities ([float]): A sequence of diffusivities (cm^2/s).

    Returns:
        tuple: (Ea in eV, c, std error of Ea in eV or None if <= 2 points).
    """
    inv_temp = 1 / np.array(temps)
    log_diff = np.log(diffusivities)
    design = np.array([inv_temp, np.ones(len(temps))]).T
    coeffs, residuals, _, _ = np.linalg.lstsq(design, log_diff, rcond=None)
    coeffs = np.array(coeffs)
    n_points = len(temps)
    if n_points > 2:
        # Standard error of the slope, converted from K to eV via k/e.
        std_Ea = (residuals[0] / (n_points - 2) / (n_points * np.var(inv_temp))) ** 0.5 * const.k / const.e
    else:
        std_Ea = None
    return -coeffs[0] * const.k / const.e, np.exp(coeffs[1]), std_Ea
Returns Ea, c, standard error of Ea from the Arrhenius fit: D = c * exp(-Ea/kT) Args: temps ([float]): A sequence of temperatures. units: K diffusivities ([float]): A sequence of diffusivities (e.g., from DiffusionAnalyzer.diffusivity). units: cm^2/s
juraj-google-style
def get_modules_to_fuse(model, quantization_config):
    """Returns the fusing mapping given the quantization config and the model.

    Args:
        model (`~PreTrainedModel`):
            The model to fuse — it should have been converted to AWQ format
            beforehand.
        quantization_config (`~transformers.quantization_config.AWQConfig`):
            The quantization configuration to use.

    Returns:
        dict: The fused-module mapping, augmented with the model dimensions
        and the maximum sequence length.

    Raises:
        TypeError: If ``model`` is not a ``PreTrainedModel``.
        ValueError: If no fusing mapping can be found.
    """
    if not isinstance(model, PreTrainedModel):
        raise TypeError(f'The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}')
    if quantization_config.modules_to_fuse is not None:
        # An explicit mapping on the config wins over the built-in table.
        current_fused_mapping = quantization_config.modules_to_fuse
        current_fused_mapping['max_seq_len'] = quantization_config.fuse_max_seq_len
    elif model.config.model_type in AWQ_FUSED_MAPPINGS:
        current_fused_mapping = AWQ_FUSED_MAPPINGS[model.config.model_type]
        # Handle multimodal models: take the text (decoder) sub-config.
        config = model.config.get_text_config(decoder=True)
        hidden_size = config.hidden_size
        num_attention_heads = config.num_attention_heads
        num_key_value_heads = getattr(config, 'num_key_value_heads', num_attention_heads)
        current_fused_mapping['hidden_size'] = hidden_size
        current_fused_mapping['num_attention_heads'] = num_attention_heads
        current_fused_mapping['num_key_value_heads'] = num_key_value_heads
        current_fused_mapping['max_seq_len'] = quantization_config.fuse_max_seq_len
    else:
        # Fix: the original error message's string literal was truncated
        # mid-URL (unterminated literal); restored here.
        raise ValueError('Fusing mapping not found either on the quantization config or the supported `AWQ_FUSED_MAPPINGS`. Please pass a `fused_mapping` argument in the `quantization_config` or raise an issue on transformers https://github.com/huggingface/transformers')
    return current_fused_mapping
Returns the fusing mapping given the quantization config and the model Args: model (`~PreTrainedModel`): The model to fuse - note this model should have been converted into AWQ format beforehand. quantization_config (`~transformers.quantization_config.AWQConfig`): The quantization configuration to use.
github-repos
def assemble_buffer(self, buf_header, buf_payload):
    """Add a buffer header and payload that we read from the socket.

    Unlike add_buffer(), this validates against the header's num_buffers
    instead of filling the header in.

    Args:
        buf_header (``JSON``): A buffer header.
        buf_payload (``JSON`` or bytes): A buffer payload.

    Returns:
        None

    Raises:
        ProtocolError: If more buffers arrive than the header declared.
    """
    expected = self.header.get('num_buffers', 0)
    if expected <= len(self._buffers):
        raise ProtocolError("too many buffers received expecting " + str(self.header['num_buffers']))
    self._buffers.append((buf_header, buf_payload))
Add a buffer header and payload that we read from the socket. This differs from add_buffer() because we're validating vs. the header's num_buffers, instead of filling in the header. Args: buf_header (``JSON``) : a buffer header buf_payload (``JSON`` or bytes) : a buffer payload Returns: None Raises: ProtocolError
juraj-google-style
def __init__(self, context):
    """Instantiates TextPlugin via TensorBoard core.

    Args:
        context: A base_plugin.TBContext instance.
    """
    self._multiplexer = context.multiplexer
    # Lazily computed index, with a lock/thread pair guarding its refresh.
    self._index_cached = None
    self._index_impl_lock = threading.Lock()
    self._index_impl_thread = None
Instantiates TextPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
juraj-google-style
def UpdateOsLogin(self, oslogin_desired, two_factor_desired=False):
    """Update whether OS Login is enabled and update NSS cache if necessary.

    Args:
        oslogin_desired: bool, enable OS Login if True, disable if False.
        two_factor_desired: bool, enable two factor if True, disable if False.

    Returns:
        int, the return code from updating OS Login, or None if not present.
    """
    oslogin_configured = self._GetStatus(two_factor=False)
    if oslogin_configured is None:
        # OS Login tooling is not installed on this system.
        return None
    two_factor_configured = self._GetStatus(two_factor=True)
    # Two factor only makes sense when OS Login itself is desired.
    two_factor_desired = two_factor_desired and oslogin_desired
    if oslogin_desired:
        params = ['activate']
        if two_factor_desired:
            params += ['--twofactor']
        if not oslogin_configured:
            self.logger.info('Activating OS Login.')
            return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()
        if two_factor_desired and not two_factor_configured:
            self.logger.info('Activating OS Login two factor authentication.')
            return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()
        if two_factor_configured and not two_factor_desired:
            # Deactivate and reactivate to drop two factor.
            self.logger.info('Reactivating OS Login with two factor disabled.')
            return (self._RunOsLoginControl(['deactivate'])
                    or self._RunOsLoginControl(params))
        # Already in the desired state: periodically refresh the NSS cache.
        current_time = time.time()
        if current_time - self.update_time > NSS_CACHE_DURATION_SEC:
            self.update_time = current_time
            return self._RunOsLoginNssCache()
    elif oslogin_configured:
        self.logger.info('Deactivating OS Login.')
        return (self._RunOsLoginControl(['deactivate'])
                or self._RemoveOsLoginNssCache())
    return 0
Update whether OS Login is enabled and update NSS cache if necessary. Args: oslogin_desired: bool, enable OS Login if True, disable if False. two_factor_desired: bool, enable two factor if True, disable if False. Returns: int, the return code from updating OS Login, or None if not present.
juraj-google-style
def remove(self, key):
    """Remove the data stored for the given key, if present.

    Args:
        key (str): Key of the data to remove.

    Note:
        The container has to be opened in advance.
    """
    self.raise_error_if_not_open()
    if key not in self._file:
        return
    del self._file[key]
Remove the data stored for the given key. Args: key (str): Key of the data to remove. Note: The container has to be opened in advance.
juraj-google-style
def is_flat(neurite, tol, method='tolerance'):
    """Check if a neurite is flat using the given method.

    Args:
        neurite(Neurite): Neurite to operate on.
        tol(float): Tolerance.
        method(string): Flatness estimation method — 'tolerance' is True if
            any extent of the tree is smaller than ``tol``; 'ratio' is True
            if the ratio of the two smallest extents is smaller than ``tol``.

    Returns:
        True if the neurite is flat.
    """
    ext = principal_direction_extent(neurite.points[:, COLS.XYZ])
    assert method in ('tolerance', 'ratio'), "Method must be one of 'tolerance', 'ratio'"
    if method == 'ratio':
        ordered = np.sort(ext)
        return ordered[0] / ordered[1] < float(tol)
    return any(ext < float(tol))
Check if neurite is flat using the given method Args: neurite(Neurite): neurite to operate on tol(float): tolerance method(string): the method of flatness estimation: 'tolerance' returns true if any extent of the tree is smaller than the given tolerance 'ratio' returns true if the ratio of the smallest directions is smaller than tol. e.g. [1,2,3] -> 1/2 < tol Returns: True if neurite is flat
juraj-google-style
def _enum_from_direction(direction):
    """Convert a string representation of a direction to an enum.

    Args:
        direction (str): A direction to order by; one of
            ``Query.ASCENDING`` or ``Query.DESCENDING``. Integers are
            assumed to already be enum values and pass through unchanged.

    Returns:
        int: The enum corresponding to ``direction``.

    Raises:
        ValueError: If ``direction`` is not a valid direction.
    """
    if isinstance(direction, int):
        return direction
    if direction == Query.ASCENDING:
        return enums.StructuredQuery.Direction.ASCENDING
    if direction == Query.DESCENDING:
        return enums.StructuredQuery.Direction.DESCENDING
    msg = _BAD_DIR_STRING.format(direction, Query.ASCENDING, Query.DESCENDING)
    raise ValueError(msg)
Convert a string representation of a direction to an enum. Args: direction (str): A direction to order by. Must be one of :attr:`~.firestore.Query.ASCENDING` or :attr:`~.firestore.Query.DESCENDING`. Returns: int: The enum corresponding to ``direction``. Raises: ValueError: If ``direction`` is not a valid direction.
juraj-google-style
def is_partial(self, filepath):
    """Check if a file is a Sass partial source (filename starts with '_').

    Args:
        filepath (str): A file path; absolute, relative, or bare filename.

    Returns:
        bool: True if the file is a partial source, else False.
    """
    return os.path.basename(filepath).startswith('_')
Check if file is a Sass partial source (see `Sass partials Reference`_). Args: filepath (str): A file path. Can be absolute, relative or just a filename. Returns: bool: True if file is a partial source, else False.
juraj-google-style
def draw_lines(self, *points):
    """Draw a series of connected lines on the current rendering target.

    Args:
        *points (Point): The points along the lines.

    Raises:
        SDLError: If an error is encountered (raised by check_int_err).
    """
    # Marshal the Python Point objects into a C SDL_Point array.
    point_array = ffi.new('SDL_Point[]', len(points))
    for (i, p) in enumerate(points):
        point_array[i] = p._ptr[0]
    check_int_err(lib.SDL_RenderDrawLines(self._ptr, point_array, len(points)))
Draw a series of connected lines on the current rendering target. Args: *points (Point): The points along the lines. Raises: SDLError: If an error is encountered.
codesearchnet
def _parse_interfaces(self):
    """Scans the global config and returns the configured interfaces.

    Returns:
        dict: A dict object intended to be merged into the resource dict;
        maps each Port-Channel interface with an mlag id to that id.
    """
    interfaces = dict()
    for name in re.findall('^interface (Po.+)$', self.config, re.M):
        block = self.get_block('interface %s' % name)
        match = re.search('mlag (\\d+)', block)
        # Only interfaces with an mlag id configured are reported.
        if match:
            interfaces[name] = dict(mlag_id=match.group(1))
    return dict(interfaces=interfaces)
Scans the global config and returns the configured interfaces Returns: dict: A dict object that is intended to be merged into the resource dict.
codesearchnet
def create(window, root):
    """Create a notification object.

    Args:
        window (:py:class:`BrowserWindow`): Window object this region
            appears in.
        root: WebDriver element object that serves as the root for the
            notification.

    Returns:
        A notification instance; the class is selected by the root
        element's ``id`` property, falling back to BaseNotification.
    """
    notifications = {}
    _id = root.get_property('id')
    # Imported here rather than at module top — NOTE(review): presumably to
    # avoid a circular import with the addons module; confirm.
    from foxpuppet.windows.browser.notifications import addons
    notifications.update(addons.NOTIFICATIONS)
    return notifications.get(_id, BaseNotification)(window, root)
Create a notification object. Args: window (:py:class:`BrowserWindow`): Window object this region appears in. root (:py:class:`~selenium.webdriver.remote.webelement.WebElement`): WebDriver element object that serves as the root for the notification. Returns: :py:class:`BaseNotification`: Firefox notification.
codesearchnet
def cumsum(x, dim, exclusive=False):
    """Cumulative sum along a dimension, via a lower-triangular mask einsum.

    Args:
        x: a Tensor.
        dim: a Dimension.
        exclusive: a boolean; when True the element itself is excluded.

    Returns:
        a Tensor with the same shape as x.
    """
    with tf.variable_scope("cumsum"):
        tmp_name = "tmp_dim_cumsum"
        tmp_dim = Dimension(tmp_name, dim.size)
        out_shape = x.shape.rename_dimension(dim.name, tmp_name)
        # Lower-triangular (or strictly lower, if exclusive) 0/1 mask.
        comparator = less if exclusive else less_equal
        positions = mtf_range(x.mesh, dim, dtype=tf.float32)
        out_positions = mtf_range(x.mesh, tmp_dim, dtype=tf.float32)
        mask = cast(comparator(positions, out_positions), x.dtype)
        summed = einsum([x, mask], output_shape=out_shape)
        return reshape(summed, x.shape)
Cumulative sum. Args: x: a Tensor dim: a Dimension exclusive: a boolean Returns: a Tensor with the same shape as x.
juraj-google-style
def clone(self, uuid):
    """Clone the task instance with the given UUID.

    Args:
        uuid (str): The UUID of the task instance to clone.

    Returns:
        A task instance model representing the task instance created by
        the clone.
    """
    request_url = self._client.base_api_url + self.clone_url.format(id=uuid)
    response = self._client.session.post(request_url)
    # A 201 is expected; anything else raises via the validator.
    self.validate_request_success(
        response_text=response.text,
        request_url=request_url,
        status_code=response.status_code,
        expected_status_code=HTTP_201_CREATED)
    return self.response_data_to_model_instance(response.json())
Clone the task instance with given UUID. Args: uuid (str): The UUID of the task instance to clone. Returns: :class:`saltant.models.base_task_instance.BaseTaskInstance`: A task instance model instance representing the task instance created due to the clone.
juraj-google-style
def begin(self: EventSetOrNode) -> EventSetOrNode:
    """Generates a single timestamp at the beginning of the EventSet,
    per index group.

    Returns:
        A feature-less EventSet with a single timestamp per index group.
    """
    # Imported lazily; the operator module shadows this method's name only
    # within this scope.
    from temporian.core.operators.begin import begin
    return begin(self)
Generates a single timestamp at the beginning of the [`EventSet`][temporian.EventSet], per index group. Usage example: ```python >>> a = tp.event_set( ... timestamps=[5, 6, 7, -1], ... features={"f": [50, 60, 70, -10], "idx": [1, 1, 1, 2]}, ... indexes=["idx"] ... ) >>> a_ini = a.begin() >>> a_ini indexes: [('idx', int64)] features: [] events: idx=1 (1 events): timestamps: [5.] idx=2 (1 events): timestamps: [-1.] ... ``` Returns: A feature-less EventSet with a single timestamp per index group.
github-repos
def process(self, element, *args, **kwargs):
    """Yield the element with its embedding L2-normalized.

    Args:
        element: A ((text, uid), prediction) pair, where
            ``prediction.inference`` holds the embedding vector.

    Yields:
        dict: {'text', 'id', 'embedding'} with a unit-norm embedding.
    """
    (text, uid), prediction = element
    raw_embedding = prediction.inference
    norm = np.linalg.norm(raw_embedding)
    yield {'text': text, 'id': uid, 'embedding': raw_embedding / norm}
For each element in the input PCollection, normalize the embedding vector, and yield a new element with the normalized embedding added Args: element: The element to be processed.
github-repos
def error_log(self, msg='', level=20, traceback=False):
    """Write an error message to stderr.

    Args:
        msg (str): Error message.
        level (int): Logging level; accepted for interface compatibility
            but not used by this implementation.
        traceback (bool): Whether to append the current traceback.
    """
    stream = sys.stderr
    stream.write(msg + '\n')
    stream.flush()
    if traceback:
        stream.write(traceback_.format_exc())
        stream.flush()
Write error message to log. Args: msg (str): error message level (int): logging level traceback (bool): add traceback to output or not
juraj-google-style
def dump_property(self, name):
    """Serialize a property of this instance by name.

    Args:
        name (str): The name of the property to dump.

    Returns:
        object: The serialized value of the property.

    Raises:
        ArgumentError: if the instance has no property called ``name``.
    """
    if not hasattr(self, name):
        raise ArgumentError('Unknown property %s' % name)

    value = getattr(self, name)
    # Complex properties carry a serializer as the first tuple entry.
    if name in self._complex_properties:
        serializer = self._complex_properties[name][0]
        value = serializer(value)

    return value
Serialize a property of this class by name. Args: name (str): The name of the property to dump. Returns: object: The serialized value of the property.
codesearchnet
def _generate_response(self, response: dict, request: dict) -> dict: response_template = deepcopy(self.response_template) response_template['sessionAttributes']['sessionId'] = request['session']['sessionId'] for (key, value) in response_template.items(): if (key not in response.keys()): response[key] = value return response
Populates the generated response with additional data conforming to the Alexa response specification.

    Args:
        response: Partially populated response dict to complete.
        request: Alexa request.
    Returns:
        response: Response conforming to the Alexa response specification.
codesearchnet
def peek_step(self, val: ArrayValue, sn: "DataNode") -> Tuple[Optional[Value], "DataNode"]:
    """Return the entry value addressed by the receiver plus its schema node.

    Args:
        val: Current value (array).
        sn: Current schema node.

    Returns:
        A ``(value, schema_node)`` pair; ``value`` is ``None`` when the
        receiver's index cannot be resolved in *val*.
    """
    try:
        entry = val[self.index]
    except (IndexError, KeyError, TypeError):
        # Out-of-range index, missing key, or a non-indexable value.
        return None, sn
    return entry, sn
Return entry value addressed by the receiver + its schema node. Args: val: Current value (array). sn: Current schema node.
juraj-google-style
def is_packet_trace(path):
    """Determine if a file is a packet trace that is supported by this module.

    Args:
        path (str): path to the trace file.

    Returns:
        bool: True if the file is a valid packet trace.
    """
    path = os.path.abspath(path)
    if not os.path.isfile(path):
        return False
    try:
        # ``with`` guarantees the handle is closed even if read() raises;
        # the original leaked the handle on a failed read and swallowed
        # every exception with a bare ``except``.
        with open(path, 'rb') as trace_file:
            magic = trace_file.read(4)
    except OSError:
        # Unreadable file (permissions, race with deletion, ...).
        return False
    # FILE_TYPE_HANDLER maps 4-byte magic numbers to supported formats.
    return magic in FILE_TYPE_HANDLER
Determine if a file is a packet trace that is supported by this module. Args: path (str): path to the trace file. Returns: bool: True if the file is a valid packet trace.
juraj-google-style
def _ExpandUsersHomeDirectoryPathSegments(cls, path_segments, path_separator, user_accounts):
    """Expands a path to contain all users home or profile directories.

    Expands the artifacts path variable "%%users.homedir%%" or
    "%%users.userprofile%%".

    Args:
        path_segments (list[str]): path segments.
        path_separator (str): path segment separator.
        user_accounts (list[UserAccountArtifact]): user accounts.

    Returns:
        list[str]: paths returned for user accounts without a drive indicator.
    """
    if (not path_segments):
        return []
    user_paths = []
    first_path_segment = path_segments[0].lower()
    if (first_path_segment not in ('%%users.homedir%%', '%%users.userprofile%%')):
        # No users variable: keep the path as-is, after blanking any
        # Windows drive indicator. NOTE(review): this mutates the caller's
        # path_segments list in place — confirm callers do not reuse it.
        if cls._IsWindowsDrivePathSegment(path_segments[0]):
            path_segments[0] = ''
        user_path = path_separator.join(path_segments)
        user_paths.append(user_path)
    else:
        # Expand the variable once per user account that reports a home or
        # profile directory.
        for user_account in user_accounts:
            user_path_segments = user_account.GetUserDirectoryPathSegments()
            if (not user_path_segments):
                continue
            if cls._IsWindowsDrivePathSegment(user_path_segments[0]):
                user_path_segments[0] = ''
            # Drop a trailing empty segment so the join does not produce a
            # doubled separator.
            if (not user_path_segments[(- 1)]):
                user_path_segments.pop()
            # Append the remainder of the requested path after the user
            # directory segments.
            user_path_segments.extend(path_segments[1:])
            user_path = path_separator.join(user_path_segments)
            user_paths.append(user_path)
    return user_paths
Expands a path to contain all users home or profile directories. Expands the artifacts path variable "%%users.homedir%%" or "%%users.userprofile%%". Args: path_segments (list[str]): path segments. path_separator (str): path segment separator. user_accounts (list[UserAccountArtifact]): user accounts. Returns: list[str]: paths returned for user accounts without a drive indicator.
codesearchnet
def add_asset(self, asset, asset_name, asset_type):
    """Add an asset to the Victim.

    Args:
        asset: Asset category: PHONE, EMAIL, NETWORK, SOCIAL, or WEB.
        asset_name: Name of the asset.
        asset_type: Sub-type of the asset (used by EMAIL, NETWORK, and
            SOCIAL assets).

    Returns:
        The response from adding the asset, or None for an unknown
        asset category.
    """
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])

    # PHONE and WEB assets take only a name.
    if asset == 'PHONE':
        return self.tc_requests.add_victim_phone_asset(self.unique_id, asset_name)
    if asset == 'WEB':
        return self.tc_requests.add_victim_web_asset(self.unique_id, asset_name)

    # The remaining categories also take an asset sub-type.
    if asset == 'EMAIL':
        return self.tc_requests.add_victim_email_asset(self.unique_id, asset_name, asset_type)
    if asset == 'NETWORK':
        return self.tc_requests.add_victim_network_asset(self.unique_id, asset_name, asset_type)
    if asset == 'SOCIAL':
        return self.tc_requests.add_victim_social_asset(self.unique_id, asset_name, asset_type)

    # Unknown category: report it and return nothing.
    self._tcex.handle_error(
        925, ['asset_type', 'add_asset', 'asset_type', 'asset_type', asset_type]
    )
    return None
Adds an asset to the Victim.

        Valid asset_type:
        + PHONE
        + EMAIL
        + NETWORK
        + SOCIAL
        + WEB

        Args:
            asset: The asset category: PHONE, EMAIL, NETWORK, SOCIAL, or WEB.
            asset_name: The name of the asset.
            asset_type: The asset sub-type (used by EMAIL, NETWORK, and SOCIAL assets).

        Returns:
            The response from adding the asset, or None for an unknown asset category.
codesearchnet
def get_consensus_module(module_name):
    """Returns a consensus module by name.

    Args:
        module_name (str): The name of the module to load ('genesis',
            'devmode', or an importable module path).

    Returns:
        module: The consensus module.

    Raises:
        UnknownConsensusModuleError: Raised if the given module_name does
            not correspond to a consensus implementation.
    """
    # Well-known short names map to their full package paths; any other
    # name is treated as an importable module path directly.
    known_packages = {
        'genesis':
            'sawtooth_validator.journal.consensus.genesis.genesis_consensus',
        'devmode':
            'sawtooth_validator.journal.consensus.dev_mode.dev_mode_consensus',
    }
    module_package = known_packages.get(module_name, module_name)
    try:
        return importlib.import_module(module_package)
    except ImportError:
        raise UnknownConsensusModuleError(
            'Consensus module "{}" does not exist.'.format(module_name))
Returns a consensus module by name. Args: module_name (str): The name of the module to load. Returns: module: The consensus module. Raises: UnknownConsensusModuleError: Raised if the given module_name does not correspond to a consensus implementation.
codesearchnet
def export_pytorch(preprocessor: Union['PreTrainedTokenizer', 'FeatureExtractionMixin', 'ProcessorMixin'], model: 'PreTrainedModel', config: OnnxConfig, opset: int, output: Path, tokenizer: Optional['PreTrainedTokenizer']=None, device: str='cpu') -> Tuple[List[str], List[str]]:
    """Export a PyTorch model to an ONNX Intermediate Representation (IR).

    Args:
        preprocessor: The preprocessor used for encoding the data.
        model: The model to export.
        config: The ONNX configuration associated with the exported model.
        opset: The version of the ONNX operator set to use.
        output: Path where the exported ONNX model is written.
        tokenizer: Deprecated alias for ``preprocessor``.
        device: The device on which the model is exported ('cpu' or 'cuda').

    Returns:
        A tuple with an ordered list of the model's inputs, and the named
        outputs from the ONNX configuration.

    Raises:
        ValueError: if both ``tokenizer`` and a tokenizer ``preprocessor``
            are provided, or if the model and config inputs do not match.
    """
    # ``tokenizer`` is a deprecated alias of ``preprocessor``; the two are
    # mutually exclusive when the preprocessor is itself a tokenizer.
    if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
        raise ValueError('You cannot provide both a tokenizer and a preprocessor to export the model.')
    if tokenizer is not None:
        warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)
        logger.info('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.')
        preprocessor = tokenizer
    if issubclass(type(model), PreTrainedModel):
        import torch
        from torch.onnx import export as onnx_export
        logger.info(f'Using framework PyTorch: {torch.__version__}')
        # The export must run in inference mode, without autograd tracking.
        with torch.no_grad():
            model.config.return_dict = True
            model.eval()
            # Apply any configuration overrides the ONNX config requires.
            if config.values_override is not None:
                logger.info(f'Overriding {len(config.values_override)} configuration item(s)')
                for override_config_key, override_config_value in config.values_override.items():
                    logger.info(f'\t- {override_config_key} -> {override_config_value}')
                    setattr(model.config, override_config_key, override_config_value)
            # Dummy inputs let the tracer record the computation graph.
            model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH)
            device = torch.device(device)
            if device.type == 'cuda' and torch.cuda.is_available():
                model.to(device)
                # Move every tensor — including those nested one or two
                # levels deep in tuples/lists — onto the target device.
                model_inputs_device = {}
                for k, v in model_inputs.items():
                    if isinstance(v, Tuple):
                        model_inputs_device[k] = tuple((x.to(device) if isinstance(x, torch.Tensor) else None for x in v))
                    elif isinstance(v, List):
                        model_inputs_device[k] = [tuple((x.to(device) if isinstance(x, torch.Tensor) else None for x in t)) for t in v]
                    else:
                        model_inputs_device[k] = v.to(device)
                model_inputs = model_inputs_device
            inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
            onnx_outputs = list(config.outputs.keys())
            if not inputs_match:
                raise ValueError("Model and config inputs doesn't match")
            # Ops unsupported by ONNX are patched only for the duration of
            # the export, then restored.
            config.patch_ops()
            onnx_export(model, (model_inputs,), f=output.as_posix(), input_names=list(config.inputs.keys()), output_names=onnx_outputs, dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())), do_constant_folding=True, opset_version=opset)
            config.restore_ops()
    return (matched_inputs, onnx_outputs)
Export a PyTorch model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): The preprocessor used for encoding the data. model ([`PreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. device (`str`, *optional*, defaults to `cpu`): The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration.
github-repos
def _get_container_environment(self, **kwargs): environment = {} environment.update(self.primary_container['Environment']) environment['SAGEMAKER_BATCH'] = 'True' if ('MaxPayloadInMB' in kwargs): environment['SAGEMAKER_MAX_PAYLOAD_IN_MB'] = str(kwargs['MaxPayloadInMB']) if ('BatchStrategy' in kwargs): if (kwargs['BatchStrategy'] == 'SingleRecord'): strategy_env_value = 'SINGLE_RECORD' elif (kwargs['BatchStrategy'] == 'MultiRecord'): strategy_env_value = 'MULTI_RECORD' else: raise ValueError("Invalid BatchStrategy, must be 'SingleRecord' or 'MultiRecord'") environment['SAGEMAKER_BATCH_STRATEGY'] = strategy_env_value if (('MaxConcurrentTransforms' in kwargs) and (int(kwargs['MaxConcurrentTransforms']) > 1)): logger.warning('Local Mode only supports 1 ConcurrentTransform. Setting MaxConcurrentTransforms to 1') environment['SAGEMAKER_MAX_CONCURRENT_TRANSFORMS'] = '1' if ('Environment' in kwargs): environment.update(kwargs['Environment']) return environment
Get all the Environment variables that will be passed to the container Certain input fields such as BatchStrategy have different values for the API vs the Environment variables, such as SingleRecord vs SINGLE_RECORD. This method also handles this conversion. Args: **kwargs: existing transform arguments Returns: dict: All the environment variables that should be set in the container
codesearchnet
def _project_THn(self, hist: Hist) -> Any:
    """Perform the actual THn -> THn or TH1 projection.

    The projection may target 1D, 2D, 3D, or ND, depending on how many
    projection axes are configured.

    Args:
        hist (ROOT.THnBase): Histogram from which the projection should
            be performed.

    Returns:
        ROOT.THnBase or ROOT.TH1: The projected histogram.
    """
    axes = [axis.axis_type.value for axis in self.projection_axes]
    # For 2D projections the two axis arguments are passed in reversed
    # order — presumably to match ROOT's argument convention; confirm
    # against the THnBase::Projection documentation.
    if len(axes) == 2:
        axes.reverse()
    args = axes + ["E"]
    logger.debug(f"hist: {hist.GetName()} args: {args}")
    # Four or more target axes require the N-dimensional projection API.
    if len(axes) > 3:
        return hist.ProjectionND(*args)
    return hist.Projection(*args)
Perform the actual THn -> THn or TH1 projection. This projection could be to 1D, 2D, 3D, or ND. Args: hist (ROOT.THnBase): Histogram from which the projections should be performed. Returns: ROOT.THnBase or ROOT.TH1: The projected histogram.
juraj-google-style
def intersect(self, range_):
    """Intersect this scope with a package range.

    Returns:
        A new copy of this scope, with variants whos version fall
        outside of the given range removed. If there were no removals,
        self is returned. If all variants were removed, None is
        returned.
    """
    new_slice = None
    if self.package_request.conflict:
        # Conflict request: re-derive the variant slice from the solver
        # rather than narrowing the existing one.
        if (self.package_request.range is None):
            new_slice = self.solver._get_variant_slice(self.package_name, range_)
        else:
            # Restrict to the part of range_ not covered by the conflicting
            # range (range subtraction). Presumably None means nothing
            # remains — verify against the range type's semantics.
            new_range = (range_ - self.package_request.range)
            if (new_range is not None):
                new_slice = self.solver._get_variant_slice(self.package_name, new_range)
    else:
        new_slice = self.variant_slice.intersect(range_)
    if (new_slice is None):
        # Intersection eliminated every variant.
        if self.pr:
            self.pr("%s intersected with range '%s' resulted in no packages", self, range_)
        return None
    if (new_slice is not self.variant_slice):
        # The slice changed: wrap it in a fresh scope, leaving self intact.
        scope = self._copy(new_slice)
        if self.pr:
            self.pr("%s was intersected to %s by range '%s'", self, scope, range_)
        return scope
    # Intersection was a no-op; reuse this scope unchanged.
    return self
Intersect this scope with a package range. Returns: A new copy of this scope, with variants whos version fall outside of the given range removed. If there were no removals, self is returned. If all variants were removed, None is returned.
codesearchnet
def _finish_connection_action(self, action):
    """Finish a connection attempt.

    On success the connection transitions from Connecting to Idle; on
    failure it is removed entirely. In both cases the stored one-shot
    callback is invoked with the outcome.

    Args:
        action (ConnectionAction): the action object describing what we
            are connecting to and what the result of the operation was.
    """
    success = action.data['success']
    conn_key = action.data['id']
    # Guard against out-of-order actions: only a Connecting connection may
    # be finished.
    # NOTE(review): errors here go to print() rather than a logger —
    # confirm whether the project has a logging convention for this path.
    if self._get_connection_state(conn_key) != self.Connecting:
        print("Invalid finish_connection action on a connection whose state is not Connecting, conn_key=%s" % str(conn_key))
        return
    data = self._get_connection(conn_key)
    callback = data['callback']
    conn_id = data['conn_id']
    int_id = data['int_id']
    if success is False:
        reason = action.data['reason']
        if reason is None:
            reason = "No reason was given"
        # Failed attempt: forget the connection under both of its keys
        # before notifying the caller of the failure.
        del self._connections[conn_id]
        del self._int_connections[int_id]
        callback(conn_id, self.id, False, reason)
    else:
        # Successful attempt: connection becomes Idle and the one-shot
        # callback is cleared so it cannot fire twice.
        data['state'] = self.Idle
        data['microstate'] = None
        data['callback'] = None
        callback(conn_id, self.id, True, None)
Finish a connection attempt Args: action (ConnectionAction): the action object describing what we are connecting to and what the result of the operation was
juraj-google-style
def GetEventTypeString(self, event_type):
    """Retrieves a string representation of the event type.

    Args:
        event_type (int): event type.

    Returns:
        str: description of the event type, or 'Unknown <value>' when the
            value falls outside the known range.
    """
    known_event_types = self._EVENT_TYPES
    if 0 <= event_type < len(known_event_types):
        return known_event_types[event_type]
    return 'Unknown {0:d}'.format(event_type)
Retrieves a string representation of the event type. Args: event_type (int): event type. Returns: str: description of the event type.
codesearchnet
def get_instances_with_configs(configs):
    """Create AndroidDevice instances from a list of dict configs.

    Each config must contain the required key-value pair 'serial'.

    Args:
        configs: A list of dicts, each representing the configuration of
            one android device.

    Returns:
        A list of AndroidDevice objects.

    Raises:
        Error: if a config is missing the required 'serial' key.
    """
    serials = []
    for config in configs:
        if 'serial' not in config:
            raise Error('Required value "serial" is missing in AndroidDevice config %s.' % config)
        serials.append(config['serial'])
    _validate_device_existence(serials)

    devices = []
    for config in configs:
        serial = config.pop('serial')
        is_required = config.get(KEY_DEVICE_REQUIRED, True)
        try:
            device = AndroidDevice(serial)
            device.load_config(config)
        except Exception:
            if is_required:
                raise
            # NOTE(review): if AndroidDevice(serial) itself raised,
            # ``device`` is unbound here and this line fails with
            # NameError — same behavior as the original; confirm intent.
            device.log.exception('Skipping this optional device due to error.')
            continue
        devices.append(device)
    return devices
Create AndroidDevice instances from a list of dict configs. Each config should have the required key-value pair 'serial'. Args: configs: A list of dicts each representing the configuration of one android device. Returns: A list of AndroidDevice objects.
github-repos
def get_variation_from_id(self, experiment_key, variation_id):
    """Get variation given experiment key and variation ID.

    Args:
        experiment_key: Key representing parent experiment of variation.
        variation_id: ID representing the variation.

    Returns:
        Object representing the variation, or None when the experiment
        or variation is not in the datafile.
    """
    variation_map = self.variation_id_map.get(experiment_key)
    if not variation_map:
        # Unknown (or empty) experiment.
        self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key)
        self.error_handler.handle_error(
            exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
        return None

    variation = variation_map.get(variation_id)
    if not variation:
        # Experiment exists but the variation does not.
        self.logger.error('Variation ID "%s" is not in datafile.' % variation_id)
        self.error_handler.handle_error(
            exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))
        return None

    return variation
Get variation given experiment and variation ID. Args: experiment: Key representing parent experiment of variation. variation_id: ID representing the variation. Returns Object representing the variation.
codesearchnet