code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def binary_n(total_N, min_n=50):
    """Successively halve ``total_N`` until the next value would drop below ``min_n``.

    Non-integer results are rounded down.

    Args:
        total_N (int): total length.
        min_n (int): minimal length after division.

    Returns:
        list of int: [total_N//2, total_N//4, total_N//8, ...] while the
        value stays >= min_n.
    """
    # Largest exponent i such that total_N / 2**i is still >= min_n.
    halvings = int(np.floor(np.log2(1.0 * total_N / min_n)))
    values = []
    for exponent in range(1, halvings + 1):
        values.append(int(np.floor(1.0 * total_N / 2 ** exponent)))
    return values
Creates a list of values by successively halving the total length total_N until the resulting value is less than min_n. Non-integer results are rounded down. Args: total_N (int): total length Kwargs: min_n (int): minimal length after division Returns: list of integers: total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n
juraj-google-style
def getMAC(self, bType=MacType.RandomMac):
    """Get one specific type of MAC address from the device.

    Currently OpenThreadWpan only supports the random (extended) MAC address.

    Args:
        bType: which kind of MAC address is required (a ``MacType`` value).

    Returns:
        int: the requested 64-bit MAC address.
    """
    # Bug fix: parenthesized print works under both Python 2 and Python 3;
    # the bare print statement is a SyntaxError under Python 3.
    print('%s call getMAC' % self.port)
    if self.isPowerDown:
        # Device is powered down: fall back to the cached address.
        macAddr64 = self.mac
    elif bType == MacType.FactoryMac:
        macAddr64 = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:HardwareAddress')[0])
    elif bType == MacType.HashMac:
        macAddr64 = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:MACAddress')[0])
    else:
        macAddr64 = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:ExtendedAddress')[0])
    return int(macAddr64, 16)
get one specific type of MAC address currently OpenThreadWpan only supports Random MAC address Args: bType: indicate which kind of MAC address is required Returns: specific type of MAC address
juraj-google-style
def attach_tracer(tracer_name_template=None):
    """Class decorator that attaches an OpenTelemetry tracer to instances.

    Adds a ``tracer`` attribute to each instance after ``__init__`` runs, so
    a companion tracing decorator can use it.

    Args:
        tracer_name_template: Optional template string for the tracer name,
            containing ``{module}`` and ``{class_name}`` placeholders. If
            None, a default scheme is used: the dotted module + class path,
            prefixed with "transformers." when not already under it.

    Returns:
        A class decorator (identity when OpenTelemetry is unavailable).
    """
    if not _has_opentelemetry:
        # No-op decorator when OpenTelemetry is not installed.
        return lambda cls: cls

    def decorator(cls):
        wrapped_init = cls.__init__

        @functools.wraps(wrapped_init)
        def init_with_tracer(self, *args, **kwargs):
            wrapped_init(self, *args, **kwargs)
            mod = cls.__module__
            qualname = cls.__qualname__
            if tracer_name_template is not None:
                tracer_name = tracer_name_template.format(module=mod, class_name=qualname)
            elif mod.startswith('transformers.'):
                tracer_name = f'{mod}.{qualname}'
            else:
                tracer_name = f'transformers.{mod}.{qualname}'
            self.tracer = get_tracer(tracer_name)

        cls.__init__ = init_with_tracer
        return cls

    return decorator
Decorator that attaches a tracer to a class. This decorator should be applied to classes that need OpenTelemetry tracing. It adds a tracer attribute to the class instance that can be used by the traced decorator. Args: tracer_name_template: Optional template string for the tracer name. If provided, it should contain {module} which will be replaced with the class's full module path and {class_name} for the class name. If None, a default naming scheme will be used where: - If the module already starts with "transformers.", it will use that directly - Otherwise, it will prepend "transformers." to the module name Returns: Class decorator function
github-repos
def parse(src, preamble_len=0, single_node=True):
    """Return the AST of the given piece of code.

    Args:
        src: Text, the source code.
        preamble_len: Int, number of leading nodes in the parsed AST to drop.
        single_node: Bool, whether `src` is assumed to be represented by
            exactly one AST node.

    Returns:
        ast.AST: a single node when `single_node` is True, otherwise the list
        of top-level nodes.
    """
    nodes = gast.parse(src).body
    if preamble_len:
        nodes = nodes[preamble_len:]
    if not single_node:
        return nodes
    if len(nodes) != 1:
        raise ValueError('expected exactly one node, got {}'.format(nodes))
    return nodes[0]
Returns the AST of given piece of code. Args: src: Text preamble_len: Int, indicates leading nodes in the parsed AST which should be dropped. single_node: Bool, whether `src` is assumed to be represented by exactly one AST node. Returns: ast.AST
github-repos
def intersect(self, other):
    """Calculate the intersection of this rectangle and another rectangle.

    Args:
        other (Rect): The other rectangle.

    Returns:
        Rect: The intersection of this rectangle and `other`, or None if
        there is no such intersection.
    """
    intersection = Rect()
    # Bug fix: the second argument must be the *other* rectangle; the
    # original passed self._ptr twice, so `other` was never used and the
    # result was always the rectangle intersected with itself.
    if lib.SDL_IntersectRect(self._ptr, other._ptr, intersection._ptr):
        return intersection
    return None
Calculate the intersection of this rectangle and another rectangle. Args: other (Rect): The other rectangle. Returns: Rect: The intersection of this rectangle and the given other rectangle, or None if there is no such intersection.
codesearchnet
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):
    """Run one encoder layer: self-attention followed by a feed-forward block.

    Args:
        hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Input to the layer.
        attention_mask (`torch.FloatTensor`): Attention mask.
        position_embeddings (`torch.FloatTensor`, *optional*):
            Position embeddings, consumed by the self-attention module.
        reference_points (`torch.FloatTensor`, *optional*): Reference points.
        spatial_shapes (`torch.LongTensor`, *optional*):
            Spatial shapes of the backbone feature maps.
        level_start_index (`torch.LongTensor`, *optional*): Level start index.
        output_attentions (`bool`, *optional*):
            Whether to also return the attention weights.

    Returns:
        tuple: `(hidden_states,)`, plus `(attn_weights,)` when
        `output_attentions` is True.
    """
    # --- self-attention sub-block with residual connection ---
    residual = hidden_states
    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)
    # NOTE(review): torch.nn.Module exposes `self.training`; confirm
    # `is_training` is actually defined on this class.
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)
    hidden_states = residual + hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    # --- feed-forward sub-block with residual connection ---
    residual = hidden_states
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.is_training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)
    hidden_states = residual + hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    if self.is_training:
        # Clamp to keep inf/nan from propagating (e.g. under fp16 training).
        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (attn_weights,)
    return outputs
Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. level_start_index (`torch.LongTensor`, *optional*): Level start index. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def __repr__(self): return "ndio.remote.neuroRemote('{}', '{}')".format( self.hostname, self.protocol, self.meta_url, self.meta_protocol )
Return a string representation that can be used to reproduce this instance. `eval(repr(this))` should return an identical copy. Arguments: None Returns: str: Representation of reproducible instance.
juraj-google-style
def validate_inference_rewrite_for_variables(graph: ops.Graph):
    """Validate whether rewrite_for_inference() 'worked' for variables.

    rewrite_for_inference() appends GuaranteeConst ops after ReadVariableOps,
    which only happens when variables are created/accessed through
    tf.compat.v1.get_variable(). Call this right after rewrite_for_inference()
    to verify those ops were added.

    Args:
        graph: The graph which needs to be validated.

    Raises:
        RuntimeError: if no GuaranteeConst op is present in `graph`.
    """
    found = any(op.type == 'GuaranteeConst' for op in graph.get_operations())
    if not found:
        raise RuntimeError('No GuaranteeConst ops found in the graph after running tpu.rewrite_for_inference(...). Please check that you are using tf.get_variable() to create and access variables in your tpu computation.')
Validates whether rewrite_for_inference() 'worked' for variables. The rewrite_for_inference() method is supposed to append GuaranteeConstOps after ReadVariableOps, but this mechanism works only if you are using tf.compat.v1.get_variable() to create and access variables in your tpu computation. This validation method can be called immediately after calling tpu.rewrite_for_inference() to check whether GuaranteeConstOps where added to the graph. Typical usages: tpu.validate_inference_rewrite_for_variables( tf.compat.v1.get_default_graph()) tpu.validate_inference_rewrite_for_variables(sess.graph) Args: graph: The graph which needs to be validated. Raises: RuntimeError: if validation failed.
github-repos
def classes_in_module(module) -> List:
    """Return all classes defined in ``module`` that subclass ETKModule.

    Args:
        module: the module object to inspect.

    Returns:
        List of classes declared in `module` itself (re-exports excluded).
    """
    members = module.__dict__
    matches = []
    for member_name in members:
        candidate = members[member_name]
        if not isinstance(candidate, type):
            continue
        if not issubclass(candidate, ETKModule):
            continue
        # Only keep classes declared in this module, not imported ones.
        if candidate.__module__ == module.__name__:
            matches.append(candidate)
    return matches
Return all classes with super class ExtractionModule Args: module: Returns: List of classes
juraj-google-style
def WriteModifyTimestamp(self, timestamp):
    """Convenience method for writing the last-modify timestamp.

    Args:
        timestamp: An int with the number of seconds since epoch. If None,
            no action is performed.

    Returns:
        A boolean indicating success of the write (True for the no-op case).
    """
    if timestamp is None:
        # Nothing to persist; report success.
        return True
    # Drop the cached value before persisting the new one.
    self.modify_time = None
    return self._WriteTimestamp(timestamp, self.modify_file)
Convenience method for writing the last modify timestamp. Args: timestamp: An int with the number of seconds since epoch. If timestamp is None, performs no action. Returns: A boolean indicating success of the write.
github-repos
def plugins_all(self):
    """All versions of all plugins in the group, filtered by ``blacklist``.

    Returns:
        dict: Nested dictionary of plugins accessible through dot-notation,
        with the lowest level being a plain dict of all unfiltered plugin
        versions for each plugin type and name.
    """
    if not self.loaded:
        # Lazily import plugin modules on first access.
        self.load_modules()
    group_plugins = get_plugins()[self.group]
    return group_plugins._filter(blacklist=self.blacklist, type_filter=self.type_filter)
All resulting versions of all plugins in the group filtered by ``blacklist`` Returns: dict: Nested dictionary of plugins accessible through dot-notation. Similar to :py:attr:`plugins`, but lowest level is a regular dictionary of all unfiltered plugin versions for the given plugin type and name. Parent types are always included. Child plugins will only be included if at least one valid, non-blacklisted plugin is available.
codesearchnet
def _powerset(iterable): s = list(iterable) return itertools.chain.from_iterable((itertools.combinations(s, r) for r in range(len(s) + 1)))
Helper for generating all possible reduction_axes arguments. Example: powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2) Args: iterable: An iterable of items to generate the powerset of. Returns: The powerset of all items in iterable.
github-repos
def get(self, timeout=None, tag=None):
    """Return a closure from the queue to be executed.

    Tries the tagged queue for `tag` first, then falls back to the global
    queue.

    Args:
        timeout: timeout in seconds when waiting for a closure to be put.
        tag: optional tag naming which per-tag queue to check before the
            global queue.

    Returns:
        A closure, or None on timeout / shutdown.
    """
    with self._queue_lock:
        # Block until a closure is available in either queue, shutdown is
        # requested, or the wait times out.
        while self._should_process_closures and self._queue.empty() and (tag is None or self._tagged_queue[tag].empty()):
            if not self._closures_queued_condition.wait(timeout=timeout):
                return None
        if not self._should_process_closures:
            # Queue is shutting down; stop handing out work.
            return None
        if tag is not None and (not self._tagged_queue[tag].empty()):
            # Tagged work takes priority over the global queue.
            closure = self._tagged_queue[tag].get(block=False)
            return closure
        closure = self._queue.get(block=False)
        metric_utils.monitor_int('queued_closures', self._queue.qsize())
        # Global-queue closures are untagged by construction.
        assert closure.tag is None
        assert tag is None or self._tagged_queue[tag].empty()
        # A slot freed up in the bounded global queue; wake one producer.
        self._queue_free_slot_condition.notify()
        self.inflight_closure_count += 1
        return closure
Return a closure from the queue to be executed. It will try to fetch an item from the queue with the given tag. If this queue is empty, it will then check the global queue. Args: timeout: timeout when waiting for a closure to be put. tag: optional tag to specify which queue to query first before querying the global queue. Returns: a closure or None after timeout.
github-repos
def _sample_action(self, constraints: Dict[(str, Constraints)], default: Sequence[tf.Tensor], prob: float=0.3) -> Sequence[tf.Tensor]:
    """Sample action fluents respecting the given bound `constraints`.

    With probability `prob` each fluent takes its default value; otherwise it
    is sampled with respect to its bounds.

    Args:
        constraints (Dict[str, Constraints]): The bounds for each action fluent.
        default (Sequence[tf.Tensor]): The default action fluents.
        prob (float): A probability measure.

    Returns:
        Sequence[tf.Tensor]: A tuple of action fluents.
    """
    fluent_names = self.compiler.rddl.domain.action_fluent_ordering
    fluent_dtypes = map(rddl2tf.utils.range_type_to_dtype, self.compiler.rddl.action_range_type)
    fluent_sizes = self.compiler.rddl.action_size
    sampled = []
    for name, dtype, size, default_value in zip(fluent_names, fluent_dtypes, fluent_sizes, default):
        sampled.append(self._sample_action_fluent(name, dtype, size, constraints, default_value, prob))
    return tuple(sampled)
Samples action fluents respecting the given bound `constraints`. With probability `prob` it chooses the action fluent default value, with probability 1-`prob` it samples the fluent w.r.t. its bounds. Args: constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent. default (Sequence[tf.Tensor]): The default action fluents. prob (float): A probability measure. Returns: Sequence[tf.Tensor]: A tuple of action fluents.
codesearchnet
def unsubscribe(self, peer_jid):
    """Ask for unsubscription from a peer.

    Args:
        peer_jid (str): the JID you ask for unsubscription.
    """
    bare_jid = aioxmpp.JID.fromstr(peer_jid).bare()
    self.roster.unsubscribe(bare_jid)
Asks for unsubscription Args: peer_jid (str): the JID you ask for unsubscriptiion
codesearchnet
def mel_to_hertz(mels: Union[float, np.ndarray], mel_scale: str='htk') -> Union[float, np.ndarray]:
    """Convert frequency from mels to hertz.

    Args:
        mels (`float` or `np.ndarray`):
            The frequency, or multiple frequencies, in mels.
        mel_scale (`str`, *optional*, defaults to `"htk"`):
            The mel frequency scale to use: `"htk"`, `"kaldi"` or `"slaney"`.

    Returns:
        `float` or `np.ndarray`: The frequencies in hertz.
    """
    if mel_scale not in ['slaney', 'htk', 'kaldi']:
        raise ValueError('mel_scale should be one of "htk", "slaney" or "kaldi".')
    if mel_scale == 'htk':
        return 700.0 * (np.power(10, mels / 2595.0) - 1.0)
    if mel_scale == 'kaldi':
        return 700.0 * (np.exp(mels / 1127.0) - 1.0)
    # Slaney scale: linear below the break point, logarithmic above it.
    min_log_hertz = 1000.0
    min_log_mel = 15.0
    logstep = np.log(6.4) / 27.0
    freq = 200.0 * mels / 3.0
    if isinstance(mels, np.ndarray):
        log_region = mels >= min_log_mel
        freq[log_region] = min_log_hertz * np.exp(logstep * (mels[log_region] - min_log_mel))
        return freq
    if mels >= min_log_mel:
        return min_log_hertz * np.exp(logstep * (mels - min_log_mel))
    return freq
Convert frequency from mels to hertz. Args: mels (`float` or `np.ndarray`): The frequency, or multiple frequencies, in mels. mel_scale (`str`, *optional*, `"htk"`): The mel frequency scale to use, `"htk"`, `"kaldi"` or `"slaney"`. Returns: `float` or `np.ndarray`: The frequencies in hertz.
github-repos
def _decode_doubles(message): binary = base64.b64decode(message) return struct.unpack('<' + ('d' * (len(binary)
Helper for decode_qp, decodes a double array. The double array is stored as little endian 64 bit doubles. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. Args: message: the double array Returns: decoded double array
juraj-google-style
def compile_action_preconditions_checking(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> tf.Tensor:
    """Combine the action preconditions into an applicability-checking op.

    Args:
        state (Sequence[tf.Tensor]): The current state fluents.
        action (Sequence[tf.Tensor]): The action fluents.

    Returns:
        A boolean tensor for checking if `action` is applicable in `state`.
    """
    with self.graph.as_default():
        with tf.name_scope('action_preconditions_checking'):
            preconds = self.compile_action_preconditions(state, action)
            precond_tensors = [precond.tensor for precond in preconds]
            stacked = tf.stack(precond_tensors, axis=1)
            # Applicable only if every precondition holds.
            return tf.reduce_all(stacked, axis=1)
Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is application in `state`.
juraj-google-style
def __get_request(self, host, soup):
    """Build a request from the given soup form.

    Args:
        host (str): The URL of the current queue item.
        soup (obj): The BeautifulSoup form.

    Returns:
        :class:`nyawc.http.Request`: The new Request.
    """
    if soup.has_attr('action'):
        url = URLHelper.make_absolute(host, self.__trim_grave_accent(soup['action']))
    else:
        # No action attribute: the form posts back to the current page.
        url = host
    method_original = soup['method'] if soup.has_attr('method') else 'get'
    method = 'post' if method_original.lower() == 'post' else 'get'
    data = self.__get_form_data(soup)
    return Request(url, method, data)
Build a request from the given soup form. Args: host str: The URL of the current queue item. soup (obj): The BeautifulSoup form. Returns: :class:`nyawc.http.Request`: The new Request.
codesearchnet
def close(self, cancel_pending_enqueues=False, name=None):
    """Closes this barrier.

    Signals that no more *new* key values will be inserted. InsertMany with
    new keys will fail; InsertMany that complements existing keys continues
    to succeed, as do TakeMany operations while sufficient elements remain.
    TakeMany operations that would block fail immediately. If
    `cancel_pending_enqueues` is True, pending requests to the underlying
    queue are also canceled.

    Args:
        cancel_pending_enqueues: (Optional.) A boolean, defaulting to False.
        name: Optional name for the op.

    Returns:
        The operation that closes the barrier.
    """
    op_name = name if name is not None else '%s_BarrierClose' % self._name
    return gen_data_flow_ops.barrier_close(self._barrier_ref, cancel_pending_enqueues=cancel_pending_enqueues, name=op_name)
Closes this barrier. This operation signals that no more new key values will be inserted in the given barrier. Subsequent InsertMany operations with new keys will fail. InsertMany operations that just complement already existing keys with other components, will continue to succeed. Subsequent TakeMany operations will continue to succeed if sufficient elements remain in the barrier. Subsequent TakeMany operations that would block will fail immediately. If `cancel_pending_enqueues` is `True`, all pending requests to the underlying queue will also be canceled, and completing of already started values is also not acceptable anymore. Args: cancel_pending_enqueues: (Optional.) A boolean, defaulting to `False` (described above). name: Optional name for the op. Returns: The operation that closes the barrier.
github-repos
def _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds, class_func, seed, name=None) -> DatasetV2:
    """Filters a dataset based on per-class acceptance probabilities.

    Args:
        dataset: The dataset to be filtered.
        acceptance_dist_ds: A dataset of acceptance probabilities.
        initial_dist_ds: A dataset of the initial probability distribution,
            given or estimated.
        class_func: A function mapping an element of the input dataset to a
            scalar `tf.int32` tensor; values should be in `[0, num_classes)`.
        seed: (Optional.) Python integer seed for the resampler.
        name: (Optional.) A name for the tf.data operation.

    Returns:
        A dataset of (class value, data) after filtering.
    """
    def maybe_warn_on_large_rejection(accept_dist, initial_dist):
        # Log when more than half the examples are expected to be rejected,
        # since that usually indicates a badly skewed target distribution.
        proportion_rejected = math_ops.reduce_sum((1 - accept_dist) * initial_dist)
        return cond.cond(math_ops.less(proportion_rejected, 0.5), lambda: accept_dist, lambda: logging_ops.Print(accept_dist, [proportion_rejected, initial_dist, accept_dist], message='Proportion of examples rejected by sampler is high: ', summarize=100, first_n=10))
    acceptance_dist_ds = DatasetV2.zip((acceptance_dist_ds, initial_dist_ds), name=name).map(maybe_warn_on_large_rejection, name=name)

    def _gather_and_copy(acceptance_prob, data):
        # Look up this element's class and its acceptance probability.
        if isinstance(data, tuple):
            class_val = class_func(*data)
        else:
            class_val = class_func(data)
        return (class_val, array_ops.gather(acceptance_prob, class_val), data)
    current_probabilities_and_class_and_data_ds = DatasetV2.zip((acceptance_dist_ds, dataset), name=name).map(_gather_and_copy, name=name)

    def _reject(unused_class_val, p, unused_data):
        # Keep the element with probability p (rejection sampling).
        return random_ops.random_uniform([], seed=seed, dtype=p.dtype) < p
    filtered_ds = current_probabilities_and_class_and_data_ds.filter(_reject, name=name)
    # Drop the acceptance probability; downstream only needs (class, data).
    return filtered_ds.map(lambda class_value, _, data: (class_value, data), name=name)
Filters a dataset based on per-class acceptance probabilities. Args: dataset: The dataset to be filtered. acceptance_dist_ds: A dataset of acceptance probabilities. initial_dist_ds: A dataset of the initial probability distribution, given or estimated. class_func: A function mapping an element of the input dataset to a scalar `tf.int32` tensor. Values should be in `[0, num_classes)`. seed: (Optional.) Python integer seed for the resampler. name: (Optional.) A name for the tf.data operation. Returns: A dataset of (class value, data) after filtering.
github-repos
def _get_unique_function_name(function_type, functions): function_name = function_name_base = function_type count = 2 while function_name in functions: function_name = '{}_{}'.format(function_name_base, count) count += 1 return function_name
Get a unique function name. Args: function_type(str): Name of Function. Ex) Convolution, Affine functions(OrderedDict of (str, Function) Returns: str A unique function name
juraj-google-style
def pnum_to_processor_coordinates(mesh_shape, pnum):
    """Coordinates of a processor in the mesh.

    Args:
        mesh_shape: a Shape.
        pnum: an integer processor number.

    Returns:
        a list of integers with length len(mesh_shape).
    """
    coordinates = []
    # Walk dimensions from last (fastest-varying) to first, peeling off one
    # coordinate per dimension.
    for dimsize in mesh_shape.to_integer_list[::-1]:
        coordinates.append(pnum % dimsize)
        # Bug fix: the original had a bare `pnum` statement here, so the
        # quotient was never carried to the next dimension.
        pnum //= dimsize
    return coordinates[::-1]
Coordinates of a processor in the mesh. Args: mesh_shape: a Shape pnum: an integer less than len(mesh_shape) Returns: a list of integers with length len(mesh_shape)
juraj-google-style
def triangle(times: np.ndarray, amp: complex, period: float, phase: float=0) -> np.ndarray:
    """Continuous triangle wave.

    Args:
        times: Times to output wave for.
        amp: Pulse amplitude. Wave range is [-amp, amp].
        period: Pulse period, units of dt.
        phase: Pulse phase.

    Returns:
        Complex-valued triangle waveform sampled at `times`.
    """
    # A triangle wave is |sawtooth| flipped and rescaled to [-1, 1].
    shifted_phase = (phase - np.pi / 2) / 2
    unit_wave = (-2 * np.abs(sawtooth(times, 1, period, shifted_phase)) + 1).astype(np.complex_)
    return amp * unit_wave
Continuous triangle wave. Args: times: Times to output wave for. amp: Pulse amplitude. Wave range is [-amp, amp]. period: Pulse period, units of dt. phase: Pulse phase. Returns: Complex-valued triangle waveform sampled at `times`.
codesearchnet
def from_tensor(cls, tensor, name=None):
    """Returns a `TensorSpec` that describes `tensor`.

    >>> tf.TensorSpec.from_tensor(tf.constant([1, 2, 3]))
    TensorSpec(shape=(3,), dtype=tf.int32, name=None)

    Args:
        tensor: The `tf.Tensor` that should be described.
        name: A name for the `TensorSpec`. Defaults to `tensor.op.name` for
            symbolic tensors.

    Returns:
        A `TensorSpec` that describes `tensor`.
    """
    if isinstance(tensor, core_tf_types.Value):
        # Eager tensors have no op; use the caller-provided name as-is.
        return TensorSpec(tensor.shape, tensor.dtype, name)
    if isinstance(tensor, core_tf_types.Symbol):
        return TensorSpec(tensor.shape, tensor.dtype, name or tensor.op.name)
    raise ValueError(f'`tensor` should be a tf.Tensor, but got type {type(tensor)}.')
Returns a `TensorSpec` that describes `tensor`. >>> tf.TensorSpec.from_tensor(tf.constant([1, 2, 3])) TensorSpec(shape=(3,), dtype=tf.int32, name=None) Args: tensor: The `tf.Tensor` that should be described. name: A name for the `TensorSpec`. Defaults to `tensor.op.name`. Returns: A `TensorSpec` that describes `tensor`.
github-repos
def get_devices(ads, **kwargs):
    """Find AndroidDevice instances that have specific attribute values.

    Example:
        get_devices(android_devices, label='foo', phone_number='1234567890')
        get_devices(android_devices, model='angler')

    Args:
        ads: A list of AndroidDevice instances.
        kwargs: keyword arguments used to filter AndroidDevice instances.

    Returns:
        A list of target AndroidDevice instances.

    Raises:
        Error: No devices are matched.
    """
    def _matches(ad):
        # A device matches only if it has every requested attribute with the
        # exact requested value.
        for attr_name, expected in kwargs.items():
            if not hasattr(ad, attr_name):
                return False
            if getattr(ad, attr_name) != expected:
                return False
        return True
    filtered = filter_devices(ads, _matches)
    if not filtered:
        raise Error(
            'Could not find a target device that matches condition: %s.' % kwargs)
    return filtered
Finds a list of AndroidDevice instance from a list that has specific attributes of certain values. Example: get_devices(android_devices, label='foo', phone_number='1234567890') get_devices(android_devices, model='angler') Args: ads: A list of AndroidDevice instances. kwargs: keyword arguments used to filter AndroidDevice instances. Returns: A list of target AndroidDevice instances. Raises: Error: No devices are matched.
juraj-google-style
def _or_join(self, terms): from six import text_type if isinstance(terms, (tuple, list)): if len(terms) > 1: return ' | '.join(text_type(t) for t in terms) else: return terms[0] else: return terms
Joins terms using OR operator. Args: terms (list): terms to join Examples: self._or_join(['term1', 'term2']) -> 'term1 | term2' Returns: str
juraj-google-style
def create_pane(widgets, horizontal, parent_widget=None, compact=False, compact_spacing=2):
    """Create a widget containing an aligned set of widgets.

    Args:
        widgets (list): Each entry is a `QWidget`, a (widget, stretch) tuple,
            an int (fixed spacing), or a falsy value (stretch).
        horizontal (bool): Lay out horizontally if True, vertically otherwise.
        parent_widget (`QWidget`): Owner widget; a new QWidget is created if
            not provided.
        compact (bool): Use tight spacing and margins.
        compact_spacing (int): Spacing/margin size used when `compact`.

    Returns:
        `QWidget`
    """
    pane = parent_widget or QtGui.QWidget()
    layout_cls = QtGui.QHBoxLayout if horizontal else QtGui.QVBoxLayout
    layout = layout_cls()
    if compact:
        layout.setSpacing(compact_spacing)
        layout.setContentsMargins(compact_spacing, compact_spacing, compact_spacing, compact_spacing)
    for entry in widgets:
        stretch = 0
        if isinstance(entry, tuple):
            entry, stretch = entry
        if isinstance(entry, int):
            # Plain ints request fixed spacing.
            layout.addSpacing(entry)
        elif entry:
            layout.addWidget(entry, stretch)
        else:
            # Falsy entries request a stretchable gap.
            layout.addStretch()
    pane.setLayout(layout)
    return pane
Create a widget containing an aligned set of widgets. Args: widgets (list of `QWidget`). horizontal (bool). align (str): One of: - 'left', 'right' (horizontal); - 'top', 'bottom' (vertical) parent_widget (`QWidget`): Owner widget, QWidget is created if this is not provided. Returns: `QWidget`
codesearchnet
def AddEventAttribute(self, attribute_name, attribute_value):
    """Add an attribute that will be set on all events produced.

    Events produced via this mediator will carry an attribute with the
    provided name and value.

    Args:
        attribute_name (str): name of the attribute to add.
        attribute_value (str): value of the attribute to add.

    Raises:
        KeyError: if the event attribute is already set.
    """
    if attribute_name in self._extra_event_attributes:
        raise KeyError('Event attribute {0:s} already set'.format(attribute_name))
    self._extra_event_attributes[attribute_name] = attribute_value
Adds an attribute that will be set on all events produced. Setting attributes using this method will cause events produced via this mediator to have an attribute with the provided name set with the provided value. Args: attribute_name (str): name of the attribute to add. attribute_value (str): value of the attribute to add. Raises: KeyError: if the event attribute is already set.
juraj-google-style
def send(query, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, ttl=DEFAULT_TTL, local_only=False, timeout_s=2):
    """Send a query to the given multicast socket and yield responses.

    Args:
        query: The string query to send.
        address: Multicast IP address component of the socket to send to.
        port: Multicast UDP port component of the socket to send to.
        ttl: TTL for multicast messages; 1 keeps traffic in-network.
        local_only: If True, restrict multicast to the loopback interface.
        timeout_s: Seconds to wait for responses.

    Yields:
        (sender_address, message) tuples for each response received before
        the timeout expires.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    if local_only:
        # Bind outgoing multicast to loopback only.
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, struct.pack('!L', LOCALHOST_ADDRESS))
    sock.settimeout(timeout_s)
    sock.sendto(query.encode('utf-8'), (address, port))
    recv_queue = queue.Queue()
    def _handle_responses():
        # Runs on a background thread; pushes responses into recv_queue and a
        # final None sentinel when the socket times out.
        while True:
            try:
                (data, address) = sock.recvfrom(MAX_MESSAGE_BYTES)
                data = data.decode('utf-8')
            except socket.timeout:
                recv_queue.put(None)
                break
            else:
                _LOG.debug('Multicast response to query "%s": %s:%s', query, address[0], data)
                recv_queue.put((address[0], str(data)))
    response_thread = threading.Thread(target=_handle_responses)
    response_thread.start()
    # Yield responses as they arrive; the None sentinel ends the stream.
    while response_thread.is_alive():
        recv_tuple = recv_queue.get()
        if (not recv_tuple):
            break
        (yield recv_tuple)
    response_thread.join()
Sends a query to the given multicast socket and returns responses. Args: query: The string query to send. address: Multicast IP address component of the socket to send to. port: Multicast UDP port component of the socket to send to. ttl: TTL for multicast messages. 1 to keep traffic in-network. timeout_s: Seconds to wait for responses. Returns: A set of all responses that arrived before the timeout expired. Responses are tuples of (sender_address, message).
codesearchnet
def get_value(self, name=None):
    """Return the value wrapped by this optional.

    If this optional does not have a value (`self.has_value()` is False),
    this operation raises `tf.errors.InvalidArgumentError` at runtime.

    Args:
        name: (Optional.) A name for the created operation.

    Returns:
        The wrapped value.

    Raises:
        NotImplementedError: always in this abstract base; subclasses must
            override.
    """
    raise NotImplementedError('Optional.get_value()')
Returns the value wrapped by this optional. If this optional does not have a value (i.e. `self.has_value()` evaluates to `False`), this operation will raise `tf.errors.InvalidArgumentError` at runtime. >>> optional = tf.experimental.Optional.from_value(42) >>> print(optional.get_value()) tf.Tensor(42, shape=(), dtype=int32) Args: name: (Optional.) A name for the created operation. Returns: The wrapped value.
github-repos
def QA_data_tick_resample(tick, type_='1min'):
    """Resample tick (transaction) data into minute bars of the given frequency.

    Handles the two mainland-China trading sessions (09:31-11:30 and
    13:01-15:00) separately, day by day.

    Args:
        tick (pd.DataFrame): transaction ticks indexed by datetime, with
            'price', 'vol' and 'code' columns.
        type_ (str): pandas resample rule, e.g. '1min'.

    Returns:
        pd.DataFrame: OHLC/vol/amount bars indexed by (datetime, code).
    """
    # Turnover per tick = price * volume.
    tick = tick.assign(amount=tick.price * tick.vol)
    resx = pd.DataFrame()
    # Process one trading day at a time.
    _temp = set(tick.index.date)
    for item in _temp:
        _data = tick.loc[str(item)]
        # Morning session; base=30 aligns bins to the half-hour open.
        # NOTE(review): `base` and `loffset` are removed in recent pandas
        # (use `offset=` / index shifting instead) — confirm pinned version.
        _data1 = _data[time(9, 31):time(11, 30)].resample(
            type_, closed='right', base=30, loffset=type_
        ).apply(
            {
                'price': 'ohlc',
                'vol': 'sum',
                'code': 'last',
                'amount': 'sum'
            }
        )
        # Afternoon session.
        _data2 = _data[time(13, 1):time(15, 0)].resample(
            type_, closed='right', loffset=type_
        ).apply(
            {
                'price': 'ohlc',
                'vol': 'sum',
                'code': 'last',
                'amount': 'sum'
            }
        )
        resx = resx.append(_data1).append(_data2)
    # Flatten the ('price', 'open')-style MultiIndex columns.
    resx.columns = resx.columns.droplevel(0)
    return resx.reset_index().drop_duplicates().set_index(['datetime', 'code'])
Resample tick (transaction) data into minute bars of an arbitrary frequency. Arguments: tick {pd.DataFrame} -- transaction tick data Returns: pd.DataFrame -- resampled OHLC minute-bar data indexed by (datetime, code)
juraj-google-style
def _message_received(self, msg):
    """Callback run when an XMPP message is received.

    Converts the aioxmpp.Message into a spade Message and delivers it to
    every behaviour that is waiting for it.

    Args:
        msg (aioxmpp.Message): the message just received.

    Returns:
        list(asyncio.Future): futures for the append of the message at each
        matched behaviour.
    """
    spade_msg = Message.from_node(msg)
    return self.dispatch(spade_msg)
Callback run when an XMPP Message is reveived. This callback delivers the message to every behaviour that is waiting for it. First, the aioxmpp.Message is converted to spade.message.Message Args: msg (aioxmpp.Messagge): the message just received. Returns: list(asyncio.Future): a list of futures of the append of the message at each matched behaviour.
juraj-google-style
def visualize(
    logdir, outdir, num_agents, num_episodes, checkpoint=None,
    env_processes=True):
    """Recover a checkpoint and render videos from it.

    Args:
        logdir: Logging directory of the trained algorithm.
        outdir: Directory to store rendered videos in.
        num_agents: Number of environments to simulate in parallel.
        num_episodes: Total number of episodes to simulate.
        checkpoint: Checkpoint name to load; defaults to most recent.
        env_processes: Whether to step environments in separate processes.
    """
    config = utility.load_config(logdir)
    # Build the batched environment and simulation graph on CPU.
    with tf.device('/cpu:0'):
        batch_env = utility.define_batch_env(
            lambda: _create_environment(config, outdir),
            num_agents, env_processes)
        graph = utility.define_simulation_graph(
            batch_env, config.algorithm, config)
        total_steps = num_episodes * config.max_length
        loop = _define_loop(graph, total_steps)
        # Exclude temporaries and the step counter so restoring cannot clash.
        saver = utility.define_saver(
            exclude=(r'.*_temporary.*', r'global_step'))
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        utility.initialize_variables(
            sess, saver, config.logdir, checkpoint, resume=True)
        # Drive the loop to completion; scores are not needed for rendering.
        for unused_score in loop.run(sess, saver, total_steps):
            pass
    batch_env.close()
Recover checkpoint and render videos from it. Args: logdir: Logging directory of the trained algorithm. outdir: Directory to store rendered videos in. num_agents: Number of environments to simulate in parallel. num_episodes: Total number of episodes to simulate. checkpoint: Checkpoint name to load; defaults to most recent. env_processes: Whether to step environments in separate processes.
juraj-google-style
def combine_last_two_dimensions(x):
    """Reshape x so that the last two dimensions become one.

    Args:
        x: a Tensor with shape [..., a, b]

    Returns:
        a Tensor with shape [..., ab]
    """
    shape = common_layers.shape_list(x)
    last_a, last_b = shape[-2:]
    merged_shape = shape[:-2] + [last_a * last_b]
    return tf.reshape(x, merged_shape)
Reshape x so that the last two dimension become one. Args: x: a Tensor with shape [..., a, b] Returns: a Tensor with shape [..., ab]
juraj-google-style
def with_contest_type(self, contest_type):
    """Add a contest_type segment.

    Invoke with ``contest_type='by'`` or ``contest_type='by-election'`` to
    add a 'by' segment to the ballot_id. ``contest_type='election'`` is
    valid syntax but has no effect.

    Args:
        contest_type (str): the contest type to record.

    Returns:
        IdBuilder: self, for chaining.

    Raises:
        ValueError: if `contest_type` fails validation.
    """
    self._validate_contest_type(contest_type)
    by_aliases = ('by', 'by election', 'by-election')
    if contest_type.lower() in by_aliases:
        self.contest_type = 'by'
    return self
Add a contest_type segment Args: contest_type (str): Invoke with ``contest_type='by'`` or ``contest_type='by-election'`` to add a 'by' segment to the ballot_id. Invoking with ``contest_type='election'`` is valid syntax but has no effect. Returns: IdBuilder Raises: ValueError
juraj-google-style
def _ParseValue(value, index, arg, metadata):
    """Parse ``value``, a string, into the appropriate type.

    The parse function is chosen from Fire decorator metadata: a positional
    parser for the argument's index, then a parser named for the argument,
    then the decorator's default, finally the global default parser.

    Args:
        value: The string value to be parsed, typically a command line argument.
        index: The index of the value in the function's argspec.
        arg: The name of the argument the value is being parsed for.
        metadata: Metadata about the function, typically from Fire decorators.

    Returns:
        `value`, parsed into the appropriate type for calling a function.
    """
    parse_fn = parser.DefaultParseValue
    parse_fns = metadata.get(decorators.FIRE_PARSE_FNS)
    if parse_fns:
        positional = parse_fns['positional']
        named = parse_fns['named']
        default = parse_fns['default']
        if index is not None and 0 <= index < len(positional):
            parse_fn = positional[index]
        elif arg in named:
            parse_fn = named[arg]
        elif default is not None:
            parse_fn = default
    return parse_fn(value)
Parses value, a string, into the appropriate type. The function used to parse value is determined by the remaining arguments. Args: value: The string value to be parsed, typically a command line argument. index: The index of the value in the function's argspec. arg: The name of the argument the value is being parsed for. metadata: Metadata about the function, typically from Fire decorators. Returns: value, parsed into the appropriate type for calling a function.
github-repos
def ParseSmsRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses an SMS row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    event_data = AndroidSMSEventData()
    event_data.address = self._GetRowValue(query_hash, row, 'address')
    event_data.body = self._GetRowValue(query_hash, row, 'body')
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    # Map raw integer flags onto human-readable values; unknown codes are
    # reported as 'UNKNOWN'.
    sms_read = self._GetRowValue(query_hash, row, 'read')
    event_data.sms_read = self.SMS_READ.get(sms_read, 'UNKNOWN')
    sms_type = self._GetRowValue(query_hash, row, 'type')
    event_data.sms_type = self.SMS_TYPE.get(sms_type, 'UNKNOWN')
    # Timestamps in this table are Java epoch milliseconds.
    timestamp = self._GetRowValue(query_hash, row, 'date')
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an SMS row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def __init__(self, parent, discord_token, discord_client_id):
    """Create a new main window frame.

    Builds the status bar and the Global/Modules notebook tabs, installs a
    window-close handler that logs the Discord client out, and logs the
    startup/version banner.

    Args:
        parent: A tk or ttk object that owns this frame.
        discord_token: Discord bot token, passed through to GlobalFrame.
        discord_client_id: Discord client id, passed through to GlobalFrame.
    """
    super(Frame, self).__init__(parent)
    logger.debug("Initialising frame")
    # Status bar pinned to the bottom of the window.
    statusbar = StatusBar(self)
    statusbar.grid(column=0, row=1, sticky="W E S")
    # Notebook with the Global and Modules tabs.
    nav = ttk.Notebook(self)
    module_frame = ModuleFrame(nav)
    nav.add(GlobalFrame(nav, discord_token, discord_client_id, module_frame, statusbar), text="Global")
    nav.add(module_frame, text="Modules")
    nav.grid(column=0, row=0, sticky="W E N S")
    def on_closing():
        # Log the Discord client out on its own event loop before tearing
        # down the UI; failures here must not block window close.
        try:
            from ._client import client
            if client.loop:
                asyncio.run_coroutine_threadsafe(client.logout(), client.loop)
        except RuntimeError:
            pass
        except Exception as e:
            logger.exception(e)
        parent.destroy()
        import sys
        sys.exit(0)
    parent.protocol("WM_DELETE_WINDOW", on_closing)
    # Let the notebook area absorb all resizing.
    self.columnconfigure(0, weight=1)
    self.rowconfigure(0, weight=1)
    logger.info("Welcome to Modis v{} ({})".format(datatools.version, datatools.version_nickname))
    # Check for a newer release and report the result.
    state, response = datatools.get_compare_version()
    logger.info("{}\n".format(response))
Create a new main window frame. Args: parent: A tk or ttk object
juraj-google-style
def save(self, new_export_dir=None):
    """Saves the updated `SavedModel`.

    The output format (text vs. binary proto) mirrors the input's format.

    Args:
        new_export_dir: Path where the updated `SavedModel` will be saved.
            If None, the input `SavedModel` is overridden with the updates.

    Raises:
        errors.OpError: If there are errors during the file save operation.
    """
    # Detect whether the source model was stored as a text proto.
    pbtxt_probe = file_io.join(compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
    is_input_text_proto = file_io.file_exists(pbtxt_probe)
    target_dir = new_export_dir or self._export_dir
    if is_input_text_proto:
        path = file_io.join(compat.as_bytes(target_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
        file_io.write_string_to_file(path, str(self._saved_model))
    else:
        path = file_io.join(compat.as_bytes(target_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
        file_io.write_string_to_file(path, self._saved_model.SerializeToString(deterministic=True))
    tf_logging.info('SavedModel written to: %s', compat.as_text(path))
Saves the updated `SavedModel`. Args: new_export_dir: Path where the updated `SavedModel` will be saved. If None, the input `SavedModel` will be overriden with the updates. Raises: errors.OpError: If there are errors during the file save operation.
github-repos
async def remove(self, *, node_id: str, force: bool = False) -> Mapping[str, Any]:
    """Remove a node from a swarm.

    Args:
        node_id: The ID or name of the node.
        force: Force removal of the node.

    Returns:
        The JSON response from the Docker daemon.
    """
    return await self.docker._query_json(
        "nodes/{node_id}".format(node_id=node_id),
        method="DELETE",
        params={"force": force},
    )
Remove a node from a swarm. Args: node_id: The ID or name of the node
juraj-google-style
def prepare_image_transforms(element, image_columns):
    """Replace each image URL in a row with its base64-encoded JPEG bytes.

    Python 2 only (uses cStringIO). Intended as a Beam map step.

    Args:
        element: one input row, as a dict.
        image_columns: list of column names holding image paths/URIs.

    Returns:
        The row with each image path replaced by urlsafe-base64 JPEG bytes,
        or None if any image failed to load (dropping the row downstream).
    """
    import base64
    import cStringIO
    from PIL import Image
    from tensorflow.python.lib.io import file_io as tf_file_io
    from apache_beam.metrics import Metrics
    img_error_count = Metrics.counter('main', 'ImgErrorCount')
    img_missing_count = Metrics.counter('main', 'ImgMissingCount')
    for name in image_columns:
        uri = element[name]
        if (not uri):
            # Missing path: count it but keep the row.
            img_missing_count.inc()
            continue
        try:
            with tf_file_io.FileIO(uri, 'r') as f:
                img = Image.open(f).convert('RGB')
        except Exception as e:
            # Returning None drops the whole row from the pipeline output.
            logging.exception('Error processing image %s: %s', uri, str(e))
            img_error_count.inc()
            return
        # Re-encode as JPEG and store urlsafe base64 in place of the path.
        output = cStringIO.StringIO()
        img.save(output, 'jpeg')
        element[name] = base64.urlsafe_b64encode(output.getvalue())
    return element
Replace an images url with its jpeg bytes. Args: element: one input row, as a dict image_columns: list of columns that are image paths Return: element, where each image file path has been replaced by a base64 image.
codesearchnet
def get_function_def(self, name):
  """Get a function definition from the context.

  Args:
    name: function signature name.

  Returns:
    The requested FunctionDef.

  Raises:
    tf.errors.NotFoundError: if name is not the name of a registered function.
  """
  if not is_oss:
    # Internal build: the C API hands back a FunctionDef directly.
    return pywrap_tfe.TFE_ContextGetFunctionDefNoSerialization(self._handle, name)
  # OSS build: fetch the serialized proto via a C buffer, then parse it.
  with c_api_util.tf_buffer() as buffer_:
    pywrap_tfe.TFE_ContextGetFunctionDef(self._handle, name, buffer_)
    proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
  fdef = function_pb2.FunctionDef()
  fdef.ParseFromString(proto_data)
  return fdef
Get a function definition from the context. Args: name: function signature name. Returns: The requested FunctionDef. Raises: tf.errors.NotFoundError: if name is not the name of a registered function.
github-repos
def _NthElementGrad(op: ops.Operation, grad):
  """Return the gradients for NthElement.

  Args:
    op: The NthElementOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the NthElementOp.

  Returns:
    A list of two tensors: the gradient w.r.t. the input, and None for the
    gradient w.r.t. n (an integer, not differentiable).
  """
  input = op.inputs[0]
  output = op.outputs[0]
  # 1.0 where an input entry equals the selected n-th value, 0.0 elsewhere.
  indicators = math_ops.cast(math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)
  grad = array_ops.expand_dims(grad, -1)
  # Number of ties per row: the incoming gradient is split evenly among them.
  num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)
  return [math_ops.divide(indicators, num_selected) * grad, None]
Return the gradients for NthElement. Args: op: The NthElementOp for which we need to generate gradients. grad: Tensor. The gradients passed to the NthElementOp Returns: A list of two tensors, the first being the gradient w.r.t. the input, the second being the gradient w.r.t. the N (None).
github-repos
def get_updated(node):
    """Return the variable names created or mutated by this statement.

    Handles assignments, augmented assignments, `for`-loop targets, and
    function argument lists. For example, `x[0] = 2` yields `x`,
    `x, y = 3, 4` yields `x` and `y`, `for i in range(x)` yields `i`.

    Args:
        node: An AST node.

    Returns:
        A set of variable names (strings) created or mutated by the node.
    """
    if isinstance(node, gast.Assign):
        return set.union(*(_get_target(tgt) for tgt in node.targets))
    if isinstance(node, (gast.For, gast.AugAssign)):
        return _get_target(node.target)
    if isinstance(node, gast.arguments):
        names = {arg.id for arg in node.args + node.kwonlyargs}
        if node.vararg:
            names.add(node.vararg.id)
        if node.kwarg:
            names.add(node.kwarg.id)
        return names
    return set()
Return the variable names created or mutated by this statement. This function considers assign statements, augmented assign statements, and the targets of for loops, as well as function arguments. For example, `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and `y`, `for i in range(x)` will return `i`, etc. Args: node: An AST node Returns: A set of variable names (strings) of all the variables created or mutated.
juraj-google-style
def get_collections(self, unit, names=None, merge=False, sampling_rate=None, **entities):
    """Retrieve variable data for a specified level in the Dataset.

    Args:
        unit (str): The unit of analysis to return variables for. One of
            'run', 'session', 'subject', or 'dataset'.
        names (list): Optional list of variable names to return. If None,
            all available variables are returned.
        merge (bool): If True, variables are merged across all observations
            of the current unit into a single collection; if False, each
            observation produces its own collection and a list is returned.
        sampling_rate (int, str): If unit='run', the sampling rate to pass
            onto the returned BIDSRunVariableCollection.
        entities: Optional constraints used to limit what gets returned.

    Returns:
        A single collection if merge=True (or None if nothing matched),
        otherwise a list of collections.
    """
    nodes = self.get_nodes(unit, entities)
    var_sets = []
    for n in nodes:
        # Start from every variable attached to this node, then narrow down.
        var_set = list(n.variables.values())
        var_set = [v for v in var_set if v.matches_entities(entities)]
        if (names is not None):
            var_set = [v for v in var_set if (v.name in names)]
        # Non-run levels additionally filter each variable's rows by entities.
        if (unit != 'run'):
            var_set = [v.filter(entities) for v in var_set]
        var_sets.append(var_set)
    if merge:
        # Collapse all per-node variable lists into one flat list.
        var_sets = [list(chain(*var_sets))]
    results = []
    for vs in var_sets:
        if (not vs):
            continue
        if (unit == 'run'):
            vs = clc.BIDSRunVariableCollection(vs, sampling_rate)
        else:
            vs = clc.BIDSVariableCollection(vs)
        results.append(vs)
    if merge:
        return (results[0] if results else None)
    return results
Retrieve variable data for a specified level in the Dataset. Args: unit (str): The unit of analysis to return variables for. Must be one of 'run', 'session', 'subject', or 'dataset'. names (list): Optional list of variables names to return. If None, all available variables are returned. merge (bool): If True, variables are merged across all observations of the current unit. E.g., if unit='subject' and return_type= 'collection', variablesfrom all subjects will be merged into a single collection. If False, each observation is handled separately, and the result is returned as a list. sampling_rate (int, str): If unit='run', the sampling rate to pass onto the returned BIDSRunVariableCollection. entities: Optional constraints used to limit what gets returned. Returns:
codesearchnet
def coords(self):
    """The current absolute coordinates of the touch event, in mm from the
    top left corner of the device.

    Only valid for TOUCH_DOWN and TOUCH_MOTION events; any other event type
    raises AttributeError.

    Returns:
        (float, float): The current absolute (x, y) coordinates.

    Raises:
        AttributeError: If the event is not a touch down/motion event.
    """
    if (self.type not in {EventType.TOUCH_DOWN, EventType.TOUCH_MOTION}):
        raise AttributeError(_wrong_prop.format(self.type))
    x = self._libinput.libinput_event_touch_get_x(self._handle)
    y = self._libinput.libinput_event_touch_get_y(self._handle)
    return (x, y)
The current absolute coordinates of the touch event, in mm from the top left corner of the device. To get the corresponding output screen coordinates, use :meth:`transform_coords`. For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`, :attr:`~libinput.constant.EventType.TOUCH_MOTION`, this property raises :exc:`AttributeError`. Returns: (float, float): The current absolute (x, y) coordinates. Raises: AttributeError
codesearchnet
def get_intra_op_parallelism_threads():
  """Get number of threads used within an individual op for parallelism.

  A value of 0 means the system picks an appropriate number.

  Returns:
    Number of parallel threads.
  """
  return context.context().intra_op_parallelism_threads
Get number of threads used within an individual op for parallelism. Certain operations like matrix multiplication and reductions can utilize parallel threads for speed ups. A value of 0 means the system picks an appropriate number. Returns: Number of parallel threads
github-repos
def limit_string_length(string, max_len=50):
    """Limit the length of input string.

    When the string is longer than `max_len`, only its last `max_len`
    characters are kept, prefixed by an ellipsis.

    Args:
        string: Input string.
        max_len: (int or None) If int, the length limit. If None, no limit.

    Returns:
        Possibly length-limited string.
    """
    if max_len is None:
        return string
    if len(string) <= max_len:
        return string
    tail_start = len(string) - max_len
    return '...' + string[tail_start:]
Limit the length of input string. Args: string: Input string. max_len: (int or None) If int, the length limit. If None, no limit. Returns: Possibly length-limited string.
github-repos
def __init__(self, argv_or_options, command_line=False):
    """Parse and encapsulate the configuration options.

    IMPORTANT: If creating an Options object from code, do not construct it
    directly! Call Options.create() instead.

    Args:
        argv_or_options: Either sys.argv[1:], or an already parsed options
            object returned by ArgumentParser.parse_args.
        command_line: Set this to true when argv_or_options == sys.argv[1:].

    Raises:
        sys.exit(2): bad option or input filenames (command-line mode).
        TypeError: options object constructed directly instead of via create().
    """
    argument_parser = make_parser()
    if command_line:
        assert isinstance(argv_or_options, list)
        options = argument_parser.parse_args(argv_or_options)
    else:
        if isinstance(argv_or_options, list):
            raise TypeError('Do not construct an Options object directly; call Options.create() instead.')
        options = argv_or_options
    # Backfill library-only options that the CLI parser does not define.
    for name, default in _LIBRARY_ONLY_OPTIONS.items():
        if not hasattr(options, name):
            setattr(options, name, default)
    names = set(vars(options))
    # Map each destination name to its last (canonical) option string.
    # NOTE(review): `actions.items()` implies make_parser() returns a parser
    # whose `actions` is a dict, unlike stdlib argparse — confirm upstream.
    opt_map = {k: v.option_strings[-1] for k, v in argument_parser.actions.items() if v.option_strings}
    try:
        Postprocessor(names, opt_map, options, self).process()
    except PostprocessingError as e:
        if command_line:
            # In CLI mode, report via the parser (exits with status 2).
            argument_parser.error(str(e))
        else:
            raise
Parse and encapsulate the configuration options. Also sets up some basic logger configuration. IMPORTANT: If creating an Options object from code, do not construct it directly! Call Options.create() instead. Args: argv_or_options: Either sys.argv[1:] (sys.argv[0] is the main script), or already parsed options object returned by ArgumentParser.parse_args. command_line: Set this to true when argv_or_options == sys.argv[1:]. Raises: sys.exit(2): bad option or input filenames.
github-repos
def query_band(self, value):
    """Set the connection's query_band property.

    Args:
        value: New query_band value (string), or None to clear it.

    Returns:
        Nothing.
    """
    self._query_band = value
    if value is None:
        # Removing an absent attribute is not an error.
        self._connectionXML.attrib.pop('query-band-spec', None)
    else:
        self._connectionXML.set('query-band-spec', value)
Set the connection's query_band property. Args: value: New query_band value. String. Returns: Nothing.
juraj-google-style
def _ParseRecord(self, parser_mediator, page_data, record_offset):
    """Parses a Safari binary-cookies record from the page data.

    Args:
        parser_mediator (ParserMediator): parser mediator.
        page_data (bytes): page data.
        record_offset (int): offset of the record relative to the start of
            the page.

    Raises:
        ParseError: when the record cannot be parsed.
    """
    record_header_map = self._GetDataTypeMap('binarycookies_record_header')
    try:
        record_header = self._ReadStructureFromByteStream(page_data[record_offset:], record_offset, record_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError('Unable to map record header data at offset: 0x{0:08x} with error: {1!s}'.format(record_offset, exception))
    event_data = SafariBinaryCookieEventData()
    event_data.flags = record_header.flags
    # All string fields are optional; offsets are relative to the record start.
    if record_header.url_offset:
        data_offset = (record_offset + record_header.url_offset)
        event_data.url = self._ParseCString(page_data, data_offset)
    if record_header.name_offset:
        data_offset = (record_offset + record_header.name_offset)
        event_data.cookie_name = self._ParseCString(page_data, data_offset)
    if record_header.path_offset:
        data_offset = (record_offset + record_header.path_offset)
        event_data.path = self._ParseCString(page_data, data_offset)
    if record_header.value_offset:
        data_offset = (record_offset + record_header.value_offset)
        event_data.cookie_value = self._ParseCString(page_data, data_offset)
    # Creation event is only produced when a creation time is present.
    if record_header.creation_time:
        date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=record_header.creation_time)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    # An expiration event is always produced, with a semantic "Not set"
    # placeholder when no expiration time is present.
    if record_header.expiration_time:
        date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=record_header.expiration_time)
    else:
        date_time = dfdatetime_semantic_time.SemanticTime('Not set')
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
    # Give cookie plugins a chance to further decode well-known cookies.
    for plugin in self._cookie_plugins:
        if parser_mediator.abort:
            break
        if (event_data.cookie_name != plugin.COOKIE_NAME):
            continue
        try:
            plugin.UpdateChainAndProcess(parser_mediator, cookie_name=event_data.cookie_name, cookie_data=event_data.cookie_value, url=event_data.url)
        except Exception as exception:
            # A failing plugin must not abort parsing of the record itself.
            parser_mediator.ProduceExtractionWarning('plugin: {0:s} unable to parse cookie with error: {1!s}'.format(plugin.NAME, exception))
Parses a record from the page data. Args: parser_mediator (ParserMediator): parser mediator. page_data (bytes): page data. record_offset (int): offset of the record relative to the start of the page. Raises: ParseError: when the record cannot be parsed.
codesearchnet
def createThread(parent, worker, deleteWorkerLater=False):
    """Create a new QThread for the given worker.

    Args:
        parent (QObject): parent of thread and worker.
        worker (ProgressWorker): worker to use in thread.
        deleteWorkerLater (bool, optional): delete the worker when the
            thread finishes.

    Returns:
        QThread
    """
    thread = QtCore.QThread(parent)
    # Work starts when the thread starts; the thread quits when work is done.
    thread.started.connect(worker.doWork)
    worker.finished.connect(thread.quit)
    if deleteWorkerLater:
        thread.finished.connect(worker.deleteLater)
    worker.moveToThread(thread)
    worker.setParent(parent)
    return thread
Create a new thread for given worker. Args: parent (QObject): parent of thread and worker. worker (ProgressWorker): worker to use in thread. deleteWorkerLater (bool, optional): delete the worker if thread finishes. Returns: QThread
juraj-google-style
def download(url, fname=None):
    """Downloads a file.

    Args:
        url (str): The URL to download.
        fname (Optional[str]): The filename to store the downloaded file in.
            If `None`, take the filename from the URL.

    Returns:
        The filename the URL was downloaded to.

    Raises:
        requests.exceptions.HTTPError: There was a problem connecting to
            the URL.
    """
    target = url.split('/')[-1] if fname is None else fname
    with contextlib.closing(requests.get(url, stream=True)) as response:
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as error:
            print('Error connecting to URL: "{}"'.format(url))
            print(response.text)
            raise error
        with open(target, 'wb') as out_file:
            shutil.copyfileobj(response.raw, out_file)
    return target
Downloads a file. Args: url (str): The URL to download. fname (Optional[str]): The filename to store the downloaded file in. If `None`, take the filename from the URL. Defaults to `None`. Returns: The filename the URL was downloaded to. Raises: requests.exceptions.HTTPError: There was a problem connecting to the URL.
juraj-google-style
def set_logical_cpu_devices(self, num_cpus, prefix=''):
  """Set virtual CPU devices in context.

  If virtual CPU devices are already configured at context initialization
  by tf.config.set_logical_device_configuration(), this method should not
  be called.

  Args:
    num_cpus: Number of virtual CPUs.
    prefix: Device name prefix.

  Raises:
    RuntimeError: If virtual CPUs are already configured at context
      initialization.
  """
  server_def = self._server_def or self._collective_ops_server_def
  # Only devices local to this task count when checking for prior setup.
  local_prefix = ['/device']
  if server_def is not None:
    local_prefix.append('/job:%s/replica:0/task:%d' % (server_def.job_name, server_def.task_index))
  logical_local_devices = [d for d in self.list_logical_devices('CPU') if d.name.startswith(tuple(local_prefix))]
  self.ensure_initialized()
  # More than one logical CPU means virtual CPUs were already configured.
  if len(logical_local_devices) > 1:
    raise RuntimeError('Virtual CPUs already set, cannot modify again.')
  pywrap_tfe.TFE_SetLogicalCpuDevices(self._context_handle, num_cpus, prefix)
  self._initialize_logical_devices()
Set virtual CPU devices in context. If virtual CPU devices are already configured at context initialization by tf.config.set_logical_device_configuration(), this method should not be called. Args: num_cpus: Number of virtual CPUs. prefix: Device name prefix. Raises: RuntimeError: If virtual CPUs are already configured at context initialization.
github-repos
def from_string(string):
    """Reads a string representation to a Cssr object.

    Args:
        string (str): A string representation of a CSSR.

    Returns:
        Cssr object.
    """
    lines = string.split("\n")
    lengths = [float(tok) for tok in lines[0].split()]
    angles = [float(tok) for tok in lines[1].split()[0:3]]
    latt = Lattice.from_lengths_and_angles(lengths, angles)
    species = []
    coords = []
    # Atom lines start at line 5: "<index> <symbol> <x> <y> <z> ..."
    atom_pattern = r"\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)"
    for line in lines[4:]:
        m = re.match(atom_pattern, line.strip())
        if m:
            species.append(m.group(1))
            coords.append([float(m.group(i)) for i in range(2, 5)])
    return Cssr(Structure(latt, species, coords))
Reads a string representation to a Cssr object. Args: string (str): A string representation of a CSSR. Returns: Cssr object.
juraj-google-style
def distance(self, other):
    """Get distance between two sites.

    Args:
        other: Other site.

    Returns:
        Distance (float)
    """
    displacement = other.coords - self.coords
    return np.linalg.norm(displacement)
Get distance between two sites. Args: other: Other site. Returns: Distance (float)
codesearchnet
def template_file(task: Task, template: str, path: str, jinja_filters: FiltersDict = None, **kwargs: Any) -> Result:
    """Renders contents of a file with jinja2. All the host data is available
    in the template.

    Arguments:
        template: filename
        path: path to dir with templates
        jinja_filters: jinja filters to enable. Defaults to
            nornir.config.jinja2.filters
        **kwargs: additional data to pass to the template

    Returns:
        Result object with the following attributes set:

          * result (``string``): rendered string
    """
    # Fix: the original expression was `jinja_filters or {} or <default>`;
    # the `{}` operand is falsy and always falls through, so it was dead code.
    jinja_filters = jinja_filters or task.nornir.config.jinja2.filters
    text = jinja_helper.render_from_file(
        template=template,
        path=path,
        host=task.host,
        jinja_filters=jinja_filters,
        **kwargs,
    )
    return Result(host=task.host, result=text)
Renders contents of a file with jinja2. All the host data is available in the template Arguments: template: filename path: path to dir with templates jinja_filters: jinja filters to enable. Defaults to nornir.config.jinja2.filters **kwargs: additional data to pass to the template Returns: Result object with the following attributes set: * result (``string``): rendered string
codesearchnet
def _VerifyHMAC(self, comms=None):
  """Verifies the HMAC of a received message.

  Raises a DecryptionError if the received HMAC does not verify. If the
  HMAC verifies correctly, True is returned.

  Args:
    comms: The comms RdfValue to verify.

  Raises:
    DecryptionError: The HMAC did not verify, or the HMAC type is unknown.

  Returns:
    True
  """
  if self.hmac_type == "SIMPLE_HMAC":
    # Legacy mode: HMAC covers only the encrypted payload.
    msg = comms.encrypted
    digest = comms.hmac
  elif self.hmac_type == "FULL_HMAC":
    # Full mode: HMAC covers payload, cipher fields, IV and API version.
    # The concatenation order must match the sender exactly.
    msg = b"".join([
        comms.encrypted, comms.encrypted_cipher,
        comms.encrypted_cipher_metadata,
        comms.packet_iv.SerializeToString(),
        struct.pack("<I", comms.api_version)
    ])
    digest = comms.full_hmac
  else:
    raise DecryptionError("HMAC type no supported.")
  try:
    rdf_crypto.HMAC(self.cipher.hmac_key).Verify(msg, digest)
  except rdf_crypto.VerificationError as e:
    raise DecryptionError("HMAC verification failed: %s" % e)
  return True
Verifies the HMAC. This method raises a DecryptionError if the received HMAC does not verify. If the HMAC verifies correctly, True is returned. Args: comms: The comms RdfValue to verify. Raises: DecryptionError: The HMAC did not verify. Returns: True
juraj-google-style
def to_bytesize(value, default_unit=None, base=DEFAULT_BASE):
    """Convert `value` to bytes; accepts notations such as "4k" for 4096 bytes.

    Args:
        value (str | unicode | int | None): Number of bytes optionally
            suffixed by a char from UNITS.
        default_unit (str | unicode | None): Default unit to use for
            unqualified values.
        base (int): Base to use (usually 1024).

    Returns:
        (int | None): Deduced bytesize value, if possible.
    """
    if isinstance(value, (int, float)):
        return unitized(value, default_unit, base)
    if value is None:
        return None
    try:
        text = value
        # A trailing 'b'/'B' is an optional "bytes" marker: "4kb" == "4k".
        if text[-1].lower() == "b":
            text = text[:-1]
        unit = text[-1:].lower()
        if unit.isdigit():
            # No unit suffix at all: fall back to the default unit.
            unit = default_unit
        else:
            text = text[:-1]
        return unitized(to_number(float, text), unit, base)
    except (IndexError, TypeError, ValueError):
        return None
Convert `value` to bytes, accepts notations such as "4k" to mean 4096 bytes Args: value (str | unicode | int | None): Number of bytes optionally suffixed by a char from UNITS default_unit (str | unicode | None): Default unit to use for unqualified values base (int): Base to use (usually 1024) Returns: (int | None): Deduced bytesize value, if possible
juraj-google-style
def unbatch():
  """Splits elements of a dataset into multiple elements on the batch
  dimension.

  For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
  where `B` may vary per element, the unbatched dataset contains `B`
  consecutive elements of shape `[a0, a1, ...]` for each input element.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  def _transformation(dataset):
    # Delegate to the native Dataset.unbatch implementation.
    return dataset.unbatch()

  return _transformation
Splits elements of a dataset into multiple elements on the batch dimension. For example, if elements of the dataset are shaped `[B, a0, a1, ...]`, where `B` may vary for each input element, then for each element in the dataset, the unbatched dataset will contain `B` consecutive elements of shape `[a0, a1, ...]`. ```python # NOTE: The following example uses `{ ... }` to represent the contents # of a dataset. a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] } a.unbatch() == { 'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'} ``` Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`.
github-repos
def get_variable_scope_name(value):
    """Returns the name of the variable scope indicated by the given value.

    Args:
        value: String, variable scope, or object with `variable_scope`
            attribute (e.g., Sonnet module).

    Returns:
        The name (a string) of the corresponding variable scope.

    Raises:
        ValueError: If `value` does not identify a variable scope.
    """
    # Objects such as Sonnet modules expose their scope via `variable_scope`.
    value = getattr(value, 'variable_scope', value)
    if isinstance(value, tf.VariableScope):
        return value.name
    if isinstance(value, six.string_types):
        return value
    raise ValueError('Not a variable scope: {}'.format(value))
Returns the name of the variable scope indicated by the given value. Args: value: String, variable scope, or object with `variable_scope` attribute (e.g., Sonnet module). Returns: The name (a string) of the corresponding variable scope. Raises: ValueError: If `value` does not identify a variable scope.
codesearchnet
def vstack(tup):
    """Stack arrays in sequence vertically (row wise), handling
    ``RemoteArray`` and ``DistArray`` without moving data.

    Args:
        tup (sequence of array_like)

    Returns:
        res: `ndarray`, if inputs were all local; `RemoteArray`, if inputs
        were all on the same remote engine; `DistArray`, if inputs were
        already scattered on different engines.
    """
    arrays = list(tup)
    for i, arr in enumerate(arrays):
        # Bug fix: was `arr.ndim is 1` — identity comparison with an int
        # literal only works by CPython small-int caching accident.
        if arr.ndim == 1:
            # Promote 1-D inputs to row vectors, matching numpy.vstack.
            arrays[i] = arr[np.newaxis, :]
    # Bug fix: concatenate the promoted `arrays`, not the original `tup`;
    # previously the promotion above was silently discarded.
    return concatenate(arrays, axis=0)
Stack arrays in sequence vertically (row wise), handling ``RemoteArray`` and ``DistArray`` without moving data. Args: tup (sequence of array_like) Returns: res: `ndarray`, if inputs were all local `RemoteArray`, if inputs were all on the same remote engine `DistArray`, if inputs were already scattered on different engines
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    """Build model inputs from a sequence or a pair of sequences for
    sequence classification tasks, adding special tokens:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`): List of IDs to which the special tokens
            will be added.
        token_ids_1 (`List[int]`, *optional*): Optional second list of IDs
            for sequence pairs.

    Returns:
        `List[int]`: List of input IDs with the appropriate special tokens.
    """
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    if token_ids_1 is None:
        return cls + token_ids_0 + sep
    return cls + token_ids_0 + sep + token_ids_1 + sep
Build model inputs from a sequence or a pair of sequence for sequence classification tasks. A sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def add_review(self, reviewer, product, review, date=None):
    """Add a new review from a given reviewer to a given product.

    Args:
        reviewer: an instance of Reviewer.
        product: an instance of Product.
        review: a float value.
        date: date the review issued.

    Returns:
        the added new review object.

    Raises:
        TypeError: when given reviewer and product aren't instances of the
            reviewer and product classes this graph was constructed with.
    """
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError("Type of given reviewer isn't acceptable:", reviewer, ', expected:', self._reviewer_cls)
    if not isinstance(product, self._product_cls):
        raise TypeError("Type of given product isn't acceptable:", product, ', expected:', self._product_cls)
    new_review = self._review_cls(review, date=date)
    self.graph.add_edge(reviewer, product, review=new_review)
    return new_review
Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed.
codesearchnet
def session_end_pb(status, end_time_secs=None):
    """Constructs a SessionEndInfo summary protobuffer.

    Creates a summary containing status information for a completed training
    session. Should be exported after the training session is completed; one
    such summary per training session, each with a different run.

    Args:
        status: A tensorboard.hparams.Status enumeration value denoting the
            status of the session.
        end_time_secs: float. The session end time in seconds since the unix
            epoch. If None, the current time is used.

    Returns:
        The summary protobuffer mentioned above.
    """
    if end_time_secs is None:
        end_time_secs = time.time()
    end_info = plugin_data_pb2.SessionEndInfo(
        status=status, end_time_secs=end_time_secs)
    plugin_data = plugin_data_pb2.HParamsPluginData(session_end_info=end_info)
    return _summary(metadata.SESSION_END_INFO_TAG, plugin_data)
Constructs a SessionEndInfo protobuffer. Creates a summary that contains status information for a completed training session. Should be exported after the training session is completed. One such summary per training session should be created. Each should have a different run. Args: status: A tensorboard.hparams.Status enumeration value denoting the status of the session. end_time_secs: float. The time to use as the session end time. Represented as seconds since the unix epoch. If None uses the current time. Returns: The summary protobuffer mentioned above.
codesearchnet
def calc_shape_step(self, stat_names, time):
    """Calculate shape statistics for a single time step.

    Args:
        stat_names: List of shape statistics calculated from region props.
        time: Time being investigated.

    Returns:
        List of shape statistics.
    """
    t_index = np.where(self.times == time)[0][0]
    props = regionprops(self.masks[t_index], self.timesteps[t_index])[0]
    shape_stats = []
    for stat_name in stat_names:
        if "moments_hu" in stat_name:
            # "moments_hu_<i>" selects element i of the Hu-moment vector;
            # the log compresses the moments' large dynamic range.
            hu_name, hu_index_str = stat_name.rsplit("_", 1)
            hu_val = np.log(props[hu_name][int(hu_index_str)])
            # NaN (log of a non-positive moment) is mapped to 0.
            shape_stats.append(0 if np.isnan(hu_val) else hu_val)
        else:
            shape_stats.append(props[stat_name])
    return shape_stats
Calculate shape statistics for a single time step Args: stat_names: List of shape statistics calculated from region props time: Time being investigated Returns: List of shape statistics
juraj-google-style
def callEventWaitAndGetRpc(self, callback_id, event_name, timeout_sec):
    """Waits for and returns an existing CallbackEvent for the identifier.

    This function calls snippet lib's eventWaitAndGet RPC.

    Args:
        callback_id: str, the callback identifier.
        event_name: str, the callback name.
        timeout_sec: float, the number of seconds to wait for the event.

    Returns:
        The event dictionary.

    Raises:
        errors.CallbackHandlerTimeoutError: The expected event does not occur
            within the time limit.
    """
    # The snippet RPC takes milliseconds.
    timeout_ms = int(timeout_sec * 1000)
    try:
        return self._event_client.eventWaitAndGet(callback_id, event_name, timeout_ms)
    except Exception as err:
        if TIMEOUT_ERROR_MESSAGE not in str(err):
            raise
        raise errors.CallbackHandlerTimeoutError(self._device, f'Timed out after waiting {timeout_sec}s for event "{event_name}" triggered by {self._method_name} ({self.callback_id}).') from err
Waits and returns an existing CallbackEvent for the specified identifier. This function calls snippet lib's eventWaitAndGet RPC. Args: callback_id: str, the callback identifier. event_name: str, the callback name. timeout_sec: float, the number of seconds to wait for the event. Returns: The event dictionary. Raises: errors.CallbackHandlerTimeoutError: The expected event does not occur within the time limit.
github-repos
def GetDefaultToken(token):
    """Returns the provided token or the default token.

    Args:
        token: A token or None.

    Raises:
        access_control.UnauthorizedAccess: no valid token was provided.
    """
    token = default_token if token is None else token
    if isinstance(token, access_control.ACLToken):
        return token
    raise access_control.UnauthorizedAccess('Token is not properly specified. It should be an instance of grr.lib.access_control.ACLToken()')
Returns the provided token or the default token. Args: token: A token or None. Raises: access_control.UnauthorizedAccess: no token was provided.
codesearchnet
def _wrap_and_check_metrics(self, metrics):
  """Handle the saving of metrics.

  Metrics is either a single `Metric`/(value, update_op) entry or a dict of
  such entries keyed by name. The tuples are separated into a dict mapping
  output names to tensors.

  Args:
    metrics: Dict of metric results keyed by name, where each value is
      either (1) an instance of the `Metric` class, or (2) a
      (metric_value, update_op) tuple; or a single such entry.

  Returns:
    dict of output_names to tensors.

  Raises:
    ValueError: if the dict key is not a string, or the metric values or
      ops are not tensors.
  """
  if not isinstance(metrics, dict):
    # A bare metric gets the default name.
    metrics = {self.METRICS_NAME: metrics}
  outputs = {}
  for key, value in metrics.items():
    if isinstance(value, tuple):
      metric_val, metric_op = value
    else:
      # `Metric` instance: result() is the value; it must carry exactly
      # one update op.
      metric_val = value.result()
      assert len(value.updates) == 1
      metric_op = value.updates[0]
    key = self._check_output_key(key, self.METRICS_NAME)
    key = self._prefix_key(key, self.METRICS_NAME)
    # Each metric produces two named outputs: its value and its update op.
    val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX
    op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX
    if not isinstance(metric_val, tensor.Tensor):
      raise ValueError('{} output value must be a Tensor; got {}.'.format(key, metric_val))
    if not (tensor_util.is_tensor(metric_op) or isinstance(metric_op, ops.Operation)):
      raise ValueError('{} update_op must be a Tensor or Operation; got {}.'.format(key, metric_op))
    metric_op_tensor = metric_op
    if not isinstance(metric_op, tensor.Tensor):
      # Wrap a bare Operation in an empty constant that depends on it, so
      # the output dict contains only tensors.
      with ops.control_dependencies([metric_op]):
        metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')
    outputs[val_name] = metric_val
    outputs[op_name] = metric_op_tensor
  return outputs
Handle the saving of metrics. Metrics is either a tuple of (value, update_op), or a dict of such tuples. Here, we separate out the tuples and create a dict with names to tensors. Args: metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of `Metric` class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. Returns: dict of output_names to tensors Raises: ValueError: if the dict key is not a string, or the metric values or ops are not tensors.
github-repos
def file(self, owner=None, **kwargs):
    """Create the File TI object.

    Args:
        owner: Owner of the File indicator.
        **kwargs: Additional keyword arguments passed to `File`.

    Returns:
        A `File` threat-intelligence object bound to this instance's tcex.
    """
    return File(self.tcex, owner=owner, **kwargs)
Create the File TI object. Args: owner: **kwargs: Return:
codesearchnet
def _to_ascii(s):
    """Converts given string to ascii, ignoring non-ascii characters.

    Args:
        s (text or binary):

    Returns:
        str:
    """
    from six import text_type, binary_type
    if isinstance(s, text_type):
        return s.encode('ascii', 'ignore')
    if isinstance(s, binary_type):
        # Binary input is assumed to be utf-8 encoded text.
        return s.decode('utf-8').encode('ascii', 'ignore')
    raise Exception('Unknown text type - {}'.format(type(s)))
Converts given string to ascii ignoring non ascii. Args: s (text or binary): Returns: str:
juraj-google-style
def get_property(self, name):
    """Return a named property for a resource, if available.

    Args:
        name (str): Name of the property to return.

    Returns:
        `ResourceProperty`

    Raises:
        AttributeError: If the property does not exist.
    """
    for candidate in self.resource.properties:
        if candidate.name == name:
            return candidate
    raise AttributeError(name)
Return a named property for a resource, if available. Will raise an `AttributeError` if the property does not exist Args: name (str): Name of the property to return Returns: `ResourceProperty`
codesearchnet
def fit_transform(self, X, y):
    """Encode categorical columns into average target values.

    Args:
        X (pandas.DataFrame): categorical columns to encode.
        y (pandas.Series): the target column.

    Returns:
        X (pandas.DataFrame): encoded columns.
    """
    self.target_mean = y.mean()
    self.target_encoders = []
    for col in X.columns:
        encoder = self._get_target_encoder(X[col], y)
        self.target_encoders.append(encoder)
        # Unseen/NaN categories fall back to the global target mean.
        X.loc[:, col] = (
            X[col].fillna(NAN_INT).map(encoder).fillna(self.target_mean)
        )
    return X
Encode categorical columns into average target values. Args: X (pandas.DataFrame): categorical columns to encode y (pandas.Series): the target column Returns: X (pandas.DataFrame): encoded columns
juraj-google-style
def retrieve_object_from_file(file_name, save_key, file_location):
    """Retrieve an object from a shelve store.

    Args:
        file_name: Shelve storage file name.
        save_key: The name of the key the item is stored in.
        file_location: The location of the file, derived from the os module.

    Returns:
        The stored object, or None if the key is absent.
    """
    shelve_store = None
    file = __os.path.join(file_location, file_name)
    try:
        shelve_store = __shelve.open(file)
    except Exception as e:
        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))
        __sys.exit('Storage dB is not readable, closing App!!')
    # Fix: always close the shelve, even if the lookup itself raises;
    # previously an exception in .get() leaked the open file handle.
    try:
        stored_object = shelve_store.get(save_key)
    finally:
        shelve_store.close()
    return stored_object
Function to retrieve objects from a shelve Args: file_name: Shelve storage file name save_key: The name of the key the item is stored in file_location: The location of the file, derive from the os module Returns: Returns the stored object
juraj-google-style
def GetYearFromPosixTime(posix_time, timezone=pytz.UTC):
    """Gets the year from a POSIX timestamp.

    The POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC.

    Args:
        posix_time: An integer containing the number of seconds since
            1970-01-01 00:00:00 UTC.
        timezone: Optional timezone of the POSIX timestamp.

    Returns:
        The year of the POSIX timestamp.

    Raises:
        ValueError: If the posix timestamp is out of the range of supported
            values.
    """
    return datetime.datetime.fromtimestamp(posix_time, tz=timezone).year
Gets the year from a POSIX timestamp The POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC. Args: posix_time: An integer containing the number of seconds since 1970-01-01 00:00:00 UTC. timezone: Optional timezone of the POSIX timestamp. Returns: The year of the POSIX timestamp. Raises: ValueError: If the posix timestamp is out of the range of supported values.
codesearchnet
def seek(self, relative_position):
    """Seek the video by `relative_position` seconds.

    Args:
        relative_position (float): The position in seconds to seek to.
    """
    # D-Bus Seek expects microseconds.
    microseconds = Int64(1000.0 * 1000 * relative_position)
    self._player_interface.Seek(microseconds)
    self.seekEvent(self, relative_position)
Seek the video by `relative_position` seconds Args: relative_position (float): The position in seconds to seek to.
juraj-google-style
def get_completions(prefix, paths=None, family_only=False):
    """Get autocompletion options given a prefix string.

    Example:
        >>> get_completions("may")
        set(["maya", "maya_utils"])
        >>> get_completions("maya-")
        set(["maya-2013.1", "maya-2015.0.sp1"])

    Args:
        prefix (str): Prefix to match.
        paths (list of str): paths to search for packages, defaults to
            `config.packages_path`.
        family_only (bool): If True, only match package names, do not
            include version component.

    Returns:
        Set of strings, may be empty.
    """
    op = None
    if prefix:
        # Leading '!'/'~' is a request operator; strip it for matching and
        # re-apply it to every completion at the end.
        if prefix[0] in ('!', '~'):
            if family_only:
                return set()
            op = prefix[0]
            prefix = prefix[1:]

    # NOTE(review): the separator tuple was garbled in this source chunk;
    # reconstructed as ('-', '@', '#') per rez version-request syntax — confirm.
    fam = None
    for ch in ('-', '@', '#'):
        if ch in prefix:
            if family_only:
                return set()
            fam = prefix.split(ch)[0]
            break

    words = set()
    if not fam:
        words = set(x.name for x in iter_package_families(paths=paths)
                    if x.name.startswith(prefix))
        if len(words) == 1:
            # Bug fix: `iter(words).next()` is Python 2 only; `next(iter(...))`
            # works on Python 2.6+ and Python 3.
            fam = next(iter(words))

    if family_only:
        return words

    if fam:
        it = iter_packages(fam, paths=paths)
        words.update(x.qualified_name for x in it
                     if x.qualified_name.startswith(prefix))

    if op:
        words = set((op + x) for x in words)
    return words
Get autocompletion options given a prefix string. Example: >>> get_completions("may") set(["maya", "maya_utils"]) >>> get_completions("maya-") set(["maya-2013.1", "maya-2015.0.sp1"]) Args: prefix (str): Prefix to match. paths (list of str): paths to search for packages, defaults to `config.packages_path`. family_only (bool): If True, only match package names, do not include version component. Returns: Set of strings, may be empty.
codesearchnet
async def _check_resolver_ans( self, dns_answer_list, record_name, record_data_list, record_ttl, record_type_code): type_filtered_list = [ ans for ans in dns_answer_list if ans.qtype == record_type_code ] if len(type_filtered_list) != len(record_data_list): return False for rec in type_filtered_list: conditions = [rec.name == record_name, rec.ttl == record_ttl, rec.data in record_data_list] if not all(conditions): return False return True
Check if resolver answer is equal to record data. Args: dns_answer_list (list): DNS answer list contains record objects. record_name (str): Record name. record_data_list (list): List of data values for the record. record_ttl (int): Record time-to-live info. record_type_code (int): Record type code. Returns: boolean indicating if DNS answer data is equal to record data.
juraj-google-style
def start(self, timeout=None):
    """Start the process going.

    Args:
        timeout (float): Maximum amount of time to wait for each spawned
            process. None means forever.
    """
    assert self.state == STOPPED, "Process already started"
    self.state = STARTING
    # Controllers are started first; publishing only happens if any of
    # them requested it.
    should_publish = self._start_controllers(
        self._controllers.values(), timeout)
    if should_publish:
        self._publish_controllers(timeout)
    self.state = STARTED
Start the process going Args: timeout (float): Maximum amount of time to wait for each spawned process. None means forever
juraj-google-style
def extract_element_internationalized_comment(element):
    """Extracts the xib element's comment, if it has been internationalized.

    Args:
        element (element): The element from which to extract the comment.

    Returns:
        The element's internationalized comment (with the prefix stripped),
        or None if it does not exist or hasn't been internationalized
        (according to the JTLocalize definitions).
    """
    comment = get_element_attribute_or_empty(element, 'userLabel')
    if comment == "":
        # Fall back to the first <string> child's text, if any.
        try:
            comment = element.getElementsByTagName('string')[0].firstChild.nodeValue
        except Exception:
            comment = ""
    if comment.lower().startswith(JT_INTERNATIONALIZED_COMMENT_PREFIX):
        return comment[len(JT_INTERNATIONALIZED_COMMENT_PREFIX):]
    return None
Extracts the xib element's comment, if the element has been internationalized. Args: element (element): The element from which to extract the comment. Returns: The element's internationalized comment, None if it does not exist, or hasn't been internationalized (according to the JTLocalize definitions).
juraj-google-style
def get_output_file_info(filename: str, input_base_dir: str = '', out_pattern: t.Optional[str] = None, out_dir: t.Optional[str] = None, formatting: str = '') -> OutFileInfo:
    """Construct the base output file name by applying the out_pattern or
    out_dir to the input filename.

    Example:
        filename = 'gs://my_bucket/data_to_split/2020/01/21.nc'
        out_pattern = 'gs://my_bucket/splits/{2}-{1}-{0}_old_data.'
        resulting output base = 'gs://my_bucket/splits/2020-01-21_old_data.'
        resulting file ending = '.nc'

    Args:
        filename: input file to be split.
        input_base_dir: replaced by out_dir when out_dir is used.
        out_pattern: pattern to apply when creating the output file.
        out_dir: directory to replace the input base directory.
        formatting: output formatting of split fields; required with
            out_dir, ignored with out_pattern.

    Raises:
        ValueError: if out_dir is given without formatting, or neither
            output option is specified.
    """
    stem, suffix = os.path.splitext(filename)
    # Only recognized data endings are stripped and preserved separately.
    if suffix in GRIB_FILE_ENDINGS or suffix in NETCDF_FILE_ENDINGS:
        filename = stem
    else:
        suffix = ''
    if out_dir:
        if not formatting:
            raise ValueError('No formatting specified when using --output-dir.')
        return OutFileInfo(f'{filename.replace(input_base_dir, out_dir)}', formatting, suffix, [])
    if out_pattern:
        # Split the input path into its components, last component first,
        # so the pattern can reference them positionally.
        sections = []
        remaining = filename
        while remaining:
            remaining, tail = os.path.split(remaining)
            sections.append(tail)
        return OutFileInfo(out_pattern, '', '', sections)
    raise ValueError('no output specified.')
Construct the base output file name by applying the out_pattern to the filename. Example: filename = 'gs://my_bucket/data_to_split/2020/01/21.nc' out_pattern = 'gs://my_bucket/splits/{2}-{1}-{0}_old_data.' resulting output base = 'gs://my_bucket/splits/2020-01-21_old_data.' resulting file ending = '.nc' Args: filename: input file to be split out_pattern: pattern to apply when creating output file out_dir: directory to replace input base directory formatting: output formatting of split fields. Required when using out_dir, ignored when using out_pattern. input_base_dir: used if out_pattern does not contain any '{}' substitutions. The output file is then created by replacing this part of the input name with the output pattern.
github-repos
def process(self, element):
    """Returns the words of this element.

    The element is a line of text; words are runs of word characters and
    apostrophes.

    Args:
        element: the element being processed.

    Returns:
        The list of words found in the element.
    """
    stripped_line = element.strip()
    return re.findall("[\\w\\']+", stripped_line)
Returns an iterator over the words of this element. The element is a line of text. If the line is blank, note that, too. Args: element: the element being processed Returns: The processed element.
github-repos
def create_and_tag_model_card(repo_id: str, tags: Optional[list[str]] = None, token: Optional[str] = None, ignore_metadata_errors: bool = False):
    """Creates or loads an existing model card and tags it.

    Args:
        repo_id (`str`): The repo_id where to look for the model card.
        tags (`List[str]`, *optional*): The list of tags to add to the
            model card.
        token (`str`, *optional*): Authentication token. Defaults to the
            stored token.
        ignore_metadata_errors (`bool`, *optional*, defaults to `False`):
            If True, errors while parsing the metadata section will be
            ignored. Some information might be lost during the process.
            Use it at your own risk.
    """
    try:
        model_card = ModelCard.load(repo_id, token=token, ignore_metadata_errors=ignore_metadata_errors)
    except EntryNotFoundError:
        # No card on the Hub yet: generate a minimal one from the template.
        model_description = 'This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.'
        card_data = ModelCardData(tags=[] if tags is None else tags, library_name='transformers')
        model_card = ModelCard.from_template(card_data, model_description=model_description)
    if tags is not None:
        existing_tags = model_card.data.tags
        if existing_tags is None:
            existing_tags = []
            model_card.data.tags = existing_tags
        for model_tag in tags:
            if model_tag not in existing_tags:
                existing_tags.append(model_tag)
    return model_card
Creates or loads an existing model card and tags it. Args: repo_id (`str`): The repo_id where to look for the model card. tags (`List[str]`, *optional*): The list of tags to add in the model card token (`str`, *optional*): Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to the stored token. ignore_metadata_errors (`bool`, *optional*, defaults to `False`): If True, errors while parsing the metadata section will be ignored. Some information might be lost during the process. Use it at your own risk.
github-repos
def WriteArtifactsFile(self, artifacts, filename):
    """Serializes artifact definitions and writes them to a file.

    Args:
        artifacts (list[ArtifactDefinition]): artifact definitions to write.
        filename (str): path of the destination file.
    """
    # Format first, then write, so a formatting failure never truncates the file.
    formatted_text = self.FormatArtifacts(artifacts)
    with open(filename, 'w') as output_file:
        output_file.write(formatted_text)
Writes artifact definitions to a file. Args: artifacts (list[ArtifactDefinition]): artifact definitions to be written. filename (str): name of the file to write artifacts to.
codesearchnet
def meminfo():
    """Return information about physical and virtual memory on the system.

    Returns:
        dict: per-field memory figures, each scaled to the largest binary
        prefix as ``{'unit': <symbol>, 'value': <number>}``.
    """
    # NOTE(review): assumes psutil's named tuples unpack into exactly these
    # positional fields — confirm against the installed psutil version.
    vm_total, vm_available, vm_percent, vm_used, vm_free = psutil.virtual_memory()
    swp_total, swp_used, swp_free, swp_percent, _, _ = psutil.swap_memory()

    def scaled(amount):
        # Map a raw byte count onto the largest binary prefix it reaches.
        symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
        thresholds = {sym: 1 << ((idx + 1) * 10) for idx, sym in enumerate(symbols)}
        for sym in reversed(symbols):
            if amount >= thresholds[sym]:
                return {'unit': sym, 'value': float(amount) / thresholds[sym]}
        # Smaller than 1 KiB: report plain bytes.
        return {'unit': 'B', 'value': amount}

    return {
        'VmallocTotal': scaled(vm_total),
        'VmallocUsed': scaled(vm_used),
        'VmallocFree': scaled(vm_free),
        'VmallocAvail': scaled(vm_available),
        'SwapTotal': scaled(swp_total),
        'SwapUsed': scaled(swp_used),
        'SwapFree': scaled(swp_free),
    }
Return information about physical and virtual memory on the system Returns: dict: A dictionary of information about memory on the system CLI Example: .. code-block:: bash salt * status.meminfo
codesearchnet
def get_servo_status_detail(self):
    """Read the servo's detailed status register over the serial link.

    Sends a RAM read request for the status-detail address and decodes the
    relevant byte of the reply.

    Returns:
        int: the status-detail byte; refer to the datasheet for bit meanings.

    Raises:
        HerkulexError: if the reply could not be read from the servo.
    """
    # Request packet: size, servo id, RAM read command, register, byte count.
    packet = [9, self.servoid, RAM_READ_REQ, STATUS_DETAIL_RAM, BYTE1]
    send_data(packet)
    try:
        reply = SERPORT.read(12)
        # Byte 9 of the 12-byte reply carries the status-detail value.
        return ord(reply[9]) & 0xFF
    except HerkulexError:
        raise HerkulexError('could not communicate with motors')
Get the detailed error status of the servo. This function gets the detailed error status (if any) of the servo. Args: none Returns: int: an integer corresponding to the servo status; refer to the datasheet for bit meanings.
codesearchnet
def swd_read8(self, offset):
    """Gets a unit of ``8`` bits from the input buffer.

    Args:
        self (JLink): the ``JLink`` instance.
        offset (int): the offset (in bits) from which to start reading.

    Returns:
        int: the value read, truncated to an unsigned 8-bit integer.
    """
    raw = self._dll.JLINK_SWD_GetU8(offset)
    # Mask down to the low byte, mirroring a C uint8 cast.
    return ctypes.c_uint8(raw).value
Gets a unit of ``8`` bits from the input buffer. Args: self (JLink): the ``JLink`` instance offset (int): the offset (in bits) from which to start reading Returns: The integer read from the input buffer.
juraj-google-style
def setitem(self, axis, key, value):
    """Set the row or column identified by `key` to `value`.

    Args:
        axis: 0 to set a column (`key` is a column label), otherwise a row
            (`key` is an index label).
        key: the label of the column/row to set.
        value: scalar or list-like of values to assign.

    Returns:
        A new QueryCompiler with the assignment applied.
    """
    def setitem(df, internal_indices=[]):
        # Applied per-partition: writes `value` into the positions given by
        # `internal_indices` of the partition `df`.
        # NOTE(review): mutable default argument is shared across calls —
        # harmless while never mutated, but worth confirming upstream.
        def _setitem():
            if (len(internal_indices) == 1):
                # Single position: use scalar-style assignment.
                if (axis == 0):
                    df[df.columns[internal_indices[0]]] = value
                else:
                    df.iloc[internal_indices[0]] = value
            elif (axis == 0):
                df[df.columns[internal_indices]] = value
            else:
                df.iloc[internal_indices] = value
        try:
            _setitem()
        except ValueError:
            # Assignment can fail on a view; retry on a private copy.
            df = df.copy()
            _setitem()
        return df
    if (axis == 0):
        # Translate the column label into positional indices.
        numeric_indices = list(self.columns.get_indexer_for([key]))
    else:
        # Translate the index label into positional indices.
        numeric_indices = list(self.index.get_indexer_for([key]))
    prepared_func = self._prepare_method(setitem)
    if is_list_like(value):
        # List-like values may span the full axis, so apply along it.
        new_data = self.data.apply_func_to_select_indices_along_full_axis(axis, prepared_func, numeric_indices, keep_remaining=True)
    else:
        new_data = self.data.apply_func_to_select_indices(axis, prepared_func, numeric_indices, keep_remaining=True)
    return self.__constructor__(new_data, self.index, self.columns)
Set the column defined by `key` to the `value` provided. Args: key: The column name to set. value: The value to set the column to. Returns: A new QueryCompiler
codesearchnet
def set_file_idx_offset(self, file_idx_offset=0):
    """Set the offset used when numbering output files.

    Args:
        file_idx_offset: either an integer (filenames start from
            ``file_idx_offset`` + 1) or the string ``'auto'`` (continue
            from the storage's existing max file index plus 1).

    Raises:
        ValueError: if the argument is neither an integer nor ``'auto'``.
    """
    if isinstance(file_idx_offset, int):
        self.file_idx_offset = file_idx_offset
        return
    if file_idx_offset == 'auto':
        # Resume numbering after whatever already exists in storage.
        self.file_idx_offset = self.storage.max_file_idx()
        return
    raise ValueError('"file_idx_offset" must be an integer or `auto`')
Set offset of file index. Args: file_idx_offset: It can be either an integer or 'auto'. If set to an integer, the filename will start from ``file_idx_offset`` + 1. If set to ``'auto'``, the filename will start from existing max file index plus 1.
juraj-google-style
def Collect(self, knowledge_base):
    """Collects the %ProgramData% value into the knowledge base.

    Prefers the existing ``programdata`` environment variable and falls
    back to the legacy ``allusersprofile`` value.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing information.

    Raises:
        PreProcessFail: if the preprocessing fails.
    """
    variable = knowledge_base.GetEnvironmentVariable('programdata')
    value = getattr(variable, 'value', None)
    if not value:
        # Older Windows versions expose the path as %AllUsersProfile%.
        variable = knowledge_base.GetEnvironmentVariable('allusersprofile')
        value = getattr(variable, 'value', None)
    if value:
        new_variable = artifacts.EnvironmentVariableArtifact(
            case_sensitive=False, name='programdata', value=value)
        try:
            logger.debug('setting environment variable: {0:s} to: "{1:s}"'.format(
                'programdata', value))
            knowledge_base.AddEnvironmentVariable(new_variable)
        except KeyError:
            # Variable already present; keep the existing value.
            pass
Collects values from the knowledge base. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. Raises: PreProcessFail: if the preprocessing fails.
juraj-google-style
def op_or(self, *elements):
    """Join the given elements onto this expression with an "OR" operator.

    Args:
        *elements (BaseExpression): the ``Expression`` and/or ``Constraint``
            elements the "OR" operator applies to.

    Returns:
        Expression: ``self`` or the related ``Expression``.
    """
    # The comma operator encodes "OR" in this expression language.
    joined = self.add_operator(Operator(','))
    for member in elements:
        joined.add_element(member)
    return joined
Update the ``Expression`` by joining the specified additional ``elements`` using an "OR" ``Operator`` Args: *elements (BaseExpression): The ``Expression`` and/or ``Constraint`` elements which the "OR" ``Operator`` applies to. Returns: Expression: ``self`` or related ``Expression``.
juraj-google-style
def get_proj_info(self, token):
    """Fetch the project info for a given token from the remote service.

    Args:
        token (str): token to return information for.

    Returns:
        dict: JSON representation of the project info.
    """
    info_url = self.url() + "{}/info/".format(token)
    response = self.remote_utils.get_url(info_url)
    return response.json()
Return the project info for a given token. Arguments: token (str): Token to return information for Returns: JSON: representation of proj_info
juraj-google-style
def get_stored_metadata(self, temp_ver):
    """Retrieve the stored metadata for a template version.

    Args:
        temp_ver (TemplateVersion): template version to retrieve the
            metadata for.

    Returns:
        dict: the metadata of the given template version.
    """
    # Metadata lives next to the template under "<name>.metadata".
    metadata_path = self._prefixed('%s.metadata' % temp_ver.name)
    with open(metadata_path) as metadata_file:
        return json.load(metadata_file)
Retrieves the metadata for the given template version from the store Args: temp_ver (TemplateVersion): template version to retrieve the metadata for Returns: dict: the metadata of the given template version
codesearchnet
def find_documents(self, sentence, limit=None, must_sort=True, search_type='fuzzy'):
    """Return all the documents matching the given keywords.

    Args:
        sentence: the sentence query to search for.
        limit: maximum number of documents to return (``None`` = unlimited).
        must_sort: when True, honor each query parser's sort order and never
            stop early.
        search_type: key into ``self.search_param_list`` selecting the set
            of query parsers (e.g. 'fuzzy').

    Returns:
        list: matching doc objects.
    """
    sentence = sentence.strip()
    sentence = strip_accents(sentence)
    if sentence == u"":
        # An empty query matches everything.
        return self.get_all_docs()
    result_list_list = []
    total_results = 0
    for query_parser in self.search_param_list[search_type]:
        query = query_parser["query_parser"].parse(sentence)
        sortedby = None
        if must_sort and "sortedby" in query_parser:
            sortedby = query_parser['sortedby']
        if sortedby:
            results = self.__searcher.search(
                query, limit=limit, sortedby=sortedby
            )
        else:
            results = self.__searcher.search(
                query, limit=limit
            )
        results = [
            (result['docid'], result['doctype'])
            for result in results
        ]
        result_list_list.append(results)
        total_results += len(results)
        # BUGFIX: guard limit against None — on Python 3 comparing
        # `int >= None` raises TypeError when must_sort=False and no limit
        # was given.
        if not must_sort and limit is not None and total_results >= limit:
            break
    # De-duplicate results across parsers while dropping stale doc ids.
    docs = set()
    for result_intermediate in result_list_list:
        for result in result_intermediate:
            doc = self._docs_by_id.get(result[0])
            if doc is None:
                continue
            docs.add(doc)
    docs = [d for d in docs]
    if not must_sort and limit is not None:
        docs = docs[:limit]
    return docs
Returns all the documents matching the given keywords Arguments: sentence --- a sentenced query Returns: An array of document (doc objects)
juraj-google-style
def _apply_mask(self):
    """Applies the stored mask to the convolution weight matrix.

    Returns:
        w: a masked copy of the convolution weights.

    Raises:
        base.IncompatibleShapeError: if the mask has more dimensions than
            the weights, or does not match the weights' leading dimensions.
    """
    w = self._w
    w_shape = w.get_shape()
    mask_shape = self._mask.get_shape()
    if (mask_shape.ndims > w_shape.ndims):
        # NOTE(review): the message reports len(self._data_format) rather
        # than the weights' rank — confirm this is the intended "max shape".
        raise base.IncompatibleShapeError('Invalid mask shape: {}. Max shape: {}'.format(mask_shape.ndims, len(self._data_format)))
    if (mask_shape != w_shape[:mask_shape.ndims]):
        raise base.IncompatibleShapeError('Invalid mask shape: {}. Weight shape: {}'.format(mask_shape, w_shape))
    # Broadcast the mask up to the weights' rank by appending size-1 axes.
    while (self._mask.get_shape().ndims < w_shape.ndims):
        self._mask = tf.expand_dims(self._mask, (- 1))
    w = (w * self._mask)
    return w
Applies the passed-in mask to the convolution matrix. Returns: w: A copy of the convolution matrix that has had the mask applied. Raises: base.IncompatibleShapeError: If the mask shape has more dimensions than the weight matrix. base.IncompatibleShapeError: If the mask and the weight matrix don't match on shape.
codesearchnet
def get_path_spec(self, path, action=None):
    """Get the specification matching the given path.

    Exact path names are preferred; otherwise templated paths such as
    ``/pets/{id}`` are matched by substituting each parameter with a
    wildcard group.

    Args:
        path: the path we want the specification for.
        action: if given, narrow the result to this action's sub-spec.

    Returns:
        tuple: (base path name, specification), or (None, None) when no
        specification (or the requested action) is found.
    """
    matched_spec = None
    matched_name = None
    # First pass: exact match against the declared paths.
    for candidate in self.paths.keys():
        if path == candidate:
            matched_spec = self.paths[candidate]
            matched_name = candidate
    # Second pass: templated match — each {param} becomes ([^/]*).
    if matched_spec is None:
        for candidate in self.paths.keys():
            pattern = re.compile(re.sub('{[^/]*}', '([^/]*)', candidate) + r'$')
            if re.match(pattern, path):
                matched_spec = self.paths[candidate]
                matched_name = candidate
    # Narrow to the requested action, if any.
    if matched_spec is not None and action is not None:
        if action not in matched_spec.keys():
            return (None, None)
        matched_spec = matched_spec[action]
    return (matched_name, matched_spec)
Get the specification matching with the given path. Args: path: path we want the specification. action: get the specification for the given action. Returns: A tuple with the base name of the path and the specification. Or (None, None) if no specification is found.
juraj-google-style
def search(self, term):
    """Search for images on Docker Hub, like the ``docker search`` command.

    Args:
        term (str): A term to search for.

    Returns:
        (list of dicts): The response of the search.

    Raises:
        :py:class:`docker.errors.APIError`:
            If the server returns an error.
    """
    response = self._get(self._url("/images/search"), params={'term': term})
    return self._result(response, True)
Search for images on Docker Hub. Similar to the ``docker search`` command. Args: term (str): A term to search for. Returns: (list of dicts): The response of the search. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def GetProperties(cls, path_spec):
    """Retrieves a dictionary containing the path specification properties.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        dict[str, str]: property name to value, for every name in
        ``cls.PROPERTY_NAMES`` that the path specification defines.
    """
    # Only include properties the path specification actually carries.
    return {
        name: getattr(path_spec, name)
        for name in cls.PROPERTY_NAMES
        if hasattr(path_spec, name)
    }
Retrieves a dictionary containing the path specification properties. Args: path_spec (PathSpec): path specification. Returns: dict[str, str]: path specification properties.
codesearchnet
def etherscan_verify_contract(chain_id: int, apikey: str, source_module: DeploymentModule, contract_name: str):
    """Calls the Etherscan API to verify the Solidity source of a contract.

    Submits the joined sources plus constructor arguments, then polls the
    verification GUID until it passes, fails, or times out.

    Args:
        chain_id: EIP-155 chain id of the Ethereum chain.
        apikey: key for calling the Etherscan API.
        source_module: module name used to look up contract sources.
        contract_name: e.g. 'TokenNetworkRegistry', 'SecretRegistry'.

    Raises:
        FileNotFoundError: if no deployment file exists for the chain/module.
        ValueError: on submission failure or a definitive verify failure.
        TimeoutError: if verification does not complete within the retries.
    """
    etherscan_api = api_of_chain_id[chain_id]
    deployment_info = get_contracts_deployment_info(chain_id=chain_id, module=source_module)
    if (deployment_info is None):
        raise FileNotFoundError(f'Deployment file not found for chain_id={chain_id} and module={source_module}')
    contract_manager = ContractManager(contracts_precompiled_path())
    # Assemble the verification payload: source, metadata and constructor args.
    data = post_data_for_etherscan_verification(apikey=apikey, deployment_info=deployment_info['contracts'][contract_name], source=join_sources(source_module=source_module, contract_name=contract_name), contract_name=contract_name, metadata=json.loads(contract_manager.contracts[contract_name]['metadata']), constructor_args=get_constructor_args(deployment_info=deployment_info, contract_name=contract_name, contract_manager=contract_manager))
    response = requests.post(etherscan_api, data=data)
    content = json.loads(response.content.decode())
    print(content)
    print(f"Status: {content['status']}; {content['message']} ; GUID = {content['result']}")
    # Derive the human-facing manual verification URL from the API endpoint.
    etherscan_url = etherscan_api.replace('api-', '').replace('api', '')
    etherscan_url += ('/verifyContract2?a=' + data['contractaddress'])
    # NOTE(review): `f` here appears to be a truncated f-string literal (the
    # manual submission instructions) lost in extraction — restore from the
    # upstream source before running; as written this raises NameError.
    manual_submission_guide = f
    if (content['status'] != '1'):
        # Already-verified contracts are reported as a non-1 status but are fine.
        if (content['result'] == 'Contract source code already verified'):
            return
        else:
            raise ValueError(('Etherscan submission failed for an unknown reason\n' + manual_submission_guide))
    guid = content['result']
    status = '0'
    retries = 10
    # Poll the GUID until Etherscan reports a terminal state or we give up.
    while ((status == '0') and (retries > 0)):
        retries -= 1
        r = guid_status(etherscan_api=etherscan_api, guid=guid)
        status = r['status']
        if (r['result'] == 'Fail - Unable to verify'):
            raise ValueError(manual_submission_guide)
        if (r['result'] == 'Pass - Verified'):
            return
        print('Retrying...')
        sleep(5)
    raise TimeoutError(manual_submission_guide)
Calls Etherscan API for verifying the Solidity source of a contract. Args: chain_id: EIP-155 chain id of the Ethereum chain apikey: key for calling Etherscan API source_module: a module name to look up contracts_source_path() contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
codesearchnet