Columns:
    code: string, lengths 20 to 4.93k
    docstring: string, lengths 33 to 1.27k
    source: string, 3 classes
def _has_next_page(self):
    if self.page_number == 0:
        return True
    if self.max_results is not None:
        if self.num_results >= self.max_results:
            return False
    return True if self.next_page_token else False

Determines whether or not there are more pages with results.

Returns:
    bool: Whether the iterator has more pages.
codesearchnet
def _read_wrappers(self, name):
    io_attr = getattr(self._io, name)

    def read_wrapper(*args, **kwargs):
        self._io.seek(self._read_seek, self._read_whence)
        ret_value = io_attr(*args, **kwargs)
        self._read_seek = self._io.tell()
        self._read_whence = 0
        self._io.seek(0, 2)
        return ret_value

    return read_wrapper

Wrap a stream attribute in a read wrapper.

Returns a read_wrapper which tracks our own read pointer since the
stream object has no concept of a different read and write pointer.

Args:
    name: The name of the attribute to wrap. Should be a read call.

Returns:
    The read_wrapper function.
juraj-google-style
def run(self, include_reset=True, accelerated=True):
    self._start_tick = self.tick_count
    if self._check_stop_conditions(self.sensor_graph):
        return
    if include_reset:
        pass
    i = None
    for i, stim in enumerate(self.stimuli):
        if stim.time != 0:
            break
        reading = IOTileReading(self.tick_count, stim.stream.encode(), stim.value)
        self.sensor_graph.process_input(stim.stream, reading, self.rpc_executor)
    if i is not None and i > 0:
        self.stimuli = self.stimuli[i:]
    while not self._check_stop_conditions(self.sensor_graph):
        now = monotonic()
        next_tick = now + 1.0
        self.tick_count += 1
        i = None
        for i, stim in enumerate(self.stimuli):
            if stim.time != self.tick_count:
                break
            reading = IOTileReading(self.tick_count, stim.stream.encode(), stim.value)
            self.sensor_graph.process_input(stim.stream, reading, self.rpc_executor)
        if i is not None and i > 0:
            self.stimuli = self.stimuli[i:]
        self._check_additional_ticks(self.tick_count)
        if self.tick_count % 10 == 0:
            reading = IOTileReading(self.tick_count, system_tick.encode(), self.tick_count)
            self.sensor_graph.process_input(system_tick, reading, self.rpc_executor)
            reading = IOTileReading(self.tick_count, battery_voltage.encode(),
                                    int(self.voltage * 65536))
            self.sensor_graph.process_input(battery_voltage, reading, self.rpc_executor)
        now = monotonic()
        if not accelerated and now < next_tick:
            time.sleep(next_tick - now)

Run this sensor graph until a stop condition is hit.

Multiple calls to this function are useful only if there has been some
change in the stop conditions that would cause the second call to not
exit immediately.

Args:
    include_reset (bool): Start the sensor graph run with a reset event
        to match what would happen when an actual device powers on.
    accelerated (bool): Whether to run this sensor graph as fast as
        possible or to delay tick events to simulate the actual passage
        of wall clock time.
codesearchnet
def disconnect_async(self, conn_id, callback):
    found_handle = None
    for handle, conn in self._connections.items():
        if conn['connection_id'] == conn_id:
            found_handle = handle
    if found_handle is None:
        callback(conn_id, self.id, False, 'Invalid connection_id')
        return
    self._command_task.async_command(['_disconnect', found_handle], self._on_disconnect,
                                     {'connection_id': conn_id, 'handle': found_handle,
                                      'callback': callback})

Asynchronously disconnect from a device that has previously been connected.

Args:
    conn_id (int): a unique identifier for this connection on the
        DeviceManager that owns this adapter.
    callback (callable): A function called as callback(conn_id, adapter_id,
        success, failure_reason) when the disconnection finishes.
        Disconnection can only either succeed or timeout.
juraj-google-style
def set_dtype_conversion_mode(dtype_conversion_mode) -> None:
    global _dtype_conversion_mode
    _dtype_conversion_mode = _get_promo_mode_enum(dtype_conversion_mode)

Enables the specified dtype conversion mode.

Args:
    dtype_conversion_mode: a string that specifies dtype conversion mode.
        This string corresponds to a PromoMode Enum and can be 'off',
        'legacy', 'safe' or 'all'.
github-repos
def register(self, username=""):
    if not username:
        username = utils.mxid2localpart(self.identity)
    content = {
        "type": "m.login.application_service",
        "username": username,
    }
    return self._send("POST", "/register", content, api_path=MATRIX_V2_API_PATH)

Performs /register with type: m.login.application_service.

Args:
    username(str): Username to register.
juraj-google-style
def _read_ipv6_opts_options(self, length):
    counter = 0
    optkind = list()
    options = dict()
    while counter < length:
        code = self._read_unpack(1)
        if not code:
            break
        abbr, desc = _IPv6_Opts_OPT.get(code, ('None', 'Unassigned'))
        data = _IPv6_Opts_PROC(abbr)(self, code, desc=desc)
        enum = _OPT_TYPE.get(code)
        counter += data['length']
        if enum in optkind:
            if isinstance(options[abbr], tuple):
                options[abbr] += (Info(data),)
            else:
                options[abbr] = (Info(options[abbr]), Info(data))
        else:
            optkind.append(enum)
            options[abbr] = data
    if counter != length:
        raise ProtocolError(f'{self.alias}: invalid format')
    return tuple(optkind), options

Read IPv6_Opts options.

Positional arguments:
    * length -- int, length of options

Returns:
    * dict -- extracted IPv6_Opts options
codesearchnet
def lists(self, **kwargs):
    path = self._get_path('lists')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response

Gets the top-level lists available from the API.

Returns:
    A dict representation of the JSON returned from the API.
codesearchnet
def _get_client_by_id(self, client_id):
    client = self.grr_api.Client(client_id)
    print('Checking for client approval')
    self._check_approval_wrapper(client, client.ListFlows)
    print('{0:s}: Client approval is valid'.format(client_id))
    return client.Get()

Get GRR client dictionary and make sure valid approvals exist.

Args:
    client_id: GRR client ID.

Returns:
    GRR API Client object
codesearchnet
def _write_submit_script(self, template, script_filename, job_name, configs):
    try:
        submit_script = Template(template).substitute(jobname=job_name, **configs)
        with open(script_filename, 'w') as f:
            f.write(submit_script)
    except KeyError as e:
        logger.error('Missing keys for submit script : %s', e)
        raise SchedulerMissingArgs(e.args, self.sitename)
    except IOError as e:
        logger.error('Failed writing to submit script: %s', script_filename)
        raise ScriptPathError(script_filename, e)
    except Exception as e:
        print('Template : ', template)
        print('Args : ', job_name)
        print('Kwargs : ', configs)
        logger.error('Uncategorized error: %s', e)
        raise e
    return True

Generate submit script and write it to a file.

Args:
    - template (string) : The template string to be used for the writing
      submit script
    - script_filename (string) : Name of the submit script
    - job_name (string) : job name
    - configs (dict) : configs that get pushed into the template

Returns:
    - True: on success

Raises:
    SchedulerMissingArgs : If template is missing args
    ScriptPathError : Unable to write submit script out
codesearchnet
def depth(self, local: bool = True) -> int:
    G = self.graph
    if not local:
        def remove_local(dagc: DAGCircuit) -> Generator[Operation, None, None]:
            for elem in dagc:
                if dagc.graph.degree[elem] > 2:
                    yield elem
        G = DAGCircuit(remove_local(self)).graph
    return nx.dag_longest_path_length(G) - 1

Return the circuit depth.

Args:
    local: If True include local one-qubit gates in depth calculation.
        Else return the multi-qubit gate depth.
codesearchnet
def mkfile_uchroot(filepath, root="."):
    from benchbuild.utils.uchroot import no_args, uretry

    uchroot = no_args()
    uchroot = uchroot["-E", "-A", "-C", "-w", "/", "-r"]
    uchroot = uchroot[os.path.abspath(root)]
    uretry(uchroot["--", "/bin/touch", filepath])

Create a file inside a uchroot env.

You will want to use this when you need to create a file with appropriate
rights inside a uchroot container with subuid/subgid handling enabled.

Args:
    filepath: The filepath that should be created. Absolute inside the
        uchroot container.
    root: The root PATH of the container filesystem as seen outside of
        the container.
juraj-google-style
def locate_point(nodes, x_val, y_val):
    zero1 = _curve_helpers.full_reduce(nodes[[0], :]) - x_val
    zero2 = _curve_helpers.full_reduce(nodes[[1], :]) - y_val
    if zero1.shape[1] > zero2.shape[1]:
        zero1, zero2 = zero2, zero1
    if zero1.shape[1] == 1:
        zero1, zero2 = zero2, zero1
    power_basis1 = poly_to_power_basis(zero1[0, :])
    all_roots = roots_in_unit_interval(power_basis1)
    if all_roots.size == 0:
        return None
    power_basis2 = normalize_polynomial(poly_to_power_basis(zero2[0, :]))
    near_zero = np.abs(polynomial.polyval(all_roots, power_basis2))
    index = np.argmin(near_zero)
    if near_zero[index] < _ZERO_THRESHOLD:
        return all_roots[index]
    return None

Find the parameter corresponding to a point on a curve.

.. note::

    This assumes that the curve :math:`B(s, t)` defined by ``nodes``
    lives in :math:`\mathbf{R}^2`.

Args:
    nodes (numpy.ndarray): The nodes defining a Bézier curve.
    x_val (float): The :math:`x`-coordinate of the point.
    y_val (float): The :math:`y`-coordinate of the point.

Returns:
    Optional[float]: The parameter on the curve (if it exists).
codesearchnet
def ic45(msg):
    d = hex2bin(data(msg))
    if d[9] == '0':
        return None
    ic = bin2int(d[10:12])
    return ic

Icing.

Args:
    msg (String): 28 bytes hexadecimal message string

Returns:
    int: Icing level. 0=NIL, 1=Light, 2=Moderate, 3=Severe
juraj-google-style
def reset_trial(self, trial, new_config, new_experiment_tag):
    trial.experiment_tag = new_experiment_tag
    trial.config = new_config
    trainable = trial.runner
    with warn_if_slow("reset_config"):
        reset_val = ray.get(trainable.reset_config.remote(new_config))
    return reset_val

Tries to invoke `Trainable.reset_config()` to reset trial.

Args:
    trial (Trial): Trial to be reset.
    new_config (dict): New configuration for Trial trainable.
    new_experiment_tag (str): New experiment name for trial.

Returns:
    True if `reset_config` is successful else False.
juraj-google-style
def get_context_from_cmdln(args, desc='Run scriptworker'):
    context = Context()
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('config_path', type=str, nargs='?',
                        default='scriptworker.yaml', help='the path to the config file')
    parsed_args = parser.parse_args(args)
    context.config, credentials = create_config(config_path=parsed_args.config_path)
    update_logging_config(context)
    return context, credentials

Create a Context object from args.

Args:
    args (list): the commandline args. Generally sys.argv

Returns:
    tuple: ``scriptworker.context.Context`` with populated config, and
    credentials frozendict
codesearchnet
def usergroups_users_update(
    self, *, usergroup: str, users: List[str], **kwargs
) -> SlackResponse:
    self._validate_xoxp_token()
    kwargs.update({"usergroup": usergroup, "users": users})
    return self.api_call("usergroups.users.update", json=kwargs)

Update the list of users for a User Group.

Args:
    usergroup (str): The encoded ID of the User Group to update.
        e.g. 'S0604QSJC'
    users (list): A list of user IDs that represent the entire list of
        users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
juraj-google-style
def video_augmentation(features, hue=False, saturate=False, contrast=False):
    inputs, targets = features["inputs"], features["targets"]
    in_steps = common_layers.shape_list(inputs)[0]
    video = tf.concat((inputs, targets), axis=0)
    if hue:
        video = tf.image.random_hue(video, max_delta=0.2)
    if saturate:
        video = tf.image.random_saturation(video, lower=0.5, upper=1.5)
    if contrast:
        video = tf.image.random_contrast(video, lower=0.5, upper=1.5)
    features["inputs"], features["targets"] = video[:in_steps], video[in_steps:]
    return features

Augments video with optional hue, saturation and contrast.

Args:
    features: dict, with keys "inputs", "targets".
        features["inputs"], 4-D Tensor, shape=(THWC)
        features["targets"], 4-D Tensor, shape=(THWC)
    hue: bool, apply hue_transform.
    saturate: bool, apply saturation transform.
    contrast: bool, apply contrast transform.

Returns:
    augment_features: dict with transformed "inputs" and "targets".
juraj-google-style
def _find_workflows(mcs, attrs):
    workflows = {}
    for attribute, value in attrs.items():
        if isinstance(value, Workflow):
            workflows[attribute] = StateField(value)
    return workflows

Finds all occurrences of a workflow in the attributes definitions.

Returns:
    dict(str => StateField): maps an attribute name to a StateField
    describing the related Workflow.
codesearchnet
def search_features(self, search):
    if isinstance(search, string_types):
        search = [search]
    search = [s.replace('*', '.*') for s in search]
    cols = list(self.data.columns)
    results = []
    for s in search:
        results.extend([f for f in cols if re.match(s + '$', f)])
    return list(set(results))

Returns all features that match any of the elements in the input list.

Args:
    search (str, list): A string or list of strings defining the query.

Returns:
    A list of matching feature names.
juraj-google-style
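A standalone sketch of the wildcard matching used above; the column names and the pattern are hypothetical:

import re

cols = ['age', 'age_sq', 'income', 'gender']
patterns = [s.replace('*', '.*') for s in ['age*']]  # translate glob to regex
matches = [f for p in patterns for f in cols if re.match(p + '$', f)]
print(sorted(set(matches)))  # ['age', 'age_sq']; the '$' anchors the full name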
def _ParseTensorName(tensor_name):
    components = tensor_name.split(':')
    if len(components) == 2:
        try:
            output_index = int(components[1])
        except ValueError:
            raise ValueError(
                f'Cannot convert {tensor_name!r} to a tensor name. Second component '
                f'of the name following the `:` should be an int. Got {components[1]}.')
        return (components[0], output_index)
    elif len(components) == 1:
        return (components[0], 0)
    else:
        raise ValueError(
            f"Cannot convert '{tensor_name}' to a tensor name. Tensor names should "
            f"not contain more than 1 `:`. Obtained {len(components) - 1}")

Parses a tensor name into an operation name and output index.

This function will canonicalize tensor names as follows:

* "foo:0" -> ("foo", 0)
* "foo:7" -> ("foo", 7)
* "foo" -> ("foo", 0)
* "foo:bar:baz" -> ValueError

Args:
    tensor_name: The name of a tensor.

Returns:
    A tuple containing the operation name, and the output index.

Raises:
    ValueError: If `tensor_name` cannot be interpreted as the name of a
        tensor.
github-repos
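The canonicalization rule is small enough to check by hand; a self-contained sketch of the same parsing logic:

def parse_tensor_name(tensor_name):
    # "foo:0" -> ("foo", 0); "foo" -> ("foo", 0); more than one ':' is an error.
    components = tensor_name.split(':')
    if len(components) == 2:
        return (components[0], int(components[1]))
    if len(components) == 1:
        return (components[0], 0)
    raise ValueError('too many `:` in {!r}'.format(tensor_name))

assert parse_tensor_name('foo:7') == ('foo', 7)
assert parse_tensor_name('foo') == ('foo', 0)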
def _run_single(self, thread_id, agent, environment, deterministic=False,
                max_episode_timesteps=-1, episode_finished=None, testing=False,
                sleep=None):
    old_episode_finished = False
    if episode_finished is not None and len(getargspec(episode_finished).args) == 1:
        old_episode_finished = True
    episode = 0
    while not self.should_stop:
        state = environment.reset()
        agent.reset()
        self.global_timestep, self.global_episode = agent.timestep, agent.episode
        episode_reward = 0
        time_step = 0
        time_start = time.time()
        while True:
            action, internals, states = agent.act(states=state,
                                                  deterministic=deterministic,
                                                  buffered=False)
            reward = 0
            for repeat in xrange(self.repeat_actions):
                state, terminal, step_reward = environment.execute(action=action)
                reward += step_reward
                if terminal:
                    break
            if not testing:
                agent.atomic_observe(states=state, actions=action, internals=internals,
                                     reward=reward, terminal=terminal)
            if sleep is not None:
                time.sleep(sleep)
            time_step += 1
            episode_reward += reward
            if terminal or time_step == max_episode_timesteps:
                break
            if self.should_stop:
                return
        self.global_timestep += time_step
        self.episode_list_lock.acquire()
        self.episode_rewards.append(episode_reward)
        self.episode_timesteps.append(time_step)
        self.episode_times.append(time.time() - time_start)
        self.episode_list_lock.release()
        if episode_finished is not None:
            if old_episode_finished:
                summary_data = {'thread_id': thread_id, 'episode': episode,
                                'timestep': time_step, 'episode_reward': episode_reward}
                if not episode_finished(summary_data):
                    return
            elif not episode_finished(self, thread_id):
                return
        episode += 1

The target function for a thread, runs an agent and environment until
signaled to stop.

Adds rewards to shared episode rewards list.

Args:
    thread_id (int): The ID of the thread that's running this target
        function.
    agent (Agent): The Agent object that this particular thread uses.
    environment (Environment): The Environment object that this particular
        thread uses.
    max_episode_timesteps (int): Max. number of timesteps per episode.
        Use -1 or 0 for non-limited episodes.
    episode_finished (callable): Function called after each episode that
        takes an episode summary spec and returns False, if this single
        run should terminate after this episode. Can be used e.g. to set
        a particular mean reward threshold.
codesearchnet
def _MergeIdentical(self, a, b):
    if a != b:
        raise MergeError("values must be identical ('%s' vs '%s')" %
                         (transitfeed.EncodeUnicode(a),
                          transitfeed.EncodeUnicode(b)))
    return b

Tries to merge two values. The values are required to be identical.

Args:
    a: The first value.
    b: The second value.

Returns:
    The trivially merged value.

Raises:
    MergeError: The values were not identical.
juraj-google-style
def element(self, using, value):
    return self._execute(Command.FIND_ELEMENT, {
        'using': using,
        'value': value
    })

Find an element in the current context.

Support:
    Android iOS Web(WebView)

Args:
    using(str): The element location strategy.
    value(str): The value of the location strategy.

Returns:
    WebElement Object.

Raises:
    WebDriverException.
juraj-google-style
def filter_by_analysis_period(self, analysis_period):
    self._check_analysis_period(analysis_period)
    _filtered_data = self.filter_by_moys(analysis_period.moys)
    _filtered_data.header._analysis_period = analysis_period
    return _filtered_data

Filter a Data Collection based on an analysis period.

Args:
    analysis_period: A Ladybug analysis period

Return:
    A new Data Collection with filtered data
codesearchnet
def word_fts(self, word):
    return list(map(self.fts, self.segs(word)))

Return featural analysis of `word`.

Args:
    word (unicode): one or more IPA segments

Returns:
    list: list of lists of (value, feature) tuples where each inner list
        corresponds to a segment in `word`
juraj-google-style
def path_walk(
    p: tcod.path.AStar, recompute: bool
) -> Union[Tuple[int, int], Tuple[None, None]]:
    x = ffi.new("int *")
    y = ffi.new("int *")
    if lib.TCOD_path_walk(p._path_c, x, y, recompute):
        return x[0], y[0]
    return None, None

Return the next (x, y) point in a path, or (None, None) if it's empty.

When ``recompute`` is True and a previously valid path reaches a point
where it is now blocked, a new path will automatically be found.

Args:
    p (AStar): An AStar instance.
    recompute (bool): Recompute the path automatically.

Returns:
    Union[Tuple[int, int], Tuple[None, None]]:
        A single (x, y) point, or (None, None)
juraj-google-style
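A hedged usage sketch: assuming a 2-D cost array and a path computed beforehand, the walk loop drains the path one step at a time (the map and endpoints are hypothetical):

import numpy as np
import tcod

cost = np.ones((10, 10), dtype=np.int8)  # all cells walkable (assumed map)
astar = tcod.path.AStar(cost)
astar.get_path(0, 0, 9, 9)               # a path must exist before walking it
x, y = path_walk(astar, recompute=True)
while x is not None:
    print('step:', x, y)
    x, y = path_walk(astar, recompute=True)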
def apply_region_configs(env_config):
    new_config = env_config.copy()
    for region in env_config.get('regions', REGIONS):
        if isinstance(env_config.get('regions'), dict):
            region_specific_config = env_config['regions'][region]
            new_config[region] = dict(DeepChainMap(region_specific_config, env_config))
        else:
            new_config[region] = env_config.copy()
    LOG.debug('Region Specific Config:\n%s', new_config)
    return new_config

Override default env configs with region specific configs and nest all
values under a region.

Args:
    env_config (dict): The environment specific config.

Return:
    dict: Newly updated dictionary with region overrides applied.
juraj-google-style
def read(self, face, *, alignment=1) -> bytes:
    return self.mglo.read(face, alignment)

Read a face from the cubemap texture.

Args:
    face (int): The face to read.

Keyword Args:
    alignment (int): The byte alignment of the pixels.
juraj-google-style
def get_merged_env(self, include_os=False):
    env = {}
    if include_os:
        env.update(os.environ.copy())
    for level in range(3):
        env.update(self.pipeline.data.env_list[level].copy())
    return env

Copying and merging environment variables.

Args:
    include_os (bool): when true then include the environment variables
        (default: False)

Returns:
    dict: environment variables as defined in the pipeline (optionally
    including system environment variables).
juraj-google-style
def set_permitted_ip(address=None, deploy=False):
    if not address:
        raise CommandExecutionError("Address option must not be empty.")
    ret = {}
    query = {'type': 'config',
             'action': 'set',
             'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']'
                      '/deviceconfig/system/permitted-ip',
             'element': '<entry name=\'{0}\'></entry>'.format(address)}
    ret.update(__proxy__['panos.call'](query))
    if deploy is True:
        ret.update(commit())
    return ret

Add an IPv4 address or network to the permitted IP list.

Args:
    address (str): The IPv4 address or network to allow access to add to
        the Palo Alto device.
    deploy (bool): If true then commit the full candidate configuration,
        if false only set pending change.

CLI Example:

.. code-block:: bash

    salt '*' panos.set_permitted_ip 10.0.0.1
    salt '*' panos.set_permitted_ip 10.0.0.0/24
    salt '*' panos.set_permitted_ip 10.0.0.1 deploy=True
juraj-google-style
def StatFS(self, path=None):
    if platform.system() == "Windows":
        raise RuntimeError("os.statvfs not available on Windows")
    local_path = client_utils.CanonicalPathToLocalPath(path or self.path)
    return os.statvfs(local_path)

Call os.statvfs for a given list of rdf_paths. OS X and Linux only.

Note that a statvfs call for a network filesystem (e.g. NFS) that is
unavailable, e.g. due to no network, will result in the call blocking.

Args:
    path: a Unicode string containing the path or None. If path is None
        the value in self.path is used.

Returns:
    posix.statvfs_result object

Raises:
    RuntimeError: if called on windows
juraj-google-style
def _PromptUserForPartitionIdentifiers(self, volume_system, volume_identifiers):
    print_header = True
    while True:
        if print_header:
            self._PrintTSKPartitionIdentifiersOverview(volume_system, volume_identifiers)
            print_header = False
        lines = self._textwrapper.wrap(self._USER_PROMPT_TSK)
        self._output_writer.Write('\n'.join(lines))
        self._output_writer.Write('\n\nPartition identifiers: ')
        try:
            selected_volumes = self._ReadSelectedVolumes(volume_system, prefix='p')
            if selected_volumes and not set(selected_volumes).difference(volume_identifiers):
                break
        except ValueError:
            pass
        self._output_writer.Write('\n')
        lines = self._textwrapper.wrap(
            'Unsupported partition identifier(s), please try again or abort with Ctrl^C.')
        self._output_writer.Write('\n'.join(lines))
        self._output_writer.Write('\n\n')
    return selected_volumes

Prompts the user to provide partition identifiers.

Args:
    volume_system (dfvfs.TSKVolumeSystem): volume system.
    volume_identifiers (list[str]): volume identifiers including prefix.

Returns:
    list[str]: selected volume identifiers including prefix or None.
codesearchnet
def write_data(data):
    sorted_dict = sort_recursive(data)
    with open(_datafile, 'w') as file:
        _json.dump(sorted_dict, file, indent=2)

Write the data to the data.json file.

Args:
    data (dict): The updated data dictionary for Modis
codesearchnet
def get_tensor_device(self, tensor_name):
    tensor = self._name_to_tensor(tensor_name)
    if isinstance(tensor, tf.Tensor):
        return tensor.device
    else:
        return None

The device of a tensor.

Note that only tf tensors have device assignments.

Args:
    tensor_name: a string, name of a tensor in the graph.

Returns:
    a string or None, representing the device name.
codesearchnet
def _add_op_node(self, op, qargs, cargs, condition=None):
    node_properties = {'type': 'op', 'op': op, 'name': op.name,
                       'qargs': qargs, 'cargs': cargs, 'condition': condition}
    self._max_node_id += 1
    new_node = DAGNode(data_dict=node_properties, nid=self._max_node_id)
    self._multi_graph.add_node(new_node)
    self._id_to_node[self._max_node_id] = new_node

Add a new operation node to the graph and assign properties.

Args:
    op (Instruction): the operation associated with the DAG node
    qargs (list): list of quantum wires to attach to.
    cargs (list): list of classical wires to attach to.
    condition (tuple or None): optional condition (ClassicalRegister, int)
codesearchnet
def set_load_handler(
    load_handler: Optional[Callable[..., Any]]
) -> Optional[Callable[..., Any]]:
    if load_handler and not callable(load_handler):
        raise ValueError('`load_handler` must be callable.')
    global _LOAD_HANDLER
    old_handler = _LOAD_HANDLER
    _LOAD_HANDLER = load_handler
    return old_handler

Sets global load handler.

Args:
    load_handler: A callable object that takes arbitrary arguments and
        returns a value. `symbolic.load` method will pass through all
        arguments to this handler and return its return value.

Returns:
    Previous global load handler.
github-repos
def request(self, batch: Sequence[Any], model: aiplatform.Endpoint,
            inference_args: Optional[dict[str, Any]] = None) -> Iterable[PredictionResult]:
    prediction = model.predict(instances=list(batch), parameters=inference_args)
    return utils._convert_to_result(batch, prediction.predictions,
                                    prediction.deployed_model_id)

Sends a prediction request to a Vertex AI endpoint containing a batch of
inputs and matches that input with the prediction response from the
endpoint as an iterable of PredictionResults.

Args:
    batch: a sequence of any values to be passed to the Vertex AI
        endpoint. Should be encoded as the model expects.
    model: an aiplatform.Endpoint object configured to access the desired
        model.
    inference_args: any additional arguments to send as part of the
        prediction request.

Returns:
    An iterable of Predictions.
github-repos
def commutator(A, B=None):
    if B:
        return (A * B) - (B * A)
    return SPre(A) - SPost(A)

Commutator of `A` and `B`.

If ``B != None``, return the commutator :math:`[A,B]`, otherwise return
the super-operator :math:`[A,\cdot]`. The super-operator
:math:`[A,\cdot]` maps any other operator ``B`` to the commutator
:math:`[A, B] = A B - B A`.

Args:
    A: The first operator to form the commutator of.
    B: The second operator to form the commutator of, or None.

Returns:
    SuperOperator: The linear superoperator :math:`[A,\cdot]`
codesearchnet
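The identity [A, B] = AB - BA is easy to sanity-check numerically, with plain matrices standing in for the operators (a sketch, not this library's own API):

import numpy as np

A = np.array([[0, 1], [0, 0]])  # raising operator
B = np.array([[0, 0], [1, 0]])  # lowering operator
comm = A @ B - B @ A            # [A, B] = AB - BA
print(comm)                     # [[1, 0], [0, -1]], the Pauli-Z matrix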
def _locate_point(nodes, degree, x_val, y_val):
    candidates = [(1.0, 1.0, 1.0, nodes)]
    for _ in six.moves.xrange(MAX_LOCATE_SUBDIVISIONS + 1):
        next_candidates = []
        for candidate in candidates:
            update_locate_candidates(candidate, next_candidates, x_val, y_val, degree)
        candidates = next_candidates
    if not candidates:
        return None
    s_approx, t_approx = mean_centroid(candidates)
    s, t = newton_refine(nodes, degree, x_val, y_val, s_approx, t_approx)
    actual = _surface_helpers.evaluate_barycentric(nodes, degree, 1.0 - s - t, s, t)
    expected = np.asfortranarray([x_val, y_val])
    if not _helpers.vector_close(actual.ravel(order='F'), expected, eps=LOCATE_EPS):
        s, t = newton_refine(nodes, degree, x_val, y_val, s, t)
    return s, t

Locate a point on a surface.

.. note::

    There is also a Fortran implementation of this function, which
    will be used if it can be built.

Does so by recursively subdividing the surface and rejecting
sub-surfaces with bounding boxes that don't contain the point.
After the sub-surfaces are sufficiently small, uses Newton's method
to narrow in on the pre-image of the point.

Args:
    nodes (numpy.ndarray): Control points for Bézier surface
        (assumed to be two-dimensional).
    degree (int): The degree of the surface.
    x_val (float): The :math:`x`-coordinate of a point on the surface.
    y_val (float): The :math:`y`-coordinate of a point on the surface.

Returns:
    Optional[Tuple[float, float]]: The :math:`s` and :math:`t` values
    corresponding to ``x_val`` and ``y_val`` or :data:`None` if the
    point is not on the ``surface``.
codesearchnet
def get_structure_by_formula(self, formula, **kwargs):
    structures = []
    sql = 'select file, sg from data where formula="- %s -"' % \
        Composition(formula).hill_formula
    text = self.query(sql).split("\n")
    text.pop(0)
    for l in text:
        if l.strip():
            cod_id, sg = l.split("\t")
            r = requests.get("http://www.crystallography.net/cod/%s.cif"
                             % cod_id.strip())
            try:
                s = Structure.from_str(r.text, fmt="cif", **kwargs)
                structures.append({"structure": s, "cod_id": int(cod_id), "sg": sg})
            except Exception:
                import warnings
                warnings.warn("\nStructure.from_str failed while parsing CIF file:\n%s"
                              % r.text)
                raise
    return structures

Queries the COD for structures by formula. Requires mysql executable to
be in the path.

Args:
    formula (str): Chemical formula.
    kwargs: All kwargs supported by
        :func:`pymatgen.core.structure.Structure.from_str`.

Returns:
    A list of dict of the format
    [{"structure": Structure, "cod_id": cod_id, "sg": "P n m a"}]
juraj-google-style
def Substitute(self, pattern):
    if isinstance(pattern, bytes):
        substs = [re.escape(subst.encode('ascii')) for subst in self._substs]
        regex = re.compile(b'|'.join(substs))

        def Replacement(match):
            key = match.group(0).decode('ascii')
            return self._substs[key].encode('utf-8')
    elif isinstance(pattern, Text):
        substs = [re.escape(subst) for subst in self._substs]
        regex = re.compile('|'.join(substs))

        def Replacement(match):
            key = match.group(0)
            return self._substs[key]
    else:
        raise TypeError("Unexpected pattern type '{}'".format(type(pattern)))

    if not substs:
        return pattern
    else:
        return regex.sub(Replacement, pattern)

Formats given pattern with this substitution environment.

A pattern can contain placeholders for variables (`%%foo%%`) and scopes
(`%%bar.baz%%`) that are replaced with concrete values in this
substitution environment (specified in the constructor).

Args:
    pattern: A pattern with placeholders to substitute.

Returns:
    A pattern with placeholders substituted with concrete values.
codesearchnet
def assert_almost_equal(first, second, places=None, msg=None, delta=None,
                        extras=None):
    _call_unittest_assertion(_pyunit_proxy.assertAlmostEqual, first, second,
                             places=places, msg=msg, delta=delta, extras=extras)

Asserts that first is almost equal to second.

Fails if the two objects are unequal as determined by their difference
rounded to the given number of decimal places (default 7) and comparing
to zero, or by comparing that the difference between the two objects is
more than the given delta. If the two objects compare equal then they
automatically compare almost equal.

Args:
    first: The first value to compare.
    second: The second value to compare.
    places: How many decimal places to take into account for comparison.
        Note that decimal places (from zero) are usually not the same as
        significant digits (measured from the most significant digit).
    msg: A string that adds additional info about the failure.
    delta: Delta to use for comparison instead of decimal places.
    extras: An optional field for extra information to be included in
        test result.
github-repos
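Usage mirrors the stdlib unittest semantics; a brief illustration with hypothetical values:

assert_almost_equal(1.0000001, 1.0)        # passes: equal to 7 decimal places
assert_almost_equal(0.5, 0.45, delta=0.1)  # passes: |0.5 - 0.45| <= 0.1
assert_almost_equal(0.5, 0.4, places=2, msg='drifted')  # fails, reports msg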
def document(obj, doc):
    try:
        obj.__doc__ = doc
    except AttributeError:
        _EXTRA_DOCS[id(obj)] = doc

Adds a docstring to typealias by overriding the `__doc__` attribute.

Note: Overriding `__doc__` is only possible after python 3.7.

Args:
    obj: Typealias object that needs to be documented.
    doc: Docstring of the typealias. It should follow the standard
        pystyle docstring rules.
github-repos
def process_buffer(buffer, n_channels):
    samples = np.concatenate(buffer)
    if n_channels > 1:
        samples = samples.reshape((-1, n_channels)).T
        samples = librosa.to_mono(samples)
    return samples

Merge the read blocks and resample if necessary.

Args:
    buffer (list): A list of blocks of samples.
    n_channels (int): The number of channels of the input data.

Returns:
    np.array: The samples
codesearchnet
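The reshape-and-transpose step is the core trick: interleaved samples become one row per channel before downmixing. A standalone sketch, with a plain mean standing in for `librosa.to_mono` (which averages across channels):

import numpy as np

interleaved = np.array([0.1, 0.9, 0.2, 0.8], dtype=np.float32)  # L, R, L, R
channels = interleaved.reshape((-1, 2)).T  # shape (2, 2): one row per channel
mono = channels.mean(axis=0)               # stand-in for librosa.to_mono
print(mono)                                # [0.5 0.5]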
def clip_action(action, space):
    if isinstance(space, gym.spaces.Box):
        return np.clip(action, space.low, space.high)
    elif isinstance(space, gym.spaces.Tuple):
        if type(action) not in (tuple, list):
            raise ValueError('Expected tuple space for actions {}: {}'.format(action, space))
        out = []
        for a, s in zip(action, space.spaces):
            out.append(clip_action(a, s))
        return out
    else:
        return action

Called to clip actions to the specified range of this policy.

Arguments:
    action: Single action.
    space: Action space the actions should be present in.

Returns:
    Clipped batch of actions.
codesearchnet
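A hedged sketch of the Box case; the space bounds are hypothetical:

import numpy as np
import gym

space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
print(clip_action(np.array([1.7, -0.3]), space))  # [ 1.  -0.3], clipped to [-1, 1]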
def market(self, accountID, **kwargs):
    return self.create(accountID, order=MarketOrderRequest(**kwargs))

Shortcut to create a Market Order in an Account.

Args:
    accountID: The ID of the Account
    kwargs: The arguments to create a MarketOrderRequest

Returns:
    v20.response.Response containing the results from submitting
    the request
codesearchnet
def _parse_schema(schema, method):
    if method and schema.get('readOnly', False):
        return _READONLY_PROPERTY
    if 'allOf' in schema:
        schema_ = copy.deepcopy(schema['allOf'][0])
        for x in schema['allOf'][1:]:
            _dict_merge(schema_, x)
        return _parse_schema(schema_, method)
    if 'oneOf' in schema:
        return _parse_schema(schema['oneOf'][0], method)
    if 'enum' in schema:
        return schema['enum'][0]
    schema_type = schema.get('type', 'object')
    if schema_type == 'array':
        if 'oneOf' in schema['items']:
            return [_parse_schema(x, method) for x in schema['items']['oneOf']]
        return [_parse_schema(schema['items'], method)]
    if schema_type == 'object':
        if method and all(v.get('readOnly', False)
                          for v in schema['properties'].values()):
            return _READONLY_PROPERTY
        results = []
        for name, prop in schema.get('properties', {}).items():
            result = _parse_schema(prop, method)
            if result != _READONLY_PROPERTY:
                results.append((name, result))
        return collections.OrderedDict(results)
    if (schema_type, schema.get('format')) in _TYPE_MAPPING:
        return _TYPE_MAPPING[(schema_type, schema.get('format'))]
    return _TYPE_MAPPING[(schema_type, None)]

Convert a Schema Object to a Python object.

Args:
    schema: An ``OrderedDict`` representing the schema object.
juraj-google-style
def put(self, rid, data, raise_on_error=True):
    response_data = None
    headers = {'Content-Type': 'application/json', 'DB-Method': 'PUT'}
    url = '/v2/exchange/db/{}/{}/{}'.format(self.domain, self.data_type, rid)
    r = self.tcex.session.post(url, json=data, headers=headers)
    self.tcex.log.debug('datastore put status code: {}'.format(r.status_code))
    if r.ok and 'application/json' in r.headers.get('content-type', ''):
        response_data = r.json()
    else:
        error = r.text or r.reason
        self.tcex.handle_error(805, ['put', r.status_code, error], raise_on_error)
    return response_data

Update the data for the provided Id.

Args:
    rid (str): The record identifier.
    data (dict): A search query
    raise_on_error (bool): If True and not r.ok this method will raise
        a RunTimeError.

Returns:
    object : Python request response.
codesearchnet
def __init__(self, uri='mongodb://localhost', database='workbench',
             worker_cap=0, samples_cap=0):
    self.sample_collection = 'samples'
    self.worker_cap = worker_cap
    self.samples_cap = samples_cap
    self.database_name = database
    self.uri = 'mongodb://%s/%s' % (uri.split('://', 1)[-1], self.database_name)
    self.mongo = pymongo.MongoClient(self.uri, use_greenlets=True)
    self.database = self.mongo.get_default_database()
    self.gridfs_handle = gridfs.GridFS(self.database)
    self.last_ops_run = time.time()
    self.periodic_ops()
    print('\t- WorkBench DataStore connected: %s:%s' % (self.uri, self.database_name))

Initialization for the Workbench data store class.

Args:
    uri: Connection String for DataStore backend.
    database: Name of database.
    worker_cap: MBs in the capped collection.
    samples_cap: MBs of sample to be stored.
juraj-google-style
def __init__(self, output_writer, tool_name):
    super(StatusView, self).__init__()
    self._artifact_filters = None
    self._filter_file = None
    self._have_ansi_support = not win32console
    self._mode = self.MODE_WINDOW
    self._output_writer = output_writer
    self._source_path = None
    self._source_type = None
    self._stdout_output_writer = isinstance(
        output_writer, tools.StdoutOutputWriter)
    self._storage_file_path = None
    self._tool_name = tool_name
    if win32console:
        kernel32 = ctypes.windll.kernel32
        stdout_handle = kernel32.GetStdHandle(self._WINAPI_STD_OUTPUT_HANDLE)
        result = kernel32.SetConsoleMode(
            stdout_handle, self._WINAPI_ANSI_CONSOLE_MODE)
        self._have_ansi_support = result != 0

Initializes a status view.

Args:
    output_writer (OutputWriter): output writer.
    tool_name (str): name of the tool.
juraj-google-style
def _tzinfome(tzinfo):
    if not isinstance(tzinfo, datetime.tzinfo):
        try:
            tzinfo = pytz.timezone(tzinfo)
            assert tzinfo.zone in pytz.all_timezones
        except AttributeError:
            raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo)
    return tzinfo

Gets a tzinfo object from a string.

Args:
    tzinfo: A string (or string like) object, or a datetime.tzinfo object.

Returns:
    An datetime.tzinfo object.

Raises:
    UnknownTimeZoneError: If the timezone given can't be decoded.
juraj-google-style
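A quick sketch of both accepted input kinds:

import pytz

print(_tzinfome('US/Eastern'))  # string is resolved to a pytz timezone
print(_tzinfome(pytz.utc))      # tzinfo objects pass through unchanged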
def add(self, name, value, bitmask=DEFMASK):
    _add_enum_member(self._eid, name, value, bitmask)

Add an enum member.

Args:
    name: Name of the member
    value: value of the member
    bitmask: bitmask. Only use if enum is a bitfield.
codesearchnet
def _test_tensorflow_vs_numpy(self, x_np):
    y_np = self._total_variation_np(x_np)
    self._test(x_np, y_np)

Test the TensorFlow implementation against a numpy implementation.

Args:
    x_np: Numpy array with 3 or 4 dimensions.
github-repos
def movies_in_theaters(self, **kwargs):
    path = self._get_path('movies_in_theaters')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response

Gets the movies currently in theaters from the API.

Args:
    page_limit (optional): number of movies to show per page, default=16
    page (optional): results page number, default=1
    country (optional): localized data for selected country, default="us"

Returns:
    A dict representation of the JSON returned from the API.
codesearchnet
def compiler(name):
    pinfo = __get_paths()
    _compiler = local[name]
    _compiler = _compiler.setenv(PATH=pinfo['path'],
                                 LD_LIBRARY_PATH=pinfo['ld_library_path'])
    return _compiler

Get a usable clang++ plumbum command.

This searches for a usable clang++ in the llvm binary path.

Returns:
    plumbum Command that executes clang++
codesearchnet
def AddMonths(start_date, months):
    current_date = start_date
    i = 0
    while i < months:
        month_days = calendar.monthrange(current_date.year, current_date.month)[1]
        current_date += timedelta(days=month_days)
        i += 1
    return current_date

A simple convenience utility for adding months to a given start date.

This increments the months by adding the number of days in the current
month to the current month, for each month.

Args:
    start_date: date The date months are being added to.
    months: int The number of months to add.

Returns:
    A date equal to the start date incremented by the given number of
    months.
juraj-google-style
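Because each step adds the number of days in the current month, the result can land on a different day-of-month than the start; a quick illustration:

from datetime import date

print(AddMonths(date(2023, 1, 31), 1))  # 2023-03-03: January contributes 31 days
print(AddMonths(date(2023, 3, 1), 1))   # 2023-04-01: March contributes 31 days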
def update_non_slot(self, colocate_with, fn, args=(), kwargs=None, group=True):
    _require_cross_replica_or_default_context_extended(self)
    if kwargs is None:
        kwargs = {}
    fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx(),
                              convert_by_default=False)
    with self._container_strategy().scope():
        return self._update_non_slot(colocate_with, fn, args, kwargs, group)

Runs `fn(*args, **kwargs)` on `colocate_with` devices.

Used to update non-slot variables.

DEPRECATED: TF 1.x ONLY.

Args:
    colocate_with: Devices returned by `non_slot_devices()`.
    fn: Function to execute.
    args: Tuple or list. Positional arguments to pass to `fn()`.
    kwargs: Dict with keyword arguments to pass to `fn()`.
    group: Boolean. Defaults to True. If False, the return value will be
        unwrapped.

Returns:
    Return value of `fn`, possibly merged across devices.
github-repos
def generate_zip_data(M, L, n_cells, cluster_probs=None):
    genes, clusters = M.shape
    output = np.zeros((genes, n_cells))
    if cluster_probs is None:
        cluster_probs = np.ones(clusters) / clusters
    zip_p = np.random.random((genes, n_cells))
    labels = []
    for i in range(n_cells):
        c = np.random.choice(range(clusters), p=cluster_probs)
        labels.append(c)
        output[:, i] = np.where(zip_p[:, i] < L[:, c], 0,
                                np.random.poisson(M[:, c]))
    return output, np.array(labels)

Generates zero-inflated poisson-distributed data, given a set of means
and zero probs for each cluster.

Args:
    M (array): genes x clusters matrix
    L (array): genes x clusters matrix - zero-inflation parameters
    n_cells (int): number of output cells
    cluster_probs (array): prior probability for each cluster.
        Default: uniform.

Returns:
    output - array with shape genes x n_cells
    labels - array of cluster labels
codesearchnet
def checkout_commit(repo: Repo, commit_id: str):
    current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
    try:
        repo.git.checkout(commit_id)
        yield
    finally:
        repo.git.checkout(current_head)

Context manager that checks out a given commit when entered, but gets back
to the reference it was at on exit.

Args:
    repo (`git.Repo`): A git repository (for instance the Transformers repo).
    commit_id (`str`): The commit reference to checkout inside the context
        manager.
github-repos
def add_string_pairs_from_text_field_element(xib_file, results, text_field,
                                             special_ui_components_prefix):
    text_field_entry_comment = extract_element_internationalized_comment(text_field)
    if text_field_entry_comment is None:
        return
    if text_field.hasAttribute('usesAttributedText') and \
            text_field.attributes['usesAttributedText'].value == 'YES':
        add_string_pairs_from_attributed_ui_element(results, text_field,
                                                    text_field_entry_comment)
    else:
        try:
            text_field_entry_key = text_field.attributes['text'].value
            results.append((text_field_entry_key,
                            text_field_entry_comment + ' default text value'))
        except KeyError:
            pass
        try:
            text_field_entry_key = text_field.attributes['placeholder'].value
            results.append((text_field_entry_key,
                            text_field_entry_comment + ' placeholder text value'))
        except KeyError:
            pass
    warn_if_element_not_of_class(text_field, 'TextField', special_ui_components_prefix)

Adds string pairs from a textfield element.

Args:
    xib_file (str): Path to the xib file.
    results (list): The list to add the results to.
    text_field (element): The textfield element from the xib, to extract
        the string pairs from.
    special_ui_components_prefix (str): If not None, extraction will not
        warn about internationalized UI components with this class prefix.
juraj-google-style
def has_title(self, title, **kwargs):
    try:
        self.assert_title(title, **kwargs)
        return True
    except ExpectationNotMet:
        return False

Checks if the page has the given title.

Args:
    title (str | RegexObject): The string or regex that the title should
        match.
    **kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.

Returns:
    bool: Whether it matches.
codesearchnet
def create_folder_structure(project_name, batch_name):
    out_data_dir = prms.Paths['outdatadir']
    project_dir = os.path.join(out_data_dir, project_name)
    batch_dir = os.path.join(project_dir, batch_name)
    raw_dir = os.path.join(batch_dir, 'raw_data')
    if not os.path.isdir(project_dir):
        os.mkdir(project_dir)
    if not os.path.isdir(batch_dir):
        os.mkdir(batch_dir)
    if not os.path.isdir(raw_dir):
        os.mkdir(raw_dir)
    info_file = 'cellpy_batch_%s.json' % batch_name
    info_file = os.path.join(project_dir, info_file)
    return info_file, (project_dir, batch_dir, raw_dir)

This function creates a folder structure for the batch project.

The folder structure consists of a main working folder ``project_name``
located in the ``outdatadir`` (as defined in the cellpy configuration
file) with a sub-folder named ``batch_name``. It also creates a folder
inside the ``batch_name`` folder for storing the raw data. If the folders
do not exist, they will be made. The function also returns the name of
the info-df.

Args:
    project_name: name of the project
    batch_name: name of the batch

Returns:
    (info_file, (project_dir, batch_dir, raw_dir))
codesearchnet
def dump(self, format='ttl'):
    return self.rdf.graph.serialize(format=format).decode('utf-8')

Convenience method to return RDF data for resource, optionally selecting
serialization format. Inspired by .dump from Samvera.

Args:
    format (str): expecting serialization formats accepted by
        rdflib.serialization(format=)
juraj-google-style
def _handle_request(self, request: dict) -> dict:
    request_body: bytes = request['request_body']
    signature_chain_url: str = request['signature_chain_url']
    signature: str = request['signature']
    alexa_request: dict = request['alexa_request']
    if not self._verify_request(signature_chain_url, signature, request_body):
        return {'error': 'failed certificate/signature check'}
    timestamp_str = alexa_request['request']['timestamp']
    timestamp_datetime = datetime.strptime(timestamp_str, '%Y-%m-%dT%H:%M:%SZ')
    now = datetime.utcnow()
    delta = (now - timestamp_datetime if now >= timestamp_datetime
             else timestamp_datetime - now)
    if abs(delta.seconds) > REQUEST_TIMESTAMP_TOLERANCE_SECS:
        log.error(f"Failed timestamp check for request: "
                  f"{request_body.decode('utf-8', 'replace')}")
        return {'error': 'failed request timestamp check'}
    conversation_key = alexa_request['session']['user']['userId']
    if conversation_key not in self.conversations.keys():
        if self.config['multi_instance']:
            conv_agent = self._init_agent()
            log.info('New conversation instance level agent initiated')
        else:
            conv_agent = self.agent
        self.conversations[conversation_key] = Conversation(
            config=self.config,
            agent=conv_agent,
            conversation_key=conversation_key,
            self_destruct_callback=lambda: self._del_conversation(conversation_key))
        log.info(f'Created new conversation, key: {conversation_key}')
    conversation = self.conversations[conversation_key]
    response = conversation.handle_request(alexa_request)
    return response

Processes Alexa requests from skill server and returns responses to Alexa.

Args:
    request: Dict with Alexa request payload and metadata.

Returns:
    result: Alexa formatted or error response.
codesearchnet
def forward(self, hidden_states: torch.Tensor) -> Tuple:
    router_probs, router_logits = self._compute_router_probabilities(hidden_states)
    expert_index = torch.argmax(router_probs, dim=-1)
    expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)
    token_priority = torch.cumsum(expert_index, dim=-2)
    expert_capacity_mask = token_priority <= self.expert_capacity
    expert_index = expert_index * expert_capacity_mask
    router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)
    return (expert_index, router_probs, router_logits)

Generic forward function for every Router class.

Each Router expects to have the same input hidden states (`hidden_states`)
corresponding to the hidden states for each token, and the
`expert_capacity` corresponding to the number of tokens the Router will
send to each expert; some Routers can send up to a few tokens to each
expert.

Each Router works as the following: it expects the hidden states for each
token, and gets the `router_probs` and `router_logits` from the
`router_weights`. This will assign, for each token, the raw probability
of being assigned to an expert. Then each Router class will have to
define its own `_compute_routing_instructions`.

Args:
    hidden_states (`torch.Tensor`):
        [num_groups, tokens_per_group, hidden_dim] inputs to send to
        experts.

Returns:
    Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]: Tuple
    containing the expert index, the router probs and the router logits.
    The router probabilities and logits are required to compute the loss.
github-repos
def update_state(self, values, sample_weight=None):
    [values], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [values], sample_weight)
    try:
        values = math_ops.cast(values, self._dtype)
    except (ValueError, TypeError):
        msg = ('The output of a metric function can only be a single Tensor. '
               'Got: %s' % (values,))
        if isinstance(values, dict):
            msg += ('. To return a dict of values, implement a custom Metric '
                    'subclass.')
        raise RuntimeError(msg)
    if sample_weight is not None:
        sample_weight = math_ops.cast(sample_weight, self._dtype)
        values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(
            values, sample_weight=sample_weight)
        try:
            sample_weight = weights_broadcast_ops.broadcast_weights(
                sample_weight, values)
        except ValueError:
            ndim = backend.ndim(values)
            weight_ndim = backend.ndim(sample_weight)
            if self.reduction == metrics_utils.Reduction.SUM:
                values = math_ops.reduce_sum(values,
                                             axis=list(range(weight_ndim, ndim)))
            else:
                values = math_ops.reduce_mean(values,
                                              axis=list(range(weight_ndim, ndim)))
        values = math_ops.multiply(values, sample_weight)
    value_sum = math_ops.reduce_sum(values)
    with ops.control_dependencies([value_sum]):
        update_total_op = self.total.assign_add(value_sum)
    if self.reduction == metrics_utils.Reduction.SUM:
        return update_total_op
    if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:
        num_values = math_ops.cast(array_ops.size(values), self._dtype)
    elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:
        if sample_weight is None:
            num_values = math_ops.cast(array_ops.size(values), self._dtype)
        else:
            num_values = math_ops.reduce_sum(sample_weight)
    else:
        raise NotImplementedError('reduction [%s] not implemented' % self.reduction)
    with ops.control_dependencies([update_total_op]):
        return self.count.assign_add(num_values)

Accumulates statistics for computing the metric.

Args:
    values: Per-example value.
    sample_weight: Optional weighting of each example. Defaults to 1.

Returns:
    Update op.
github-repos
def list_types_poi(self, **kwargs):
    url_args = {'language': util.language_code(kwargs.get('lang'))}
    result = self.make_request('list_poi_types', url_args)
    if not util.check_result(result):
        return False, result.get('message', 'UNKNOWN ERROR')
    values = util.response_list(result, 'Data')
    return True, [emtype.ParkingPoiType(**a) for a in values]

Obtain a list of families, types and categories of POI.

Args:
    lang (str): Language code (*es* or *en*).

Returns:
    Status boolean and parsed response (list[ParkingPoiType]), or
    message string in case of error.
codesearchnet
def depth_soil_conductivity(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `depth_soil_conductivity`'.format(value))
    self._depth_soil_conductivity = value

Corresponds to IDD Field `depth_soil_conductivity`.

Args:
    value (float): value for IDD Field `depth_soil_conductivity`
        Unit: W/m-K
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
juraj-google-style
def set_type(self, weather_type):
    weather_type = weather_type.lower()
    exists = self.has_type(weather_type)
    if exists:
        self.add_string_parameters(weather_type)

Set the weather type.

Args:
    weather_type (str): The weather type.
codesearchnet
def on(self, *qubits: raw_types.Qid) -> 'SingleQubitPauliStringGateOperation':
    if len(qubits) != 1:
        raise ValueError(
            'Expected a single qubit, got <{!r}>.'.format(qubits))
    from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation
    return SingleQubitPauliStringGateOperation(self, qubits[0])

Returns an application of this gate to the given qubits.

Args:
    *qubits: The collection of qubits to potentially apply the gate to.
juraj-google-style
def bfloat16_activations_var_getter(getter, *args, **kwargs):
    requested_dtype = kwargs["dtype"]
    if requested_dtype == tf.bfloat16:
        kwargs["dtype"] = tf.float32
    var = getter(*args, **kwargs)
    if var.dtype.base_dtype != requested_dtype:
        var = tf.cast(var, requested_dtype)
    return var

A custom getter function for float32 parameters and bfloat16 activations.

Args:
    getter: custom getter
    *args: arguments
    **kwargs: keyword arguments

Returns:
    variables with the correct dtype.

Raises:
    KeyError: if "dtype" is not provided as a kwarg.
juraj-google-style
def warn_once(self, msg, msg_name=None):
    assert isinstance(msg, str)
    msg_name = msg_name if msg_name else msg
    if msg_name not in warnings_given:
        warnings.warn(msg)
        warnings_given.add(msg_name)

Prints a warning statement just once.

Args:
    msg: The warning message
    msg_name: [optional] The name of the warning. If None, the msg_name
        will be the msg itself.
juraj-google-style
def create_graph_from_data(self, data):
    self.arguments['{SCORE}'] = self.score
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    self.arguments['{BETA}'] = str(self.beta)
    self.arguments['{OPTIM}'] = str(self.optim).upper()
    self.arguments['{ALPHA}'] = str(self.alpha)
    results = self._run_bnlearn(data, verbose=self.verbose)
    graph = nx.DiGraph()
    graph.add_edges_from(results)
    return graph

Run the algorithm on data.

Args:
    data (pandas.DataFrame): DataFrame containing the data

Returns:
    networkx.DiGraph: Solution given by the algorithm.
juraj-google-style
def business_days_between(self, from_dates, to_dates):
    pass

Calculates the number of business days between pairs of dates.

For each pair, the initial date is included in the difference, and the
final date is excluded. If the final date is the same or earlier than
the initial date, zero is returned.

Args:
    from_dates: DateTensor of initial dates.
    to_dates: DateTensor of final dates, should be broadcastable to
        `from_dates`.

Returns:
    An int32 Tensor with the number of business days between the
    corresponding pairs of dates.
github-repos
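The stated convention (initial date counted, final date excluded, non-positive spans clamped to zero) matches NumPy's `busday_count` for the default Monday-to-Friday calendar; a sketch of the same semantics outside TensorFlow:

import numpy as np

# Monday 2023-01-02 up to Friday 2023-01-06: Mon through Thu count, the final date is excluded.
print(np.busday_count('2023-01-02', '2023-01-06'))           # 4
print(max(np.busday_count('2023-01-06', '2023-01-02'), 0))   # numpy goes negative; clamp to 0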
def _or_join(self, close_group=False):
    if not self.initialized:
        raise ValueError('You must add a search term before adding an operator.')
    else:
        self._operator('OR', close_group=close_group)
    return self

Combine terms with OR.

There must be a term added before using this method.

Arguments:
    close_group (bool): If ``True``, will end the current group and
        start a new one. If ``False``, will continue current group.

Example:
    If the current query is "(term1"

    .or(close_group=True)  => "(term1) OR("
    .or(close_group=False) => "(term1 OR "

Returns:
    SearchHelper: Self
codesearchnet
def get_stim(self, type_, return_all=False):
    if isinstance(type_, string_types):
        type_ = _get_stim_class(type_)
    matches = []
    for s in self.elements:
        if isinstance(s, type_):
            if not return_all:
                return s
            matches.append(s)
    if not matches:
        return [] if return_all else None
    return matches

Returns component elements of the specified type.

Args:
    type_ (str or Stim class): the desired Stim subclass to return.
    return_all (bool): when True, returns all elements that matched the
        specified type as a list. When False (default), returns only the
        first matching Stim.

Returns:
    If return_all is True, a list of matching elements (or an empty list
    if no elements match). If return_all is False, returns the first
    matching Stim, or None if no elements match.
codesearchnet
def set_last_checkpoints(self, last_checkpoints):
    assert isinstance(last_checkpoints, list)
    self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]

DEPRECATED: Use set_last_checkpoints_with_time.

Sets the list of old checkpoint filenames.

Args:
    last_checkpoints: A list of checkpoint filenames.

Raises:
    AssertionError: If last_checkpoints is not a list.
github-repos
def delete(self, membershipId):
    check_type(membershipId, basestring, may_be_none=False)
    self._session.delete(API_ENDPOINT + '/' + membershipId)

Delete a team membership, by ID.

Args:
    membershipId(basestring): The team membership ID.

Raises:
    TypeError: If the parameter types are incorrect.
    ApiError: If the Webex Teams cloud returns an error.
juraj-google-style
def __init__(self, output_filename="OSZICAR", nionic_steps=10):
    self.output_filename = output_filename
    self.nionic_steps = nionic_steps

Initializes the handler with the output file to check.

Args:
    output_filename (str): This is the OSZICAR file. Change this only if
        it is different from the default (unlikely).
    nionic_steps (int): The threshold number of ionic steps that needs to
        hit the maximum number of electronic steps for the run to be
        considered non-converging.
juraj-google-style
def add_policy_statements(self, statements):
    if isinstance(statements, Statement):
        statements = [statements]
    self._policy_statements.extend(statements)

Adds statements to the policy.

Args:
    statements (:class:`awacs.aws.Statement` or list): Either a single
        Statement, or a list of statements.
juraj-google-style
def dict_head(d, N=5):
    return {k: d[k] for k in list(d.keys())[:N]}

Return the head of a dictionary. It will be random!

Default is to return the first 5 key/value pairs in a dictionary.

Args:
    d: Dictionary to get head.
    N: Number of elements to display.

Returns:
    dict: the first N items of the dictionary.
codesearchnet
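A quick usage sketch (on Python 3.7+ dicts keep insertion order, so the docstring's "random" caveat applies mainly to older interpreters):

d = {k: k * k for k in range(10)}
print(dict_head(d))       # {0: 0, 1: 1, 2: 4, 3: 9, 4: 16}
print(dict_head(d, N=2))  # {0: 0, 1: 1}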
def napalm_cli(task: Task, commands: List[str]) -> Result:
    device = task.host.get_connection('napalm', task.nornir.config)
    result = device.cli(commands)
    return Result(host=task.host, result=result)

Run commands on remote devices using napalm.

Arguments:
    commands: commands to execute

Returns:
    Result object with the following attributes set:
        * result (``dict``): result of the commands execution
codesearchnet
def make_row(row, fields):
    if not hasattr(row, 'get'):
        row = {f.name: col for f, col in zip(fields, row)}
    row_fields = []
    for f in fields:
        val = row.get(f.name, None)
        if val is None:
            val = str(f.default_value())
        row_fields.append(val)
    return encode_row(row_fields)

Encode a mapping of column name to values into a [incr tsdb()] profile
line. The *fields* parameter determines what columns are used, and
default values are provided if a column is missing from the mapping.

Args:
    row: a mapping of column names to values
    fields: an iterable of :class:`Field` objects

Returns:
    A [incr tsdb()]-encoded string
juraj-google-style
def setall(self, key, values):
    self.delall(key)
    for tag in values:
        self[tag.HashKey] = tag

Delete frames of the given type and add frames in 'values'.

Args:
    key (text): key for frames to delete
    values (list[Frame]): frames to add
codesearchnet
def html(self, data=None, template=None):
    if data is None:
        data = {}
    if template:
        return render(self.request, template, data)
    return HttpResponse(data)

Send html document to user.

Args:
    - data: Dict to render template, or string with rendered HTML.
    - template: Name of template to render HTML document with passed data.
codesearchnet
def _get_language_modeling_inputs(filename, delimiter='\n', repeat=1,
                                  append_space_to_final_punctionation=True):
    with tf.gfile.Open(filename) as f:
        text = f.read()
    inputs = text.split(delimiter)
    if not inputs[-1]:
        inputs.pop()
    inputs *= repeat
    if append_space_to_final_punctionation:
        inputs = [s + ' ' if s and s[-1] in string.punctuation else s
                  for s in inputs]
    return inputs
Read a file of partial texts to continue. The purpose of append_space_to_final_punctionation is that SubwordTokenizer groups punctuation and the ensuing space in the same token. Adding a space causes the token to be completed. Args: filename: a string delimiter: a string repeat: an integer - we repeat the entire file that many times. append_space_to_final_punctionation: a boolean Returns: a list of strings
codesearchnet
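The punctuation-space rule in isolation, runnable on its own:

import string

parts = ["Hello world.", "no trailing punctuation"]
[(s + " ") if s and s[-1] in string.punctuation else s for s in parts]
# ['Hello world. ', 'no trailing punctuation']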
def check_for_dep_in_outputs(dep, verbose, G):
    if verbose:
        print("checking dep {}".format(dep))
    ret_list = []
    for node in G.nodes(data=True):
        if "output" not in node[1]:
            continue
        for out in node[1]['output']:
            if fnmatch.fnmatch(out, dep):
                ret_list.append(node[0])
                break
    return ret_list
Function to help construct_graph() identify dependencies Args: A dependency A flag indicating verbosity A (populated) NetworkX DiGraph Returns: A list of targets that build the given dependency
juraj-google-style
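A small runnable example; node names and output paths are made up.

import fnmatch
import networkx as nx

G = nx.DiGraph()
G.add_node("compile", output=["build/app.o"])
G.add_node("docs", output=["site/index.html"])
check_for_dep_in_outputs("build/*.o", False, G)  # ['compile']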
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    local_stream = utils.BytearrayStream()
    if self.object_type:
        self._object_type.write(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError('Payload is missing the object type field.')
    if self.unique_identifier:
        self._unique_identifier.write(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError('Payload is missing the unique identifier field.')
    if self.secret:
        self._secret.write(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError('Payload is missing the secret field.')
    self.length = local_stream.length()
    super(GetResponsePayload, self).write(output_stream,
                                          kmip_version=kmip_version)
    output_stream.write(local_stream.buffer)
Write the data encoding the Get response payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the object type, unique identifier, or secret attributes are missing from the payload struct.
codesearchnet
def convert_nested_bidirectional(weights):
    # Half the weights belong to the forward layer, half to the backward layer.
    num_weights_per_layer = len(weights) // 2
    forward_weights = preprocess_weights_for_loading(
        layer.forward_layer, weights[:num_weights_per_layer],
        original_keras_version, original_backend)
    backward_weights = preprocess_weights_for_loading(
        layer.backward_layer, weights[num_weights_per_layer:],
        original_keras_version, original_backend)
    return forward_weights + backward_weights
Converts layers nested in `Bidirectional` wrapper. This function uses `preprocess_weights_for_loading()` for converting layers. Args: weights: List of weights values (Numpy arrays). Returns: A list of weights values (Numpy arrays).
github-repos
def get_one_aminame(inst_img_id):
    try:
        aminame = EC2R.Image(inst_img_id).name
    except AttributeError:
        aminame = "Unknown"
    return aminame
Get Image_Name for the image_id specified. Args: inst_img_id (str): image_id to get name value from. Returns: aminame (str): name of the image.
juraj-google-style
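A hedged usage sketch; EC2R is assumed to be a boto3 EC2 service resource, and the region and AMI ID are placeholders.

import boto3

EC2R = boto3.resource("ec2", region_name="us-east-1")
get_one_aminame("ami-0123456789abcdef0")  # the AMI's name, or "Unknown"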
def _update_internal_nodes(self, index, delta):
    while index > 0:
        index = (index - 1) // 2  # step up to the parent node
        self._memory[index] += delta
Update internal priority sums when leaf priority has been changed. Args: index: leaf node index delta: change in priority
juraj-google-style
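A standalone demo of the propagation on a 7-node sum tree (internal nodes at indices 0-2, leaves at 3-6); the flat list layout mirrors what self._memory presumably holds.

memory = [10.0, 6.0, 4.0, 5.0, 1.0, 3.0, 1.0]  # root = 6 + 4, node 1 = 5 + 1, node 2 = 3 + 1

def update_internal_nodes(index, delta):
    while index > 0:
        index = (index - 1) // 2  # parent in a binary-heap layout
        memory[index] += delta

memory[4] += 2.0               # raise a leaf priority from 1 to 3
update_internal_nodes(4, 2.0)  # parents at indices 1 and 0 each gain 2
print(memory)                  # [12.0, 8.0, 4.0, 5.0, 3.0, 3.0, 1.0]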
def parse_args(self, argv):
    file_config_names = set(config.ITEMS) | set(self.pytype_single_args)
    args = self.create_initial_args(file_config_names)
    self._parser.parse_args(argv, args)
    self.clean_args(args, file_config_names)
    self.postprocess(args)
    return args
Parses argv. Commandline-only args are parsed normally. File-configurable args appear in the parsed args only if explicitly present in argv. Args: argv: sys.argv[1:] Returns: An argparse.Namespace.
github-repos
def __eq__(self, other):
    if type(self) is type(other) and \
            self.phase == other.phase:
        return True
    return False
Two FrameChanges are the same if they are of the same type and have the same phase. Args: other (FrameChange): other FrameChange Returns: bool: are self and other equal.
juraj-google-style
def flatten_excel(path='.', ext='xlsx', sheetname=0, skiprows=None, header=0,
                  date_parser=parse_date, verbosity=0, output_ext=None):
    date_parser = date_parser or (lambda x: x)
    dotted_ext, dotted_output_ext = None, None
    if ext is not None and output_ext is not None:
        dotted_ext = ('' if ext.startswith('.') else '.') + ext
        dotted_output_ext = ('' if output_ext.startswith('.') else '.') + output_ext
    table = {}
    for file_properties in util.find_files(path, ext=ext or '',
                                           verbosity=verbosity):
        file_path = file_properties['path']
        if output_ext and (dotted_output_ext + '.') in file_path:
            continue
        df = dataframe_from_excel(file_path, sheetname=sheetname, header=header,
                                  skiprows=skiprows)
        df = flatten_dataframe(df, verbosity=verbosity)
        table[file_path] = df  # collect results so the promised dict is returned
        if dotted_ext is not None and dotted_output_ext is not None:
            df.to_csv(file_path[:-len(dotted_ext)] + dotted_output_ext + dotted_ext)
    return table
Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict Arguments: path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from ext (str): file name extension (to filter files by) date_parser (function): if the MultiIndex can be interpreted as a datetime, this parser will be used Returns: dict of DataFrame: { file_path: flattened_data_frame }
codesearchnet
def set_slats_level(self, slatsLevel=0.0, shutterLevel=None):
    if shutterLevel is None:
        shutterLevel = self.shutterLevel
    data = {'channelIndex': 1, 'deviceId': self.id,
            'slatsLevel': slatsLevel, 'shutterLevel': shutterLevel}
    return self._restCall('device/control/setSlatsLevel', json.dumps(data))
Sets the slats and shutter level. Args: slatsLevel(float): the new level of the slats. 0.0 = open, 1.0 = closed. shutterLevel(float): the new level of the shutter. 0.0 = open, 1.0 = closed, None = use the current value. Returns: the result of the _restCall
codesearchnet
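A hedged usage sketch; `blind` stands for a device instance from the homematicip client that exposes this method.

blind.set_slats_level(slatsLevel=0.5)                    # half-close the slats
blind.set_slats_level(slatsLevel=1.0, shutterLevel=0.8)  # also move the shutter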
def _get_cached_response_from_django_cache(key):
    if TieredCache._should_force_django_cache_miss():
        return CachedResponse(is_found=False, key=key, value=None)
    cached_value = django_cache.get(key, _CACHE_MISS)
    is_found = cached_value is not _CACHE_MISS
    return CachedResponse(is_found, key, cached_value)
Retrieves a CachedResponse for the given key from the django cache. If the request was set to force cache misses, then this will always return a cache miss response. Args: key (string) Returns: A CachedResponse with is_found status and value.
codesearchnet
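How a caller might consume the CachedResponse; the key and the fallback computation are illustrative, not part of the library.

cached = _get_cached_response_from_django_cache("enrollment.count")
if cached.is_found:
    count = cached.value
else:
    count = compute_enrollment_count()  # hypothetical expensive call
    django_cache.set("enrollment.count", count)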
def RunOnce(self):
    if not self._FetchServerCertificate():
        self.timer.Wait()
        return HTTPObject(code=500)
    if self.http_manager.consecutive_connection_errors == 0:
        message_list = self.client_worker.Drain(
            max_size=config.CONFIG['Client.max_post_size'])
    else:
        message_list = rdf_flows.MessageList()
    for message in message_list.job:
        if message.require_fastpoll:
            self.timer.FastPoll()
            break
    payload = rdf_flows.ClientCommunication()
    if self.client_worker.MemoryExceeded():
        logging.info('Memory exceeded, will not retrieve jobs.')
        payload.queue_size = 1000000
    else:
        payload.queue_size = self.client_worker.InQueueSize()
    nonce = self.communicator.EncodeMessages(message_list, payload)
    payload_data = payload.SerializeToString()
    response = self.MakeRequest(payload_data)
    if response.code != 200 or response.messages is None:
        logging.info('%s: Could not connect to server at %s, status %s',
                     self.communicator.common_name,
                     self.http_manager.active_base_url, response.code)
        self.server_certificate = None
        messages = list(message_list.job)
        for message in messages:
            message.require_fastpoll = False
            message.ttl -= 1
            if message.ttl > 0:
                self.client_worker.QueueResponse(message)
            else:
                logging.info('Dropped message due to retransmissions.')
        return response
    if response.nonce != nonce:
        logging.info('Nonce not matched.')
        response.code = 500
        return response
    if response.source != self.communicator.server_name:
        logging.info('Received a message not from the server %s, expected %s.',
                     response.source, self.communicator.server_name)
        response.code = 500
        return response
    for message in response.messages:
        if message.require_fastpoll:
            self.timer.FastPoll()
            break
    self.client_worker.QueueMessages(response.messages)
    cn = self.communicator.common_name
    logging.info('%s: Sending %s(%s), Received %s messages in %s sec. '
                 'Sleeping for %s sec.', cn, len(message_list),
                 len(payload_data), len(response.messages), response.duration,
                 self.timer.sleep_time)
    return response
Makes a single request to the GRR server. Returns: An HTTPObject describing how the last POST went.
codesearchnet
def scan_devices(self, subnet, timeout=None):
    max_range = {16: 256, 24: 256, 25: 128, 27: 32, 28: 16, 29: 8, 30: 4, 31: 2}
    if '/' not in subnet:
        mask = int(24)
        network = subnet
    else:
        network, mask = subnet.split('/')
        mask = int(mask)
    if mask not in max_range:
        raise RuntimeError('Cannot determine the subnet mask!')
    network = network.rpartition('.')[0]
    if mask == 16:
        # Strip one more octet so the last two octets can be enumerated below.
        for i in range(0, 1):
            network = network.rpartition('.')[0]
    if mask == 16:
        for seq1 in range(0, max_range[mask]):
            for seq2 in range(0, max_range[mask]):
                ipaddr = '{0}.{1}.{2}'.format(network, seq1, seq2)
                thd = threading.Thread(target=self.__raw_scan,
                                       args=(ipaddr, timeout))
                thd.start()
    else:
        for seq1 in range(0, max_range[mask]):
            ipaddr = '{0}.{1}'.format(network, seq1)
            thd = threading.Thread(target=self.__raw_scan,
                                   args=(ipaddr, timeout))
            thd.start()
    return self.amcrest_ips
Scan for cameras in a range of IPs. Params: subnet - subnet, e.g. 192.168.1.0/24; if no mask is given, /24 is assumed timeout - timeout in seconds Returns: the list of camera IPs that responded (self.amcrest_ips)
codesearchnet
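The octet arithmetic in isolation, runnable without the class:

network, mask = "192.168.1.0/24".split("/")
base = network.rpartition(".")[0]               # '192.168.1'
addrs = ["{0}.{1}".format(base, i) for i in range(256)]
addrs[0], addrs[-1]                             # ('192.168.1.0', '192.168.1.255')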
def endpoints(self):
    children = [item.endpoints() for item in self.items]
    return (self.name, self.endpoint, children)
Get all the endpoints under this node in a tree-like structure. Returns: (tuple): name (str): This node's name. endpoint (str): Endpoint name relative to root. children (list): ``child.endpoints()`` for each child.
codesearchnet
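A toy tree showing the recursive shape of the return value; Node is a stand-in for the real class, with illustrative names.

class Node:
    def __init__(self, name, endpoint, items=()):
        self.name, self.endpoint, self.items = name, endpoint, list(items)
    def endpoints(self):
        return (self.name, self.endpoint,
                [item.endpoints() for item in self.items])

root = Node("api", "/", [Node("users", "/users"), Node("posts", "/posts")])
root.endpoints()  # ('api', '/', [('users', '/users', []), ('posts', '/posts', [])])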