code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def compute_trigonometric_terms(self, thetas, phis):
    """Precompute trigonometric terms required to calculate bond
    orientational order parameters, caching them on internal variables.

    Args:
        thetas ([float]): polar angles of all neighbors in radians.
        phis ([float]): azimuthal angles of all neighbors in radians; must
            have the same length and neighbor ordering as ``thetas``.

    Raises:
        ValueError: if the two angle lists differ in length.
    """
    if len(thetas) != len(phis):
        raise ValueError('List of polar and azimuthal angles have to be equal!')
    # Reset all cached trigonometric tables before recomputation.
    for cache in (self._pow_sin_t, self._pow_cos_t, self._sin_n_p, self._cos_n_p):
        cache.clear()
    # First-order terms.
    self._pow_sin_t[1] = [sin(float(angle)) for angle in thetas]
    self._pow_cos_t[1] = [cos(float(angle)) for angle in thetas]
    self._sin_n_p[1] = [sin(float(angle)) for angle in phis]
    self._cos_n_p[1] = [cos(float(angle)) for angle in phis]
    # Higher orders: powers of sin/cos(theta) built incrementally, and
    # sin/cos of integer multiples of phi computed directly.
    for order in range(2, self._max_trig_order + 1):
        self._pow_sin_t[order] = [prev * base for prev, base in zip(self._pow_sin_t[order - 1], self._pow_sin_t[1])]
        self._pow_cos_t[order] = [prev * base for prev, base in zip(self._pow_cos_t[order - 1], self._pow_cos_t[1])]
        self._sin_n_p[order] = [sin(float(order) * float(angle)) for angle in phis]
        self._cos_n_p[order] = [cos(float(order) * float(angle)) for angle in phis]
Computes trigonometric terms that are required to calculate bond orientational order parameters using internal variables. Args: thetas ([float]): polar angles of all neighbors in radians. phis ([float]): azimuthal angles of all neighbors in radians. The list of azimuthal angles is expected to have the same size as the list of polar angles; otherwise, a ValueError is raised. Also, the two lists of angles have to be coherent in order. That is, it is expected that the order in the list of azimuthal angles corresponds to a distinct sequence of neighbors, and this sequence has to equal the sequence of neighbors in the list of polar angles.
codesearchnet
def main(args):
    """Main entry point allowing external calls.

    Args:
        args ([str]): command line parameter list.
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    _logger.info("Starting GramVaani importer...")
    _logger.info("Starting loading GramVaani csv...")
    csv = GramVaaniCSV(args.csv_filename)
    _logger.info("Starting downloading GramVaani mp3's...")
    # Download the mp3 audio referenced by the csv into the target directory.
    downloader = GramVaaniDownloader(csv, args.target_dir)
    mp3_directory = downloader.download()
    _logger.info("Starting converting GramVaani mp3's to wav's...")
    converter = GramVaaniConverter(args.target_dir, mp3_directory)
    wav_directory = converter.convert()
    # Build datasets from the converted wavs and persist them.
    datasets = GramVaaniDataSets(args.target_dir, wav_directory, csv)
    datasets.create()
    datasets.save()
    _logger.info("Finished GramVaani importer...")
Main entry point allowing external calls Args: args ([str]): command line parameter list
juraj-google-style
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the metric statistics.

    Args:
        y_true: The ground truth values.
        y_pred: The predicted values.
        sample_weight: Optional weighting of each example. Defaults to `1`.
            Can be a tensor whose rank is either 0, or the same rank as
            `y_true`, and must be broadcastable to `y_true`.
    """
    # Delegate to the shared confusion-matrix helper; counts are accumulated
    # into self.accumulator under this metric's condition.
    return metrics_utils.update_confusion_matrix_variables({self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight)
Accumulates the metric statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to `1`. Can be a tensor whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true`.
github-repos
async def end_takeout(self, success):
    """Finishes a takeout, with specified result sent back to Telegram.

    Args:
        success (bool): result to report for the takeout session.

    Returns:
        ``True`` if the operation was successful, ``False`` otherwise.
    """
    try:
        # Entering the takeout client with finalize=True commits the result
        # on exit; ValueError presumably signals no active takeout session —
        # TODO confirm against _TakeoutClient.
        async with _TakeoutClient(True, self, None) as takeout:
            takeout.success = success
    except ValueError:
        return False
    return True
Finishes a takeout, with specified result sent back to Telegram. Returns: ``True`` if the operation was successful, ``False`` otherwise.
codesearchnet
def overload(fn):
    """Overload a given callable object to be used with ``|`` operator
    overloading.

    This is especially used for composing a pipeline of transformations over
    a single data set.

    Args:
        fn (function): target function to decorate. Its second positional
            parameter must be named ``iterable`` (or it must accept
            ``*args``).

    Raises:
        TypeError: if a function is not provided.
        ValueError: if the function signature or arity is invalid.

    Returns:
        function: decorated function.
    """
    # Local import: inspect.getargspec was removed in Python 3.11;
    # getfullargspec is the backward-compatible replacement.
    from inspect import getfullargspec, isfunction
    import functools
    if not isfunction(fn):
        raise TypeError('paco: fn must be a callable object')
    spec = getfullargspec(fn)
    args = spec.args
    # The wrapped function must take the iterable as its second argument
    # unless it accepts *args.
    if (not spec.varargs) and ((len(args) < 2) or (args[1] != 'iterable')):
        raise ValueError('paco: invalid function signature or arity')

    @functools.wraps(fn)
    def decorator(*args, **kw):
        # Called with fewer than two positionals: defer via pipe overloader.
        if len(args) < 2:
            return PipeOverloader(fn, args, kw)
        return fn(*args, **kw)
    return decorator
Overload a given callable object to be used with ``|`` operator overloading. This is especially used for composing a pipeline of transformation over a single data set. Arguments: fn (function): target function to decorate. Raises: TypeError: if function or coroutine function is not provided. Returns: function: decorated function
codesearchnet
def url(self, value):
    """The url property setter.

    Args:
        value (string): the property value.
    """
    is_default = value == self._defaults['url']
    if is_default and 'url' in self._values:
        # Setting the default value removes the explicit override.
        del self._values['url']
    else:
        self._values['url'] = value
The url property. Args: value (string). the property value.
juraj-google-style
def nic_s(msg):
    """Obtain NIC supplement bit, TC=31 message.

    Args:
        msg (string): 28 bytes hexadecimal message string

    Returns:
        int: NIC supplement bit (0 or 1)

    Raises:
        RuntimeError: if the message is not a TC=31 status operation message.
    """
    tc = typecode(msg)
    if tc != 31:
        raise RuntimeError("%s: Not a status operation message, expecting TC = 31" % msg)
    msgbin = common.hex2bin(msg)
    # The NIC supplement bit sits at fixed bit position 75 of the message.
    nic_s = int(msgbin[75])
    return nic_s
Obtain NIC supplement bit, TC=31 message Args: msg (string): 28 bytes hexadecimal message string Returns: int: NIC supplement bit (0 or 1)
juraj-google-style
def _full_axis_reduce_along_select_indices(self, func, axis, index):
    """Reduce Manager along select indices using function that needs full axis.

    Args:
        func: Callable that reduces the dimension of the object and requires
            full knowledge of the entire axis.
        axis: 0 for columns and 1 for rows.
        index: labels to select for the resulting QueryCompiler.

    Returns:
        A new BaseFrameManager object holding the reduced data.
    """
    # Translate the requested labels into positional indices on the axis
    # opposite the reduction.
    old_index = (self.index if axis else self.columns)
    numeric_indices = [i for (i, name) in enumerate(old_index) if (name in index)]
    result = self.data.apply_func_to_select_indices_along_full_axis(axis, func, numeric_indices)
    return result
Reduce Manger along select indices using function that needs full axis. Args: func: Callable that reduces the dimension of the object and requires full knowledge of the entire axis. axis: 0 for columns and 1 for rows. Defaults to 0. index: Index of the resulting QueryCompiler. Returns: A new QueryCompiler object with index or BaseFrameManager object.
codesearchnet
def _CheckIsDevice(self, file_entry):
    """Checks the is_device find specification.

    Args:
        file_entry (FileEntry): file entry.

    Returns:
        bool: True if the file entry matches the find specification, False
        if not.
    """
    # If device entries are not among the requested types, nothing matches.
    if (definitions.FILE_ENTRY_TYPE_DEVICE not in self._file_entry_types):
        return False
    return file_entry.IsDevice()
Checks the is_device find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not.
codesearchnet
def GetTemplateArgs(clean_lines, linenum):
    """Find list of template arguments associated with this function declaration.

    Args:
        clean_lines: A CleansedLines instance containing the file.
        linenum: Line number containing the start of the function declaration,
            usually one line after the end of the template-argument-list.

    Returns:
        Set of type names, or empty set if this does not appear to have any
        template parameters.
    """
    # Scan backwards for the start of the declaration (first line containing
    # an opening parenthesis); give up at a blank line or top of file.
    func_line = linenum
    while (func_line > 0):
        line = clean_lines.elided[func_line]
        if Match('^\\s*$', line):
            return set()
        if (line.find('(') >= 0):
            break
        func_line -= 1
    if (func_line == 0):
        return set()
    argument_list = ''
    match = Match('^(\\s*template\\s*)<', clean_lines.elided[func_line])
    if match:
        # template-argument-list opens on the same line as the declaration.
        start_col = len(match.group(1))
        (_, end_line, end_col) = CloseExpression(clean_lines, func_line, start_col)
        if ((end_col > (- 1)) and (end_line == func_line)):
            start_col += 1
            argument_list = clean_lines.elided[func_line][start_col:end_col]
    elif (func_line > 1):
        # Otherwise look for an argument list ending with '>' on the previous
        # line; it may span several lines, walked back via ReverseCloseExpression.
        match = Match('^(.*)>\\s*$', clean_lines.elided[(func_line - 1)])
        if match:
            end_col = len(match.group(1))
            (_, start_line, start_col) = ReverseCloseExpression(clean_lines, (func_line - 1), end_col)
            if (start_col > (- 1)):
                start_col += 1
                # Concatenate every line of the argument list into one string.
                while (start_line < (func_line - 1)):
                    argument_list += clean_lines.elided[start_line][start_col:]
                    start_col = 0
                    start_line += 1
                argument_list += clean_lines.elided[(func_line - 1)][start_col:end_col]
    if (not argument_list):
        return set()
    # Extract each 'typename T' / 'class T' parameter name in turn.
    typenames = set()
    while True:
        match = Match('^[,\\s]*(?:typename|class)(?:\\.\\.\\.)?\\s+(\\w+)(.*)$', argument_list)
        if (not match):
            break
        typenames.add(match.group(1))
        argument_list = match.group(2)
    return typenames
Find list of template arguments associated with this function declaration. Args: clean_lines: A CleansedLines instance containing the file. linenum: Line number containing the start of the function declaration, usually one line after the end of the template-argument-list. Returns: Set of type names, or empty set if this does not appear to have any template parameters.
codesearchnet
def observe(self, terminal, reward, index=0):
    """Adds an observation (reward and is-terminal) to the model without
    updating its trainable variables.

    Args:
        terminal (List[bool]): List of is-terminal signals.
        reward (List[float]): List of reward signals.
        index (int): parallel episode you want to observe.

    Returns:
        The value of the model-internal episode counter.
    """
    # Only the episode-counter output is fetched; no training op runs here.
    fetches = self.episode_output
    feed_dict = self.get_feed_dict(terminal=terminal, reward=reward, index=index)
    episode = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
    return episode
Adds an observation (reward and is-terminal) to the model without updating its trainable variables. Args: terminal (List[bool]): List of is-terminal signals. reward (List[float]): List of reward signals. index: (int) parallel episode you want to observe Returns: The value of the model-internal episode counter.
juraj-google-style
def is_alias_command(subcommands, args):
    """Check if the user is invoking one of the commands in ``subcommands``
    in the form ``az alias {subcommand}``.

    Args:
        subcommands: The list of subcommands to check through.
        args: The CLI arguments to process.

    Returns:
        True if the user is invoking 'az alias {command}'.
    """
    if not args:
        return False
    head = args[:2]
    return any(head == ['alias', candidate] for candidate in subcommands)
Check if the user is invoking one of the commands in 'subcommands' in the form 'az alias {subcommand}'. Args: subcommands: The list of subcommands to check through. args: The CLI arguments to process. Returns: True if the user is invoking 'az alias {command}'.
codesearchnet
def percent_of(percent, whole):
    """Calculates the value of a percent of a number.

    ie: 5% of 20 is what --> 1

    Args:
        percent (float): The percent of a number
        whole (float): The whole of the number

    Returns:
        float: The value of a percent

    Example:
        >>> percent_of(25, 100)
        25.0
        >>> percent_of(5, 20)
        1.0
    """
    product = float(percent) * float(whole)
    return product / 100
Calculates the value of a percent of a number ie: 5% of 20 is what --> 1 Args: percent (float): The percent of a number whole (float): The whole of the number Returns: float: The value of a percent Example: >>> percent_of(25, 100) 25.0 >>> percent_of(5, 20) 1.0
juraj-google-style
def _debug(message, color=None, attrs=None): if attrs is None: attrs = [] if color is not None: print colored(message, color, attrs=attrs) else: if len(attrs) > 0: print colored(message, "white", attrs=attrs) else: print message
Print a message, optionally colorized with the given color and attributes. Args: message (str): Message to print
juraj-google-style
def remove_profile(self, profile=None):
    """Remove profile from credentials file.

    Args:
        profile (str): Credentials profile to remove.

    Returns:
        list: List of affected document IDs.
    """
    # The context manager presumably flushes the underlying document store
    # (TinyDB-style API) on exit — TODO confirm against self.db's type.
    with self.db:
        return self.db.remove((self.query.profile == profile))
Remove profile from credentials file. Args: profile (str): Credentials profile to remove. Returns: list: List of affected document IDs.
codesearchnet
def get_block_size(self, token, resolution=None):
    """Gets the block-size for a given token at a given resolution.

    Arguments:
        token (str): The token to inspect
        resolution (int : None): The resolution at which to inspect data.
            If none is specified, uses the minimum available.

    Returns:
        int[3]: The xyz blocksize.
    """
    cube_dims = self.get_metadata(token)['dataset']['cube_dimension']
    # Fall back to the smallest available resolution key when unspecified.
    chosen = min(cube_dims.keys()) if resolution is None else resolution
    return cube_dims[str(chosen)]
Gets the block-size for a given token at a given resolution. Arguments: token (str): The token to inspect resolution (int : None): The resolution at which to inspect data. If none is specified, uses the minimum available. Returns: int[3]: The xyz blocksize.
codesearchnet
def _state_to_task(cls, tstate, shard_state, eta=None, countdown=None):
    """Generate task for slice according to current states.

    Args:
        tstate: An instance of TransientShardState.
        shard_state: An instance of ShardState.
        eta: Absolute time when the MR should execute. May not be specified
            if 'countdown' is also supplied. This may be timezone-aware or
            timezone-naive.
        countdown: Time in seconds into the future that this MR should
            execute. Defaults to zero.

    Returns:
        A model.HugeTask instance for the slice specified by current states.
    """
    base_path = tstate.base_path
    # Task names encode shard/slice/retry so retried slices get fresh names.
    task_name = MapperWorkerCallbackHandler.get_task_name(tstate.shard_id, tstate.slice_id, tstate.retries)
    headers = util._get_task_headers(tstate.mapreduce_spec.mapreduce_id)
    headers[util._MR_SHARD_ID_TASK_HEADER] = tstate.shard_id
    worker_task = model.HugeTask(url=((base_path + '/worker_callback/') + tstate.shard_id), params=tstate.to_dict(), name=task_name, eta=eta, countdown=countdown, parent=shard_state, headers=headers)
    return worker_task
Generate task for slice according to current states. Args: tstate: An instance of TransientShardState. shard_state: An instance of ShardState. eta: Absolute time when the MR should execute. May not be specified if 'countdown' is also supplied. This may be timezone-aware or timezone-naive. countdown: Time in seconds into the future that this MR should execute. Defaults to zero. Returns: A model.HugeTask instance for the slice specified by current states.
codesearchnet
def send_update(url_id, dataset):
    """Send request to Seeder's API with data changed by user.

    Args:
        url_id (str): ID used as identification in Seeder.
        dataset (dict): WA-KAT dataset sent from frontend.
    """
    data = _convert_to_seeder_format(dataset)
    if not data:
        return
    try:
        _send_request(url_id, json=data, req_type=requests.patch)
    except Exception as e:
        # Best-effort notification: report to stderr and swallow the error.
        sys.stderr.write("Seeder PATCH error: ")
        # BUG FIX: `e.message` is Python 2 only and raises AttributeError on
        # Python 3; str(e) is portable.
        sys.stderr.write(str(e))
        return None
Send request to Seeder's API with data changed by user. Args: url_id (str): ID used as identification in Seeder. dataset (dict): WA-KAT dataset sent from frontend.
juraj-google-style
def _get_what_to_read_next(fp, previously_read_position, chunk_size):
    """Return information on which file pointer position to read from and
    how many bytes.

    Args:
        fp: file-like object being read backwards.
        previously_read_position (int): The file pointer position that has
            been read previously.
        chunk_size (int): ideal io chunk_size.

    Returns:
        (int, int): The next seek position, how many bytes to read next.
    """
    seek_position = max(previously_read_position - chunk_size, 0)
    read_size = chunk_size
    # Walk backwards one byte at a time so that a multi-byte newline
    # sequence is never split across two reads.
    while seek_position > 0:
        fp.seek(seek_position)
        if not _is_partially_read_new_line(fp.read(1)):
            break
        seek_position -= 1
        read_size += 1
    # Never read past the previously read position.
    read_size = min(previously_read_position - seek_position, read_size)
    return (seek_position, read_size)
Return information on which file pointer position to read from and how many bytes. Args: fp: the file pointer being read backwards previously_read_position (int): The file pointer position that has been read previously chunk_size (int): ideal io chunk_size Returns: (int, int): The next seek position, how many bytes to read next
codesearchnet
def parse_multiple_json(json_file, offset=None):
    """Parse multiple json records from the given file.

    Seek to the offset as the start point before parsing if offset set.
    Return an empty list if the json file does not exist or an exception
    occurs.

    Args:
        json_file (str): File path to be parsed.
        offset (int): Initial seek position of the file.

    Returns:
        (list, int): The parsed json records and the new offset after
        parsing.
    """
    json_info_list = []
    if offset is None:
        # BUG FIX: the original left offset as None, which made
        # `offset += len(line)` raise TypeError on the default call.
        offset = 0
    if not os.path.exists(json_file):
        # BUG FIX: the original returned a bare list here, while every other
        # path returns a (list, offset) tuple.
        return (json_info_list, offset)
    try:
        with open(json_file, 'r') as f:
            if offset:
                f.seek(offset)
            for line in f:
                if line[-1] != '\n':
                    # Incomplete trailing record (file still being written).
                    break
                json_info_list.append(json.loads(line))
                offset += len(line)
    except Exception as e:
        # `e.message` is Python 2 only; log the exception itself instead.
        logging.error(str(e))
    return (json_info_list, offset)
Parse multiple json records from the given file. Seek to the offset as the start point before parsing if offset set. return empty list if the json file does not exists or exception occurs. Args: json_file (str): File path to be parsed. offset (int): Initial seek position of the file. Returns: A dict of json info. New offset after parsing.
codesearchnet
def encode(g, top=None, cls=PENMANCodec, **kwargs):
    """Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: the node identifier for the top of the serialized graph; if
            unset, the original top of *g* is used
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*

    Returns:
        the PENMAN-serialized string of the Graph *g*

    Example:
        >>> encode(Graph([('h', 'instance', 'hi')]))
        (h / hi)
    """
    codec = cls(**kwargs)
    return codec.encode(g, top=top)
Serialize the graph *g* from *top* to PENMAN notation. Args: g: the Graph object top: the node identifier for the top of the serialized graph; if unset, the original top of *g* is used cls: serialization codec class kwargs: keyword arguments passed to the constructor of *cls* Returns: the PENMAN-serialized string of the Graph *g* Example: >>> encode(Graph([('h', 'instance', 'hi')])) (h / hi)
juraj-google-style
def __init__(self, concentration, validate_args=False, allow_nan_stats=True, name='Dirichlet'):
    """Initialize a batch of Dirichlet distributions.

    Args:
        concentration: Positive floating-point `Tensor` indicating mean
            number of class occurrences; aka "alpha". Implies `self.dtype`,
            and `self.batch_shape`, `self.event_shape`, i.e., if
            `concentration.shape = [N1, N2, ..., Nm, k]` then
            `batch_shape = [N1, N2, ..., Nm]` and `event_shape = [k]`.
        validate_args: Python `bool`, default `False`. When `True`,
            distribution parameters are checked for validity despite
            possibly degrading runtime performance.
        allow_nan_stats: Python `bool`, default `True`. When `True`,
            statistics (e.g., mean, mode, variance) use the value "`NaN`" to
            indicate the result is undefined. When `False`, an exception is
            raised if one or more of the statistic's batch members are
            undefined.
        name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[concentration]) as name:
        # Optionally assert that concentration is valid before use.
        self._concentration = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration, name='concentration'), validate_args)
        # Total concentration: sum over the event (last) dimension.
        self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
        super(Dirichlet, self).__init__(dtype=self._concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=distribution.FULLY_REPARAMETERIZED, parameters=parameters, graph_parents=[self._concentration, self._total_concentration], name=name)
Initialize a batch of Dirichlet distributions. Args: concentration: Positive floating-point `Tensor` indicating mean number of class occurrences; aka "alpha". Implies `self.dtype`, and `self.batch_shape`, `self.event_shape`, i.e., if `concentration.shape = [N1, N2, ..., Nm, k]` then `batch_shape = [N1, N2, ..., Nm]` and `event_shape = [k]`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class.
github-repos
def furnish(app: web.Application):
    """Configures Application routes, readying it for running.

    This function modifies routes and resources that were added by calling
    code, and must be called immediately prior to `run(app)`.

    Args:
        app (web.Application): The Aiohttp Application as created by
            `create_app()`.
    """
    app_name = app['config']['name']
    prefix = '/' + app_name.lstrip('/')
    app.router.add_routes(routes)
    cors_middleware.enable_cors(app)
    # Prefix every resource exactly once, even when several routes share
    # the same resource.
    known_resources = set()
    for route in list(app.router.routes()):
        if route.resource in known_resources:
            continue
        known_resources.add(route.resource)
        route.resource.add_prefix(prefix)
    aiohttp_swagger.setup_swagger(app, swagger_url=prefix + '/api/doc', description='', title=f'Brewblox Service "{app_name}"', api_version='0.0', contact='development@brewpi.com')
    LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))
    # Log the final (prefixed) endpoints and registered features.
    for route in app.router.routes():
        LOGGER.info(f'Endpoint [{route.method}] {route.resource}')
    for name, impl in app.get(features.FEATURES_KEY, {}).items():
        LOGGER.info(f'Feature [{name}] {impl}')
Configures Application routes, readying it for running. This function modifies routes and resources that were added by calling code, and must be called immediately prior to `run(app)`. Args: app (web.Application): The Aiohttp Application as created by `create_app()`
juraj-google-style
def get_ui(ui_type, on_ui_exit=None, available_ui_types=None, config=None):
    """Create a `base_ui.BaseUI` subtype.

    This factory method attempts to fallback to other available ui_types on
    ImportError.

    Args:
        ui_type: (`str`) requested UI type. Currently supported: (readline)
        on_ui_exit: (`Callable`) the callback to be called when the UI exits.
        available_ui_types: (`None` or `list` of `str`) Manually-set
            available ui_types.
        config: An instance of `cli_config.CLIConfig()` carrying user-facing
            configurations.

    Returns:
        A `base_ui.BaseUI` subtype object.

    Raises:
        ValueError: on invalid ui_type or on exhausting or fallback ui_types.
    """
    if available_ui_types is None:
        available_ui_types = copy.deepcopy(SUPPORTED_UI_TYPES)
    if ui_type and ui_type not in available_ui_types:
        raise ValueError("Invalid ui_type: '%s'" % ui_type)
    try:
        if ui_type == 'readline':
            # Imported lazily so missing readline support only fails when
            # that UI is actually requested.
            from tensorflow.python.debug.cli import readline_ui
            return readline_ui.ReadlineUI(on_ui_exit=on_ui_exit, config=config)
    except ImportError:
        # Drop the failing type and retry with whatever remains.
        available_ui_types.remove(ui_type)
    if not available_ui_types:
        raise ValueError('Exhausted all fallback ui_types.')
    return get_ui(available_ui_types[0], available_ui_types=available_ui_types)
Create a `base_ui.BaseUI` subtype. This factory method attempts to fallback to other available ui_types on ImportError. Args: ui_type: (`str`) requested UI type. Currently supported: ( readline) on_ui_exit: (`Callable`) the callback to be called when the UI exits. available_ui_types: (`None` or `list` of `str`) Manually-set available ui_types. config: An instance of `cli_config.CLIConfig()` carrying user-facing configurations. Returns: A `base_ui.BaseUI` subtype object. Raises: ValueError: on invalid ui_type or on exhausting or fallback ui_types.
github-repos
def trace_export(name, step=None, profiler_outdir=None):
    """Stops and exports the active trace as a Summary and/or profile file.

    Stops the trace and exports all metadata collected during the trace to
    the default SummaryWriter, if one has been set.

    Args:
        name: A name for the summary to be written.
        step: Explicit `int64`-castable monotonic step value for this
            summary. If omitted, this defaults to
            `tf.summary.experimental.get_step()`, which must not be None.
        profiler_outdir: This arg is a no-op. Please set this in trace_on().

    Raises:
        ValueError: if a default writer exists, but no step was provided and
            `tf.summary.experimental.get_step()` is None.
    """
    global _current_trace_context
    # Tracing can only be exported while executing eagerly at top level.
    if ops.inside_function():
        logging.warn('Cannot export trace inside a tf.function.')
        return
    if not context.executing_eagerly():
        logging.warn('Can only export trace while executing eagerly.')
        return
    with _current_trace_context_lock:
        if _current_trace_context is None:
            raise ValueError('Must enable trace before export through tf.summary.trace_on.')
        graph, profiler = _current_trace_context
    run_meta = context.context().export_run_metadata()
    if graph and (not profiler):
        run_metadata_graphs(name, run_meta, step)
    else:
        run_metadata(name, run_meta, step)
    if profiler:
        if profiler_outdir:
            logging.warn('Ignoring `profiler_outdir` passed to trace_export(). Please pass it to trace_on() instead.')
        _profiler.stop()
    trace_off()
Stops and exports the active trace as a Summary and/or profile file. Stops the trace and exports all metadata collected during the trace to the default SummaryWriter, if one has been set. Args: name: A name for the summary to be written. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. profiler_outdir: This arg is a no-op. Please set this in trace_on(). Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
github-repos
def delete(self, branch, commit_message, **kwargs):
    """Delete the file from the server.

    Args:
        branch (str): Branch from which the file will be removed
        commit_message (str): Commit message for the deletion
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabDeleteError: If the server cannot perform the request
    """
    # The API expects the file path URL-encoded ('/' -> '%2F').
    file_path = self.get_id().replace('/', '%2F')
    self.manager.delete(file_path, branch, commit_message, **kwargs)
Delete the file from the server. Args: branch (str): Branch from which the file will be removed commit_message (str): Commit message for the deletion **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabDeleteError: If the server cannot perform the request
codesearchnet
def analyze_structures(self, structures, step_freq=10, most_frequent_polyhedra=15):
    """Perform Voronoi analysis on a list of Structures.

    Note that this might take a significant amount of time depending on the
    size and number of structures.

    Args:
        structures (list): list of Structures
        step_freq (int): perform analysis every step_freq steps
        most_frequent_polyhedra (int): this many unique polyhedra with
            highest frequencies is stored.

    Returns:
        A list of tuples in the form (voronoi_index, frequency), sorted by
        descending frequency.
    """
    voro_dict = {}
    step = 0
    for structure in structures:
        step += 1
        # Only analyze every step_freq-th structure.
        if ((step % step_freq) != 0):
            continue
        # Voronoi signature of every site in this structure.
        v = []
        for n in range(len(structure)):
            v.append(str(self.analyze(structure, n=n).view()))
        # Tally occurrences of each distinct polyhedron signature.
        for voro in v:
            if (voro in voro_dict):
                voro_dict[voro] += 1
            else:
                voro_dict[voro] = 1
    return sorted(voro_dict.items(), key=(lambda x: (x[1], x[0])), reverse=True)[:most_frequent_polyhedra]
Perform Voronoi analysis on a list of Structures. Note that this might take a significant amount of time depending on the size and number of structures. Args: structures (list): list of Structures cutoff (float: cutoff distance around an atom to search for neighbors step_freq (int): perform analysis every step_freq steps qhull_options (str): options to pass to qhull most_frequent_polyhedra (int): this many unique polyhedra with highest frequences is stored. Returns: A list of tuples in the form (voronoi_index,frequency)
codesearchnet
def chop(array, epsilon=1e-10):
    """Truncate small values of a complex array.

    Args:
        array (array_like): array to truncate small values.
        epsilon (float): threshold below which magnitudes are zeroed.

    Returns:
        np.array: A new array with small values set to zero.
    """
    result = np.array(array)
    if np.iscomplexobj(result):
        # Chop real and imaginary parts independently.
        result.real[np.abs(result.real) < epsilon] = 0.0
        result.imag[np.abs(result.imag) < epsilon] = 0.0
    else:
        result[np.abs(result) < epsilon] = 0.0
    return result
Truncate small values of a complex array. Args: array (array_like): array to truncte small values. epsilon (float): threshold. Returns: np.array: A new operator with small values set to zero.
juraj-google-style
def _CreateLineStringForShape(self, parent, shape):
    """Create a KML LineString using coordinates from a shape.

    Args:
        parent: The parent ElementTree.Element instance.
        shape: The transitfeed.Shape instance.

    Returns:
        The LineString ElementTree.Element instance or None if
        coordinate_list is empty.
    """
    # KML expects (longitude, latitude); shape points are stored as
    # (latitude, longitude, distance).
    coordinate_list = [(longitude, latitude) for (latitude, longitude, distance) in shape.points]
    return self._CreateLineString(parent, coordinate_list)
Create a KML LineString using coordinates from a shape. Args: parent: The parent ElementTree.Element instance. shape: The transitfeed.Shape instance. Returns: The LineString ElementTree.Element instance or None if coordinate_list is empty.
codesearchnet
def _save_cached_when_graph_building(self, file_prefix, object_graph_tensor, options, update_ckpt_state=False):
    """Create or retrieve save ops, overrides parent's private method.

    Args:
        file_prefix: The prefix for saved checkpoint files.
        object_graph_tensor: A `Tensor` to which the current object graph
            will be fed.
        options: `CheckpointOptions` object.
        update_ckpt_state: Optional bool flag. Indicates whether the
            internal checkpoint state needs to be updated. This is used for
            async checkpoint, which DTrackableSaver currently does not
            support.

    Returns:
        A two-element tuple with a filename tensor and a feed_dict of
        tensors to feed when running it (if graph building). The feed dict
        contains the current object graph and any Python state to be saved
        in the checkpoint.
    """
    named_saveable_objects, graph_proto, feed_additions, unused_registered_savers = self._gather_saveables(object_graph_tensor=object_graph_tensor)
    # Rebuild the save op only when the object graph changed, or when
    # running eagerly / inside a function (cached graph ops cannot be
    # reused in those cases).
    if self._last_save_object_graph != graph_proto or context.executing_eagerly() or ops.inside_function():
        saver = _DSaver(self._mesh, named_saveable_objects)
        save_op = saver.save(file_prefix, options=options)
        with ops.device('/cpu:0'):
            with ops.control_dependencies([save_op]):
                self._cached_save_operation = array_ops.identity(file_prefix)
        self._last_save_object_graph = graph_proto
    return (self._cached_save_operation, feed_additions)
Create or retrieve save ops, overrides parents's private method. Args: file_prefix: The prefix for saved checkpoint files. object_graph_tensor: A `Tensor` to which the current object graph will be fed. options: `CheckpointOptions` object. update_ckpt_state: Optional bool flag. Indiciate whether the internal checkpoint state needs to be updated. This is used for async checkpoint, which DTrackableSaver currently does not support. TODO(chienchunh): Implement async checkpoint for DTrackableSaver. Returns: A two-element tuple with a filename tensor and a feed_dict of tensors to feed when running it (if graph building). The feed dict contains the current object graph and any Python state to be saved in the checkpoint. When executing eagerly only the first argument is meaningful.
github-repos
def _batch_examples(dataset, batch_size, max_length): (buckets_min, buckets_max) = _create_min_max_boundaries(max_length) bucket_batch_sizes = [(batch_size bucket_batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64) def example_to_bucket_id(example_input, example_target): 'Return int64 bucket id for this example, calculated based on length.' seq_length = _get_example_length((example_input, example_target)) conditions_c = tf.logical_and(tf.less_equal(buckets_min, seq_length), tf.less(seq_length, buckets_max)) bucket_id = tf.reduce_min(tf.where(conditions_c)) return bucket_id def window_size_fn(bucket_id): 'Return number of examples to be grouped when given a bucket id.' return bucket_batch_sizes[bucket_id] def batching_fn(bucket_id, grouped_dataset): 'Batch and add padding to a dataset of elements with similar lengths.' bucket_batch_size = window_size_fn(bucket_id) return grouped_dataset.padded_batch(bucket_batch_size, ([None], [None])) return dataset.apply(tf.contrib.data.group_by_window(key_func=example_to_bucket_id, reduce_func=batching_fn, window_size=None, window_size_func=window_size_fn))
Group examples by similar lengths, and return batched dataset. Each batch of similar-length examples are padded to the same length, and may have different number of elements in each batch, such that: group_batch_size * padded_length <= batch_size. This decreases the number of padding tokens per batch, which improves the training speed. Args: dataset: Dataset of unbatched examples. batch_size: Max number of tokens per batch of examples. max_length: Max number of tokens in an example input or target sequence. Returns: Dataset of batched examples with similar lengths.
codesearchnet
def _get_dir_size(self, path: str='.'): total = 0 for root, _, files in os.walk(path): for filename in files: total += os.path.getsize(os.path.join(root, filename)) return total
Get the total size of files and sub-directories under the path. Args: path: Path of a directory or a file to calculate the total size. Returns: Total size of the directory or a file.
github-repos
def add_to_buffer(self, content, read_position):
    """Add additional bytes content as read from the read_position.

    Args:
        content (bytes): data to be added to buffer working BufferWorkSpace.
        read_position (int): where in the file pointer the data was read
            from.
    """
    self.read_position = read_position
    # Content comes from earlier in the file, so it is prepended.
    existing = self.read_buffer
    self.read_buffer = content if existing is None else content + existing
Add additional bytes content as read from the read_position. Args: content (bytes): data to be added to buffer working BufferWorkSpac. read_position (int): where in the file pointer the data was read from.
codesearchnet
def RestrictFeedItemToGeoTarget(client, feed_item, location_id):
    """Restrict a feed item to a geo target location.

    Args:
        client: An AdWordsClient instance.
        feed_item: A FeedItem.
        location_id: The Id of the location to restrict to.
    """
    feed_item_target_service = client.GetService('FeedItemTargetService', version='v201809')
    # Criterion target tying this feed item to the Location criterion.
    criterion_target = {'xsi_type': 'FeedItemCriterionTarget', 'feedId': feed_item['feedId'], 'feedItemId': feed_item['feedItemId'], 'criterion': {'xsi_type': 'Location', 'id': location_id}}
    operation = {'operator': 'ADD', 'operand': criterion_target}
    response = feed_item_target_service.mutate([operation])
    new_location_target = response['value'][0]
    print(('Feed item target for feed ID %d and feed item ID %d was created to restrict serving to location ID %d.' % (new_location_target['feedId'], new_location_target['feedItemId'], new_location_target['criterion']['id'])))
Restrict a feed item to a geo target location. Args: client: An AdWordsClient instance. feed_item: A FeedItem. location_id: The Id of the location to restrict to.
codesearchnet
def create_selected_summaries_dict(summaries_list):
    """Creates a dictionary with summary column headers.

    Examples:
        >>> summaries_to_output = ["discharge_capacity", "charge_capacity"]
        >>> summaries_to_output_dict = create_selected_summaries_dict(
        >>>    summaries_to_output
        >>> )
        >>> print(summaries_to_output_dict)
        {'discharge_capacity': "Discharge_Capacity(mAh/g)",
        'charge_capacity': "Charge_Capacity(mAh/g)"}

    Args:
        summaries_list: list containing cellpy summary column id names

    Returns:
        dictionary of the form
        {cellpy id name: cellpy summary header name, ...}
    """
    headers_summary = cellpy.parameters.internal_settings.get_headers_summary()
    # Map each requested id name to its full summary header.
    selected_summaries = dict()
    for h in summaries_list:
        selected_summaries[h] = headers_summary[h]
    return selected_summaries
Creates a dictionary with summary column headers. Examples: >>> summaries_to_output = ["discharge_capacity", "charge_capacity"] >>> summaries_to_output_dict = create_selected_summaries_dict( >>> summaries_to_output >>> ) >>> print(summaries_to_output_dict) {'discharge_capacity': "Discharge_Capacity(mAh/g)", 'charge_capacity': "Charge_Capacity(mAh/g)} Args: summaries_list: list containing cellpy summary column id names Returns: dictionary of the form {cellpy id name: cellpy summary header name,}
codesearchnet
def do_hook_actions(self, actions, hook_type):
    """Call hook actions.

    Args:
        actions (list): each action in actions list may be in two formats:
            format1 (dict): assignment; the value returned by the hook
                function will be assigned to the variable, e.g.
                {"var": "${func()}"}
            format2 (str): only call hook functions, e.g. ${func()}
        hook_type (enum): setup/teardown
    """
    logger.log_debug("call {} hook actions.".format(hook_type))
    for action in actions:
        if isinstance(action, dict) and len(action) == 1:
            # format1: evaluate the hook and bind the result to a variable.
            var_name, hook_content = list(action.items())[0]
            hook_content_eval = self.session_context.eval_content(hook_content)
            logger.log_debug(
                "assignment with hook: {} = {} => {}".format(
                    var_name, hook_content, hook_content_eval
                )
            )
            self.session_context.update_test_variables(
                var_name, hook_content_eval
            )
        else:
            # format2: just evaluate (call) the hook expression.
            logger.log_debug("call hook function: {}".format(action))
            self.session_context.eval_content(action)
call hook actions. Args: actions (list): each action in actions list maybe in two format. format1 (dict): assignment, the value returned by hook function will be assigned to variable. {"var": "${func()}"} format2 (str): only call hook functions. ${func()} hook_type (enum): setup/teardown
juraj-google-style
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:
    """Run one encoder layer: self-attention then MLP, each with a residual.

    Args:
        hidden_states (`torch.FloatTensor`): input to the layer of shape
            `(batch, seq_len, embed_dim)`
        attention_mask (`torch.FloatTensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are
            indicated by very large negative values.
        causal_attention_mask (`torch.FloatTensor`): causal attention mask.
        output_attentions (`bool`, *optional*): Whether or not to return the
            attentions tensors of all attention layers.

    Returns:
        Tuple of the layer output tensor and, if requested, the attention
        weights.
    """
    # Self-attention block; layer norm is applied AFTER the residual add.
    residual = hidden_states
    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
    hidden_states = residual + hidden_states
    hidden_states = self.layer_norm1(hidden_states)
    # Feed-forward (MLP) block with residual connection.
    residual = hidden_states
    hidden_states = self.mlp(hidden_states)
    hidden_states = residual + hidden_states
    hidden_states = self.layer_norm2(hidden_states)
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (attn_weights,)
    return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def meta_features_path(self, path):
    """Returns path for meta-features.

    Args:
        path (str): Absolute/local path of xcessiv folder

    Returns:
        str: path of the .npy file holding this object's meta-features.
    """
    # File lives under the configured meta-features folder, keyed by id.
    return os.path.join(
        path, app.config['XCESSIV_META_FEATURES_FOLDER'], str(self.id)
    ) + '.npy'
Returns path for meta-features Args: path (str): Absolute/local path of xcessiv folder
juraj-google-style
def close_children_tasks(self, parent_task_name):
    """Closes all the children tasks that were open.

    Tasks are removed newest-first until `parent_task_name` itself is
    reached (it is kept).

    Args:
        parent_task_name (str): Name of the parent task

    Returns:
        None
    """
    if parent_task_name not in self.tasks:
        return
    while self.tasks:
        # BUG FIX: the original used the Python 2 iterator protocol
        # (`reversed(...).next()`), which raises AttributeError on
        # Python 3; use the builtin next() instead.
        newest_task = next(reversed(self.tasks))
        if newest_task == parent_task_name:
            break
        del self.tasks[newest_task]
Closes all the children tasks that were open Args: parent_task_name (str): Name of the parent task Returns: None
codesearchnet
def do_command_line(infile: typing.IO[str]) -> int:
    """Create a Checker for the passed ``infile`` and run its test functions
    through linting, printing the results.

    Args:
        infile: open handle of the Python source file to lint.

    Returns:
        int: Number of flake8 errors raised.
    """
    lines = infile.readlines()
    tree = ast.parse(''.join(lines))
    checker = Checker(tree, lines, infile.name)
    checker.load()
    errors = []
    for func in checker.all_funcs(skip_noqa=True):
        try:
            errors = list(func.check_all())
        except ValidationError as error:
            errors = [error.to_aaa()]
        print(func.__str__(errors), end='')
    # NOTE(review): `errors` only holds the LAST function's errors, so the
    # return value is not a total across all functions — confirm intent.
    return len(errors)
Currently a small stub to create an instance of Checker for the passed ``infile`` and run its test functions through linting. Args: infile Returns: int: Number of flake8 errors raised.
codesearchnet
def do_get_next(endpoint, access_token):
    """Do an HTTP GET request, follow the nextLink chain and return JSON.

    Args:
        endpoint (str): Azure Resource Manager management endpoint.
        access_token (str): A valid Azure authentication token.

    Returns:
        dict: JSON body with all paged 'value' entries merged; or the raw
        response body when it has no 'value' key.
    """
    headers = {"Authorization": 'Bearer ' + access_token}
    headers['User-Agent'] = get_user_agent()
    looping = True
    value_list = []
    vm_dict = {}
    while looping:
        get_return = requests.get(endpoint, headers=headers).json()
        if not 'value' in get_return:
            # Not a paged collection; return the body as-is.
            return get_return
        if not 'nextLink' in get_return:
            looping = False
        else:
            # Follow the continuation link to the next page.
            endpoint = get_return['nextLink']
        value_list += get_return['value']
    vm_dict['value'] = value_list
    return vm_dict
Do an HTTP GET request, follow the nextLink chain and return JSON. Args: endpoint (str): Azure Resource Manager management endpoint. access_token (str): A valid Azure authentication token. Returns: HTTP response. JSON body.
juraj-google-style
def _getScalesDiag(self, termx=0):
    """Uses 2 term single trait model to get covar params for initialization.

    Args:
        termx: non-noise term that is used for initialization.

    Returns:
        Concatenated vector of initial scales for all variance terms.
    """
    assert self.P>1, 'CVarianceDecomposition:: diagonal init_method allowed only for multi trait models'
    assert self.noisPos!=None, 'CVarianceDecomposition:: noise term has to be set'
    assert termx<self.n_terms-1, 'CVarianceDecomposition:: termx>=n_terms-1'
    assert self.covar_type[self.noisPos] not in ['lowrank','block','fixed'], 'CVarianceDecimposition:: diagonal initializaiton not posible for such a parametrization'
    assert self.covar_type[termx] not in ['lowrank','block','fixed'], 'CVarianceDecimposition:: diagonal initializaiton not posible for such a parametrization'
    scales = []
    # Estimate average genetic/noise variances from the two-term model.
    res = self.estimateHeritabilities(self.vd.getTerm(termx).getK())
    scaleg = SP.sqrt(res['varg'].mean())
    scalen = SP.sqrt(res['varn'].mean())
    for term_i in range(self.n_terms):
        if term_i==termx:
            _scales = scaleg*self.diag[term_i]
        elif term_i==self.noisPos:
            _scales = scalen*self.diag[term_i]
        else:
            # All remaining terms are initialized at zero variance.
            _scales = 0.*self.diag[term_i]
        if self.offset[term_i]>0:
            # Append the offset component for terms that carry one.
            _scales = SP.concatenate((_scales,SP.array([SP.sqrt(self.offset[term_i])])))
        scales.append(_scales)
    return SP.concatenate(scales)
Uses 2 term single trait model to get covar params for initialization Args: termx: non-noise term terms that is used for initialization
juraj-google-style
def waitOnUpdate(self, timeout: float=0) -> bool:
    """Wait on any new update to arrive from the network.

    Args:
        timeout: Maximum time in seconds to wait. If 0 then no timeout is
            used.

    Returns:
        bool: always True (a timeout is swallowed, not raised).

    .. note::
        A loop with ``waitOnUpdate`` should not be used to harvest tick
        data from tickers, since some ticks can go missing when multiple
        updates occur almost simultaneously. Use events instead.
    """
    if timeout:
        # Swallow the timeout: the method reports True either way.
        with suppress(asyncio.TimeoutError):
            util.run(asyncio.wait_for(self.updateEvent, timeout))
    else:
        util.run(self.updateEvent)
    return True
Wait on any new update to arrive from the network. Args: timeout: Maximum time in seconds to wait. If 0 then no timeout is used. .. note:: A loop with ``waitOnUpdate`` should not be used to harvest tick data from tickers, since some ticks can go missing. This happens when multiple updates occur almost simultaneously; The ticks from the first update are then cleared. Use events instead to prevent this.
codesearchnet
def GetFeedItems(client, feed):
    """Retrieve all enabled Feed Items belonging to a Feed.

    Args:
        client: an AdWordsClient instance.
        feed: the Feed whose Feed Items are retrieved.

    Returns:
        list: the Feed Items associated with the given Feed.
    """
    feed_item_service = client.GetService('FeedItemService', 'v201809')
    selector = {
        'fields': ['FeedItemId', 'AttributeValues'],
        'predicates': [
            {'field': 'Status', 'operator': 'EQUALS', 'values': ['ENABLED']},
            {'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']]},
        ],
        'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE},
    }
    feed_items = []
    # Page through results until startIndex passes the reported total.
    while True:
        page = feed_item_service.get(selector)
        if 'entries' in page:
            feed_items.extend(page['entries'])
        selector['paging']['startIndex'] += PAGE_SIZE
        if selector['paging']['startIndex'] >= int(page['totalNumEntries']):
            break
    return feed_items
Returns the Feed Items for a given Feed. Args: client: an AdWordsClient instance. feed: the Feed we are retrieving Feed Items from. Returns: The Feed Items associated with the given Feed.
codesearchnet
def flux_up(self, fluxUpBottom, emission=None):
    """Compute upwelling radiative flux at interfaces between layers.

    NOTE(review): the dataset docstring describes *downwelling* flux, but
    the name, ``fluxUpBottom`` and ``self.Tup`` indicate upward flux —
    confirm against the defining module.

    Args:
        fluxUpBottom: upward flux at the bottom boundary.
        emission: emission from atmospheric levels; defaults to zeros
            shaped like ``self.absorptivity`` when not given.

    Returns:
        Squeezed result of propagating the emission/boundary vector
        through the ``self.Tup`` operator.
    """
    if (emission is None):
        emission = np.zeros_like(self.absorptivity)
    # Append the bottom-boundary flux to the level emissions, then apply Tup.
    E = np.concatenate((emission, np.atleast_1d(fluxUpBottom)), axis=(- 1))
    return np.squeeze(matrix_multiply(self.Tup, E[(..., np.newaxis)]))
Compute upwelling radiative flux at interfaces between layers. Inputs: * fluxUpBottom: flux up at the bottom boundary * emission: emission from atmospheric levels (N) defaults to zero if not given Returns: * vector of upwelling radiative flux between levels (N+1)
codesearchnet
def sg_queue_context(sess=None):
    r"""Context helper that starts TF queue runners and stops them on exit.

    Args:
        sess: A session to open queues with. If not specified, the
            current default session is used.

    Yields:
        None (control returns to the caller while runners are active).
    """
    sess = (tf.get_default_session() if (sess is None) else sess)
    # The coordinator lets all runner threads be stopped and joined together.
    coord = tf.train.Coordinator()
    try:
        threads = tf.train.start_queue_runners(sess, coord)
        (yield)
    finally:
        # Always stop and join runner threads, even on error in the body.
        coord.request_stop()
        coord.join(threads)
r"""Context helper for queue routines. Args: sess: A session to open queues. If not specified, a new session is created. Returns: None
codesearchnet
def run_attack_work(self, work_id):
    """Runs one attack work piece.

    Args:
        work_id: ID of the piece of work to run.

    Returns:
        (elapsed_time_sec, submission_id): elapsed time and id of the
        submission.

    Raises:
        WorkerError: if an error occurred during execution.
    """
    adv_batch_id = self.attack_work.work[work_id]['output_adversarial_batch_id']
    adv_batch = self.adv_batches[adv_batch_id]
    dataset_batch_id = adv_batch['dataset_batch_id']
    submission_id = adv_batch['submission_id']
    epsilon = self.dataset_batches[dataset_batch_id]['epsilon']
    logging.info('Attack work piece: dataset_batch_id="%s" submission_id="%s" epsilon=%d', dataset_batch_id, submission_id, epsilon)
    if (submission_id in self.blacklisted_submissions):
        raise WorkerError('Blacklisted submission')
    # Fetch and stage the attack submission locally.
    attack = AttackSubmission(submission_id, self.submissions, self.storage_bucket)
    attack.download()
    input_dir = os.path.join(LOCAL_DATASET_DIR, dataset_batch_id)
    if (attack.type == TYPE_TARGETED):
        # Targeted attacks additionally receive a per-image target class file.
        target_class_filename = os.path.join(input_dir, 'target_class.csv')
        self.dataset_meta.save_target_classes_for_batch(target_class_filename, self.dataset_batches, dataset_batch_id)
    # Recreate all scratch directories from a clean state.
    if os.path.exists(LOCAL_OUTPUT_DIR):
        sudo_remove_dirtree(LOCAL_OUTPUT_DIR)
    os.mkdir(LOCAL_OUTPUT_DIR)
    if os.path.exists(LOCAL_PROCESSED_OUTPUT_DIR):
        shutil.rmtree(LOCAL_PROCESSED_OUTPUT_DIR)
    os.mkdir(LOCAL_PROCESSED_OUTPUT_DIR)
    if os.path.exists(LOCAL_ZIPPED_OUTPUT_DIR):
        shutil.rmtree(LOCAL_ZIPPED_OUTPUT_DIR)
    os.mkdir(LOCAL_ZIPPED_OUTPUT_DIR)
    elapsed_time_sec = attack.run(input_dir, LOCAL_OUTPUT_DIR, epsilon)
    if (attack.type == TYPE_TARGETED):
        os.remove(target_class_filename)
    # Clip produced images into the epsilon ball and hash the results.
    image_hashes = eval_lib.enforce_epsilon_and_compute_hash(input_dir, LOCAL_OUTPUT_DIR, LOCAL_PROCESSED_OUTPUT_DIR, epsilon)
    if (not image_hashes):
        logging.warning('No images saved by the attack.')
        return (elapsed_time_sec, submission_id)
    for (clean_image_id, hash_val) in iteritems(image_hashes):
        # Rename outputs so image ids are unique across adversarial batches.
        adv_img_id = ((adv_batch_id + '_') + clean_image_id)
        os.rename(os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, (clean_image_id + '.png')), os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, (adv_img_id + '.png')))
        image_path = '{0}/adversarial_images/{1}/{1}.zip/{2}.png'.format(self.round_name, adv_batch_id, adv_img_id)
        adv_batch['images'][adv_img_id] = {'clean_image_id': (u'' + str(clean_image_id)), 'image_path': (u'' + str(image_path)), 'image_hash': (u'' + str(hash_val))}
    zipped_images_filename = os.path.join(LOCAL_ZIPPED_OUTPUT_DIR, (adv_batch_id + '.zip'))
    try:
        logging.debug('Compressing adversarial images to %s', zipped_images_filename)
        shell_call(['zip', '-j', '-r', zipped_images_filename, LOCAL_PROCESSED_OUTPUT_DIR])
    except subprocess.CalledProcessError as e:
        raise WorkerError('Cant make archive from adversarial iamges', e)
    dst_filename = '{0}/adversarial_images/{1}/{1}.zip'.format(self.round_name, adv_batch_id)
    logging.debug('Copying archive with adversarial images to %s', dst_filename)
    self.storage_client.new_blob(dst_filename).upload_from_filename(zipped_images_filename)
    logging.debug('Writing adversarial batch to datastore')
    self.adv_batches.write_single_batch_images_to_datastore(adv_batch_id)
    return (elapsed_time_sec, submission_id)
Runs one attack work. Args: work_id: ID of the piece of work to run Returns: elapsed_time_sec, submission_id - elapsed time and id of the submission Raises: WorkerError: if error occurred during execution.
codesearchnet
def norm(self, valu):
    """Normalize ``valu`` according to its Python type.

    Args:
        valu (obj): The value to normalize.

    Returns:
        ((obj, dict)): The normalized value plus an info dict.

    Raises:
        s_exc.NoSuchFunc: If no normalizer is registered for the type.
    """
    normfunc = self._type_norms.get(type(valu))
    if normfunc is not None:
        return normfunc(valu)
    raise s_exc.NoSuchFunc(name=self.name, mesg='no norm for type: %r' % (type(valu),))
Normalize the value for a given type. Args: valu (obj): The value to normalize. Returns: ((obj,dict)): The normalized valu, info tuple. Notes: The info dictionary uses the following key conventions: subs (dict): The normalized sub-fields as name: valu entries.
juraj-google-style
def __init__(self, package, device):
    """Initializes the instance of ClientBase.

    Args:
        package: str, the user-visible name of the snippet library being
            communicated with.
        device: DeviceController, the device object associated with this
            client.
    """
    self._device = device
    self.package = package
    self.log = device.log
    self.verbose_logging = True
    # Internal RPC bookkeeping; the counter is created lazily.
    self._counter = None
    self._event_client = None
    self._lock = threading.Lock()
Initializes the instance of ClientBase. Args: package: str, the user-visible name of the snippet library being communicated with. device: DeviceController, the device object associated with a client.
github-repos
def pop_all(self, event_name):
    """Return and remove all stored events of a specified name.

    Pops every currently queued event; may miss the latest ones. If no
    event is available, returns immediately.

    Args:
        event_name: Name of the events to be popped.

    Returns:
        list: The popped events (possibly empty).

    Raises:
        IllegalStateError: If pop is called before the dispatcher starts
            polling.
    """
    if not self.started:
        raise IllegalStateError('Dispatcher needs to be started before popping.')
    results = []
    # Use the lock as a context manager so it is always released, even
    # if an unexpected exception escapes the drain loop.
    with self.lock:
        try:
            while True:
                results.append(self.event_dict[event_name].get(block=False))
        except (queue.Empty, KeyError):
            # Queue drained, or no queue exists for this event name.
            pass
    return results
Return and remove all stored events of a specified name. Pops all events from their queue. May miss the latest ones. If no event is available, return immediately. Args: event_name: Name of the events to be popped. Returns: List of the desired events. Raises: IllegalStateError: Raised if pop is called before the dispatcher starts polling.
codesearchnet
def _enroll_users(cls, request, enterprise_customer, emails, mode, course_id=None, program_details=None, notify=True):
    """Enroll users with the given email addresses, by course and/or program.

    Args:
        cls (type): The view class itself (classmethod-style call).
        request: The HTTP request the enrollment is being created by.
        enterprise_customer: The EnterpriseCustomer whose attached users
            are being enrolled.
        emails: Iterable of email address strings to enroll.
        mode: The enrollment mode for the users.
        course_id: Optional ID of the course to enroll in.
        program_details: Optional details of a program to enroll in.
        notify: Whether to notify (by email) the enrolled users.
    """
    pending_messages = []
    if course_id:
        (succeeded, pending, failed) = cls.enroll_users_in_course(enterprise_customer=enterprise_customer, course_id=course_id, course_mode=mode, emails=emails)
        # Pending enrollments still count as successes for notification.
        all_successes = (succeeded + pending)
        if notify:
            enterprise_customer.notify_enrolled_learners(catalog_api_user=request.user, course_id=course_id, users=all_successes)
        if succeeded:
            pending_messages.append(cls.get_success_enrollment_message(succeeded, course_id))
        if failed:
            pending_messages.append(cls.get_failed_enrollment_message(failed, course_id))
        if pending:
            pending_messages.append(cls.get_pending_enrollment_message(pending, course_id))
    if program_details:
        (succeeded, pending, failed) = cls.enroll_users_in_program(enterprise_customer=enterprise_customer, program_details=program_details, course_mode=mode, emails=emails)
        all_successes = (succeeded + pending)
        if notify:
            cls.notify_program_learners(enterprise_customer=enterprise_customer, program_details=program_details, users=all_successes)
        # Fall back from title to uuid to a generic label for messages.
        program_identifier = program_details.get('title', program_details.get('uuid', _('the program')))
        if succeeded:
            pending_messages.append(cls.get_success_enrollment_message(succeeded, program_identifier))
        if failed:
            pending_messages.append(cls.get_failed_enrollment_message(failed, program_identifier))
        if pending:
            pending_messages.append(cls.get_pending_enrollment_message(pending, program_identifier))
    cls.send_messages(request, pending_messages)
Enroll the users with the given email addresses to the courses specified, either specifically or by program. Args: cls (type): The EnterpriseCustomerManageLearnersView class itself request: The HTTP request the enrollment is being created by enterprise_customer: The instance of EnterpriseCustomer whose attached users we're enrolling emails: An iterable of strings containing email addresses to enroll in a course mode: The enrollment mode the users will be enrolled in the course with course_id: The ID of the course in which we want to enroll program_details: Details about a program in which we want to enroll notify: Whether to notify (by email) the users that have been enrolled
codesearchnet
def variable(self, var_name, shape, init, dt=tf.float32, train=None):
    """Add a named variable to this bookkeeper or return an existing one.

    If the requested name already exists and is shape-compatible it is
    returned; on a shape mismatch an exception is raised.

    Args:
        var_name: The unique name of this variable.
        shape: The shape of the variable.
        init: The init function to use, or a Tensor to copy.
        dt: The datatype; the base dtype is extracted automatically.
        train: Whether the variable should be trained; defaults to True
            unless the 'trainable_variables' default overrides it.

    Returns:
        A TensorFlow tensor.

    Raises:
        ValueError: If a reused variable's shape does not match, or the
            provided value is incompatible with ``shape``.
    """
    dt = tf.as_dtype(dt).base_dtype
    if (var_name in self.vars):
        v = self.vars[var_name]
        if (v.get_shape() != shape):
            raise ValueError(('Shape mismatch: %s vs %s. Perhaps a UnboundVariable had incompatible values within a graph.' % (v.get_shape(), shape)))
        return v
    elif callable(init):
        if (train is None):
            train = _defaults.get('trainable_variables', True)
        variable_collections = _defaults.get('variable_collections', ())
        # Always register the variable in the global variables collection.
        if (tf.GraphKeys.GLOBAL_VARIABLES not in variable_collections):
            variable_collections = (list(variable_collections) + [tf.GraphKeys.GLOBAL_VARIABLES])
        v = tf.get_variable(var_name, shape=shape, dtype=dt, initializer=init, trainable=train, collections=variable_collections)
        self.vars[var_name] = v
        return v
    else:
        # ``init`` is a concrete value: wrap it as a tensor and validate shape.
        v = tf.convert_to_tensor(init, name=var_name, dtype=dt)
        v.get_shape().assert_is_compatible_with(shape)
        self.vars[var_name] = v
        return v
Adds a named variable to this bookkeeper or returns an existing one. Variables marked train are returned by the training_variables method. If the requested name already exists and it is compatible (same shape, dt and train) then it is returned. In case of an incompatible type, an exception is thrown. Args: var_name: The unique name of this variable. If a variable with the same name exists, then it is returned. shape: The shape of the variable. init: The init function to use or a Tensor to copy. dt: The datatype, defaults to float. This will automatically extract the base dtype. train: Whether or not the variable should be trained; defaults to True unless a default_scope has overridden it. Returns: A TensorFlow tensor. Raises: ValueError: if reuse is False (or unspecified and allow_reuse is False) and the variable already exists or if the specification of a reused variable does not match the original.
codesearchnet
def parse_GSM(filepath, entry_name=None):
    """Parse a GSM entry from a SOFT file.

    Args:
        filepath (str or Iterable): Path to a file with one GSM entry, or
            an iterable of lines representing a GSM from a GSE file.
        entry_name (str, optional): Name of the entry. By default it is
            inferred from the data.

    Returns:
        GSM: the parsed GSM object.

    Raises:
        Exception: If more than one entry is present.
        NoEntriesException: If no entries are found.
    """
    # NOTE(review): the original line was truncated after
    # startswith(("^", "!", " — the SOFT comment prefix '#' and the
    # closing parens were lost (likely by comment stripping); restored
    # here as ("^", "!", "#").
    if isinstance(filepath, str):
        with utils.smart_open(filepath) as f:
            soft = []
            has_table = False
            for line in f:
                # Lines not prefixed ^/!/# (or opening a table) belong to
                # the tabular data section.
                if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
                    has_table = True
                soft.append(line.rstrip())
    else:
        soft = []
        has_table = False
        for line in filepath:
            if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
                has_table = True
            soft.append(line.rstrip())
    if entry_name is None:
        sets = [i for i in soft if i.startswith("^")]
        if len(sets) > 1:
            raise Exception("More than one entry in GSM")
        if len(sets) == 0:
            raise NoEntriesException(
                "No entries found. Check if the accession is correct!")
        entry_name = parse_entry_name(sets[0])
    columns = parse_columns(soft)
    metadata = parse_metadata(soft)
    if has_table:
        table_data = parse_table_data(soft)
    else:
        table_data = DataFrame()
    gsm = GSM(name=entry_name, table=table_data, metadata=metadata, columns=columns)
    return gsm
Parse GSM entry from SOFT file. Args: filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM entry or list of lines representing GSM from GSE file. entry_name (:obj:`str`, optional): Name of the entry. By default it is inferred from the data. Returns: :obj:`GEOparse.GSM`: A GSM object.
juraj-google-style
def _setup_transitions(tdef, states, prev=()):
    """Create a TransitionList from a 'transitions' Workflow attribute.

    Args:
        tdef: list of three-tuple transition definitions.
        states (StateList): already parsed state definitions.
        prev (TransitionList): transition definitions from a parent.

    Returns:
        TransitionList: the transitions defined in ``tdef``, merged over
        those in ``prev`` (same-named transitions override in place).

    Raises:
        TypeError: If an element of ``tdef`` is not a three-tuple.
    """
    trs = list(prev)
    for transition in tdef:
        if len(transition) == 3:
            (name, source, target) = transition
            # A single bare source is allowed; normalize it to a list.
            if is_string(source) or isinstance(source, State):
                source = [source]
            source = [states[src] for src in source]
            target = states[target]
            tr = Transition(name, source, target)
        else:
            raise TypeError(
                "Elements of the 'transition' attribute of a "
                "workflow should be three-tuples; got %r instead." % (transition,)
            )
        if any(prev_tr.name == tr.name for prev_tr in trs):
            # Same-named transition overrides the inherited one, keeping order.
            trs = [tr if prev_tr.name == tr.name else prev_tr for prev_tr in trs]
        else:
            trs.append(tr)
    return TransitionList(trs)
Create a TransitionList object from a 'transitions' Workflow attribute. Args: tdef: list of transition definitions states (StateList): already parsed state definitions. prev (TransitionList): transition definitions from a parent. Returns: TransitionList: the list of transitions defined in the 'tdef' argument.
juraj-google-style
def sg_summary_activation(tensor, prefix=None, name=None):
    r"""Register `tensor` to the summary report as an activation.

    Args:
        tensor: A `Tensor` to log as activation.
        prefix: A `string`. A prefix to display in the tensor board web UI.
        name: A `string`. A name to display in the tensor board web UI.

    Returns:
        None
    """
    prefix = ('' if (prefix is None) else (prefix + '/'))
    name = ((prefix + _pretty_name(tensor)) if (name is None) else (prefix + name))
    # Log the fraction of positive activations plus a histogram of raw values.
    _scalar((name + '/ratio'), tf.reduce_mean(tf.cast(tf.greater(tensor, 0), tf.sg_floatx)))
    _histogram((name + '/ratio-h'), tensor)
r"""Register `tensor` to summary report as `activation` Args: tensor: A `Tensor` to log as activation prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
codesearchnet
def __init__(self, img, gaussian_kernel_1d=None, size=None):
    """Create an SSIMImage.

    Args:
        img (str or PIL.Image): PIL Image object or file name.
        gaussian_kernel_1d (np.ndarray, optional): Gaussian kernel
            (as generated by utils.get_gaussian_kernel), used to
            precompute common objects for SSIM computation.
        size (tuple, optional): New image size to resize the image to.
    """
    self.img = img if not isinstance(img, compat.basestring) \
        else compat.Image.open(img)
    if size and size != self.img.size:
        self.img = self.img.resize(size, Image.ANTIALIAS)
    self.size = self.img.size
    if gaussian_kernel_1d is not None:
        self.gaussian_kernel_1d = gaussian_kernel_1d
        self.img_gray, self.img_alpha = to_grayscale(self.img)
        if self.img_alpha is not None:
            # Zero out pixels where alpha is 255 (mask value used here).
            self.img_gray[self.img_alpha == 255] = 0
        # Precompute the mean and variance images used by the SSIM formula.
        self.img_gray_squared = self.img_gray ** 2
        self.img_gray_mu = convolve_gaussian_2d(
            self.img_gray, self.gaussian_kernel_1d)
        self.img_gray_mu_squared = self.img_gray_mu ** 2
        self.img_gray_sigma_squared = convolve_gaussian_2d(
            self.img_gray_squared, self.gaussian_kernel_1d)
        self.img_gray_sigma_squared -= self.img_gray_mu_squared
    else:
        # Without a kernel, only the plain grayscale image is kept.
        self.img_gray = ImageOps.grayscale(self.img)
Create an SSIMImage. Args: img (str or PIL.Image): PIL Image object or file name. gaussian_kernel_1d (np.ndarray, optional): Gaussian kernel that was generated with utils.get_gaussian_kernel is used to precompute common objects for SSIM computation size (tuple, optional): New image size to resize image to.
juraj-google-style
def IsDataVisible(self, path):
    """Decide whether the data at ``path`` should be visible.

    Args:
        path: A dot-separated path naming a package, class, method or
            variable (same format as a python "import" statement).

    Returns:
        (visible, reason): a boolean visibility flag plus a string
        reason that can be displayed to the user.
    """
    # Order matters: unknown paths are rejected first, then the
    # blacklist takes precedence over the whitelist.
    if path is None:
        return (False, RESPONSES['UNKNOWN_TYPE'])
    if _Matches(path, self.blacklist_patterns):
        return (False, RESPONSES['BLACKLISTED'])
    if _Matches(path, self.whitelist_patterns):
        return (True, RESPONSES['VISIBLE'])
    return (False, RESPONSES['NOT_WHITELISTED'])
Returns a tuple (visible, reason) stating if the data should be visible. Args: path: A dot separated path that represents a package, class, method or variable. The format is identical to pythons "import" statement. Returns: (visible, reason) where visible is a boolean that is True if the data should be visible. Reason is a string reason that can be displayed to the user and indicates why data is visible or not visible.
juraj-google-style
def split_range(self):
    """Split this NamespaceRange into two nearly equal-sized ranges.

    Returns:
        list: ``[self]`` if this range contains a single namespace;
        otherwise two NamespaceRanges whose combined range is identical
        to this one.
    """
    if self.is_single_namespace:
        return [self]
    # NOTE(review): the original statement was truncated (unbalanced
    # parens, no divisor) — restored the integer halving implied by
    # "two nearly equal-sized ranges".
    mid_point = (_namespace_to_ord(self.namespace_start) +
                 _namespace_to_ord(self.namespace_end)) // 2
    # Left half ends at mid_point; right half starts just after it.
    return [NamespaceRange(self.namespace_start,
                           _ord_to_namespace(mid_point),
                           _app=self.app),
            NamespaceRange(_ord_to_namespace(mid_point + 1),
                           self.namespace_end,
                           _app=self.app)]
Splits the NamespaceRange into two nearly equal-sized ranges. Returns: If this NamespaceRange contains a single namespace then a list containing this NamespaceRange is returned. Otherwise a two-element list containing two NamespaceRanges whose total range is identical to this NamespaceRange's is returned.
codesearchnet
def ParseFromUnicode(self, value):
    """Parse a string into a client URN.

    Converts case so that all URNs are of the form C.[0-9a-f].

    Args:
        value: string value to parse.

    Raises:
        type_info.TypeValueError: If the client urn is malformed.
    """
    precondition.AssertType(value, Text)
    value = value.strip()
    super(ClientURN, self).ParseFromUnicode(value)
    match = self.CLIENT_ID_RE.match(self._string_urn)
    if (not match):
        raise type_info.TypeValueError(('Client urn malformed: %s' % value))
    clientid = match.group('clientid')
    # Normalize: leading 'C' uppercase, remaining hex digits lowercase.
    clientid_correctcase = ''.join((clientid[0].upper(), clientid[1:].lower()))
    self._string_urn = self._string_urn.replace(clientid, clientid_correctcase, 1)
Parse a string into a client URN. Convert case so that all URNs are of the form C.[0-9a-f]. Args: value: string value to parse
codesearchnet
def strip_quotes(self, content):
    """Remove surrounding quotes from an import rule.

    Args:
        content (str): An import rule.

    Raises:
        InvalidImportRule: If the rule is quoted on only one side, or
            its quote characters do not match.

    Returns:
        str: The rule without its surrounding quotes (returned unchanged
        when not quoted at all).
    """
    error_msg = 'Following rule is badly quoted: {}'
    # A rule starting with a quote must end with the *same* quote char.
    if content[:1] in ('"', "'"):
        if content.endswith(content[0]) and len(content) > 0:
            return content[1:(- 1)]
        raise InvalidImportRule(error_msg.format(content))
    # A rule that only *ends* with a quote is also malformed.
    if content[-1:] in ('"', "'"):
        raise InvalidImportRule(error_msg.format(content))
    return content
Unquote the given rule. Args: content (str): An import rule. Raises: InvalidImportRule: Raised if the rule is badly quoted (a quote at only one end, or mismatched quote characters). Returns: string: The given rule, unquoted.
codesearchnet
def find(self, name):
    """Look up a collector by name (case-insensitive).

    Args:
        name (str): name of the collector being searched for.

    Returns:
        dict: the collector's details if found (``self.collector_id`` is
        also set), otherwise ``{'status': 'No results found.'}``.
    """
    wanted = name.lower()
    for candidate in self.get_collectors():
        if candidate['name'].lower() == wanted:
            # Remember the match so later calls can reuse it.
            self.collector_id = candidate['id']
            return candidate
    return {'status': 'No results found.'}
Returns a dict of collector's details if found. Args: name (str): name of collector searching for
juraj-google-style
def verify_edge_segments(edge_infos):
    """Verify that the edge segments in an intersection are valid.

    Helper used only by ``generic_intersect``.

    Args:
        edge_infos (Optional[list]): List of "edge info" lists. Each list
            represents a curved polygon and contains 3-tuples of
            (edge index, start, end).

    Raises:
        ValueError: If a start/end pair is invalid (requires
            0.0 <= start < end <= 1.0), or if two consecutive segments
            lie on the same edge index.
    """
    if (edge_infos is None):
        return
    for edge_info in edge_infos:
        num_segments = len(edge_info)
        # Start at -1 so the wrap-around pair (last, first) is also checked.
        for index in six.moves.xrange((- 1), (num_segments - 1)):
            (index1, start1, end1) = edge_info[index]
            if (not (0.0 <= start1 < end1 <= 1.0)):
                raise ValueError(BAD_SEGMENT_PARAMS, edge_info[index])
            (index2, _, _) = edge_info[(index + 1)]
            if (index1 == index2):
                raise ValueError(SEGMENTS_SAME_EDGE, edge_info[index], edge_info[(index + 1)])
Verify that the edge segments in an intersection are valid. .. note:: This is a helper used only by :func:`generic_intersect`. Args: edge_infos (Optional[list]): List of "edge info" lists. Each list represents a curved polygon and contains 3-tuples of edge index, start and end (see the output of :func:`ends_to_curve`). Raises: ValueError: If two consecutive edge segments lie on the same edge index. ValueError: If the start and end parameter are "invalid" (they should be between 0 and 1 and start should be strictly less than end).
codesearchnet
def put_image(self, name, val):
    """Put an image into the monitors.

    Args:
        name (str): name of the image.
        val (np.ndarray): 2D, 3D (HWC) or 4D (NHWC) numpy array of
            images in range [0, 255]. If channel is 3, assumed RGB.
    """
    assert isinstance(val, np.ndarray)
    arr = image_to_nhwc(val)
    # Dispatch both the raw image and its rendered summary to all monitors.
    self._dispatch(lambda m: m.process_image(name, arr))
    s = create_image_summary(name, arr)
    self._dispatch(lambda m: m.process_summary(s))
Put an image. Args: name (str): val (np.ndarray): 2D, 3D (HWC) or 4D (NHWC) numpy array of images in range [0,255]. If channel is 3, assumed to be RGB.
juraj-google-style
def convert_sum(params, w_name, scope_name, inputs, layers, weights, names):
    """Convert a sum (reduce over all elements) layer to Keras.

    Args:
        params: dictionary with layer parameters.
        w_name: name prefix in state_dict.
        scope_name: pytorch scope name.
        inputs: pytorch node inputs.
        layers: dictionary with keras tensors (updated in place).
        weights: pytorch state_dict.
        names: use short names for keras layers.
    """
    print('Converting Sum ...')
    def target_layer(x):
        # Imported inside the closure so the Lambda layer is self-contained.
        import keras.backend as K
        return K.sum(x)
    lambda_layer = keras.layers.Lambda(target_layer)
    layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert sum. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def not_storable(_type):
    """Helper for tagging unserializable types.

    Args:
        _type (type): type to be ignored.

    Returns:
        Storable: storable instance whose poke is a no-op and whose peek
        fails for ``_type``.
    """
    return Storable(_type, handlers=StorableHandler(poke=fake_poke, peek=fail_peek(_type)))
Helper for tagging unserializable types. Arguments: _type (type): type to be ignored. Returns: Storable: storable instance that does not poke.
codesearchnet
def ed25519_public_key_from_string(string):
    """Create an ed25519 public key from a base64-encoded string.

    Args:
        string (str): base64 encoding of the raw public-key bytes.

    Returns:
        Ed25519PublicKey: the public key.

    Raises:
        ScriptWorkerEd25519Error: if the string cannot be base64-decoded
            or does not form a valid key.
    """
    try:
        return Ed25519PublicKey.from_public_bytes(base64.b64decode(string))
    except (UnsupportedAlgorithm, Base64Error) as exc:
        raise ScriptWorkerEd25519Error("Can't create Ed25519PublicKey: {}!".format(str(exc)))
Create an ed25519 public key from ``string``, which is a seed. Args: string (str): the string to use as a seed. Returns: Ed25519PublicKey: the public key
codesearchnet
def QA_fetch_get_sh_margin(date):
    """Return Shanghai margin data for one trading day.

    Args:
        date (str): date in YYYY-MM-DD format.

    Returns:
        pandas.DataFrame: margin data for the given trading day, or
        implicitly None when ``date`` is not a trading day.
    """
    if (date in trade_date_sse):
        data = pd.read_excel(_sh_url.format(QA_util_date_str2int(date)), 1).assign(date=date).assign(sse='sh')
        data.columns = ['code', 'name', 'leveraged_balance', 'leveraged_buyout', 'leveraged_payoff', 'margin_left', 'margin_sell', 'margin_repay', 'date', 'sse']
        return data
    else:
        # Non-trading day: fall through and implicitly return None.
        pass
Return Shanghai margin data for one trading day. Arguments: date {str YYYY-MM-DD} -- date in YYYY-MM-DD format Returns: pandas.DataFrame -- the margin data for that day
codesearchnet
def _Build(self, storage_file):
    """Builds the event tag index.

    Args:
        storage_file (BaseStorageFile): storage file whose event tags
            are indexed.
    """
    # Reset the index before repopulating it from the storage file.
    self._index = {}
    for event_tag in storage_file.GetEventTags():
        self.SetEventTag(event_tag)
Builds the event tag index. Args: storage_file (BaseStorageFile): storage file.
codesearchnet
def included(self, start, stop):
    """Iterate (in chronological order) over every event fully contained
    in the timespan between `start` and `stop`.

    Args:
        start: beginning of the span (comparable with event bounds,
            e.g. an Arrow object).
        stop: end of the span.

    Yields:
        Events whose begin and end both fall within [start, stop].
    """
    for event in self:
        begin_inside = start <= event.begin <= stop
        end_inside = start <= event.end <= stop
        if begin_inside and end_inside:
            yield event
Iterates (in chronological order) over every event that is included in the timespan between `start` and `stop` Args: start : (Arrow object) stop : (Arrow object)
juraj-google-style
def add_keywords_from_dict(self, keyword_dict):
    """Add keywords from a dictionary.

    Args:
        keyword_dict (dict): maps a clean-name `str` to a list of
            keyword strings, e.g.
            ``{"java": ["java_2e", "java programing"]}``.

    Raises:
        AttributeError: If a value in `keyword_dict` is not a list.
    """
    for clean_name, keywords in keyword_dict.items():
        # Validate lazily, per key, preserving the original semantics
        # (earlier keys are registered even if a later one is invalid).
        if not isinstance(keywords, list):
            raise AttributeError('Value of key {} should be a list'.format(clean_name))
        for single_keyword in keywords:
            self.add_keyword(single_keyword, clean_name)
To add keywords from a dictionary Args: keyword_dict (dict): A dictionary with `str` key and (list `str`) as value Examples: >>> keyword_dict = { "java": ["java_2e", "java programing"], "product management": ["PM", "product manager"] } >>> keyword_processor.add_keywords_from_dict(keyword_dict) Raises: AttributeError: If value for a key in `keyword_dict` is not a list.
codesearchnet
def encode_all_features(dataset, vocabulary):
    """Encode all features of a dataset with a vocabulary.

    Args:
        dataset: a tf.data.Dataset.
        vocabulary: a vocabulary.Vocabulary.

    Returns:
        a tf.data.Dataset with int64 features.
    """
    def my_fn(features):
        ret = {}
        for (k, v) in features.items():
            v = vocabulary.encode_tf(v)
            # Append id 1 as a terminator after each encoded feature.
            v = tf.concat([tf.to_int64(v), [1]], 0)
            ret[k] = v
        return ret
    return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
Encode all features. Args: dataset: a tf.data.Dataset vocabulary: a vocabulary.Vocabulary Returns: a tf.data.Dataset
codesearchnet
def word_ids(self, batch_index: int=0) -> List[Optional[int]]:
    """Return a list mapping tokens to their word in the initial sentence.

    Only available with fast tokenizers, which record per-token
    encodings.

    Args:
        batch_index (`int`, *optional*, defaults to 0): index of the
            encoding to access in the batch.

    Returns:
        `List[Optional[int]]`: word index per token; special tokens map
        to `None`.

    Raises:
        ValueError: When no fast-tokenizer encodings are available.
    """
    if self._encodings:
        return self._encodings[batch_index].word_ids
    raise ValueError('word_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast` class).')
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. Args: batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. Returns: `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word).
github-repos
def should_execute_serially(self, applied_ptransform):
    """Returns True if this applied_ptransform should run one bundle at a time.

    Some TransformEvaluators use a global state object (e.g. the
    _GroupByKeyOnly evaluator buffers keys in memory), so they act as a
    syncing point: execution does not move forward until they have
    received all of their inputs.

    Args:
        applied_ptransform: Transform to be used for execution.

    Returns:
        True if the executor should execute applied_ptransform serially.
    """
    if isinstance(applied_ptransform.transform, (_GroupByKeyOnly, _StreamingGroupByKeyOnly, _StreamingGroupAlsoByWindow)):
        return True
    elif isinstance(applied_ptransform.transform, core.ParDo) and is_stateful_dofn(applied_ptransform.transform.dofn):
        # Stateful DoFns also require serialized execution.
        return True
    return False
Returns True if this applied_ptransform should run one bundle at a time. Some TransformEvaluators use a global state object to keep track of their global execution state. For example evaluator for _GroupByKeyOnly uses this state as an in memory dictionary to buffer keys. Serially executed evaluators will act as syncing point in the graph and execution will not move forward until they receive all of their inputs. Once they receive all of their input, they will release the combined output. Their output may consist of multiple bundles as they may divide their output into pieces before releasing. Args: applied_ptransform: Transform to be used for execution. Returns: True if executor should execute applied_ptransform serially.
github-repos
def get_cards(self, **query_params):
    """Get all cards this member is attached to.

    Args:
        **query_params: extra query parameters forwarded to the API call.

    Returns:
        list(Card): Card objects built from the returned JSON.
    """
    raw_cards = self.get_cards_json(self.base_uri, query_params=query_params)
    return [self.create_card(card_json) for card_json in raw_cards]
Get all cards this member is attached to. Return a list of Card objects. Returns: list(Card): Return all cards this member is attached to
codesearchnet
def _partitions_list(N):
    """Return a list of partitions of the |N| binary nodes.

    Args:
        N (int): The number of nodes under consideration.

    Returns:
        list[list]: A list of lists, where each inner list is the set of
        micro-elements corresponding to a macro-element.

    Raises:
        ValueError: If no partition list was precomputed for ``N`` nodes.
    """
    if N < (_NUM_PRECOMPUTED_PARTITION_LISTS):
        # Copy so callers cannot mutate the cached table.
        return list(_partition_lists[N])
    else:
        raise ValueError(
            'Partition lists not yet available for system with {} '
            'nodes or more'.format(_NUM_PRECOMPUTED_PARTITION_LISTS))
Return a list of partitions of the |N| binary nodes. Args: N (int): The number of nodes under consideration. Returns: list[list]: A list of lists, where each inner list is the set of micro-elements corresponding to a macro-element. Example: >>> _partitions_list(3) [[[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]], [[0], [1], [2]]]
juraj-google-style
def calculate_embedding(self, batch_image_bytes):
    """Get the embeddings for a batch of JPEG images.

    Args:
        batch_image_bytes: raw JPEG bytes, as if returned from
            [ff.read() for ff in file_list].

    Returns:
        The Inception embeddings (bottleneck layer output).
    """
    return self.tf_session.run(self.embedding, feed_dict={self.input_jpeg: batch_image_bytes})
Get the embeddings for a given JPEG image. Args: batch_image_bytes: As if returned from [ff.read() for ff in file_list]. Returns: The Inception embeddings (bottleneck layer output)
codesearchnet
def respond(self, prompt_id, response):
    """Respond to the prompt with the given ID.

    If there is no active prompt or the given ID doesn't match the
    active prompt, do nothing.

    Args:
        prompt_id: A string uniquely identifying the prompt.
        response: A string response to the given prompt.

    Returns:
        True if the prompt with the given ID was active, otherwise False.
    """
    _LOG.debug('Responding to prompt (%s): "%s"', prompt_id, response)
    with self._cond:
        if not (self._prompt and self._prompt.id == prompt_id):
            return False
        self._response = response
        self.last_response = (prompt_id, response)
        self.remove_prompt()
        # Wake all threads blocked waiting on this condition.
        self._cond.notifyAll()
    return True
Respond to the prompt with the given ID. If there is no active prompt or the given ID doesn't match the active prompt, do nothing. Args: prompt_id: A string uniquely identifying the prompt. response: A string response to the given prompt. Returns: True if the prompt with the given ID was active, otherwise False.
juraj-google-style
def _FlushExportBuffer(self, output_module, deduplicate_events=True):
    """Flushes buffered events and writes them to the output module.

    Consecutive events sharing a MACB group identifier are written
    together via WriteEventMACBGroup; others are written individually.

    Args:
        output_module (OutputModule): output module.
        deduplicate_events (Optional[bool]): True if events should be
            deduplicated.
    """
    last_macb_group_identifier = None
    last_content_identifier = None
    macb_group = []
    generator = self._export_event_heap.PopEvents()
    for (macb_group_identifier, content_identifier, event) in generator:
        if (deduplicate_events and (last_content_identifier == content_identifier)):
            # Identical consecutive content is counted and skipped.
            self._events_status.number_of_duplicate_events += 1
            continue
        if (macb_group_identifier is None):
            # Event is not part of a MACB group: flush any open group first.
            if macb_group:
                output_module.WriteEventMACBGroup(macb_group)
                macb_group = []
            output_module.WriteEvent(event)
        else:
            if ((last_macb_group_identifier == macb_group_identifier) or (not macb_group)):
                macb_group.append(event)
            else:
                # A new group starts: flush the previous one.
                output_module.WriteEventMACBGroup(macb_group)
                macb_group = [event]
            self._events_status.number_of_macb_grouped_events += 1
        last_macb_group_identifier = macb_group_identifier
        last_content_identifier = content_identifier
    if macb_group:
        output_module.WriteEventMACBGroup(macb_group)
Flushes buffered events and writes them to the output module. Args: output_module (OutputModule): output module. deduplicate_events (Optional[bool]): True if events should be deduplicated.
codesearchnet
def __init__(self, latitude, longitude, name, units='km'):
    """Initialise a new ``NumberedPoint`` object.

    Args:
        latitude (float): Location's latitude.
        longitude (float): Location's longitude.
        name (str): Location's name or command line position.
        units (str): Unit type to be used for distances.
    """
    super(NumberedPoint, self).__init__(latitude, longitude, units)
    # The name is the only attribute not handled by the base class.
    self.name = name
Initialise a new ``NumberedPoint`` object. Args: latitude (float): Location's latitude longitude (float): Location's longitude name (str): Location's name or command line position units (str): Unit type to be used for distances
juraj-google-style
def fail_request(self, orig_request, message, start_response):
    """Write an immediate failure response, no redirect.

    This calls start_response and returns the error body.

    Args:
        orig_request: An ApiRequest, the original request from the user.
        message: A string error message displayed to the user.
        start_response: A function with semantics defined in PEP-333.

    Returns:
        A string containing the body of the error response.
    """
    cors_handler = self._create_cors_handler(orig_request)
    return util.send_wsgi_error_response(message, start_response, cors_handler=cors_handler)
Write an immediate failure response to outfile, no redirect. This calls start_response and returns the error body. Args: orig_request: An ApiRequest, the original request from the user. message: A string containing the error message to be displayed to user. start_response: A function with semantics defined in PEP-333. Returns: A string containing the body of the error response.
codesearchnet
def passthrough_context_definition(context_params):
    """Create a context definition from a pre-existing context.

    Useful in testing, where a context is built manually and then passed
    into a one-off PipelineDefinition.

    Args:
        context_params (ExecutionContext): The context that will be
            provided to the pipeline.

    Returns:
        dict: the default context name mapped to a
        PipelineContextDefinition that always yields ``context_params``.
    """
    check.inst_param(context_params, 'context', ExecutionContext)
    context_definition = PipelineContextDefinition(context_fn=(lambda *_args: context_params))
    return {DEFAULT_CONTEXT_NAME: context_definition}
Create a context definition from a pre-existing context. This can be useful in testing contexts where you may want to create a context manually and then pass it into a one-off PipelineDefinition Args: context (ExecutionContext): The context that will be provided to the pipeline. Returns: PipelineContextDefinition: The passthrough context definition.
codesearchnet
def MakeStatResponse(self, tsk_file, tsk_attribute=None, append_name=None):
    """Given a TSK info object, make a StatEntry.

    TSK uses two things to uniquely identify a data stream: the inode
    object given in ``tsk_file`` and the attribute object, which may
    correspond to an ADS of the file on filesystems that support ADS.
    Both are stored in the stat response.

    Args:
        tsk_file: A TSK File object for the specified inode.
        tsk_attribute: A TSK Attribute object for the ADS. If None the
            main stream is used.
        append_name: If specified, append this name to the last element
            of the pathspec.

    Returns:
        A StatEntry which can be used to re-open this exact VFS node.
    """
    precondition.AssertOptionalType(append_name, Text)
    info = tsk_file.info
    response = rdf_client_fs.StatEntry()
    meta = info.meta
    if meta:
        response.st_ino = meta.addr
        for attribute in ['mode', 'nlink', 'uid', 'gid', 'size', 'atime', 'mtime', 'ctime', 'crtime']:
            try:
                value = int(getattr(meta, attribute))
                # Wrap negative values into the unsigned 32-bit range.
                if (value < 0):
                    value &= 4294967295
                setattr(response, ('st_%s' % attribute), value)
            except AttributeError:
                pass
    name = info.name
    child_pathspec = self.pathspec.Copy()
    if (append_name is not None):
        child_pathspec.last.path = utils.JoinPath(child_pathspec.last.path, append_name)
    child_pathspec.last.inode = meta.addr
    if (tsk_attribute is not None):
        # Record the ADS identity so this exact stream can be re-opened.
        child_pathspec.last.ntfs_type = int(tsk_attribute.info.type)
        child_pathspec.last.ntfs_id = int(tsk_attribute.info.id)
        child_pathspec.last.stream_name = tsk_attribute.info.name
        response.st_size = tsk_attribute.info.size
        default = rdf_paths.PathSpec.tsk_fs_attr_type.TSK_FS_ATTR_TYPE_DEFAULT
        last = child_pathspec.last
        if ((last.ntfs_type != default) or last.ntfs_id):
            # Non-default streams are reported as regular files.
            response.st_mode &= (~ self.stat_type_mask)
            response.st_mode |= stat.S_IFREG
    else:
        child_pathspec.last.ntfs_type = None
        child_pathspec.last.ntfs_id = None
        child_pathspec.last.stream_name = None
    if name:
        response.st_mode |= self.FILE_TYPE_LOOKUP.get(int(name.type), 0)
    if meta:
        response.st_mode |= self.META_TYPE_LOOKUP.get(int(meta.type), 0)
    response.pathspec = child_pathspec
    return response
Given a TSK info object make a StatEntry. Note that tsk uses two things to uniquely identify a data stream - the inode object given in tsk_file and the attribute object which may correspond to an ADS of this file for filesystems which support ADS. We store both of these in the stat response. Args: tsk_file: A TSK File object for the specified inode. tsk_attribute: A TSK Attribute object for the ADS. If None we use the main stream. append_name: If specified we append this name to the last element of the pathspec. Returns: A StatEntry which can be used to re-open this exact VFS node.
codesearchnet
def parse_ids(chrom, pos, ref, alt, case_id, variant_type):
    """Construct the necessary ids for a variant.

    Args:
        chrom (str): Variant chromosome.
        pos (int): Variant position.
        ref (str): Variant reference.
        alt (str): Variant alternative.
        case_id (str): Unique case id.
        variant_type (str): 'clinical' or 'research'.

    Returns:
        dict: simple_id, variant_id, display_name and document_id.
    """
    pos = str(pos)
    return {
        'simple_id': parse_simple_id(chrom, pos, ref, alt),
        'variant_id': parse_variant_id(chrom, pos, ref, alt, variant_type),
        'display_name': parse_display_name(chrom, pos, ref, alt, variant_type),
        'document_id': parse_document_id(chrom, pos, ref, alt, variant_type, case_id),
    }
Construct the necessary ids for a variant Args: chrom(str): Variant chromosome pos(int): Variant position ref(str): Variant reference alt(str): Variant alternative case_id(str): Unique case id variant_type(str): 'clinical' or 'research' Returns: ids(dict): Dictionary with the relevant ids
codesearchnet
def __init__(self, num_points):
    """Constructs a metadata for an embedding of the specified size.

    Args:
        num_points: Number of points in the embedding.
    """
    self.num_points = num_points
    # Column metadata is registered later, one column at a time.
    self.column_names = []
    self.name_to_values = {}
Constructs a metadata for an embedding of the specified size. Args: num_points: Number of points in the embedding.
juraj-google-style
def _prep_binary_content(self):
    """Choose how binary content is delivered: 'header' or 'payload'.

    Favors an explicit Content-Location header, then ``self.location``
    (promoted to a header), then ``self.data`` as a request payload.

    Args:
        None

    Returns:
        None: sets ``self.delivery`` and possibly a Content-Location header

    Raises:
        Exception: when no content source at all is available
    """
    headers = self.resource.headers
    if 'Content-Location' in headers.keys():
        logger.debug('Content-Location header found, using')
        self.delivery = 'header'
    elif self.location:
        # Promote the known location to a header-based delivery.
        headers['Content-Location'] = self.location
        self.delivery = 'header'
    elif self.data:
        if isinstance(self.data, io.BufferedIOBase):
            logger.debug('detected file-like object')
        else:
            logger.debug('detected bytes')
        self.delivery = 'payload'
    else:
        raise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')
Sets delivery method of either payload or header Favors Content-Location header if set Args: None Returns: None: sets attributes in self.binary and headers
juraj-google-style
def individuals(self, ind_ids=None):
    """Yield individuals, optionally restricted to the given ids.

    Args:
        ind_ids (list(str)): List of individual ids; falsy means all.

    Returns:
        individuals (Iterable): Iterable with Individuals
    """
    if not ind_ids:
        # No filter requested: hand back every known individual.
        yield from self.individual_objs
        return
    # Preserve the order of the requested ids, yielding every match for each.
    for wanted_id in ind_ids:
        for individual in self.individual_objs:
            if individual.ind_id == wanted_id:
                yield individual
Return information about individuals Args: ind_ids (list(str)): List of individual ids Returns: individuals (Iterable): Iterable with Individuals
juraj-google-style
def is_array(self, data_type):
    """Check whether a type name refers to a known array type.

    Args:
        data_type (str): Name of type to check, possibly with a ``[n]``
            dimension suffix and surrounding whitespace.

    Returns:
        True if ``data_type`` is a known array type.
    """
    # Drop any "[...]" dimension suffix, then normalize for the lookup.
    base_name, _, _ = data_type.partition('[')
    return base_name.strip().lower() in self.array_types
Check if a type is a known array type Args: data_type (str): Name of type to check Returns: True if ``data_type`` is a known array type.
juraj-google-style
def ParseDownloadsRow(
    self, parser_mediator, query, row, **unused_kwargs):
    """Parses a downloads row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)

    event_data = FirefoxDownloadEventData()
    event_data.query = query
    # Straight copies from row columns to event data attributes.
    for attribute_name, column_name in (
        ('full_path', 'target'),
        ('mime_type', 'mimeType'),
        ('name', 'name'),
        ('offset', 'id'),
        ('received_bytes', 'currBytes'),
        ('referrer', 'referrer'),
        ('temporary_location', 'tempPath'),
        ('total_bytes', 'maxBytes'),
        ('url', 'source')):
        setattr(
            event_data, attribute_name,
            self._GetRowValue(query_hash, row, column_name))

    # Emit one event per populated timestamp column, start before end.
    for column_name, timestamp_description in (
        ('startTime', definitions.TIME_DESCRIPTION_START),
        ('endTime', definitions.TIME_DESCRIPTION_END)):
        timestamp = self._GetRowValue(query_hash, row, column_name)
        if timestamp:
            date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
                timestamp=timestamp)
            event = time_events.DateTimeValuesEvent(
                date_time, timestamp_description)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a downloads row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def _parse_username(self, config):
    """Scans the config block and returns the username as a dict

    Args:
        config (str): The config block to parse

    Returns:
        dict: A resource dict that is intended to be merged into the
            user resource
    """
    (username, priv, role, nopass, fmt, secret, sshkey) = config
    # Map the parsed fields straight into the resource shape.
    return {
        username: {
            'privilege': priv,
            'role': role,
            'nopassword': nopass == 'nopassword',
            'format': fmt,
            'secret': secret,
            'sshkey': sshkey,
        }
    }
Scans the config block and returns the username as a dict Args: config (str): The config block to parse Returns: dict: A resource dict that is intended to be merged into the user resource
codesearchnet
def block(self, **kwargs):
    """Block the user.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabBlockError: If the user could not be blocked

    Returns:
        bool: Whether the user status has been changed
    """
    endpoint = '/users/%s/block' % self.id
    result = self.manager.gitlab.http_post(endpoint, **kwargs)
    # The server answers True on success; only then record the new state
    # locally so the cached object stays consistent.
    if result is True:
        self._attrs['state'] = 'blocked'
    return result
Block the user. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabBlockError: If the user could not be blocked Returns: bool: Whether the user status has been changed
juraj-google-style
def _set_spawn_exe_path():
    """Set the path to the executable for spawned processes.

    This utility searches for the binary the parent process is using, and
    sets the executable of multiprocessing's context accordingly.

    Raises:
        RuntimeError: If the binary path cannot be determined.
    """
    # Only rewrite argv[0] when running as a .py script (e.g. under bazel);
    # a real binary path can be used by spawn directly.
    if sys.argv[0].endswith('.py'):
        def guess_path(package_root):
            # Derive the test binary path from bazel's output layout and the
            # TEST_TARGET env var ("//pkg:target" -> "pkg/target").
            if 'bazel-out' in sys.argv[0] and package_root in sys.argv[0]:
                package_root_base = sys.argv[0][:sys.argv[0].rfind(package_root)]
                binary = os.environ['TEST_TARGET'][2:].replace(':', '/', 1)
                possible_path = os.path.join(package_root_base, package_root, binary)
                logging.info('Guessed test binary path: %s', possible_path)
                # Only accept the guess if it is actually executable.
                if os.access(possible_path, os.X_OK):
                    return possible_path
                return None
            # NOTE: falls through (implicit None) when argv[0] is not a
            # bazel-out path for this package root.
        # Try the known workspace roots in order.
        path = guess_path('org_tensorflow')
        if not path:
            path = guess_path('org_keras')
        if path is None:
            logging.error('Cannot determine binary path. sys.argv[0]=%s os.environ=%s', sys.argv[0], os.environ)
            raise RuntimeError('Cannot determine binary path')
        sys.argv[0] = path
    # Tell multiprocessing's spawn context which executable to launch.
    multiprocessing.get_context().set_executable(sys.argv[0])
Set the path to the executable for spawned processes. This utility searches for the binary the parent process is using, and sets the executable of multiprocessing's context accordingly. Raises: RuntimeError: If the binary path cannot be determined.
github-repos
def gumbel_sample(shape):
    """Sample from the Gumbel distribution, protect from overflows.

    Args:
        shape: Shape of Gumbel samples.

    Returns:
        Noise drawn from Gumbel distribution.
    """
    # Keep the uniform draw strictly inside (0, 1) so neither log() blows up.
    uniform_noise = tf.random_uniform(shape, minval=0.00001, maxval=0.99998)
    gumbel_noise = -tf.log(-tf.log(uniform_noise))
    return gumbel_noise
Sample from the Gumbel distribution, protect from overflows. Args: shape: Shape of Gumbel samples. Returns: Noise drawn from Gumbel distribution.
juraj-google-style
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # Each stub below is a unary-unary RPC bound to a ResourceMonitor method,
    # pairing the request serializer with the matching response deserializer.
    self.Invoke = channel.unary_unary(
        '/pulumirpc.ResourceMonitor/Invoke',
        request_serializer=provider__pb2.InvokeRequest.SerializeToString,
        response_deserializer=provider__pb2.InvokeResponse.FromString,
        )
    self.ReadResource = channel.unary_unary(
        '/pulumirpc.ResourceMonitor/ReadResource',
        request_serializer=resource__pb2.ReadResourceRequest.SerializeToString,
        response_deserializer=resource__pb2.ReadResourceResponse.FromString,
        )
    self.RegisterResource = channel.unary_unary(
        '/pulumirpc.ResourceMonitor/RegisterResource',
        request_serializer=resource__pb2.RegisterResourceRequest.SerializeToString,
        response_deserializer=resource__pb2.RegisterResourceResponse.FromString,
        )
    # RegisterResourceOutputs returns no payload, hence the Empty response.
    self.RegisterResourceOutputs = channel.unary_unary(
        '/pulumirpc.ResourceMonitor/RegisterResourceOutputs',
        request_serializer=resource__pb2.RegisterResourceOutputsRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def _add_sv_coordinates(self, variant):
    """Add the necessary SV coordinates for a variant.

    Sets start/stop chromosome, length, and cytoband coordinates. For
    breakend (BND) ALTs of the form ``N[chr2:321681[`` the mate chromosome
    and an infinite length are recorded instead.

    Args:
        variant (puzzle.models.variant)
    """
    variant.stop_chrom = variant.CHROM
    variant.start = int(variant.POS)

    if ':' in variant.ALT:
        # Breakend notation: strip the flanking bases and brackets, leaving
        # "mate_chrom:mate_pos".
        other_coordinates = variant.ALT.strip('ACGTN[]').split(':')
        # BUG FIX: the previous code used lstrip('chrCHR'), which strips any
        # run of the characters c/h/r (either case) rather than a literal
        # "chr" prefix -- mangling mate chromosome names such as "hr1".
        # Remove only a leading case-insensitive "chr".
        mate_chrom = other_coordinates[0]
        if mate_chrom.lower().startswith('chr'):
            mate_chrom = mate_chrom[3:]
        variant.stop_chrom = mate_chrom

        # Inter-chromosomal breakends have no meaningful length.
        variant.sv_len = float('inf')
        variant.sv_type = 'BND'
    else:
        variant.sv_len = variant.stop - variant.start

    variant['cytoband_start'] = get_cytoband_coord(
        chrom=variant.CHROM, pos=variant.start
    )
    variant['cytoband_stop'] = get_cytoband_coord(
        chrom=variant.stop_chrom, pos=variant.stop
    )
Add the neccesary sv coordinates for a variant Args: variant (puzzle.models.variant)
juraj-google-style
def __call__(self, fn):
    """Implement __call__ function for decorator.

    Args:
        fn (function): The decorated function.

    Returns:
        function: The custom decorator function.
    """

    def fail(app, *args, **kwargs):
        data = fn(app, *args, **kwargs)
        # Resolve the enablement flag: either a literal bool or the name of
        # an App argument to read it from.
        if isinstance(self.enable, bool):
            enabled = self.enable
            app.tcex.log.debug('Fail on output is ({}).'.format(self.enable))
        else:
            enabled = getattr(app.args, self.enable)
            app.tcex.log.debug('Fail on output is ({}) for ({}).'.format(enabled, self.enable))
        if not isinstance(enabled, bool):
            app.tcex.playbook.exit(
                1, 'The enable value must be a boolean for fail on output.'
            )

        if enabled is True:
            # Normalize to a list so single values and lists share one check.
            outputs = data if isinstance(data, list) else [data]
            if any(value in self.values for value in outputs):
                app.tcex.exit(1, self.msg)
        return data

    return fail
Implement __call__ function for decorator. Args: fn (function): The decorated function. Returns: function: The custom decorator function.
juraj-google-style
def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
    """Gets good old csv data from a file.

    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        encoding: Loads the file with the specified cell encoding.
        file_contents: The file-like object holding contents of file_name.
            If left as None, then file_name is directly loaded.
        on_demand: Requests that a yielder be used in place of a full data
            copy.

    Returns:
        A single-element list containing the table: a list of rows, or a
        row generator when ``on_demand`` is True.
    """
    def yield_csv(csv_contents, csv_file):
        # Stream rows and guarantee the underlying file is closed once the
        # generator is exhausted (or garbage collected).
        try:
            for line in csv_contents:
                yield line
        finally:
            try:
                csv_file.close()
            # BUG FIX: was a bare ``except:`` which also swallows
            # KeyboardInterrupt/SystemExit; close failures are still ignored.
            except Exception:
                pass

    if file_contents:
        csv_file = BytesIO(file_contents)
    else:
        csv_file = open(file_name, 'rb')

    # NOTE(review): ``csv.reader`` here accepts an ``encoding`` keyword, so
    # this module presumably imports the third-party ``unicodecsv`` package
    # (the stdlib csv.reader has no such parameter) -- confirm the imports.
    reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)

    if on_demand:
        table = yield_csv(reader, csv_file)
    else:
        # Materialize eagerly; closing happens inside the generator's finally.
        table = list(yield_csv(reader, csv_file))
    return [table]
Gets good old csv data from a file. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. encoding: Loads the file with the specified cell encoding. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy.
juraj-google-style
def __init__(self, direct_subclasses=None, any_also_is_bottom=True):
    """Construct.

    Args:
        direct_subclasses: A dictionary, mapping pytd.Type to lists of
            pytd.Type.
        any_also_is_bottom: Whether we should, (if True) consider
            pytd.AnythingType() to also be at the bottom of the type
            hierarchy, thus making it a subclass of everything, or (if
            False) to be only at the top.
    """
    self.any_also_is_bottom = any_also_is_bottom
    # Fall back to an empty mapping when no (or a falsy) mapping is given.
    self.direct_subclasses = direct_subclasses or {}
    # Boolean-equation solver plus a cache of already-derived implications.
    self.solver = booleq.Solver()
    self._implications = {}
Construct. Args: direct_subclasses: A dictionary, mapping pytd.Type to lists of pytd.Type. any_also_is_bottom: Whether we should, (if True) consider pytd.AnythingType() to also be at the bottom of the type hierarchy, thus making it a subclass of everything, or (if False) to be only at the top.
github-repos
def set_scheduler(self, host, username='root', password=None, private_key=None, private_key_pass=None):
    """Defines the remote scheduler

    Args:
        host (str): the hostname or ip address of the remote scheduler
        username (str, optional): the username used to connect to the
            remote scheduler. Default is 'root'
        password (str, optional): the password for username on the remote
            scheduler. Either the password or the private_key must be
            defined. Default is None.
        private_key (str, optional): the path to the private ssh key used
            to connect to the remote scheduler. Either the password or the
            private_key must be defined. Default is None.
        private_key_pass (str, optional): the passphrase for the
            private_key. Default is None.
    """
    # A fresh opaque id identifies this scheduler connection.
    self._remote_id = uuid.uuid4().hex
    self._remote = RemoteClient(host, username, password, private_key, private_key_pass)
Defines the remote scheduler

Args:
    host (str): the hostname or ip address of the remote scheduler
    username (str, optional): the username used to connect to the remote
        scheduler. Default is 'root'.
    password (str, optional): the password for username on the remote
        scheduler. Either the password or the private_key must be
        defined. Default is None.
    private_key (str, optional): the path to the private ssh key used to
        connect to the remote scheduler. Either the password or the
        private_key must be defined. Default is None.
    private_key_pass (str, optional): the passphrase for the private_key.
        Default is None.

Returns:
    None. The connection to the remote scheduler is stored on the
    instance as a RemoteClient.
codesearchnet
def stage_tc_associations(self, entity1, entity2):
    """Associate two ThreatConnect resources.

    Args:
        entity1 (str): A Redis variable containing a TCEntity.
        entity2 (str): A Redis variable containing a TCEntity.
    """
    entity1 = self.tcex.playbook.read(entity1)
    entity1_id = entity1.get('id')
    entity1_owner = entity1.get('ownerName')
    entity1_type = entity1.get('type')
    # Indicators are addressed by value rather than numeric id.
    if entity1.get('type') in self.tcex.indicator_types:
        entity1_id = entity1.get('value')

    entity2 = self.tcex.playbook.read(entity2)
    entity2_id = entity2.get('id')
    # BUG FIX: this previously read the owner from entity1 (copy-paste),
    # which made the cross-owner guard below compare entity1 with itself
    # and therefore never fire.
    entity2_owner = entity2.get('ownerName')
    entity2_type = entity2.get('type')
    if entity2.get('type') in self.tcex.indicator_types:
        entity2_id = entity2.get('value')

    # Associations are only valid within a single owner.
    if entity1_owner != entity2_owner:
        self.log.error('[stage] Can not associate resource across owners.')
        return

    resource1 = self.tcex.resource(entity1_type)
    resource1.http_method = 'POST'
    resource1.owner = entity1_owner
    resource1.resource_id(entity1_id)

    resource2 = self.tcex.resource(entity2_type)
    resource2.resource_id(entity2_id)

    a_resource = resource1.associations(resource2)
    response = a_resource.request()
    if response.get('status') != 'Success':
        self.log.warning(
            '[stage] Failed associating "{}:{}" with "{}:{}" ({}).'.format(
                entity1_type,
                entity1_id,
                entity2_type,
                entity2_id,
                response.get('response').text,
            )
        )
Add an attribute to a resource. Args: entity1 (str): A Redis variable containing a TCEntity. entity2 (str): A Redis variable containing a TCEntity.
juraj-google-style
def RegisterDefinition(self, data_type_definition):
    """Registers a data type definition.

    The data type definitions are identified based on their lower case name.

    Args:
        data_type_definition (DataTypeDefinition): data type definitions.

    Raises:
        KeyError: if data type definition is already set for the
            corresponding name.
    """
    lookup_name = data_type_definition.name.lower()
    if lookup_name in self._definitions:
        raise KeyError('Definition already set for name: {0:s}.'.format(
            data_type_definition.name))

    # The definition's own name must not collide with an alias, nor may any
    # of its aliases already be registered (checked in declaration order).
    for candidate in [data_type_definition.name] + list(data_type_definition.aliases):
        if candidate in self._aliases:
            raise KeyError('Alias already set for name: {0:s}.'.format(candidate))

    self._definitions[lookup_name] = data_type_definition
    for alias in data_type_definition.aliases:
        self._aliases[alias] = lookup_name

    # Track format definitions separately for later enumeration.
    if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
        self._format_definitions.append(lookup_name)
Registers a data type definition. The data type definitions are identified based on their lower case name. Args: data_type_definition (DataTypeDefinition): data type definitions. Raises: KeyError: if data type definition is already set for the corresponding name.
juraj-google-style