code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __init__(self, device_name, node_exec_stats, file_path, line_number, func_name, op_type):
    """Constructor.

    Args:
        device_name: (string) name of the device.
        node_exec_stats: `NodeExecStats` proto.
        file_path: path to the source file involved in creating the op.
        line_number: line number in the file involved in creating the op.
        func_name: name of the function that the line belongs to.
        op_type: (string) Operation type.
    """
    self.device_name = device_name
    self.node_exec_stats = node_exec_stats
    self.file_path = file_path
    self.line_number = line_number
    self.func_name = func_name
    self.op_type = op_type
    # Human-readable "file:line(func)" label; empty when no file is known.
    if file_path:
        basename = os.path.basename(file_path)
        self.file_line_func = '%s:%d(%s)' % (basename, line_number, func_name)
    else:
        self.file_line_func = ''
    stats = node_exec_stats
    self.start_time = stats.all_start_micros
    # Duration of the op itself, relative start to relative end (micros).
    self.op_time = stats.op_end_rel_micros - stats.op_start_rel_micros
Constructor. Args: device_name: (string) name of the device. node_exec_stats: `NodeExecStats` proto. file_path: path to the source file involved in creating the op. line_number: line number in the file involved in creating the op. func_name: name of the function that the line belongs to. op_type: (string) Operation type.
github-repos
def deepnn(x):
    """deepnn builds the graph for a deep net for classifying digits.

    Args:
        x: an input tensor with the dimensions (N_examples, 784), where 784 is
            the number of pixels in a standard MNIST image.

    Returns:
        A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with
        values equal to the logits of classifying the digit into one of 10
        classes (the digits 0-9). keep_prob is a scalar placeholder for the
        probability of dropout.
    """
    # Reshape flat pixel vectors into 4-D: [batch, height, width, channels].
    with tf.name_scope("reshape"):
        image = tf.reshape(x, [-1, 28, 28, 1])

    # First conv layer: 1 input channel -> 32 feature maps.
    with tf.name_scope("conv1"):
        weights1 = weight_variable([5, 5, 1, 32])
        biases1 = bias_variable([32])
        conv1 = tf.nn.relu(conv2d(image, weights1) + biases1)

    with tf.name_scope("pool1"):
        pool1 = max_pool_2x2(conv1)

    # Second conv layer: 32 -> 64 feature maps.
    with tf.name_scope("conv2"):
        weights2 = weight_variable([5, 5, 32, 64])
        biases2 = bias_variable([64])
        conv2 = tf.nn.relu(conv2d(pool1, weights2) + biases2)

    with tf.name_scope("pool2"):
        pool2 = max_pool_2x2(conv2)

    # Fully connected layer; two rounds of 2x2 pooling leave 7x7x64 features.
    with tf.name_scope("fc1"):
        fc1_weights = weight_variable([7 * 7 * 64, 1024])
        fc1_biases = bias_variable([1024])
        flattened = tf.reshape(pool2, [-1, 7 * 7 * 64])
        fc1 = tf.nn.relu(tf.matmul(flattened, fc1_weights) + fc1_biases)

    # Dropout, controlled at run time through a placeholder.
    with tf.name_scope("dropout"):
        keep_prob = tf.placeholder(tf.float32)
        fc1_dropped = tf.nn.dropout(fc1, keep_prob)

    # Readout layer: 1024 features -> 10 class logits.
    with tf.name_scope("fc2"):
        fc2_weights = weight_variable([1024, 10])
        fc2_biases = bias_variable([10])
        logits = tf.matmul(fc1_dropped, fc2_weights) + fc2_biases

    return logits, keep_prob
deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784), where 784 is the number of pixels in a standard MNIST image. Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout.
juraj-google-style
def set_default(self, name, value):
    """Changes the default value of the named flag object.

    The flag's current value is also updated if the flag is currently using
    the default value, i.e. not specified in the command line, and not set
    by FLAGS.name = value.

    Args:
        name: str, the name of the flag to modify.
        value: The new default value.

    Raises:
        UnrecognizedFlagError: Raised when there is no registered flag named
            name.
        IllegalFlagValueError: Raised when value is not valid.
    """
    flags_dict = self._flags()
    if name not in flags_dict:
        # Unknown flag: delegate so the appropriate error/handling applies.
        self._set_unknown_flag(name, value)
        return
    flag = flags_dict[name]
    flag._set_default(value)
    # Re-run validators since the effective value may have changed.
    self._assert_validators(flag.validators)
Changes the default value of the named flag object. The flag's current value is also updated if the flag is currently using the default value, i.e. not specified in the command line, and not set by FLAGS.name = value. Args: name: str, the name of the flag to modify. value: The new default value. Raises: UnrecognizedFlagError: Raised when there is no registered flag named name. IllegalFlagValueError: Raised when value is not valid.
juraj-google-style
def copy(self) -> 'TraceableStack[T]':
    """Return a copy of self referencing the same objects but in a new list.

    This method is implemented to support thread-local stacks.

    Returns:
        TraceableStack with a new list that holds existing objects.
    """
    # Presumably the TraceableStack constructor shallow-copies the given
    # list into a fresh one -- confirm against the class definition.
    return TraceableStack(self._stack)
Return a copy of self referencing the same objects but in a new list. This method is implemented to support thread-local stacks. Returns: TraceableStack with a new list that holds existing objects.
github-repos
def analyze(model_path=None, model_content=None, gpu_compatibility=False, **kwargs):
    """Analyzes the given tflite_model with dumping model structure.

    This tool provides a way to understand users' TFLite flatbuffer model by
    dumping internal graph structure. It also provides additional features
    like checking GPU delegate compatibility.

    WARNING: Experimental interface, subject to change. The output format is
    not guaranteed to stay stable, so don't write scripts to this.

    Args:
        model_path: TFLite flatbuffer model path.
        model_content: TFLite flatbuffer model object.
        gpu_compatibility: Whether to check GPU delegate compatibility.
        **kwargs: Experimental keyword arguments to analyze API.

    Returns:
        Print analyzed report via console output.
    """
    if not model_path and not model_content:
        raise ValueError('neither `model_path` nor `model_content` is provided')
    # Select the input source; the wrappers accept either a path or raw bytes.
    if model_path:
        print(f'=== {model_path} ===\n')
        tflite_model, input_is_filepath = model_path, True
    else:
        print('=== TFLite ModelAnalyzer ===\n')
        tflite_model, input_is_filepath = model_content, False
    use_mlir = kwargs.get('experimental_use_mlir', False)
    if use_mlir:
        print(wrap_converter.wrapped_flat_buffer_file_to_mlir(tflite_model, input_is_filepath))
    else:
        print(_analyzer_wrapper.ModelAnalyzer(tflite_model, input_is_filepath, gpu_compatibility))
Analyzes the given tflite_model with dumping model structure. This tool provides a way to understand users' TFLite flatbuffer model by dumping internal graph structure. It also provides additional features like checking GPU delegate compatibility. WARNING: Experimental interface, subject to change. The output format is not guaranteed to stay stable, so don't write scripts to this. Args: model_path: TFLite flatbuffer model path. model_content: TFLite flatbuffer model object. gpu_compatibility: Whether to check GPU delegate compatibility. **kwargs: Experimental keyword arguments to analyze API. Returns: Print analyzed report via console output.
github-repos
def flush_all(self, delay=0, noreply=None):
    """The memcached "flush_all" command.

    Args:
        delay: optional int, the number of seconds to wait before flushing,
            or zero to flush immediately (the default).
        noreply: optional bool, True to not wait for the reply (defaults to
            self.default_noreply).

    Returns:
        True.
    """
    if noreply is None:
        noreply = self.default_noreply
    # str(delay) produces the same ASCII digits six.text_type did; the six
    # dependency is unnecessary on Python 3.
    cmd = b'flush_all ' + str(delay).encode('ascii')
    if noreply:
        cmd += b' noreply'
    cmd += b'\r\n'
    results = self._misc_cmd([cmd], b'flush_all', noreply)
    if noreply:
        return True
    return results[0] == b'OK'
The memcached "flush_all" command. Args: delay: optional int, the number of seconds to wait before flushing, or zero to flush immediately (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
codesearchnet
def _location_infos_equal(left, right):
    """Return True if LocationInfo objects are equivalent for the SQL backend.

    LocationInfo objects are considered equal for the SQL backend iff the
    optional scopes depth, recursive scopes depth, types and parent query
    paths are equal.

    Args:
        left: LocationInfo, left location info object to compare.
        right: LocationInfo, right location info object to compare.

    Returns:
        bool, True if LocationInfo objects equivalent, False otherwise.
    """
    if not (isinstance(left, LocationInfo) and isinstance(right, LocationInfo)):
        raise AssertionError(u'Unsupported LocationInfo comparison between types {} and {} with values {}, {}'.format(type(left), type(right), left, right))
    # Parents match when both are absent or their query paths agree.
    parents_equal = (
        (left.parent_location is None and right.parent_location is None)
        or left.parent_location.query_path == right.parent_location.query_path
    )
    return (
        left.optional_scopes_depth == right.optional_scopes_depth
        and parents_equal
        and left.recursive_scopes_depth == right.recursive_scopes_depth
        and left.type == right.type
    )
Return True if LocationInfo objects are equivalent for the SQL backend, False otherwise. LocationInfo objects are considered equal for the SQL backend iff the optional scopes depth, recursive scopes depth, types and parent query paths are equal. Args: left: LocationInfo, left location info object to compare. right: LocationInfo, right location info object to compare. Returns: bool, True if LocationInfo objects equivalent, False otherwise.
codesearchnet
def get_tag(self, tag_name, **kwargs):
    """Get a tag by name.

    Args:
        tag_name (string): name of tag to get.
        **kwargs: extra options forwarded to the underlying lookup.

    Returns:
        dictionary of the response.
    """
    # Thin wrapper over the generic name-based lookup on the tag endpoint.
    return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX, tag_name, **kwargs)
get a tag by name Args: tag_name (string): name of tag to get Returns: dictionary of the response
codesearchnet
def get(cls, ns, key):
    """Fetch an item by namespace and key.

    Args:
        ns (str): Namespace prefix.
        key (str): Item key.

    Returns:
        :obj:`ConfigItem`: Config item object if found, else `None`.
    """
    # `db` exposes one accessor per model class name; find the single item
    # matching both the namespace prefix and the key.
    return getattr(db, cls.__name__).find_one(
        ConfigItem.namespace_prefix == ns,
        ConfigItem.key == key
    )
Fetch an item by namespace and key Args: ns (str): Namespace prefix key (str): Item key Returns: :obj:`ConfigItem`: Returns config item object if found, else `None`
juraj-google-style
def _check_wires_list(self, wires, node):
    """Check that a list of wires is compatible with a node to be replaced.

    - no duplicate names
    - correct length for operation

    Raise an exception otherwise.

    Args:
        wires (list[register, index]): gives an order for (qu)bits in the
            input circuit that is replacing the node.
        node (DAGNode): a node in the dag.

    Raises:
        DAGCircuitError: if check doesn't pass.
    """
    if len(wires) != len(set(wires)):
        raise DAGCircuitError('duplicate wires')
    # Expected wire count: quantum args + classical args, plus the bits of
    # the condition register when the node is conditioned.
    expected = len(node.qargs) + len(node.cargs)
    if node.condition is not None:
        expected += node.condition[0].size
    if len(wires) != expected:
        raise DAGCircuitError('expected %d wires, got %d' % (expected, len(wires)))
Check that a list of wires is compatible with a node to be replaced. - no duplicate names - correct length for operation Raise an exception otherwise. Args: wires (list[register, index]): gives an order for (qu)bits in the input circuit that is replacing the node. node (DAGNode): a node in the dag Raises: DAGCircuitError: if check doesn't pass.
codesearchnet
def move(self, delta):
    """Move the node.

    Args:
        delta (tupel): A tupel, holding the adjustment of the position.
    """
    # Translate the current position component-wise by delta.
    current_x, current_y = self.pos[0], self.pos[1]
    self.pos = (current_x + delta[0], current_y + delta[1])
Move the node. Args: delta (tupel): A tupel, holding the adjustment of the position.
codesearchnet
def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.LongTensor]=None) -> Tuple:
    """Compute router probabilities and the top-1 expert mask per token.

    The hidden states are flattened to (batch*seq, dim) to simplify the
    computation of the router probabilities (combining weights for each
    expert).

    Args:
        hidden_states (`torch.Tensor`): (batch_size, sequence_length,
            hidden_dim) from which router probabilities are computed.
        padding_mask (`torch.LongTensor`, optional): mask forwarded to the
            routing function.

    Returns:
        Tuple of (top_1_mask, router_probs) as produced by `route_tokens`.
    """
    # Remember the incoming dtype so routing can cast results back.
    self.input_dtype = hidden_states.dtype
    batch_size, seq_len, hidden_dim = hidden_states.shape
    flat_states = hidden_states.reshape(batch_size * seq_len, hidden_dim).to(self.dtype)
    self._cast_classifier()
    router_logits = self.classifier(flat_states)
    top_1_mask, router_probs = self.route_tokens(router_logits, self.input_dtype, padding_mask)
    return top_1_mask, router_probs
The hidden states are reshaped to simplify the computation of the router probabilities (combining weights for each experts.) Args: hidden_states (`torch.Tensor`): (batch_size, sequence_length, hidden_dim) from which router probabilities are computed. Returns: top_1_mask (`torch.Tensor` of shape (batch_size, sequence_length)): Index tensor of shape [batch_size, sequence_length] corresponding to the expert selected for each token using the top1 probabilities of the router. router_probabilities (`torch.Tensor` of shape (batch_size, sequence_length, nump_experts)): Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each token and expert. Used for routing tokens to experts. router_logits (`torch.Tensor` of shape (batch_size, sequence_length))): Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits. This is used later for computing router z-loss.
github-repos
def DeserializeFromBufer(buffer, offset=0):
    """Deserialize object instance from the specified buffer.

    Args:
        buffer (bytes, bytearray, BytesIO): data to create the stream from.
        offset: UNUSED.

    Returns:
        Transaction: the deserialized transaction.
    """
    mstream = StreamManager.GetStream(buffer)
    try:
        reader = BinaryReader(mstream)
        return Transaction.DeserializeFrom(reader)
    finally:
        # BUG FIX: release the pooled stream even when deserialization
        # raises; previously it leaked on any exception.
        StreamManager.ReleaseStream(mstream)
Deserialize object instance from the specified buffer. Args: buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from. offset: UNUSED Returns: Transaction:
codesearchnet
def _validate_query(query):
    """Validate and clean up a query to be sent to Search.

    Cleans the query string, removes unneeded parameters, and validates for
    correctness. Does not modify the original argument. Raises an Exception
    on invalid input.

    Arguments:
        query (dict): The query to validate.

    Returns:
        dict: The validated query.
    """
    query = deepcopy(query)
    if query["q"] == BLANK_QUERY["q"]:
        raise ValueError("No query specified.")
    query["q"] = _clean_query_string(query["q"])
    # Fill in or cap the result limit.
    if query["limit"] is None:
        query["limit"] = SEARCH_LIMIT if query["advanced"] else NONADVANCED_LIMIT
    elif query["limit"] > SEARCH_LIMIT:
        warnings.warn('Reduced result limit from {} to the Search maximum: {}'.format(query["limit"], SEARCH_LIMIT), RuntimeWarning)
        query["limit"] = SEARCH_LIMIT
    # Drop fields still at their blank defaults; NaN never compares equal,
    # so missing keys are left alone.
    sentinel = float('nan')
    for key, default in BLANK_QUERY.items():
        if query.get(key, sentinel) == default:
            del query[key]
    # Drop any field that is not part of the known query schema.
    for field in [f for f in query if f not in BLANK_QUERY]:
        del query[field]
    return query
Validate and clean up a query to be sent to Search. Cleans the query string, removes unneeded parameters, and validates for correctness. Does not modify the original argument. Raises an Exception on invalid input. Arguments: query (dict): The query to validate. Returns: dict: The validated query.
juraj-google-style
def OpenFileEntry(cls, path_spec_object, resolver_context=None):
    """Opens a file entry object defined by path specification.

    Args:
        path_spec_object (PathSpec): path specification.
        resolver_context (Optional[Context]): resolver context, where None
            represents the built in context which is not multi process safe.

    Returns:
        FileEntry: file entry or None if the path specification could not be
            resolved.
    """
    file_system = cls.OpenFileSystem(
        path_spec_object, resolver_context=resolver_context)
    # Fall back to the class-wide context when none was given explicitly.
    context = cls._resolver_context if resolver_context is None else resolver_context
    file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)
    # Release our reference to the file system before returning the entry.
    context.ReleaseFileSystem(file_system)
    return file_entry
Opens a file entry object defined by path specification. Args: path_spec_object (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built in context which is not multi process safe. Returns: FileEntry: file entry or None if the path specification could not be resolved.
juraj-google-style
def pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
    """Plot pie chart for this timer.

    Args:
        key: Keyword used to extract data from the timer.
        minfract: Don't show sections whose relative weight is less than
            minfract.
        ax: matplotlib :class:`Axes` or None if a new figure should be
            created.

    Returns:
        `matplotlib` figure.
    """
    ax, fig, plt = get_ax_fig_plt(ax=ax)
    # Equal aspect so the pie renders as a circle.
    ax.axis("equal")
    labels, values = self.names_and_values(key, minfract=minfract)
    ax.pie(values, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)
    return fig
Plot pie chart for this timer. Args: key: Keyword used to extract data from the timer. minfract: Don't show sections whose relative weight is less than minfract. ax: matplotlib :class:`Axes` or None if a new figure should be created. Returns: `matplotlib` figure
juraj-google-style
def read_handle(url, cache=None, mode='rb'):
    """Read from any URL with a file handle.

    Use this to get a handle to a file rather than eagerly load the data:

    ```
    with read_handle(url) as handle:
        result = something.load(handle)
    ```

    When program execution leaves this `with` block, the handle will be
    closed automatically.

    Args:
        url: a URL including scheme or a local path.
        cache: 'purge' to drop a cached copy; None auto-enables caching for
            remote resources.
        mode: file mode passed through to the underlying open call.

    Returns:
        A file handle to the specified resource if it could be reached.
    """
    scheme = urlparse(url).scheme
    if cache == 'purge':
        _purge_cached(url)
        cache = None
    if _is_remote(scheme) and cache is None:
        cache = True
        log.debug('Cache not specified, enabling because resource is remote.')
    if cache:
        handle = _read_and_cache(url, mode=mode)
    elif scheme in ('http', 'https'):
        handle = _handle_web_url(url, mode=mode)
    elif scheme == 'gs':
        # BUG FIX: was `scheme in 'gs'`, a substring test that also matched
        # '' (plain local paths) and 's'; only real gs:// URLs belong here.
        handle = _handle_gfile(url, mode=mode)
    else:
        handle = open(url, mode=mode)
    try:
        yield handle
    finally:
        # BUG FIX: close the handle even if the caller's `with` body raises.
        handle.close()
Read from any URL with a file handle. Use this to get a handle to a file rather than eagerly load the data: ``` with read_handle(url) as handle: result = something.load(handle) result.do_something() ``` When program execution leaves this `with` block, the handle will be closed automatically. Args: url: a URL including scheme or a local path Returns: A file handle to the specified resource if it could be reached. The handle will be closed automatically once execution leaves this context.
codesearchnet
def delete_interconnect(self, enclosure_uri, bay, timeout=-1):
    """Deletes an interconnect from a location.

    Warning:
        This won't delete the LOGICAL INTERCONNECT itself and might cause
        inconsistency between the enclosure and Logical Interconnect Group.

    Args:
        enclosure_uri: URI of the Enclosure.
        bay: Bay.
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.

    Returns:
        bool: Indicating if the interconnect was successfully deleted.
    """
    # Address the interconnect by its enclosure/bay location query string.
    location_uri = '{path}?location=Enclosure:{enclosure_uri},Bay:{bay}'.format(
        path=self.LOCATIONS_PATH, enclosure_uri=enclosure_uri, bay=bay)
    return self._helper.delete(location_uri, timeout=timeout)
Deletes an interconnect from a location. Warning: This won't delete the LOGICAL INTERCONNECT itself and might cause inconsistency between the enclosure and Logical Interconnect Group. Args: enclosure_uri: URI of the Enclosure bay: Bay timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: bool: Indicating if the interconnect was successfully deleted.
codesearchnet
def get_subgraph_for_concept_pairs(
    self, concepts: List[str], cutoff: Optional[int] = None
):
    """Get subgraph comprised of simple paths between the source and the target.

    Args:
        concepts: concept (node) names; every ordered pair is connected.
        cutoff: maximum path length to consider, or None for no limit.

    Returns:
        AnalysisGraph induced by all nodes lying on any such simple path.
    """
    nodes_on_paths = set()
    # Collect every node appearing on a simple path between any ordered
    # pair of the given concepts.
    for source, target in permutations(concepts, 2):
        for path in nx.all_simple_paths(self, source, target, cutoff=cutoff):
            nodes_on_paths.update(path)
    return AnalysisGraph(self.subgraph(nodes_on_paths))
Get subgraph comprised of simple paths between the source and the target. Args: concepts cutoff
juraj-google-style
def get_hist(self, observable: Any, **kwargs: Dict[str, Any]) -> Any:
    """Get the histogram that may be stored in some object.

    This histogram is used to project from.

    Note:
        The output object could just be the raw ROOT histogram.

    Note:
        This function is just a basic placeholder and likely should be
        overridden.

    Args:
        observable (object): The input object. It could be a histogram or
            something more complex.
        kwargs: Additional arguments passed to the projection function.

    Return:
        ROOT.TH1 or ROOT.THnBase histogram which should be projected. By
        default, it returns the observable (input object).
    """
    # Identity placeholder: subclasses extract the histogram as needed.
    return observable
Get the histogram that may be stored in some object. This histogram is used to project from. Note: The output object could just be the raw ROOT histogram. Note: This function is just a basic placeholder and likely should be overridden. Args: observable (object): The input object. It could be a histogram or something more complex kwargs: Additional arguments passed to the projection function Return: ROOT.TH1 or ROOT.THnBase histogram which should be projected. By default, it returns the observable (input object).
codesearchnet
def slice_begin(self, tensor_shape, pnum):
    """Begin position for the tensor slice for the given processor.

    Args:
        tensor_shape: Shape.
        pnum: int <= self.size.

    Returns:
        list of integers with length tensor_shape.ndims.
    """
    tensor_layout = self.tensor_layout(tensor_shape)
    coordinates = pnum_to_processor_coordinates(self.shape, pnum)
    ret = []
    for dim_size, mesh_axis in zip(tensor_shape.to_integer_list,
                                   tensor_layout.tensor_axis_to_mesh_axis):
        if mesh_axis is None:
            # Unsplit dimension: every processor starts at 0.
            ret.append(0)
        else:
            # NOTE(review): the original expression was truncated in this
            # source; reconstructed as slice-size * processor-coordinate.
            ret.append(dim_size // self.shape[mesh_axis].size * coordinates[mesh_axis])
    return ret
Begin position for the tensor slice for the given processor. Args: tensor_shape: Shape. pnum: int <= self.size. Returns: list of integers with length tensor_shape.ndims.
codesearchnet
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # Bidirectional-streaming RPC stub for the embedded Assistant API.
    self.Converse = channel.stream_stream(
        '/google.assistant.embedded.v1alpha1.EmbeddedAssistant/Converse',
        request_serializer=google_dot_assistant_dot_embedded_dot_v1alpha1_dot_embedded__assistant__pb2.ConverseRequest.SerializeToString,
        response_deserializer=google_dot_assistant_dot_embedded_dot_v1alpha1_dot_embedded__assistant__pb2.ConverseResponse.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def isexe(*components):
    """Return whether a path is an executable file.

    Arguments:
        path (str): Path of the file to check.

    Examples:
        >>> fs.isexe("/bin/ls")
        True
        >>> fs.isexe("/home")
        False
        >>> fs.isexe("/not/a/real/path")
        False

    Returns:
        bool: True if file is executable, else false.
    """
    # Must be a regular file AND carry the execute permission bit.
    target = path(*components)
    return isfile(target) and os.access(target, os.X_OK)
Return whether a path is an executable file. Arguments: path (str): Path of the file to check. Examples: >>> fs.isexe("/bin/ls") True >>> fs.isexe("/home") False >>> fs.isexe("/not/a/real/path") False Returns: bool: True if file is executable, else false.
codesearchnet
def log_jwt_dict_info(log, msg_str, jwt_dict):
    """Dump JWT to log.

    Args:
        log: Logger
            Logger to which to write the message.
        msg_str: str
            A message to write to the log before the JWT values.
        jwt_dict: dict
            JWT containing values to log.

    Returns:
        None
    """
    claims = ts_to_str(jwt_dict)
    # Known claims first (in CLAIM_LIST order, with their display labels),
    # then any remaining claims sorted by key.
    ordered = [(label, claims.pop(key)) for key, label, _ in CLAIM_LIST if key in claims]
    ordered.extend((key, claims[key]) for key in sorted(claims))
    log('{}:'.format(msg_str))
    for name, value in ordered:
        log(' {}: {}'.format(name, value))
Dump JWT to log. Args: log: Logger Logger to which to write the message. msg_str: str A message to write to the log before the JWT values. jwt_dict: dict JWT containing values to log. Returns: None
codesearchnet
def format_delta(__timedelta: datetime.timedelta) -> str:
    """Format ISO-8601 duration string.

    Args:
        __timedelta: Duration to process.

    Returns:
        ISO-8601 representation of duration.
    """
    if __timedelta == datetime.timedelta(0):
        return ''
    days = __timedelta.days
    hours, remainder = divmod(__timedelta.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    # Assemble "PnDTnnHnnMnnS", omitting zero components; the 'T' separator
    # appears only when a time component is present.
    parts = [
        '{}D'.format(days) if days else '',
        'T' if (hours or minutes or seconds) else '',
        '{:02d}H'.format(hours) if hours else '',
        '{:02d}M'.format(minutes) if minutes else '',
        '{:02d}S'.format(seconds) if seconds else '',
    ]
    return 'P' + ''.join(parts)
Format ISO-8601 duration string. Args: __timedelta: Duration to process Returns: ISO-8601 representation of duration
codesearchnet
def _Write(self, data, output_file):
    """Output a json or bin version of the flatbuffer model.

    Args:
        data: Dict representing the TensorFlow Lite model to write.
        output_file: filename to write the converted flatbuffer to. (json,
            tflite, or bin extension is required).

    Raises:
        ValueError: When the extension is not json or bin.
        RuntimeError: When flatc fails to convert json data to binary.
    """
    _, extension = os.path.splitext(output_file)
    with TemporaryDirectoryResource() as tempdir:
        if extension == '.json':
            # BUG FIX: use a context manager so the output handle is closed
            # (and flushed) deterministically instead of being leaked.
            with open(output_file, 'w') as fp:
                json.dump(data, fp, sort_keys=True, indent=2)
        elif extension in ['.tflite', '.bin']:
            # Round-trip through JSON, then let flatc emit the binary form.
            input_json = os.path.join(tempdir, 'temp.json')
            with open(input_json, 'w') as fp:
                json.dump(data, fp, sort_keys=True, indent=2)
            returncode = subprocess.call([
                self._flatc_path, '-b', '--defaults-json', '--strict-json',
                '-o', tempdir, self._new_schema, input_json])
            if returncode != 0:
                raise RuntimeError('flatc failed to convert upgraded json to binary.')
            shutil.copy(os.path.join(tempdir, 'temp.tflite'), output_file)
        else:
            raise ValueError('Invalid extension on output file %r' % output_file)
Output a json or bin version of the flatbuffer model. Args: data: Dict representing the TensorFlow Lite model to write. output_file: filename to write the converted flatbuffer to. (json, tflite, or bin extension is required). Raises: ValueError: When the extension is not json or bin RuntimeError: When flatc fails to convert json data to binary.
github-repos
def easeInOutBack(n, s=1.70158):
    """A "back-in" tween function that overshoots both the start and
    destination.

    Args:
        n (float): The time progress, starting at 0.0 and ending at 1.0.
        s (float): Overshoot amount; larger values overshoot further.

    Returns:
        (float) The line progress, starting at 0.0 and ending at 1.0.
        Suitable for passing to getPointOnLine().
    """
    _checkRange(n)
    t = n * 2
    # The overshoot constant is scaled for the in/out combination.
    overshoot = s * 1.525
    if t < 1:
        return 0.5 * (t * t * ((overshoot + 1) * t - overshoot))
    t -= 2
    return 0.5 * (t * t * ((overshoot + 1) * t + overshoot) + 2)
A "back-in" tween function that overshoots both the start and destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
juraj-google-style
def closure(self, rules):
    """Fills out the entire closure based on some initial dotted rules.

    Args:
        rules - an iterable of DottedRules.

    Returns:
        frozenset of DottedRules.
    """
    closure = set()
    todo = set(rules)
    # Worklist algorithm: expand each rule whose dot precedes a nonterminal
    # by adding that nonterminal's productions with appropriate lookaheads.
    while todo:
        rule = todo.pop()
        closure.add(rule)
        if rule.at_end:
            continue
        symbol = rule.rhs[rule.pos]
        for production in self.nonterminals[symbol]:
            for first in self.first(rule.rest):
                # If the production can derive EPSILON, the new rule starts
                # with the dot past position 0 -- presumably to skip the
                # vanishing symbol; confirm against DottedRule semantics.
                if EPSILON in production.rhs:
                    new_rule = DottedRule(production, 1, first)
                else:
                    new_rule = DottedRule(production, 0, first)
                if new_rule not in closure:
                    todo.add(new_rule)
    return frozenset(closure)
Fills out the entire closure based on some initial dotted rules. Args: rules - an iterable of DottedRules Returns: frozenset of DottedRules
juraj-google-style
def aggregate(self, index):
    """Performs a groupby of the unique Columns by index, as constructed from self.df.

    Args:
        index (str, or pd.Index): Index or column name of self.df.

    Returns:
        pd.DataFrame: A dataframe, aggregated by index, that contains the
            result of the various ColumnFunctions, and named accordingly.
    """
    if isinstance(index, string_types):
        # Single column name: group the helper frame by that column.
        col_df_grouped = self.col_df.groupby(self.df[index])
    else:
        # Multiple keys: temporarily install a MultiIndex on col_df so we
        # can group by level names, then restore the original index.
        self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])
        col_df_grouped = self.col_df.groupby(level=index)
        self.col_df.index = self.df.index
    # Aggregate each reduction's source column with its own agg function.
    self.reduced_df = pd.DataFrame({
        colred: col_df_grouped[colred.column].agg(colred.agg_func)
        for colred in self.column_reductions
    })
    reduced_dfs = []
    for cf in self.column_functions:
        reduced_dfs.append(cf.apply_and_name(self))
    return pd.concat(reduced_dfs, axis=1)
Performs a groupby of the unique Columns by index, as constructed from self.df. Args: index (str, or pd.Index): Index or column name of self.df. Returns: pd.DataFrame: A dataframe, aggregated by index, that contains the result of the various ColumnFunctions, and named accordingly.
juraj-google-style
def __tomo_linear_inv(freqs, ops, weights=None, trace=None):
    """Reconstruct a matrix through linear inversion.

    Args:
        freqs (list[float]): list of observed frequences.
        ops (list[np.array]): list of corresponding projectors.
        weights (list[float] or array_like): weights to be used for weighted
            fitting.
        trace (float or None): trace of returned operator.

    Returns:
        numpy.array: A numpy array of the reconstructed operator.
    """
    # Optional weight matrix (a vector of weights becomes a diagonal matrix).
    W = None
    if weights is not None:
        W = np.array(weights)
        if W.ndim == 1:
            W = np.diag(W)
    # Stack the vectorized, conjugated projectors into the sensing matrix S.
    S = np.array([vectorize(m).conj() for m in ops]).reshape(len(ops), ops[0].size)
    v = np.array(freqs)
    if W is not None:
        S = np.dot(W, S)
        v = np.dot(W, freqs)
    # Weighted least squares via the Moore-Penrose pseudo-inverse.
    Sdg = S.T.conj()
    pseudo_inv = np.linalg.pinv(np.dot(Sdg, S))
    ret = devectorize(np.dot(pseudo_inv, np.dot(Sdg, v)))
    if trace is not None:
        # Rescale so the reconstructed operator has the requested trace.
        ret = trace * ret / np.trace(ret)
    return ret
Reconstruct a matrix through linear inversion. Args: freqs (list[float]): list of observed frequences. ops (list[np.array]): list of corresponding projectors. weights (list[float] or array_like): weights to be used for weighted fitting. trace (float or None): trace of returned operator. Returns: numpy.array: A numpy array of the reconstructed operator.
juraj-google-style
def stage(self, startimage, newimage):
    """Copies the file from source to target.

    Args:
        startimage (str): name of the image to stage these files into.
        newimage (str): name of the created image.
    """
    client = utils.get_client()
    # NOTE(review): the original cprint format string was corrupted in this
    # source (unterminated literal); reconstructed with one placeholder per
    # argument -- confirm the exact wording upstream.
    cprint('  Copying file from "%s:/%s"\n    to "%s:%s"'
           % (self.sourceimage, self.sourcepath, startimage, self.destpath),
           'blue')

    # Exported content is cached as a tarball keyed by source image/path.
    cachedir = self._setcache(client)
    cacherelpath = os.path.relpath(cachedir, TMPDIR)

    # Discard a partially-written cache entry (directory without content.tar).
    if os.path.exists(cachedir) and not os.path.exists(os.path.join(cachedir, 'content.tar')):
        shutil.rmtree(cachedir)

    if not os.path.exists(cachedir):
        print(' * Creating cache at %s' % cacherelpath)
        container = client.containers.create(self.sourceimage)
        try:
            tarfile_stream, tarfile_stats = container.get_archive(self.sourcepath)
        except docker.errors.NotFound:
            raise errors.MissingFileError(
                'Cannot copy file "%s" from image "%s" - it does not exist!' %
                (self.sourcepath, self.sourceimage))

        # Write to a temp dir first, then rename into place so the cache is
        # never observed half-written.
        tempdir = tempfile.mkdtemp(dir=BUILD_TEMPDIR)
        with open(os.path.join(tempdir, 'content.tar'), 'wb') as localfile:
            for chunk in tarfile_stream:
                localfile.write(chunk)
        os.mkdir(cachedir)
        os.rename(tempdir, cachedir)
    else:
        print(' Using cached files from %s' % cacherelpath)

    # One-line Dockerfile that unpacks the cached tarball at destpath.
    dockerfile = 'FROM %s\nADD content.tar %s' % (startimage, self.destpath)
    with open(os.path.join(cachedir, 'Dockerfile'), 'w') as df:
        df.write(dockerfile)

    buildargs = dict(path=cachedir, tag=newimage, decode=True)
    utils.set_build_cachefrom(self.cache_from, buildargs, client)
    stream = client.api.build(**buildargs)
    try:
        utils.stream_docker_logs(stream, newimage)
    except ValueError as e:
        raise errors.BuildError(dockerfile, e.args[0], build_args=buildargs)
Copies the file from source to target Args: startimage (str): name of the image to stage these files into newimage (str): name of the created image
juraj-google-style
def get_all_counters(obj, instance_list=None):
    """Get the values for all counters available to a Counter object.

    Args:
        obj (str): The name of the counter object. You can get a list of
            valid names using the ``list_objects`` function.
        instance_list (list): A list of instances to return. Use this to
            narrow down the counters that are returned.

            .. note::
                ``_Total`` is returned as ``*``

    Returns:
        dict: the result of ``get_counters`` for the assembled counter list,
            or an empty dict if no counters were found.
    """
    counters, instances_avail = win32pdh.EnumObjectItems(None, None, obj, -1, 0)
    if instance_list is None:
        instance_list = instances_avail
    if not isinstance(instance_list, list):
        instance_list = [instance_list]
    counter_list = []
    for counter in counters:
        for instance in instance_list:
            # PDH uses '*' as the wildcard for the synthetic _Total instance.
            instance = '*' if instance.lower() == '_total' else instance
            counter_list.append((obj, instance, counter))
        else:
            # NOTE(review): this `else` belongs to the inner `for` and always
            # runs (there is no `break`), so an instance-less entry is added
            # for every counter -- confirm this duplication is intentional.
            counter_list.append((obj, None, counter))
    return get_counters(counter_list) if counter_list else {}
Get the values for all counters available to a Counter object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function instance_list (list): A list of instances to return. Use this to narrow down the counters that are returned. .. note:: ``_Total`` is returned as ``*``
juraj-google-style
def __init__(self, min_score=DEFAULT_MIN_SCORE, user_attributes=DEFAULT_USER_ATTRIBUTES):
    """Init method.

    Args:
        min_score (int): minimum score to accept (between 0 and 4).
        user_attributes (tuple): list of user attributes to check.
    """
    # Clamp the score into the supported [1, 4] range.
    self.min_score = min(max(min_score, 1), 4)
    self.user_attributes = user_attributes
Init method. Args: min_score (int): minimum score to accept (between 0 and 4). user_attributes (tuple): list of user attributes to check.
juraj-google-style
def notch_filter(data: FLOATS_TYPE,
                 sampling_freq_hz: float,
                 notch_freq_hz: float,
                 quality_factor: float) -> FLOATS_TYPE:
    """Design and use a notch (band reject) filter to filter the data.

    Args:
        data: time series of the data.
        sampling_freq_hz: sampling frequency :math:`f_s`, in Hz (or other
            consistent units).
        notch_freq_hz: notch frequency, in Hz (or other consistent units).
        quality_factor: notch filter quality factor, :math:`Q`.

    Returns:
        filtered data.
    """
    # Design the IIR notch at the normalized frequency, then apply it.
    w0 = normalized_frequency(notch_freq_hz, sampling_freq_hz)
    numer, denom = iirnotch(w0=w0, Q=quality_factor)
    return lfilter(b=numer, a=denom, x=data)
Design and use a notch (band reject) filter to filter the data. Args: data: time series of the data sampling_freq_hz: sampling frequency :math:`f_s`, in Hz (or other consistent units) notch_freq_hz: notch frequency, in Hz (or other consistent units) quality_factor: notch filter quality factor, :math:`Q` Returns: filtered data
juraj-google-style
def write(self, x, access_logits):
    """Write to the memory based on a combination of similarity and least used.

    Based on arXiv:1607.00036v2 [cs.LG].

    Args:
        x: a tensor in the shape of [batch_size, length, depth].
        access_logits: the logits for accessing the memory.

    Returns:
        the update op.
    """
    # Gate controlling how strongly the running mean logits suppress writes.
    gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name='gamma')
    write_logits = (access_logits - (gamma * tf.expand_dims(self.mean_logits, 1)))
    # Candidate content to write into the memory slots.
    candidate_value = tf.layers.dense(x, self.val_depth, activation=tf.nn.relu, name='candidate_value')
    erase_gates = tf.layers.dense(x, self.memory_size, activation=tf.nn.sigmoid, name='erase')
    write_weights = tf.nn.softmax(write_logits)
    # Erase-then-add update of the memory values.
    erase_weights = tf.expand_dims((1 - (erase_gates * write_weights)), 3)
    erase = tf.multiply(erase_weights, tf.expand_dims(self.mem_vals, 1))
    addition = tf.multiply(tf.expand_dims(write_weights, 3), tf.expand_dims(candidate_value, 2))
    update_value_op = self.mem_vals.assign(tf.reduce_mean((erase + addition), axis=1))
    with tf.control_dependencies([update_value_op]):
        # Exponential moving average of the write logits (0.1 old + 0.9 new),
        # updated only after the value write has completed.
        write_op = self.mean_logits.assign(((self.mean_logits * 0.1) + tf.reduce_mean((write_logits * 0.9), axis=1)))
    return write_op
Write to the memory based on a combination of similarity and least used. Based on arXiv:1607.00036v2 [cs.LG]. Args: x: a tensor in the shape of [batch_size, length, depth]. access_logits: the logits for accessing the memory. Returns: the update op.
codesearchnet
def dump_in_memory_result(self, result, output_path):
    """Recursively dumps the result of our processing into files within the
    given output path.

    Args:
        result: The in-memory result of our processing.
        output_path: Full path to the folder into which to dump the files.

    Returns:
        The number of files generated (integer).
    """
    file_count = 0
    logger.debug('Dumping in-memory processing results to output folder: %s', output_path)
    # Python 3: plain .items() replaces the six-style iteritems() helper.
    for key, value in result.items():
        cur_output_path = os.path.join(output_path, key)
        if isinstance(value, dict):
            # Nested dict -> subdirectory; recurse and accumulate the count.
            file_count += self.dump_in_memory_result(value, cur_output_path)
        else:
            if not os.path.isdir(output_path):
                os.makedirs(output_path)
            logger.debug('Writing output file: %s', cur_output_path)
            with open(cur_output_path, 'wt', encoding=self.config.encoding) as f:
                f.write(value)
            file_count += 1
    return file_count
Recursively dumps the result of our processing into files within the given output path. Args: result: The in-memory result of our processing. output_path: Full path to the folder into which to dump the files. Returns: The number of files generated (integer).
codesearchnet
def ContainsNone(self, *values):
    """Sets the type of the WHERE clause as "contains none".

    Args:
        *values: The values to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_NONE')
    return self._query_builder
Sets the type of the WHERE clause as "contains none". Args: *values: The values to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
juraj-google-style
def depricated_name(newmethod):
    """Decorator for warning user of deprecated functions before use.

    Args:
        newmethod (str): Name of method to use instead.

    Returns:
        A decorator that wraps the target function with a DeprecationWarning.
    """
    def decorator(func):
        # The message is fixed at decoration time; func.__name__ cannot change.
        message = 'Function {} is depricated, please use {} instead.'.format(
            func.__name__, newmethod)

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Force the warning to display even if DeprecationWarnings are
            # filtered by default, then restore the default behaviour.
            warnings.simplefilter('always', DeprecationWarning)
            warnings.warn(message, category=DeprecationWarning, stacklevel=2)
            warnings.simplefilter('default', DeprecationWarning)
            return func(*args, **kwargs)

        return wrapper

    return decorator
Decorator for warning user of deprecated functions before use. Args: newmethod (str): Name of method to use instead.
codesearchnet
def distance(self, other):
    """Distance between points.

    Args:
        other (:obj:`Point`)

    Returns:
        float: Distance in km.
    """
    # Delegates to the module-level `distance` helper (shadowed by this
    # method's name); the None arguments presumably stand for unused
    # altitude/extra values -- confirm against the helper's signature.
    return distance(self.lat, self.lon, None, other.lat, other.lon, None)
Distance between points Args: other (:obj:`Point`) Returns: float: Distance in km
juraj-google-style
def _get_enrollments_list_page(self, params=None):
    """Submit request to retrieve enrollments list.

    Args:
        params (dict): Query parameters to use in the request. Valid
            parameters are:

            * course_id: Filters the result to course enrollments for the
              course corresponding to the given course ID. The value must be
              URL encoded. Optional.
            * username: List of comma-separated usernames. Filters the result
              to the course enrollments of the given users. Optional.

    Returns:
        tuple: (results list, pagination cursor or None).
    """
    req_url = urljoin(self.base_url, self.enrollment_list_url)
    resp = self.requester.get(req_url, params=params)
    resp.raise_for_status()
    payload = resp.json()
    results = payload['results']
    # Extract the pagination cursor from the "next" URL, if one is present.
    cursor = None
    next_url_str = payload.get('next')
    if next_url_str:
        query = parse_qs(urlparse(next_url_str).query)
        cursor_values = query.get('cursor')
        if cursor_values and isinstance(cursor_values, list):
            cursor = cursor_values[0]
    return (results, cursor)
Submit request to retrieve enrollments list. Args: params (dict): Query parameters to use in the request. Valid parameters are: * course_id: Filters the result to course enrollments for the course corresponding to the given course ID. The value must be URL encoded. Optional. * username: username: List of comma-separated usernames. Filters the result to the course enrollments of the given users. Optional.
codesearchnet
def map_creative_click_tag_feeds(self, creative_feed, click_tag_feed):
    """Maps click tag feed to the corresponding creative.

    Click Tag is a child object to the creative, and there is a 1 creative to
    many click tags relationship. In Bulkdozer they are represented by two
    separate tabs in the feed, and this method maps the creatives to their
    respective click tags based on the creative ID.

    Args:
        creative_feed: Creative feed.
        click_tag_feed: Click tag feed.
    """
    for creative in creative_feed:
        # Attach every click tag whose assignment matches this creative.
        matches = []
        for click_tag in click_tag_feed:
            if self._assignment_matches(creative, click_tag):
                matches.append(click_tag)
        creative['click_tags'] = matches
Maps click tag feed to the corresponding creative. Click Tag is a child object to the creative, and there is a 1 creative to many click tags relationship. In Bulkdozer they are represented by two separate tab in the feed, and this method maps the creatives to their respective click tags based on the creative ID. Args: creative_feed: Creative feed. click_tag_feed: Click tag feed.
github-repos
def _get_fbeta_score(true_positives, selected, relevant, beta=1):
    """Compute Fbeta score.

    Args:
        true_positives: Number of true positive ngrams.
        selected: Number of selected ngrams.
        relevant: Number of relevant ngrams.
        beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall
            only.

    Returns:
        Fbeta score.
    """
    # Precision/recall default to 1 when their denominators are empty.
    precision = true_positives / selected if selected > 0 else 1
    if beta == 0:
        return precision
    recall = true_positives / relevant if relevant > 0 else 1
    if precision <= 0 or recall <= 0:
        return 0
    beta2 = beta * beta
    return (1 + beta2) * precision * recall / (beta2 * precision + recall)
Compute Fbeta score. Args: true_positives: Number of true positive ngrams. selected: Number of selected ngrams. relevant: Number of relevant ngrams. beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only. Returns: Fbeta score.
codesearchnet
def GetCloudPath(self, resource_id, cache, database):
    """Return cloud path given a resource id.

    Args:
        resource_id (str): resource identifier for the file.
        cache (SQLiteCache): cache.
        database (SQLiteDatabase): database.

    Returns:
        str: full path to the resource value.
    """
    cloud_path = cache.GetResults('cloud_path')
    if (not cloud_path):
        # Lazily populate the cache from the database on first use.
        results = database.Query(self.CLOUD_PATH_CACHE_QUERY)
        cache.CacheQueryResults(results, 'cloud_path', 'resource_id', ('filename', 'parent'))
        cloud_path = cache.GetResults('cloud_path')
    if (resource_id == 'folder:root'):
        return '/'
    paths = []
    # Walk the parent links from the resource up to (but excluding) the root.
    (parent_path, parent_id) = cloud_path.get(resource_id, ['', ''])
    while parent_path:
        if (parent_path == 'folder:root'):
            break
        paths.append(parent_path)
        (parent_path, parent_id) = cloud_path.get(parent_id, ['', ''])
    if (not paths):
        return '/'
    # Components were collected child-first; reverse for root-first order.
    paths.reverse()
    return '/{0:s}/'.format('/'.join(paths))
Return cloud path given a resource id. Args: resource_id (str): resource identifier for the file. cache (SQLiteCache): cache. database (SQLiteDatabase): database. Returns: str: full path to the resource value.
codesearchnet
def _convert_template_option(template):
    """Convert Tabula app template to tabula-py option.

    Args:
        template (dict): Tabula app template.

    Returns:
        `obj`:dict: tabula-py option.
    """
    option = {}
    # Each known extraction method maps to its own boolean option flag.
    extraction_method = template.get('extraction_method')
    if extraction_method in ('guess', 'lattice', 'stream'):
        option[extraction_method] = True
    option['pages'] = template.get('page')
    # Area is [top, left, bottom, right], rounded to 3 decimal places.
    option['area'] = [round(template['y1'], 3), round(template['x1'], 3),
                      round(template['y2'], 3), round(template['x2'], 3)]
    return option
Convert Tabula app template to tabula-py option Args: template (dict): Tabula app template Returns: `obj`:dict: tabula-py option
codesearchnet
def add_sample_meta(self, source, reference, method='', filename='', md5='', sha1='', sha256='', size='', mimetype='', campaign='', confidence='', description='', bucket_list=None):
    """Adds a metadata sample. To add an actual file, use add_sample_file.

    Args:
        source: Source of the information.
        reference: A reference where more information can be found.
        method: The method for obtaining the sample.
        filename: The name of the file.
        md5: An MD5 hash of the file.
        sha1: SHA1 hash of the file.
        sha256: SHA256 hash of the file.
        size: Size of the file.
        mimetype: The mimetype of the file.
        campaign: An associated campaign.
        confidence: The campaign confidence.
        description: A description of the sample (currently not sent in the
            request payload; kept for interface compatibility).
        bucket_list: A list of bucket list items to add.

    Returns:
        A JSON sample object or None if there was an error.
    """
    # Fix: the default used to be a shared mutable list (bucket_list=[]).
    if bucket_list is None:
        bucket_list = []
    data = {
        'api_key': self.api_key,
        'username': self.username,
        'source': source,
        'reference': reference,
        'method': method,
        'filename': filename,
        'md5': md5,
        'sha1': sha1,
        'sha256': sha256,
        'size': size,
        'mimetype': mimetype,
        # This endpoint always uploads metadata only; add_sample_file
        # handles the 'file' upload type.
        'upload_type': 'meta',
        'campaign': campaign,
        'confidence': confidence,
        'bucket_list': ','.join(bucket_list),
    }
    r = requests.post('{0}/samples/'.format(self.url), data=data,
                      verify=self.verify, proxies=self.proxies)
    if r.status_code == 200:
        return json.loads(r.text)
    log.error('Error with status code {0} and message {1}'.format(r.status_code, r.text))
    return None
Adds a metadata sample. To add an actual file, use add_sample_file. Args: source: Source of the information. reference: A reference where more information can be found. method: The method for obtaining the sample. filename: The name of the file. md5: An MD5 hash of the file. sha1: SHA1 hash of the file. sha256: SHA256 hash of the file. size: Size of the file. mimetype: The mimetype of the file. campaign: An associated campaign. confidence: The campaign confidence. description: A description of the sample. bucket_list: A list of bucket list items to add. Note: the upload type is always 'meta' for this call; use add_sample_file for uploads of type 'file'. Returns: A JSON sample object or None if there was an error.
codesearchnet
def api_class(self, resource_name=None, path=None, audiences=None, scopes=None, allowed_client_ids=None, auth_level=None, api_key_required=None):
    """Get a decorator for a class that implements an API.

    This can be used for single-class or multi-class implementations. It is
    used implicitly in simple single-class APIs that only use @api directly.

    Args:
        resource_name: string, Resource name for the class this decorates.
        path: string, Base path prepended to any method paths in the class.
        audiences: list of strings, Acceptable audiences for authentication.
        scopes: list of strings, Acceptable scopes for authentication.
        allowed_client_ids: list of strings, Acceptable client IDs for auth.
        auth_level: enum from AUTH_LEVEL, Frontend authentication level.
        api_key_required: bool, Whether a key is required to call this API.

    Returns:
        A decorator function to decorate a class that implements an API.
    """
    if (auth_level is not None):
        # Explicit auth_level is discouraged; warn but still honor it below.
        _logger.warn(_AUTH_LEVEL_WARNING)

    def apiserving_api_decorator(api_class):
        """Decorator for ProtoRPC class that configures Google's API server.

        Args:
            api_class: remote.Service class, ProtoRPC service class being
                wrapped.

        Returns:
            Same class with API attributes assigned in api_info.
        """
        # Track each decorated service class so the API can enumerate them.
        self.__classes.append(api_class)
        api_class.api_info = _ApiInfo(self.__common_info, resource_name=resource_name, path=path, audiences=audiences, scopes=scopes, allowed_client_ids=allowed_client_ids, auth_level=auth_level, api_key_required=api_key_required)
        return api_class
    return apiserving_api_decorator
Get a decorator for a class that implements an API. This can be used for single-class or multi-class implementations. It's used implicitly in simple single-class APIs that only use @api directly. Args: resource_name: string, Resource name for the class this decorates. (Default: None) path: string, Base path prepended to any method paths in the class this decorates. (Default: None) audiences: list of strings, Acceptable audiences for authentication. (Default: None) scopes: list of strings, Acceptable scopes for authentication. (Default: None) allowed_client_ids: list of strings, Acceptable client IDs for auth. (Default: None) auth_level: enum from AUTH_LEVEL, Frontend authentication level. (Default: None) api_key_required: bool, Whether a key is required to call into this API. (Default: None) Returns: A decorator function to decorate a class that implements an API.
codesearchnet
def add_imports_for_symbol(module_code_builder, symbol, source_module_name, source_name, api_name, api_version, output_module_prefix=''):
    """Add imports for the given symbol to `module_code_builder`.

    Args:
        module_code_builder: `_ModuleInitCodeBuilder` instance.
        symbol: A symbol.
        source_module_name: Module that we can import the symbol from.
        source_name: Name we can import the symbol with.
        api_name: API name.
        api_version: API version (1 selects the V1 attribute set).
        output_module_prefix: Prefix to prepend to destination module.
    """
    # Select the attribute names that carry export metadata for this
    # API version.
    if api_version == 1:
        names_attr = API_ATTRS_V1[api_name].names
        constants_attr = API_ATTRS_V1[api_name].constants
    else:
        names_attr = API_ATTRS[api_name].names
        constants_attr = API_ATTRS[api_name].constants
    if source_name == constants_attr:
        # `symbol` is the module's constants list: (export names, name) pairs.
        for exports, name in symbol:
            for export in exports:
                dest_module, dest_name = _get_name_and_module(export)
                dest_module = _join_modules(output_module_prefix, dest_module)
                module_code_builder.add_import(None, source_module_name, name, dest_module, dest_name)
    # A regular symbol carrying its own export annotations in __dict__.
    if hasattr(symbol, '__dict__') and names_attr in symbol.__dict__:
        for export in getattr(symbol, names_attr):
            dest_module, dest_name = _get_name_and_module(export)
            dest_module = _join_modules(output_module_prefix, dest_module)
            module_code_builder.add_import(symbol, source_module_name, source_name, dest_module, dest_name)
Add imports for the given symbol to `module_code_builder`. Args: module_code_builder: `_ModuleInitCodeBuilder` instance. symbol: A symbol. source_module_name: Module that we can import the symbol from. source_name: Name we can import the symbol with. api_name: API name. Currently, must be `tensorflow`. api_version: API version. output_module_prefix: Prefix to prepend to destination module.
github-repos
def value_from_message(self, message):
    """Convert a message to a value instance.

    Used by deserializers to convert from underlying messages to the value
    of the expected user type.

    Args:
        message: A message instance of type self.message_type.

    Returns:
        Value of self.message_type.

    Raises:
        DecodeError: if `message` is not an instance of self.message_type.
    """
    expected = self.message_type
    if isinstance(message, expected):
        return message
    raise DecodeError('Expected type %s, got %s: %r' % (expected.__name__, type(message).__name__, message))
Convert a message to a value instance. Used by deserializers to convert from underlying messages to value of expected user type. Args: message: A message instance of type self.message_type. Returns: Value of self.message_type.
juraj-google-style
def __init__(cls, name, bases, dictionary):
    """Creates a message service class.

    Args:
        name: Name of the class (ignored, but required by the metaclass
            protocol).
        bases: Base classes of the class being constructed.
        dictionary: The class dictionary of the class being constructed.
            dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor
            object describing this protocol service type.
    """
    # Classes without a descriptor (e.g. abstract bases) get no generated
    # service methods.
    if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary:
        return
    descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY]
    service_builder = _ServiceBuilder(descriptor)
    service_builder.BuildService(cls)
Creates a message service class. Args: name: Name of the class (ignored, but required by the metaclass protocol). bases: Base classes of the class being constructed. dictionary: The class dictionary of the class being constructed. dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object describing this protocol service type.
juraj-google-style
def num_parameters(self, only_trainable: bool=False, exclude_embeddings: bool=False) -> int:
    """Get number of (optionally, trainable or non-embeddings) parameters.

    Args:
        only_trainable (`bool`, *optional*, defaults to `False`):
            Whether to count only trainable parameters.
        exclude_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to count only non-embedding parameters.

    Returns:
        `int`: The number of parameters.
    """
    if exclude_embeddings:
        # Filter out parameters belonging to nn.Embedding modules by name.
        embedding_param_names = [f'{name}.weight' for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding)]
        total_parameters = [parameter for name, parameter in self.named_parameters() if name not in embedding_param_names]
    else:
        total_parameters = list(self.parameters())
    total_numel = []
    is_loaded_in_4bit = getattr(self, 'is_loaded_in_4bit', False)
    if is_loaded_in_4bit:
        if is_bitsandbytes_available():
            import bitsandbytes as bnb
        else:
            raise ValueError('bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. ')
    for param in total_parameters:
        if param.requires_grad or not only_trainable:
            if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit):
                # NOTE(review): the * 2 appears to account for two 4-bit
                # weights packed per storage element — confirm against the
                # bitsandbytes Params4bit layout.
                if hasattr(param, 'element_size'):
                    num_bytes = param.element_size()
                elif hasattr(param, 'quant_storage'):
                    num_bytes = param.quant_storage.itemsize
                else:
                    num_bytes = 1
                total_numel.append(param.numel() * 2 * num_bytes)
            else:
                total_numel.append(param.numel())
    return sum(total_numel)
Get number of (optionally, trainable or non-embeddings) parameters in the module. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters exclude_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of non-embeddings parameters Returns: `int`: The number of parameters.
github-repos
def WriteHashes(self, arr):
    """Write an array of hex-encoded hashes to the stream.

    The element count is written first as a var-int, then each hash is
    decoded from hex, byte-reversed, and written raw.

    Args:
        arr (list): a list of 32 byte hashes as hex strings.
    """
    self.WriteVarInt(len(arr))
    for item in arr:
        raw = bytearray(binascii.unhexlify(item))
        raw.reverse()
        self.WriteBytes(raw)
Write an array of hashes to the stream. Args: arr (list): a list of 32 byte hashes.
juraj-google-style
def __getattr__(self, name): if self.has_service_by_name(name): return self._service_objects[name] return self.__getattribute__(name)
Syntactic sugar to enable direct access of service objects by alias. Args: name: string, the alias a service object was registered under.
juraj-google-style
def _create_test(self, testcase, function_name, sdkobject, attribute=None):
    """Create a test method for the sdkobject.

    Args:
        testcase: the testcase that should manage the method.
        function_name: the name of the generic method in the testcase.
        sdkobject: the object that should be tested.
        attribute: the attribute information if necessary.

    Returns:
        tuple: (name, method) representing the generated test method.
    """
    func = getattr(testcase, function_name)
    object_name = sdkobject.rest_name
    test_name = ""
    # Substitution map used to rewrite the placeholder tokens "object"
    # and "attribute" in the generic function name.
    rep = dict()
    rep["object"] = object_name
    if attribute:
        rep["attribute"] = attribute.local_name
    rep = dict((re.escape(k), v) for k, v in rep.items())
    pattern = re.compile("|".join(list(rep.keys())))
    if function_name.startswith("_"):
        function_name = function_name[1:]
    test_name = pattern.sub(lambda m: rep[re.escape(m.group(0))], function_name)
    test_func = None
    if attribute:
        # `attribute` is bound as a default argument so the lambda captures
        # the current value, not a late-bound closure variable.
        test_func = lambda self, attribute=attribute: func(self, attribute)
    else:
        test_func = lambda self: func(self)
    test_func.__name__ = str(test_name)
    return (test_name, test_func)
Create a test method for the sdkoject Args: testcase: the testcase to that should manage the method function_name: the name of the method in the testcase sdkobject: the object that should be tested attribute: the attribute information if necessary Returns: It returns a tuple (name, method) that represents the test method
juraj-google-style
def consume(self, tokens):
    """Consume transport tokens from the bucket.

    Args:
        tokens (float): number of transport tokens to consume.

    Returns:
        float: waiting time for the consumer (0 when enough tokens are
        available, otherwise the deficit divided by the fill rate).
    """
    self.tokens -= tokens
    if self.tokens >= 0:
        return 0.
    # Not enough tokens: try to refill, then compute the remaining wait.
    self._get_tokens()
    if self.tokens >= 0:
        return 0.
    return -self.tokens / self.fill_rate
Consume tokens. Args: tokens (float): number of transport tokens to consume Returns: wait_time (float): waiting time for the consumer
juraj-google-style
def _setup(self):
    """Re-register each weight in self.weights under a '_raw' name.

    For each string in self.weights, the corresponding parameter on the
    wrapped module is referenced, deleted, and re-registered as a new
    parameter with a '_raw' suffix (weight-drop setup).
    """
    if isinstance(self.module, torch.nn.RNNBase):
        # flatten_parameters would try to recompact the (now removed)
        # weights; replace it with a no-op.
        self.module.flatten_parameters = noop
    for name_w in self.weights:
        w = getattr(self.module, name_w)
        del self.module._parameters[name_w]
        self.module.register_parameter(name_w + '_raw', nn.Parameter(w.data))
for each string defined in self.weights, the corresponding attribute in the wrapped module is referenced, then deleted, and subsequently registered as a new parameter with a slightly modified name. Args: None Returns: None
juraj-google-style
def __init__(self, name):
    """Worker object that will be passed to the thread.

    Args:
        name (str): name shown in progress ui.
    """
    super(ProgressWorker, self).__init__()
    self.name = name
Worker object that will be passed to the thread. Args: name (str): name shown in progress ui.
juraj-google-style
def parse_formal_type_parameters(base: '_classes.InterpreterClass | _classes.PyTDClass | _classes.ParameterizedClass', prefix: str | None, formal_type_parameters: 'datatypes.AliasingDict[str, _instance_base.SimpleValue]', container: '_instance_base.SimpleValue | DummyContainer | None'=None) -> None:
    """Parse type parameters from a base class into `formal_type_parameters`.

    Args:
        base: base class.
        prefix: the full name of the subclass of the base class.
        formal_type_parameters: mapping of type parameter name to its type;
            updated in place.
        container: An abstract value whose class template is used when
            prefix=None to decide how to handle type parameters that are
            aliased to other type parameters. Values in the class template
            are kept; all others are ignored.
    """
    def merge(t0: '_instance_base.SimpleValue', t1: '_instance_base.SimpleValue', name: str) -> '_instance_base.SimpleValue':
        # Delegate conflicting entries to the module-level merge helper.
        return _merge_type(t0, t1, name, base)
    if isinstance(base, _abstract.ParameterizedClass):
        if base.full_name == 'typing.Generic':
            # typing.Generic itself contributes no concrete parameters.
            return
        if isinstance(base.base_cls, (_abstract.InterpreterClass, _abstract.PyTDClass)):
            formal_type_parameters.merge_from(base.base_cls.all_formal_type_parameters, merge)
        params = base.get_formal_type_parameters()
        if hasattr(container, 'cls'):
            container_template = container.cls.template
        else:
            container_template = ()
        for name, param in params.items():
            if isinstance(param, _abstract.TypeParameter):
                # A parameter aliased to another type parameter.
                if prefix:
                    formal_type_parameters.add_alias(name, prefix + '.' + param.name, merge)
                elif param in container_template:
                    formal_type_parameters[name] = param
            elif name not in formal_type_parameters:
                formal_type_parameters[name] = param
            else:
                # Same name seen twice: merge the old and new values.
                last_type = formal_type_parameters[name]
                formal_type_parameters[name] = merge(last_type, param, name)
    else:
        if isinstance(base, (_abstract.InterpreterClass, _abstract.PyTDClass)):
            formal_type_parameters.merge_from(base.all_formal_type_parameters, merge)
        if base.template:
            # Record any as-yet-unbound template parameters with value None.
            for item in base.template:
                if isinstance(item, _abstract.TypeParameter):
                    name = full_type_name(base, item.name)
                    if name not in formal_type_parameters:
                        formal_type_parameters[name] = None
Parse type parameters from base class. Args: base: base class. prefix: the full name of subclass of base class. formal_type_parameters: the mapping of type parameter name to its type. container: An abstract value whose class template is used when prefix=None to decide how to handle type parameters that are aliased to other type parameters. Values that are in the class template are kept, while all others are ignored. Raises: GenericTypeError: If the lazy types of type parameter don't match
github-repos
def put(self, item: T, context: PipelineContext = None) -> None:
    """Put an object into the data sink.

    The object may be transformed into a new type for insertion if
    necessary.

    Args:
        item: The object to be inserted into the data sink.
        context: The context of the insertion (mutable).
    """
    LOGGER.info("Converting item \"{item}\" for sink \"{sink}\"".format(item=item, sink=self._sink))
    transformed = self._transform(data=item, context=context)
    # Log the transformed item, matching the original behavior.
    LOGGER.info("Puting item \"{item}\" into sink \"{sink}\"".format(item=transformed, sink=self._sink))
    self._sink.put(self._store_type, transformed, context)
Puts an objects into the data sink. The objects may be transformed into a new type for insertion if necessary. Args: item: The objects to be inserted into the data sink. context: The context of the insertion (mutable).
juraj-google-style
def from_tuple(cls, query):
    """Create a condition from a query tuple.

    Args:
        query (tuple or list): a query domain in the format
            ``(field_name, field_value, field_value_to)`` where the last
            element only applies to date searches.

    Returns:
        DomainCondition: an instance of a domain condition; the concrete
        type depends on the data type of the first value in ``query``.
    """
    field = query[0]
    values = query[1:]
    # Dispatch to a specialized condition class when one is registered
    # for the value's type; otherwise keep the current class.
    try:
        cls = TYPES[type(values[0])]
    except KeyError:
        pass
    return cls(field, *values)
Create a condition from a query tuple. Args: query (tuple or list): Tuple or list that contains a query domain in the format of ``(field_name, field_value, field_value_to)``. ``field_value_to`` is only applicable in the case of a date search. Returns: DomainCondition: An instance of a domain condition. The specific type will depend on the data type of the first value provided in ``query``.
codesearchnet
def waitAndGet(self, event_name, timeout=None):
    """Waits for and returns a CallbackEvent with the specified identifier.

    Args:
        event_name: str, the name of the event to get.
        timeout: float, the number of seconds to wait before giving up.
            If None, it will be set to self.default_timeout_sec.

    Returns:
        CallbackEvent, the oldest entry of the specified event.

    Raises:
        errors.CallbackHandlerBaseError: if the specified timeout is
            longer than the max timeout supported.
    """
    if timeout is None:
        timeout = self.default_timeout_sec
    if timeout:
        # The RPC layer enforces an upper bound on how long it can wait.
        if timeout > self.rpc_max_timeout_sec:
            raise errors.CallbackHandlerBaseError(self._device, f'Specified timeout {timeout} is longer than max timeout {self.rpc_max_timeout_sec}.')
    raw_event = self.callEventWaitAndGetRpc(self._id, event_name, timeout)
    return callback_event.from_dict(raw_event)
Waits and gets a CallbackEvent with the specified identifier. It will raise a timeout error if the expected event does not occur within the time limit. Args: event_name: str, the name of the event to get. timeout: float, the number of seconds to wait before giving up. If None, it will be set to self.default_timeout_sec. Returns: CallbackEvent, the oldest entry of the specified event. Raises: errors.CallbackHandlerBaseError: If the specified timeout is longer than the max timeout supported. errors.CallbackHandlerTimeoutError: The expected event does not occur within the time limit.
github-repos
def transcripts_by_gene(self, build='37'):
    """Group all transcripts for a genome build by hgnc_id.

    Args:
        build (str): genome build to query for.

    Returns:
        dict: hgnc_id -> list of transcript documents.
    """
    LOG.info("Fetching all transcripts")
    hgnc_transcripts = {}
    for transcript in self.transcript_collection.find({'build': build}):
        hgnc_transcripts.setdefault(transcript['hgnc_id'], []).append(transcript)
    return hgnc_transcripts
Return a dictionary with hgnc_id as keys and a list of transcripts as value Args: build(str) Returns: hgnc_transcripts(dict)
juraj-google-style
def from_config(cls, config):
    """Creates a regularizer from its config.

    This is the reverse of `get_config`: it instantiates the same
    regularizer from the config dictionary.

    Args:
        config: A Python dictionary, typically the output of `get_config`.

    Returns:
        A regularizer instance.
    """
    instance = cls(**config)
    return instance
Creates a regularizer from its config. This method is the reverse of `get_config`, capable of instantiating the same regularizer from the config dictionary. This method is used by Keras `model_to_estimator`, saving and loading models to HDF5 formats, Keras model cloning, some visualization utilities, and exporting models to and from JSON. Args: config: A Python dictionary, typically the output of get_config. Returns: A regularizer instance.
github-repos
def timeout_thread_handler(timeout, stop_event):
    """Background watchdog that kills the process if it takes too long.

    Args:
        timeout (float): The number of seconds to wait before killing
            the process.
        stop_event (Event): An event used to cleanly stop the watchdog
            (e.g. during testing).
    """
    if stop_event.wait(timeout):
        # Clean shutdown was requested before the deadline.
        return
    print('Killing program due to %f second timeout' % timeout)
    os._exit(2)
A background thread to kill the process if it takes too long. Args: timeout (float): The number of seconds to wait before killing the process. stop_event (Event): An optional event to cleanly stop the background thread if required during testing.
codesearchnet
def get_config_dir(program='', system_wide=False):
    """Get configuration directories, optionally for a specific program.

    Args:
        program (str): The name of the program whose configuration
            directories have to be found. Empty returns the generic
            configuration homes.
        system_wide (bool): Gets the system-wide configuration directories
            instead of the per-user ones.

    Returns:
        list: A list of all matching configuration directories found.
    """
    import sys  # local import: only needed for macOS detection below

    # Fix: os.name is 'posix' on macOS, so the original `os.name ==
    # 'darwin'` checks could never be true; sys.platform is the documented
    # way to detect macOS.
    is_macos = sys.platform == 'darwin'

    config_homes = []
    if system_wide:
        if os.name == 'nt':
            # Fix: winreg was only imported in the per-user branch, so this
            # branch raised NameError on Windows.
            import winreg
            config_homes.append(
                winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'))
        else:
            config_homes.append('/etc')
            config_homes.append('/etc/xdg')
            if is_macos:
                config_homes.append('/Library')
    else:
        if os.name == 'nt':
            import winreg
            config_homes.append(
                winreg.ExpandEnvironmentStrings('%LOCALAPPDATA%'))
            config_homes.append(
                os.path.join(
                    winreg.ExpandEnvironmentStrings('%APPDATA%'), 'Roaming'))
        else:
            # Prefer the XDG spec; fall back to ~/.config when neither the
            # environment variable nor the xdg module is available.
            if os.getenv('XDG_CONFIG_HOME'):
                config_homes.append(os.getenv('XDG_CONFIG_HOME'))
            else:
                try:
                    from xdg import BaseDirectory
                    config_homes.append(BaseDirectory.xdg_config_home)
                except ImportError:
                    config_homes.append(os.path.expanduser('~/.config'))
            config_homes.append(os.path.expanduser('~'))
            if is_macos:
                config_homes.append(os.path.expanduser('~/Library'))

    if program:
        def __find_homes(app, dirs):
            # Collect existing <home>/<app>, <home>/.<app> and
            # <home>/<app>.d directories, in that order per home.
            homes = []
            for home in dirs:
                for candidate in (app, '.' + app, app + '.d'):
                    full = os.path.join(home, candidate)
                    if os.path.isdir(full):
                        homes.append(full)
            return homes

        app_homes = __find_homes(program, config_homes)
        # Well-known aliases / alternate locations for popular programs.
        if program == 'vim':
            app_homes.extend(__find_homes('vimfiles', config_homes))
        elif program == 'chrome':
            app_homes.extend(__find_homes('google-chrome', config_homes))
        elif program in ['firefox', 'thunderbird']:
            app_homes.extend(
                __find_homes(program, [os.path.expanduser('~/.mozilla')]))
        return app_homes
    return config_homes
Get the configuration directory. Get the configuration directories, optionally for a specific program. Args: program (str) : The name of the program whose configuration directories have to be found. system_wide (bool): Gets the system-wide configuration directories. Returns: list: A list of all matching configuration directories found.
juraj-google-style
def decompress(content, encoding, filename='N/A'):
    """Decompress file content.

    Args:
        content (bytes): the data to be decompressed.
        encoding: None (no compression) or 'gzip'.
        filename (str): used only in debugging messages. Default 'N/A'.

    Raises:
        NotImplementedError: if an unsupported codec is specified.
        DecompressionError: re-raised after logging the filename.

    Returns:
        Decompressed content.
    """
    normalized = (encoding or '').lower()
    try:
        if normalized == '':
            return content
        if normalized == 'gzip':
            return gunzip(content)
    except DecompressionError:
        # Surface which file failed before re-raising.
        print('Filename: ' + str(filename))
        raise
    raise NotImplementedError(str(normalized) + ' is not currently supported. Supported Options: None, gzip')
Decompress file content. Required: content (bytes): the file content to be decompressed encoding: None (no compression) or 'gzip' Optional: filename (str:default:'N/A'): Used for debugging messages Raises: NotImplementedError if an unsupported codec is specified. compression.DecompressionError if decompression fails Return: decompressed content
codesearchnet
def compile_files(raw_dir, raw_files, tag):
    """Compile raw files into a single file for each language.

    Args:
        raw_dir: Directory containing downloaded raw files.
        raw_files: Dict containing filenames of input and target data:
            {"inputs": list of files with data in the input language,
             "targets": list of files with corresponding target data}.
        tag: String to append to the compiled filename.

    Returns:
        Full paths of the compiled input and target files.
    """
    tf.logging.info("Compiling files with tag %s." % tag)
    filename = "%s-%s" % (_PREFIX, tag)
    input_compiled_file = os.path.join(raw_dir, filename + ".lang1")
    target_compiled_file = os.path.join(raw_dir, filename + ".lang2")
    with tf.gfile.Open(input_compiled_file, mode="w") as input_writer:
        with tf.gfile.Open(target_compiled_file, mode="w") as target_writer:
            # Concatenate each parallel file pair into the per-language
            # compiled files, keeping input/target alignment.
            for i in range(len(raw_files["inputs"])):
                input_file = raw_files["inputs"][i]
                target_file = raw_files["targets"][i]
                tf.logging.info("Reading files %s and %s." % (input_file, target_file))
                write_file(input_writer, input_file)
                write_file(target_writer, target_file)
    return input_compiled_file, target_compiled_file
Compile raw files into a single file for each language. Args: raw_dir: Directory containing downloaded raw files. raw_files: Dict containing filenames of input and target data. {"inputs": list of files containing data in input language "targets": list of files containing corresponding data in target language } tag: String to append to the compiled filename. Returns: Full path of compiled input and target files.
juraj-google-style
def register(self, node, vendorSpecific=None):
    """Register a node and interpret the response as a boolean.

    See Also: registerResponse().

    Args:
        node: node to register.
        vendorSpecific: optional vendor-specific parameters.

    Returns:
        bool: result parsed from the registration response.
    """
    return self._read_boolean_response(self.registerResponse(node, vendorSpecific))
See Also: registerResponse() Args: node: vendorSpecific: Returns:
juraj-google-style
def check_error_response(self, body, status):
    """Raise an exception if the backend response was an error.

    Args:
        body: A string containing the backend response body.
        status: A string containing the backend response status, e.g.
            '200 OK'.

    Raises:
        errors.BackendError: if the status code is 300 or above.
    """
    code_text, _, _ = status.partition(' ')
    if int(code_text) >= 300:
        raise errors.BackendError(body, status)
Raise an exception if the response from the backend was an error. Args: body: A string containing the backend response body. status: A string containing the backend response status. Raises: BackendError if the response is an error.
juraj-google-style
def set_last_step_output(self, name, output, reduce_op=None):
    """Set `output` with `name` to be outputted from the last step.

    Args:
        name: String, name to identify the output. Doesn't need to match
            the tensor name.
        output: The tensors that should be outputted with `name`.
        reduce_op: Reduction method to use to reduce outputs from multiple
            replicas. Required if called in a replica context; optional in
            cross-replica context. When present, outputs from all replicas
            are reduced using the current distribution strategy's `reduce`
            method, and the op is recorded in
            `_last_step_outputs_reduce_ops` for later interpretation.
    """
    if distribute_lib.in_cross_replica_context():
        self._last_step_outputs_reduce_ops[name] = reduce_op
        if reduce_op is None:
            # No reduction requested: store the raw output.
            self._last_step_outputs[name] = output
        else:
            distribution = distribute_lib.get_strategy()
            self._last_step_outputs[name] = distribution.reduce(reduce_op, output, axis=None)
    else:
        assert reduce_op is not None

        def merge_fn(distribution, value):
            # Runs once in cross-replica context with the per-replica values.
            self._last_step_outputs[name] = distribution.reduce(reduce_op, value, axis=None)
            self._last_step_outputs_reduce_ops[name] = reduce_op
        distribute_lib.get_replica_context().merge_call(merge_fn, args=(output,))
Set `output` with `name` to be outputted from the last step. Args: name: String, name to identify the output. Doesn't need to match tensor name. output: The tensors that should be outputted with `name`. See below for actual types supported. reduce_op: Reduction method to use to reduce outputs from multiple replicas. Required if `set_last_step_output` is called in a replica context. Optional in cross_replica_context. When present, the outputs from all the replicas are reduced using the current distribution strategy's `reduce` method. Hence, the type of `output` must be what's supported by the corresponding `reduce` method. For e.g. if using MirroredStrategy and reduction is set, output must be a `PerReplica` value. The reduce method is also recorded in a dictionary `_last_step_outputs_reduce_ops` for later interpreting of the outputs as already reduced or not.
github-repos
def __init__(self, quantity, period_type):
    """Initializes a PeriodTensor.

    Args:
        quantity: A Tensor of type tf.int32, representing the quantities
            of period types (e.g. how many months). Can be both positive
            and negative.
        period_type: A PeriodType (a day, a month, etc). Only one
            PeriodType per PeriodTensor instance is supported.
    """
    self._quantity = tf.convert_to_tensor(quantity, dtype=tf.int32, name='pt_quantity')
    self._period_type = period_type
Initializer. Args: quantity: A Tensor of type tf.int32, representing the quantities of period types (e.g. how many months). Can be both positive and negative. period_type: A PeriodType (a day, a month, etc). Currently only one PeriodType per instance of PeriodTensor is supported. Example: ```python two_weeks = PeriodTensor(2, PeriodType.WEEK) months = [3, 6, 9, 12] periods = PeriodTensor(months, PeriodType.MONTH) ```
github-repos
def create_conversion_event(self, event_key, user_id, attributes, event_tags):
    """Create a conversion Event to be sent to the logging endpoint.

    Args:
        event_key: Key representing the event which needs to be recorded.
        user_id: ID for user.
        attributes: Dict representing user attributes and values.
        event_tags: Dict representing metadata associated with the event.

    Returns:
        Event object encapsulating the conversion event.
    """
    params = self._get_common_params(user_id, attributes)
    snapshot = self._get_required_params_for_conversion(event_key, event_tags)
    # Attach the conversion snapshot to the first (and only) user entry.
    params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(snapshot)
    return Event(self.EVENTS_URL, params, http_verb=self.HTTP_VERB, headers=self.HTTP_HEADERS)
Create conversion Event to be sent to the logging endpoint. Args: event_key: Key representing the event which needs to be recorded. user_id: ID for user. attributes: Dict representing user attributes and values. event_tags: Dict representing metadata associated with the event. Returns: Event object encapsulating the conversion event.
codesearchnet
def decompose(miz_file: Path, output_folder: Path):
    """Decompose this Miz into a json structure on disk.

    Args:
        miz_file: MIZ file path as a Path.
        output_folder: folder to output the json structure as a Path.
    """
    (mission_folder, assets_folder) = NewMiz._get_subfolders(output_folder)
    # Start from a clean output tree.
    NewMiz._wipe_folders(mission_folder, assets_folder)
    LOGGER.info('unzipping mission file')
    with Miz(miz_file) as miz:
        version = miz.mission.d['version']
        LOGGER.debug(f'mission version: "%s"', version)
        LOGGER.info('copying assets to: "%s"', assets_folder)
        # The "mission" table itself is decomposed separately below.
        ignore = shutil.ignore_patterns('mission')
        shutil.copytree(str(miz.temp_dir), str(assets_folder), ignore=ignore)
        NewMiz._reorder_warehouses(assets_folder)
        LOGGER.info('decomposing mission table into: "%s" (this will take a while)', mission_folder)
        NewMiz._decompose_dict(miz.mission.d, 'base_info', mission_folder, version, miz)
Decompose this Miz into json Args: output_folder: folder to output the json structure as a Path miz_file: MIZ file path as a Path
codesearchnet
def tanh_shrink(x):
    """Tanh shrink activation function.

    It is defined as: `f(x) = x - tanh(x)`.

    Args:
        x: Input tensor.

    Returns:
        Tensor with the tanh-shrink activation applied.
    """
    return ops.tanh_shrink(x)
Tanh shrink activation function. It is defined as: `f(x) = x - tanh(x)`. Args: x: Input tensor.
github-repos
def add_pagination_meta(self, params, meta):
    """Extend default meta dictionary value with pagination hints.

    Note:
        Values are attached to the ``meta`` dictionary without changing its
        reference, so callers must mutate (not replace) ``meta``.

    Args:
        params (dict): dictionary of decoded parameter values.
        meta (dict): dictionary of meta values attached to response.
    """
    page = params['page']
    size = params['page_size']
    meta['page_size'] = size
    meta['page'] = page
    # Page numbers are zero-based: page 0 has no predecessor.
    if page > 0:
        meta['prev'] = "page={0}&page_size={1}".format(page - 1, size)
    else:
        meta['prev'] = None
    # 'has_more' defaults to True so the next link is emitted unless the
    # caller explicitly marked the result set as exhausted.
    if meta.get('has_more', True):
        meta['next'] = "page={0}&page_size={1}".format(page + 1, size)
    else:
        meta['next'] = None
Extend default meta dictionary value with pagination hints. Note: This method handler attaches values to ``meta`` dictionary without changing it's reference. This means that you should never replace ``meta`` dictionary with any other dict instance but simply modify its content. Args: params (dict): dictionary of decoded parameter values meta (dict): dictionary of meta values attached to response
juraj-google-style
def _create_variable(self, *args, **kwargs):
    """Creates the generator state variable.

    Args:
        *args: positional arguments passed along to `variables.Variable`.
        **kwargs: keyword arguments passed along to `variables.Variable`.

    Returns:
        The created variable.

    Raises:
        ValueError: if the variable ended up as a ShardedVariable, which
            is not allowed for generator state.
    """
    with ops.name_scope('random_generator'):
        # The state variable gets a fixed name inside its own name scope.
        kwargs['name'] = 'StateVar'
        v = variables.Variable(*args, **kwargs)
    if isinstance(v, sharded_variable.ShardedVariable):
        raise ValueError("tf.random.Generator state is sharded, which is not allowed. When creating a tf.distribute.experimental.ParameterServerStrategy, please make sure that the `variable_partitioner` argument won't shard a small variable of shape [2] or [3]. Ways to avoid sharding small variables include setting `variable_partitioner` to None or to tf.distribute.experimental.partitioners.MinSizePartitioner with a large enough `min_shard_bytes`.")
    return v
Creates a variable. Args: *args: positional arguments passed along to `variables.Variable. **kwargs: keyword arguments passed along to `variables.Variable. Returns: The created variable.
github-repos
def write_other_members(self, f, catch_all=False):
    """Writes the leftover (undocumented) members to `f`.

    Args:
        f: File to write to.
        catch_all: If true, document all missing symbols from any module;
            otherwise only missing symbols from just this module.
    """
    # NOTE(review): several string literals in this function appear
    # truncated in this copy of the source (likely lost at a '#' during
    # extraction) — restore them from the original file before use.
    if catch_all:
        names = self._members.items()
    else:
        names = inspect.getmembers(self._module)
    # Members known to this module but never documented anywhere.
    leftovers = []
    for (name, _) in names:
        if ((name in self._members) and (name not in self._documented)):
            leftovers.append(name)
    if leftovers:
        print(('%s: undocumented members: %d' % (self._title, len(leftovers))))
        print('\n
        for name in sorted(leftovers):
            print(('  %s' % name))
            self._documented.add(name)
            self._mentioned.add(name)
            self._write_member_markdown_to_file(f, '
Writes the leftover members to `f`. Args: f: File to write to. catch_all: If true, document all missing symbols from any module. Otherwise, document missing symbols from just this module.
codesearchnet
def get_function_id(sig):
    """Return the function id (4-byte selector) of the given signature.

    Args:
        sig (str): canonical function signature.

    Returns:
        int: the first 4 bytes of the keccak-256 hash, as an integer.
    """
    hasher = sha3.keccak_256()
    hasher.update(sig.encode('utf-8'))
    selector_hex = hasher.hexdigest()[:8]
    return int(selector_hex, 16)
Return the function id of the given signature Args: sig (str) Return: (int)
juraj-google-style
def __get_merged_api_info(self, services): base_paths = sorted(set(s.api_info.base_path for s in services)) if len(base_paths) != 1: raise api_exceptions.ApiConfigurationError( 'Multiple base_paths found: {!r}'.format(base_paths)) names_versions = sorted(set( (s.api_info.name, s.api_info.api_version) for s in services)) if len(names_versions) != 1: raise api_exceptions.ApiConfigurationError( 'Multiple apis/versions found: {!r}'.format(names_versions)) return services[0].api_info
Builds a description of an API. Args: services: List of protorpc.remote.Service instances implementing an api/version. Returns: The _ApiInfo object to use for the API that the given services implement.
juraj-google-style
def keras_mode_combinations(mode=None, run_eagerly=None):
    """Returns the default test combinations for tf.keras tests.

    Note that if tf2 is enabled, then v1 session tests will be skipped.

    Args:
        mode: List of modes to run the tests. Valid options are 'graph' and
            'eager'. Defaults to ['graph', 'eager'] (or just ['eager'] when
            TF2 is enabled). An empty list yields no combinations.
        run_eagerly: List of `run_eagerly` values to run the tests with.
            Defaults to [True, False]. For 'graph' mode, run_eagerly is
            always False.

    Returns:
        A list containing all the combinations used to generate test cases.
    """
    if mode is None:
        # Under TF2 there is no v1 graph/session mode to test.
        mode = ['eager'] if tf2.enabled() else ['graph', 'eager']
    if run_eagerly is None:
        run_eagerly = [True, False]
    result = []
    if 'eager' in mode:
        result += combinations.combine(mode=['eager'], run_eagerly=run_eagerly)
    if 'graph' in mode:
        # Graph mode can never run eagerly.
        result += combinations.combine(mode=['graph'], run_eagerly=[False])
    return result
Returns the default test combinations for tf.keras tests. Note that if tf2 is enabled, then v1 session test will be skipped. Args: mode: List of modes to run the tests. The valid options are 'graph' and 'eager'. Default to ['graph', 'eager'] if not specified. If a empty list is provide, then the test will run under the context based on tf's version, eg graph for v1 and eager for v2. run_eagerly: List of `run_eagerly` value to be run with the tests. Default to [True, False] if not specified. Note that for `graph` mode, run_eagerly value will only be False. Returns: A list contains all the combinations to be used to generate test cases.
github-repos
def get_session(op_input_list=()):
    """Returns the TF session to be used by the backend.

    If a default TensorFlow session is available, it is returned. Else, the
    global Keras session is returned (creating one if needed), assuming it
    matches the current graph. The global session can be set manually via
    `K.set_session(sess)`.

    Args:
        op_input_list: An optional sequence of tensors or ops, used to
            determine the current graph. Otherwise the default graph is
            used.

    Returns:
        A TensorFlow session.
    """
    session = _get_session(op_input_list)
    if not _MANUAL_VAR_INIT:
        # Unless the user opted into manual variable initialization,
        # initialize all variables of the session's graph.
        with session.graph.as_default():
            _initialize_variables(session)
    return session
Returns the TF session to be used by the backend. If a default TensorFlow session is available, we will return it. Else, we will return the global Keras session assuming it matches the current graph. If no global Keras session exists at this point: we will create a new global session. Note that you can manually set the global session via `K.set_session(sess)`. Args: op_input_list: An option sequence of tensors or ops, which will be used to determine the current graph. Otherwise the default graph will be used. Returns: A TensorFlow session.
github-repos
def batch_decode(self, waveforms, waveform_lengths=None) -> List[np.ndarray]:
    """Removes padding from batched generated audio.

    Returns a ragged list of 1D audio waveform arrays (not a single
    tensor) because waveforms generally have different lengths after
    padding removal.

    Args:
        waveforms: The batched output waveforms, shape
            `(batch_size, sequence_length)`.
        waveform_lengths: Optional batched lengths of each waveform
            before padding, shape `(batch_size,)`.

    Returns:
        `List[np.ndarray]`: ragged list of 1D waveform arrays with
        padding removed.
    """
    # Copy each waveform to host memory and detach it from the graph.
    arrays = [wave.detach().to(device='cpu', copy=True).numpy() for wave in waveforms]
    if waveform_lengths is None:
        return arrays
    return [wave[:waveform_lengths[index]] for index, wave in enumerate(arrays)]
Removes padding from generated audio after running [`UnivNetModel.forward`]. This returns a ragged list of 1D audio waveform arrays and not a single tensor/array because in general the waveforms will have different lengths after removing padding. Args: waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): The batched output waveforms from the [`UnivNetModel`]. waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`, *optional*): The batched lengths of each waveform before padding. Returns: `List[np.ndarray]`: A ragged list of 1D waveform arrays with padding removed.
github-repos
def kill(self, procname):
    """Kill every running process whose name matches exactly.

    Args:
        procname (str): process name to kill.
    """
    for proc in psutil.process_iter():
        if (proc.name() == procname):
            # Log before killing so the pid/name pair is always recorded.
            self.info_log(('[pid:%s][name:%s] killed' % (proc.pid, proc.name())))
            proc.kill()
Kill by process name Args: procname (str)
codesearchnet
def append(self, event, help=""):
    """Creates a new event. `event` may be a string or an iterable of names.

    Args:
        event (str or iterable): Name(s) of event(s) to declare.

    Kwargs:
        help (str): Help string for the event. **Please** describe the
            event and its calling arguments here.

    Raises:
        TypeError: if `event` is neither a string nor an iterable.
    """
    if isinstance(event, str):
        self._events[event] = HookList(is_waterfall=self.is_waterfall)
        # Record the help text and the caller's frame info for diagnostics.
        self._help[event] = (help, getframeinfo(stack()[1][0]))
        if not help:
            logger.warning("Great, don't say anything about your hooks and \
wait for plugin creators to figure it out.")
    elif isinstance(event, Iterable):
        # NOTE(review): the help string is not propagated to the individual
        # names when an iterable is passed — confirm this is intended.
        for name in event:
            self.append(name)
    else:
        raise TypeError("Invalid event name!")
Creates a new event. `event` may be iterable or string Args: event (str): Name of event to declare Kwrgs: help (str): Help string for the event Raises: TypeError **Please** describe the event and its calling arguments in the help string.
juraj-google-style
def sort_resources(cls, request, resources, fail_enum, header_proto=None):
    """Sorts a list of resources based on a list of sort controls.

    Args:
        request (object): The parsed protobuf request object.
        resources (list of objects): The resources to be sorted.
        fail_enum (int, enum): The enum status to raise with invalid keys.
        header_proto (class): Class to decode a resource's header.

    Returns:
        list: The sorted list of resources.
    """
    if (not request.sorting):
        return resources
    value_handlers = cls._get_handler_set(request, fail_enum, header_proto)
    def sorter(resource_a, resource_b):
        # Compare by each sort control in order; the first unequal pair
        # decides. Handlers may invert the result for descending order.
        for handler in value_handlers:
            (val_a, val_b) = handler.get_sort_values(resource_a, resource_b)
            if (val_a < val_b):
                return handler.xform_result((- 1))
            if (val_a > val_b):
                return handler.xform_result(1)
        return 0
    return sorted(resources, key=cmp_to_key(sorter))
Sorts a list of resources based on a list of sort controls Args: request (object): The parsed protobuf request object resources (list of objects): The resources to be sorted fail_enum (int, enum): The enum status to raise with invalid keys header_proto(class): Class to decode a resources header Returns: list: The sorted list of resources
codesearchnet
def set_category(self, category):
    """Set the package category.

    Args:
        category: String of an existing category's name, or a Category
            object.
    """
    # Accept either a Category object or a plain name string.
    name = category.name if isinstance(category, Category) else category
    self.find('category').text = name
Set package category Args: category: String of an existing category's name, or a Category object.
codesearchnet
def logloss(y, p):
    """Bounded log loss error.

    Predictions are clipped to [EPS, 1 - EPS] so that log(0) never occurs.
    Fix: clipping now operates on a copy instead of mutating the caller's
    prediction array in place.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        bounded log loss error
    """
    p = p.clip(EPS, 1 - EPS)
    return log_loss(y, p)
Bounded log loss error. Args: y (numpy.array): target p (numpy.array): prediction Returns: bounded log loss error
codesearchnet
def serialize_feature_columns(feature_columns):
    """Serializes a list of FeatureColumns.

    Returns a list of Keras-style config dicts that represent the input
    FeatureColumns and can be used with `deserialize_feature_columns` for
    reconstructing the original columns.

    Args:
        feature_columns: A list of FeatureColumns.

    Returns:
        Keras serialization for the list of FeatureColumns.

    Raises:
        ValueError if called with input that is not a list of
        FeatureColumns.
    """
    # Delegate per-column serialization; map keeps the iteration in C.
    return list(map(serialize_feature_column, feature_columns))
Serializes a list of FeatureColumns. Returns a list of Keras-style config dicts that represent the input FeatureColumns and can be used with `deserialize_feature_columns` for reconstructing the original columns. Args: feature_columns: A list of FeatureColumns. Returns: Keras serialization for the list of FeatureColumns. Raises: ValueError if called with input that is not a list of FeatureColumns.
github-repos
def load_validator(schema_path, schema):
    """Create a JSON schema validator for the given schema.

    Args:
        schema_path: The filename of the JSON schema.
        schema: A Python object representation of the same schema.

    Returns:
        An instance of Draft4Validator.
    """
    if os.name == 'nt':
        # Windows absolute paths need the three-slash file URI form,
        # e.g. file:///C:/path/schema.json. The original assignment was
        # truncated to a bare, unterminated 'file: literal.
        file_prefix = 'file:///'
    else:
        file_prefix = 'file:'
    # Resolver lets $ref entries be resolved relative to the schema file;
    # backslashes must be normalized for a valid URI on Windows.
    resolver = RefResolver(file_prefix + schema_path.replace("\\", "/"),
                           schema)
    return Draft4Validator(schema, resolver=resolver)
Create a JSON schema validator for the given schema. Args: schema_path: The filename of the JSON schema. schema: A Python object representation of the same schema. Returns: An instance of Draft4Validator.
juraj-google-style
def get_mutation_rates(transcripts, mut_dict, ensembl):
    """Determines mutation rates per functional category for transcripts.

    Args:
        transcripts: list of transcript IDs for a gene.
        mut_dict: dictionary of local sequence context mutation rates.
        ensembl: EnsemblRequest object, to retrieve information from
            Ensembl.

    Returns:
        tuple of (rates, merged transcript, transcript CDS length), where
        rates maps each consequence category to its summed mutation rate.

    Raises:
        ValueError: if a transcript's CDS length is not a multiple of 3
            ("anomalous_coding_sequence"), or if no usable transcript was
            found.
    """
    rates = {'missense': 0, 'nonsense': 0, 'splice_lof': 0,
        'splice_region': 0, 'synonymous': 0}
    # Running union of the transcripts processed so far; sites already
    # covered by it are masked out of later transcripts so each site is
    # only counted once.
    combined = None

    for tx_id in transcripts:
        try:
            tx = construct_gene_object(ensembl, tx_id)
        except ValueError:
            # Skip transcripts that cannot be constructed.
            continue

        # A coding sequence not divisible by 3 cannot be translated cleanly.
        if len(tx.get_cds_sequence()) % 3 != 0:
            raise ValueError("anomalous_coding_sequence")

        # Mitochondrial genes are excluded entirely.
        if tx.get_chrom() == "MT":
            continue

        # Rate sites for this transcript, masking sites already seen in
        # previously merged transcripts; then fold it into the union.
        # NOTE: masking uses the PREVIOUS combined, so order matters here.
        sites = SiteRates(tx, mut_dict, masked_sites=combined)
        combined = tx + combined

        for cq in ['missense', 'nonsense', 'splice_lof', 'splice_region',
                'synonymous']:
            rates[cq] += sites[cq].get_summed_rate()

    # Every transcript was skipped or failed to build.
    if combined is None:
        raise ValueError('no tx found')

    # Total CDS length of the merged transcript.
    length = combined.get_coding_distance(combined.get_cds_end())['pos']

    return rates, combined, length
determines mutation rates per functional category for transcripts Args: transcripts: list of transcript IDs for a gene mut_dict: dictionary of local sequence context mutation rates ensembl: EnsemblRequest object, to retrieve information from Ensembl. Returns: tuple of (rates, merged transcript, and transcript CDS length)
juraj-google-style
def register(self, name, asymmetric=False):
    """Decorator for registering a measure with PyPhi.

    Args:
        name (string): The name of the measure.

    Keyword Args:
        asymmetric (boolean): ``True`` if the measure is asymmetric.
    """
    def decorator(func):
        # Bookkeeping happens only when the decorator is actually applied,
        # not when it is merely created.
        if asymmetric:
            self._asymmetric.append(name)
        self.store[name] = func
        return func

    return decorator
Decorator for registering a measure with PyPhi. Args: name (string): The name of the measure. Keyword Args: asymmetric (boolean): ``True`` if the measure is asymmetric.
codesearchnet
def post_error(self, name, message):
    """Asynchronously post a user facing error message about a service.

    Args:
        name (string): The name of the service.
        message (string): The user facing error message that will be
            stored for the service and can be queried later.
    """
    # Build the error-level message payload, then dispatch it.
    msg = _create_message(name, states.ERROR_LEVEL, message)
    self.post_command(OPERATIONS.CMD_POST_MESSAGE, msg)
Asynchronously post a user facing error message about a service. Args: name (string): The name of the service message (string): The user facing error message that will be stored for the service and can be queried later.
juraj-google-style
def from_axis_angle_and_translation(axis, angle, angle_in_radians=False,
                                    translation_vec=(0, 0, 0)):
    """Generates a SymmOp for a rotation about a given axis plus translation.

    Args:
        axis: The axis of rotation in cartesian space. For example,
            [1, 0, 0] indicates rotation about x-axis.
        angle (float): Angle of rotation.
        angle_in_radians (bool): Set to True if angles are given in
            radians. Or else, units of degrees are assumed.
        translation_vec: A translation vector. Defaults to zero.

    Returns:
        SymmOp for a rotation about given axis and translation.
    """
    if isinstance(axis, (tuple, list)):
        axis = np.array(axis)

    vec = (np.array(translation_vec)
           if isinstance(translation_vec, (tuple, list)) else translation_vec)

    theta = angle if angle_in_radians else angle * pi / 180
    cosa = cos(theta)
    sina = sin(theta)
    # Normalize the rotation axis to a unit vector.
    u = axis / np.linalg.norm(axis)

    # Rodrigues' rotation formula, written out element by element.
    r = np.zeros((3, 3))
    r[0, 0] = cosa + u[0] ** 2 * (1 - cosa)
    r[0, 1] = u[0] * u[1] * (1 - cosa) - u[2] * sina
    r[0, 2] = u[0] * u[2] * (1 - cosa) + u[1] * sina
    r[1, 0] = u[0] * u[1] * (1 - cosa) + u[2] * sina
    r[1, 1] = cosa + u[1] ** 2 * (1 - cosa)
    r[1, 2] = u[1] * u[2] * (1 - cosa) - u[0] * sina
    r[2, 0] = u[0] * u[2] * (1 - cosa) - u[1] * sina
    r[2, 1] = u[1] * u[2] * (1 - cosa) + u[0] * sina
    r[2, 2] = cosa + u[2] ** 2 * (1 - cosa)

    return SymmOp.from_rotation_and_translation(r, vec)
Generates a SymmOp for a rotation about a given axis plus translation. Args: axis: The axis of rotation in cartesian space. For example, [1, 0, 0]indicates rotation about x-axis. angle (float): Angle of rotation. angle_in_radians (bool): Set to True if angles are given in radians. Or else, units of degrees are assumed. translation_vec: A translation vector. Defaults to zero. Returns: SymmOp for a rotation about given axis and translation.
codesearchnet
def viewTemplate(id):
    """View an existing Template's details.

    Args:
        `id`: ID of the template to fetch.

    Returns:
        Dictionary containing the details of the template.
    """
    # Fetch the template resource straight from the Qubole API connection.
    return Qubole.agent().get(Template.element_path(id))
View an existing Template details. Args: `id`: ID of the template to fetch Returns: Dictionary containing the details of the template.
juraj-google-style
def AddStop(self, lat, lng, name, stop_id=None):
    """Add a stop to this schedule.

    Args:
        lat: Latitude of the stop as a float or string.
        lng: Longitude of the stop as a float or string.
        name: Name of the stop, which will appear in the feed.
        stop_id: stop_id of the stop or None, in which case a unique id
            is picked.

    Returns:
        A new Stop object.
    """
    # Generate a unique identifier when the caller did not supply one.
    stop_id = stop_id if stop_id is not None else util.FindUniqueId(self.stops)
    new_stop = self._gtfs_factory.Stop(stop_id=stop_id, lat=lat, lng=lng,
                                       name=name)
    self.AddStopObject(new_stop)
    return new_stop
Add a stop to this schedule. Args: lat: Latitude of the stop as a float or string lng: Longitude of the stop as a float or string name: Name of the stop, which will appear in the feed stop_id: stop_id of the stop or None, in which case a unique id is picked Returns: A new Stop object
codesearchnet
def _apply_user_agent(headers, user_agent): if user_agent is not None: if 'user-agent' in headers: headers['user-agent'] = (user_agent + ' ' + headers['user-agent']) else: headers['user-agent'] = user_agent return headers
Adds a user-agent to the headers. Args: headers: dict, request headers to add / modify user agent within. user_agent: str, the user agent to add. Returns: dict, the original headers passed in, but modified if the user agent is not None.
juraj-google-style
def get_airport_weather(self, iata, page=1, limit=100):
    """Retrieve the weather at an airport.

    Given the IATA code of an airport, this method returns the weather
    information.

    Args:
        iata (str): The IATA code for an airport, e.g. HYD
        page (int): Optional page number; for users who are on a plan with
            flightradar24 they can pass in higher page numbers to get more
            data
        limit (int): Optional limit on number of records returned

    Returns:
        The weather data dict, with visibility in km added under
        ``sky.visibility.km`` when a mi value is available.

    Example::

        from pyflightdata import FlightData
        f=FlightData()
        #optional login
        f.login(myemail,mypassword)
        f.get_airport_weather('HYD')
        f.get_airport_weather('HYD',page=1,limit=10)
    """
    url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
    weather = self._fr24.get_airport_weather(url)
    visibility = weather['sky']['visibility']
    miles = visibility['mi']
    # The upstream API reports missing visibility either as None or as the
    # literal string 'None'; only convert to km when a real value exists.
    if miles is not None and miles != 'None':
        visibility['km'] = float(miles) * 1.6094
    return weather
Retrieve the weather at an airport Given the IATA code of an airport, this method returns the weather information. Args: iata (str): The IATA code for an airport, e.g. HYD page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A dict with the weather data for the airport, including visibility in both mi and km Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_airport_weather('HYD') f.get_airport_weather('HYD',page=1,limit=10)
codesearchnet
def mkdirs(self, path):
    """Recursively create directories for the provided path.

    Args:
        path: string path of the directory structure that should be
            created.

    Raises:
        IOError: if leaf directory already exists.
    """
    # Abstract operation: concrete filesystem implementations must
    # override this method.
    raise NotImplementedError
Recursively create directories for the provided path. Args: path: string path of the directory structure that should be created Raises: IOError: if leaf directory already exists.
github-repos
def get_conversion_metadata(model_buffer):
    """Read conversion metadata from a tflite model.

    Args:
        model_buffer: A tflite model.

    Returns:
        The conversion metadata or None if it is not populated.
    """
    model = flatbuffer_utils.convert_bytearray_to_object(model_buffer)
    # No model, or a model without any metadata entries at all.
    if not model or not model.metadata:
        return None

    for entry in model.metadata:
        if entry.name.decode('utf-8') != CONVERSION_METADATA_FIELD_NAME:
            continue
        # The metadata entry only stores a buffer index; the actual
        # payload lives in the model's buffer table.
        raw = model.buffers[entry.buffer].data.tobytes()
        return conversion_metadata_fb.ConversionMetadataT.InitFromObj(
            conversion_metadata_fb.ConversionMetadata.GetRootAsConversionMetadata(
                raw, 0))
    return None
Read conversion metadata from a tflite model. Args: model_buffer: A tflite model. Returns: The conversion metadata or None if it is not populated.
github-repos
def make_query(self, ns):
    """Make a query of entities within this range.

    Query options are not supported. They should be specified when the
    query is run.

    Args:
        ns: namespace of this query.

    Returns:
        a db.Query or ndb.Query, depends on the model class's type.
    """
    if issubclass(self.model_class, db.Model):
        # Old db API: filters are applied by mutating the query.
        query = db.Query(self.model_class, namespace=ns)
        for prop, op, value in self.filters:
            query.filter('%s %s' % (prop, op), value)
    else:
        # ndb API: filter() returns a new query each time.
        query = self.model_class.query(namespace=ns)
        for filter_tuple in self.filters:
            query = query.filter(ndb.FilterNode(*filter_tuple))
    return query
Make a query of entities within this range. Query options are not supported. They should be specified when the query is run. Args: ns: namespace of this query. Returns: a db.Query or ndb.Query, depends on the model class's type.
codesearchnet
def _namespace_to_ord(namespace):
    """Converts a namespace string into an int giving its lexicographic order.

    >>> _namespace_to_ord('')
    0
    >>> _namespace_to_ord('_')
    1
    >>> _namespace_to_ord('__')
    2

    Args:
        namespace: A namespace string.

    Returns:
        An int representing the lexicographical order of the given
        namespace string.
    """
    # Each character contributes the count of namespaces sorting strictly
    # before it at that position, plus one for the prefix itself.
    return sum(
        _LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i - 1]
        * NAMESPACE_CHARACTERS.index(c) + 1
        for i, c in enumerate(namespace))
Converts a namespace string into an int representing its lexographic order. >>> _namespace_to_ord('') 0 >>> _namespace_to_ord('_') 1 >>> _namespace_to_ord('__') 2 Args: namespace: A namespace string. Returns: An int representing the lexographical order of the given namespace string.
juraj-google-style