code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def pop_parameter(key):
    """Remove a parameter from the current scope and return it.

    Args:
        key (str): Key of parameter; '/'-separated keys descend into
            nested parameter scopes.

    Returns:
        ~nnabla.Variable: Parameter if key found, otherwise None.
    """
    scope_name, sep, remainder = key.partition('/')
    if sep:
        # Descend one scope level and recurse on the remaining key.
        with parameter_scope(scope_name):
            return pop_parameter(remainder)
    global current_scope
    param = current_scope.get(key, None)
    if param is not None:
        del current_scope[key]
    return param
Remove and get parameter by key. Args: key(str): Key of parameter. Returns: ~nnabla.Variable Parameter if key found, otherwise None.
codesearchnet
def RetrieveAsset(logdir, plugin_name, asset_name):
    """Retrieve a particular plugin asset from a logdir.

    Args:
        logdir: A directory that was created by a TensorFlow
            summary.FileWriter.
        plugin_name: The plugin we want an asset from.
        asset_name: The name of the requested asset.

    Returns:
        string contents of the plugin asset.

    Raises:
        KeyError: if the asset does not exist or cannot be read.
    """
    path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)
    try:
        with tf.io.gfile.GFile(path, "r") as handle:
            return handle.read()
    except tf.errors.NotFoundError:
        raise KeyError("Asset path %s not found" % path)
    except tf.errors.OpError as op_error:
        raise KeyError(
            "Couldn't read asset path: %s, OpError %s" % (path, op_error))
Retrieve a particular plugin asset from a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: The plugin we want an asset from. asset_name: The name of the requested asset. Returns: string contents of the plugin asset. Raises: KeyError: if the asset does not exist.
juraj-google-style
def directed_tripartition_indices(N):
    """Return indices for directed tripartitions of a sequence.

    Args:
        N (int): The length of the sequence.

    Returns:
        list[tuple]: A list of tuples containing the indices for each of
        the three partitions.

    Example:
        >>> directed_tripartition_indices(1)
        [((0,), (), ()), ((), (0,), ()), ((), (), (0,))]
    """
    if N <= 0:
        return []
    partitions = []
    # Each assignment maps position i to one of the three parts.
    for assignment in product([0, 1, 2], repeat=N):
        parts = ([], [], [])
        for index, location in enumerate(assignment):
            parts[location].append(index)
        partitions.append(tuple(tuple(part) for part in parts))
    return partitions
Return indices for directed tripartitions of a sequence. Args: N (int): The length of the sequence. Returns: list[tuple]: A list of tuples containing the indices for each partition. Example: >>> N = 1 >>> directed_tripartition_indices(N) [((0,), (), ()), ((), (0,), ()), ((), (), (0,))]
codesearchnet
def _PrunedDenseMatrixMultiplication(a, b, indices, transpose_a=False, adjoint_a=False, transpose_b=False, adjoint_b=False):
    """Multiplies two dense matrices at selected indices only.

    The two inputs `a` and `b` must have matching rank (2 or 3). If rank 3,
    the first dimension is the batch dimension. The last two dimensions must
    be compatible for matrix multiplication.

    Args:
        a: The left dense matrix (or batched matrices).
        b: The right dense matrix (or batched matrices).
        indices: The selected output indices where values should be
            produced. Other indices will be pruned (not computed in the
            first place). Shape (length, rank), where rank matches the
            rank of the dense inputs (2 or 3).
        transpose_a: Whether to transpose a.
        adjoint_a: Whether to take the conjugate transpose of a.
        transpose_b: Whether to transpose b.
        adjoint_b: Whether to take the conjugate transpose of b.

    Returns:
        A CSR matrix.
    """
    # Adjoint == conjugation + transposition; fold the transposition into
    # the transpose flags and apply the conjugation eagerly.
    transpose_a = transpose_a or adjoint_a
    transpose_b = transpose_b or adjoint_b
    a = math_ops.conj(a) if adjoint_a else a
    b = math_ops.conj(b) if adjoint_b else b
    rank = len(a.shape)
    # Output shape of the (pruned) product, before any batch dimension.
    dense_shape = (a.shape[-1] if transpose_a else a.shape[-2], b.shape[-2] if transpose_b else b.shape[-1])
    if rank == 2:
        rows = indices[:, 0]
        cols = indices[:, 1]
        transpose = array_ops.transpose
        gather_op = array_ops.gather
    elif rank == 3:
        # Batched case: prepend the batch size; gathered row/col indices
        # carry the batch index, hence gather_nd.
        dense_shape = (a.shape[0],) + dense_shape
        rows = indices[:, :2]
        cols = array_ops_stack.stack([indices[:, 0], indices[:, 2]], axis=1)
        transpose = lambda x: array_ops.transpose(x, perm=[0, 2, 1])
        gather_op = array_ops.gather_nd
    # Each requested output value is a dot product of one row of a with
    # one column of b.
    a_rows = gather_op(transpose(a) if transpose_a else a, indices=rows)
    b_cols = gather_op(b if transpose_b else transpose(b), indices=cols)
    values = math_ops.reduce_sum(a_rows * b_cols, axis=1)
    return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(indices=indices, values=values, dense_shape=dense_shape)
Multiplies two dense matrices at selected indices. The two inputs `a` and `b` must have matching rank (2 or 3). If using rank 3, the first rank is used for the batch number. The last two dimensions should also be compatible for matrix multiplication. TODO(tabakg): Consider C++ implementation. There is also a more efficient way to handle transposes here. Args: a: The left dense matrix (or batched matrices). b: The right dense matrix (or batched matrices). indices: The selected output indices where values should be produced. Other indices will be pruned (not computed in the first place). Indices are specified as a tensor of shape (length, rank), where length is the number of entries and rank is the rank of the dense inputs (2 or 3). transpose_a: Whether to transpose a. adjoint_a: Whether to take the conjugate transpose of a. transpose_b: Whether to transpose b. adjoint_b: Whether to take the conjugate transpose of b. Returns: A CSR matrix.
github-repos
def make_data(self, message):
    """Make a data string from a message according to
    ``self.transport_content_type``.

    Args:
        message: A ``Message`` instance, or a raw payload.

    Returns:
        str: message data; non-``Message`` inputs are returned unchanged.
    """
    if isinstance(message, Message):
        return message.export(self.transport_content_type)
    return message
make data string from message according to transport_content_type Returns: str: message data
codesearchnet
def download(self, task, default_ext, timeout=5, max_retry=3, overwrite=False, **kwargs):
    """Download the image and save it to the corresponding path.

    Args:
        task (dict): The task dict got from ``task_queue``; mutated in
            place with ``success`` and ``filename`` keys.
        default_ext: Default file extension used when building the
            filename.
        timeout (int): Timeout of making requests for downloading images.
        max_retry (int): the max retry times if the request fails.
        overwrite (bool): If False, skip files already present in storage.
        **kwargs: reserved arguments for overriding ``keep_file``.
    """
    file_url = task['file_url']
    task['success'] = False
    task['filename'] = None
    retry = max_retry

    if not overwrite:
        with self.lock:
            self.fetched_num += 1
            filename = self.get_filename(task, default_ext)
            if self.storage.exists(filename):
                self.logger.info('skip downloading file %s', filename)
                return
            # Not already stored; undo the optimistic count.
            self.fetched_num -= 1

    while retry > 0 and not self.signal.get('reach_max_num'):
        try:
            response = self.session.get(file_url, timeout=timeout)
        except Exception as e:
            self.logger.error('Exception caught when downloading file %s, '
                              'error: %s, remaining retry times: %d',
                              file_url, e, retry - 1)
        else:
            if self.reach_max_num():
                self.signal.set(reach_max_num=True)
                break
            elif response.status_code != 200:
                self.logger.error('Response status code %d, file %s',
                                  response.status_code, file_url)
                break
            elif not self.keep_file(task, response, **kwargs):
                break
            with self.lock:
                self.fetched_num += 1
                filename = self.get_filename(task, default_ext)
            # NOTE(review): this log call was garbled/truncated in the
            # source; reconstructed as a plain progress log line.
            self.logger.info('image #%s\t%s', self.fetched_num, file_url)
            self.storage.write(filename, response.content)
            task['success'] = True
            task['filename'] = filename
            break
        finally:
            retry -= 1
Download the image and save it to the corresponding path. Args: task (dict): The task dict got from ``task_queue``. timeout (int): Timeout of making requests for downloading images. max_retry (int): the max retry times if the request fails. **kwargs: reserved arguments for overriding.
juraj-google-style
def _execute(self, connection, query, fetch=True): cursor = connection.cursor() try: cursor.execute(query) except Exception as e: from ambry.mprlib.exceptions import BadSQLError raise BadSQLError("Failed to execute query: {}; {}".format(query, e)) if fetch: return cursor.fetchall() else: return cursor
Executes given query using given connection. Args: connection (apsw.Connection): connection to the sqlite db who stores mpr data. query (str): sql query fetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch. Returns: iterable with query result.
juraj-google-style
def from_rfc3339_nanos(value):
    """Convert a nanosecond-precision RFC 3339 timestamp to a datetime.

    .. note::
        Python datetimes do not support nanosecond precision; this
        function therefore truncates such values to microseconds.

    Args:
        value (str): The RFC3339 string to convert.

    Returns:
        datetime.datetime: The datetime object equivalent to the
        timestamp in UTC.

    Raises:
        ValueError: If the timestamp does not match the RFC 3339 regular
            expression.
    """
    with_nanos = _RFC3339_NANOS.match(value)
    if with_nanos is None:
        raise ValueError(
            "Timestamp: {!r}, does not match pattern: {!r}".format(
                value, _RFC3339_NANOS.pattern
            )
        )
    bare_seconds = datetime.datetime.strptime(
        with_nanos.group("no_fraction"), _RFC3339_NO_FRACTION
    )
    fraction = with_nanos.group("nanos")
    if fraction is None:
        micros = 0
    else:
        # Scale the fractional digits up to nanoseconds...
        scale = 9 - len(fraction)
        nanos = int(fraction) * (10 ** scale)
        # BUG FIX: truncate nanoseconds to microseconds. The original
        # assigned `micros = nanos`, which makes datetime.replace raise
        # ValueError for any fraction longer than 6 digits (microsecond
        # must be in 0..999999) and contradicts the documented behavior.
        micros = nanos // 1000
    return bare_seconds.replace(microsecond=micros, tzinfo=pytz.utc)
Convert a nanosecond-precision timestamp to a native datetime. .. note:: Python datetimes do not support nanosecond precision; this function therefore truncates such values to microseconds. Args: value (str): The RFC3339 string to convert. Returns: datetime.datetime: The datetime object equivalent to the timestamp in UTC. Raises: ValueError: If the timestamp does not match the RFC 3339 regular expression.
juraj-google-style
def send_log_message(self, message: LogMessage) -> None:
    """Send a log message to be handled.

    This implementation does nothing with the message.

    Args:
        message: LogMessage dictionary.

    Returns:
        None.
    """
    pass
Sends a log message to be handled. Args: * message: LogMessage dictionary Returns: * None
github-repos
def setNetworkName(self, networkName='GRL'):
    """Set the Thread Network name.

    Args:
        networkName: the network name string to be set.

    Returns:
        True: successful to set the Thread Network name.
        False: fail to set the Thread Network name.
        None (implicitly): if an exception occurred; the error is only
        written to the debug logger.
    """
    print '%s call setNetworkName' % self.port
    print networkName
    try:
        cmd = 'networkname %s' % networkName
        # The name is applied both to the live configuration and to the
        # operational dataset, which must then be committed.
        datasetCmd = 'dataset networkname %s' % networkName
        self.hasActiveDatasetToCommit = True
        return self.__sendCommand(cmd)[0] == 'Done' and self.__sendCommand(datasetCmd)[0] == 'Done'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("setNetworkName() Error: " + str(e))
set Thread Network name Args: networkName: the networkname string to be set Returns: True: successful to set the Thread Networkname False: fail to set the Thread Networkname
juraj-google-style
def save_q_df(self, state_key, action_key, q_value):
    """Insert or update a Q-Value in ``self.q_df``.

    Args:
        state_key: State.
        action_key: Action.
        q_value: Q-Value.

    Raises:
        TypeError: If the type of ``q_value`` is not float.
    """
    if isinstance(q_value, float) is False:
        raise TypeError("The type of q_value must be float.")
    row = pd.DataFrame(
        [(state_key, action_key, q_value)],
        columns=["state_key", "action_key", "q_value"],
    )
    if self.q_df is None:
        self.q_df = row
    else:
        # Prepend the new row so drop_duplicates (keep='first') keeps the
        # most recent value for each (state, action) pair.
        self.q_df = pd.concat([row, self.q_df])
        self.q_df = self.q_df.drop_duplicates(["state_key", "action_key"])
Insert or update Q-Value in `self.q_df`. Args: state_key: State. action_key: Action. q_value: Q-Value. Exceptions: TypeError: If the type of `q_value` is not float.
juraj-google-style
def word_list(sowpods=False, start='', end=''):
    """Open the bundled word list file and yield matching words.

    Args:
        sowpods: a boolean to declare using the sowpods list or TWL
            (default).
        start: a string of starting characters to find anagrams based on.
        end: a string of ending characters to find anagrams based on.

    Yields:
        a word at a time out of 178691 words for TWL, 267751 for sowpods.
        Much less if either start or end are used (filtering is applied
        here).
    """
    location = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'wordlists')
    filename = 'sowpods.txt' if sowpods else 'twl.txt'
    filepath = os.path.join(location, filename)
    with open(filepath) as wordfile:
        for line in wordfile:
            word = line.strip()
            # Empty start/end act as wildcards.
            if ((not start or word.startswith(start)) and
                    (not end or word.endswith(end))):
                yield word
Opens the word list file. Args: sowpods: a boolean to declare using the sowpods list or TWL (default) start: a string of starting characters to find anagrams based on end: a string of ending characters to find anagrams based on Yields: a word at a time out of 178691 words for TWL, 267751 for sowpods. Much less if either start or end are used (filtering is applied here)
codesearchnet
def _RunIpRoute(self, args=None, options=None):
    """Run a command with ip route and return the response.

    Args:
        args: list, the string ip route command args to execute.
        options: dict, the string parameters to append to the ip route
            command.

    Returns:
        string, the standard output from the ip route command execution,
        or the empty string on failure.
    """
    command = ['ip', 'route']
    command.extend(args or [])
    for option_pair in (options or {}).items():
        command.extend(option_pair)
    try:
        process = subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
    except OSError as e:
        self.logger.warning('Exception running %s. %s.', command, str(e))
    else:
        if process.returncode:
            message = 'Non-zero exit status running %s. %s.'
            self.logger.warning(message, command, stderr.strip())
        else:
            return stdout.decode('utf-8', 'replace')
    return ''
Run a command with ip route and return the response. Args: args: list, the string ip route command args to execute. options: dict, the string parameters to append to the ip route command. Returns: string, the standard output from the ip route command execution.
codesearchnet
def Close(self):
    """Close the database file, committing pending changes first.

    Raises:
        RuntimeError: if the database is not opened.
    """
    if not self._connection:
        raise RuntimeError('Cannot close database not opened.')
    connection = self._connection
    connection.commit()
    connection.close()
    # Reset all connection-related state so a stale handle cannot be
    # reused accidentally.
    self._connection = None
    self._cursor = None
    self.filename = None
    self.read_only = None
Closes the database file. Raises: RuntimeError: if the database is not opened.
codesearchnet
def find_all_sift(im_source, im_search, min_match_count=4, maxcnt=0):
    """Find all occurrences of a template image in a source image using
    SIFT feature matching.

    Args:
        im_source: the source image (haystack) to search in.
        im_search: the template image (needle) to look for.
        min_match_count: minimum number of good keypoint matches needed
            to accept a detection.
        maxcnt: maximum number of detections to return; 0 means
            unlimited.

    Returns:
        None if the template has too few keypoints, otherwise a list of
        dicts: {"result": center_point, "rectangle": 4 corner points,
        "confidence": (inlier_count, good_match_count)}.
    """
    sift = _sift_instance()
    flann = cv2.FlannBasedMatcher(
        {'algorithm': FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50))

    kp_sch, des_sch = sift.detectAndCompute(im_search, None)
    if len(kp_sch) < min_match_count:
        return None
    kp_src, des_src = sift.detectAndCompute(im_source, None)
    if len(kp_src) < min_match_count:
        return None

    h, w = im_search.shape[1:]
    result = []
    while True:
        matches = flann.knnMatch(des_sch, des_src, k=2)
        good = []
        for m, n in matches:
            # Lowe's ratio test (loose 0.9 threshold).
            if m.distance < 0.9 * n.distance:
                good.append(m)
        if len(good) < min_match_count:
            break
        sch_pts = np.float32(
            [kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32(
            [kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
        matches_mask = mask.ravel().tolist()
        h, w = im_search.shape[:2]
        # Project the template corners into the source image.
        pts = np.float32(
            [[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        pypts = []
        for npt in dst.astype(int).tolist():
            pypts.append(tuple(npt[0]))
        lt, br = pypts[0], pypts[2]
        middle_point = (lt[0] + br[0]) / 2, (lt[1] + br[1]) / 2
        result.append(dict(
            result=middle_point,
            rectangle=pypts,
            confidence=(matches_mask.count(1), len(good))
        ))
        if maxcnt and len(result) >= maxcnt:
            break
        qindexes, tindexes = [], []
        for m in good:
            qindexes.append(m.queryIdx)
            tindexes.append(m.trainIdx)

        def filter_index(indexes, arr):
            # Drop the entries whose position is in `indexes` so the next
            # iteration cannot re-match the same source keypoints.
            r = np.ndarray(0, np.float32)
            for i, item in enumerate(arr):
                # BUG FIX: the original tested `i not in qindexes`,
                # silently ignoring the `indexes` argument; both call
                # sites pass tindexes (train/source indices).
                if i not in indexes:
                    r = np.append(r, item)
            return r
        kp_src = filter_index(tindexes, kp_src)
        des_src = filter_index(tindexes, des_src)
    return result
使用sift算法进行多个相同元素的查找 Args: im_source(string): 图像、素材 im_search(string): 需要查找的图片 threshold: 阈值,当相识度小于该阈值的时候,就忽略掉 maxcnt: 限制匹配的数量 Returns: A tuple of found [(point, rectangle), ...] A tuple of found [{"point": point, "rectangle": rectangle, "confidence": 0.76}, ...] rectangle is a 4 points list
juraj-google-style
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # One unary-unary stub per ErrorStatsService RPC method; each stub
    # pairs a protobuf request serializer with a response deserializer.
    self.ListGroupStats = channel.unary_unary(
        "/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListGroupStats",
        request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.ListGroupStatsRequest.SerializeToString,
        response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.ListGroupStatsResponse.FromString,
    )
    self.ListEvents = channel.unary_unary(
        "/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListEvents",
        request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.ListEventsRequest.SerializeToString,
        response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.ListEventsResponse.FromString,
    )
    self.DeleteEvents = channel.unary_unary(
        "/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/DeleteEvents",
        request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.DeleteEventsRequest.SerializeToString,
        response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.DeleteEventsResponse.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def __init__(self, package_name, version_range=None, paths=None, verbose=False):
    """Create a PackageHelp object.

    Args:
        package_name (str): Package to search.
        version_range (`VersionRange`): Versions to search.
        paths: not used by this implementation.
        verbose (bool): If True, print progress while searching.
    """
    self.package = None
    self._verbose = verbose
    self._sections = []
    # Find the latest package version that carries a help entry.
    package = None
    it = iter_packages(package_name, range_=version_range)
    packages = sorted(it, key=lambda x: x.version, reverse=True)
    for package_ in packages:
        if self._verbose:
            print "searching for help in %s..." % package_.uri
        if package_.help:
            package = package_
            break
    if package:
        help_ = package.help
        # help may be a single string or a list of [label, uri] pairs.
        if isinstance(help_, basestring):
            sections = [["Help", help_]]
        elif isinstance(help_, list):
            sections = help_
        if self._verbose:
            print "found %d help entries in %s." % (len(sections), package.uri)
        if package.num_variants == 0:
            base = package.base
            root = base
        else:
            # NOTE(review): only the first variant's paths are used —
            # presumably help entries do not differ per variant; confirm.
            variant = package.get_variant(0)
            base = variant.base
            root = variant.root
        formatter = scoped_formatter(
            base=base,
            root=root,
            config=config,
            version=VersionBinding(package.version),
            system=system)
        # Expand old-style command expansions and formatter variables in
        # each help URI in place.
        for section in sections:
            uri = section[1]
            uri = convert_old_command_expansions(uri)
            uri = uri.replace("$BROWSER", "").strip()
            uri = formatter.format(uri)
            section[1] = uri
        self.package = package
        self._sections = sections
Create a PackageHelp object. Args: package_name (str): Package to search. version_range (`VersionRange`): Versions to search.
juraj-google-style
def offset(self, mjd, new_scale, eop):
    """Compute the offset necessary in order to convert from one
    time-scale to another.

    Args:
        mjd (float): date at which the offset is evaluated.
        new_scale (str): Name of the desired scale.
        eop: Earth-orientation parameters forwarded to the conversion
            helpers.

    Return:
        float: offset to apply in seconds.

    Raises:
        DateError: when no conversion between two adjacent scales is
            known.
    """
    delta = 0
    for src, dst in self.steps(new_scale):
        src_name = src.name.lower()
        dst_name = dst.name.lower()
        # Conversion helpers may be defined in either direction; negate
        # when only the reverse direction exists.
        forward = "_scale_{}_minus_{}".format(dst_name, src_name)
        backward = "_scale_{}_minus_{}".format(src_name, dst_name)
        if hasattr(self, forward):
            delta += getattr(self, forward)(mjd, eop)
        elif hasattr(self, backward):
            delta -= getattr(self, backward)(mjd, eop)
        else:
            raise DateError("Unknown convertion {} => {}".format(src_name, dst_name))
    return delta
Compute the offset necessary in order to convert from one time-scale to another Args: mjd (float): new_scale (str): Name of the desired scale Return: float: offset to apply in seconds
juraj-google-style
def make_slices(self, tf_tensor, tensor_shape):
    """Turns a single tf.Tensor into a list of slices, one for each processor.

    Args:
        tf_tensor: tf.Tensor.
        tensor_shape: Shape.

    Returns:
        list of tf.tensor with length self.size.
    """
    tensor_layout = self.tensor_layout(tensor_shape)
    slice_shape = self.slice_shape(tensor_shape)
    def my_fn(pnum):
        # Fully replicated tensors are not sliced: every processor
        # receives the whole tensor.
        if tensor_layout.is_fully_replicated:
            return tf_tensor
        else:
            slice_begin = self.slice_begin(tensor_shape, pnum)
            return tf.slice(tf_tensor, slice_begin, slice_shape)
    # All slices are placed on the input tensor's device.
    return parallel([tf_tensor.device] * self.size, my_fn, list(xrange(self.size)))
Turns a single tf.Tensor into a list of slices, one for each processor. Args: tf_tensor: tf.Tensor. tensor_shape: Shape. Returns: list of tf.tensor with length self.size.
juraj-google-style
def _has_old_request_ended(self, shard_state):
    """Check via the Logs API whether the previous slice retry has ended.

    Args:
        shard_state: shard state.

    Returns:
        True if the request of the previous slice retry has ended. False
        if it has not, or if this cannot be determined.
    """
    assert shard_state.slice_start_time is not None
    assert shard_state.slice_request_id is not None
    logs = None
    try:
        logs = list(
            logservice.fetch(request_ids=[shard_state.slice_request_id]))
    except (apiproxy_errors.FeatureNotEnabledError,
            apiproxy_errors.CapabilityDisabledError) as e:
        # Logs API unavailable: treat the request status as unknown.
        logging.warning("Ignoring exception: %s", e)
    return bool(logs and logs[0].finished)
Whether previous slice retry has ended according to Logs API. Args: shard_state: shard state. Returns: True if the request of previous slice retry has ended. False if it has not or unknown.
juraj-google-style
def _take_screenshot(self):
    """Take a screenshot via WDA; also called by Mixin.

    Returns:
        PIL Image object built from the raw PNG bytes.
    """
    png_bytes = self._wda.screenshot()
    return Image.open(BytesIO(png_bytes))
Take a screenshot, also called by Mixin Args: - filename(string): file name to save Returns: PIL Image object
juraj-google-style
def symmetric_difference(self, other):
    """Construct an unminimized DFA recognizing the symmetric difference
    of the languages of two given DFAs.

    Args:
        other (DFA): The other DFA used for the symmetric difference
            operation.

    Returns:
        DFA: self, mutated in place into the resulting DFA.
    """
    # XOR on acceptance yields the symmetric-difference language.
    self.cross_product(other, bool.__xor__)
    return self
Constructs an unminimized DFA recognizing the symmetric difference of the languages of two given DFAs. Args: other (DFA): The other DFA that will be used for the symmetric difference operation Returns: DFA: The resulting DFA
juraj-google-style
def backup_value(self, value, up_to):
    """Propagate a value estimation up to the root node.

    Args:
        value: the value to be propagated (1 = black wins, -1 = white
            wins).
        up_to: the node to propagate until.
    """
    self.N += 1
    self.W += value
    # Continue up the tree unless we are at the root or at the requested
    # ancestor.
    if self.parent is not None and self is not up_to:
        self.parent.backup_value(value, up_to)
Propagates a value estimation up to the root node. Args: value: the value to be propagated (1 = black wins, -1 = white wins) up_to: the node to propagate until.
juraj-google-style
def copy_assets_to_destination_dir(asset_filename_map, destination_dir, saved_files=None):
    """Copy all assets from their source paths to the destination path.

    Args:
        asset_filename_map: a dict of filenames used for saving the asset
            in the SavedModel to full paths from which the filenames were
            derived.
        destination_dir: the destination directory that assets are stored
            in.
        saved_files: a set of destination filepaths that have already
            been copied and will be skipped.
    """
    saved_files = set() if saved_files is None else saved_files
    assets_destination_dir = path_helpers.get_or_create_assets_dir(destination_dir)
    for basename, source_path in asset_filename_map.items():
        destination_path = file_io.join(
            compat.as_bytes(assets_destination_dir), compat.as_bytes(basename))
        # Skip missing sources, self-copies, and already-copied targets.
        should_copy = (file_io.file_exists(source_path)
                       and source_path != destination_path
                       and destination_path not in saved_files)
        if should_copy:
            file_io.copy(source_path, destination_path, overwrite=True)
            saved_files.add(destination_path)
    tf_logging.info('Assets written to: %s', compat.as_text(assets_destination_dir))
Copy all assets from source path to destination path. Args: asset_filename_map: a dict of filenames used for saving the asset in the SavedModel to full paths from which the filenames were derived. destination_dir: the destination directory that assets are stored in. saved_files: a set of destination filepaths that have already been copied and will be skipped
github-repos
def check_output_variable(self, variable):
    """Check to see if an output variable was requested by a downstream
    app.

    Using the auto generated collection of output variables, check
    whether the provided variable was requested downstream.

    Args:
        variable (string): The variable name, not the full variable.

    Returns:
        (boolean): whether a match was found.
    """
    return variable in self.out_variables
Check to see if output variable was requested by downstream app. Using the auto generated dictionary of output variables check to see if provided variable was requested by downstream app. Args: variable (string): The variable name, not the full variable. Returns: (boolean): Boolean value indicating whether a match was found.
codesearchnet
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    """Extracts Task Cache events from a Windows Registry key.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    dynamic_info_size_error_reported = False

    tasks_key = registry_key.GetSubkeyByName('Tasks')
    tree_key = registry_key.GetSubkeyByName('Tree')
    if not tasks_key or not tree_key:
        parser_mediator.ProduceExtractionWarning(
            'Task Cache is missing a Tasks or Tree sub key.')
        return

    # Map task GUID strings (from the Tree sub keys) to task names.
    task_guids = {}
    for sub_key in tree_key.GetSubkeys():
        for value_key, id_value in self._GetIdValue(sub_key):
            id_value_data_size = len(id_value.data)
            if id_value_data_size != 78:
                parser_mediator.ProduceExtractionWarning(
                    'unsupported Id value data size: {0:d}.'.format(
                        id_value_data_size))
                continue
            guid_string = id_value.GetDataAsObject()
            task_guids[guid_string] = value_key.name

    dynamic_info_map = self._GetDataTypeMap('dynamic_info_record')
    dynamic_info2_map = self._GetDataTypeMap('dynamic_info2_record')
    dynamic_info_size = dynamic_info_map.GetByteSize()
    dynamic_info2_size = dynamic_info2_map.GetByteSize()

    for sub_key in tasks_key.GetSubkeys():
        dynamic_info_value = sub_key.GetValueByName('DynamicInfo')
        if not dynamic_info_value:
            continue

        # Select the record map that matches the value data size.
        dynamic_info_record_map = None
        dynamic_info_value_data_size = len(dynamic_info_value.data)
        if dynamic_info_value_data_size == dynamic_info_size:
            dynamic_info_record_map = dynamic_info_map
        elif dynamic_info_value_data_size == dynamic_info2_size:
            dynamic_info_record_map = dynamic_info2_map
        else:
            if not dynamic_info_size_error_reported:
                parser_mediator.ProduceExtractionWarning(
                    'unsupported DynamicInfo value data size: {0:d}.'.format(
                        dynamic_info_value_data_size))
                dynamic_info_size_error_reported = True
            continue

        try:
            dynamic_info_record = self._ReadStructureFromByteStream(
                dynamic_info_value.data, 0, dynamic_info_record_map)
        except (ValueError, errors.ParseError) as exception:
            parser_mediator.ProduceExtractionWarning(
                'unable to parse DynamicInfo record with error: {0!s}.'.format(
                    exception))
            # BUG FIX: without this continue, the code below dereferenced
            # an undefined dynamic_info_record after a parse failure.
            continue

        name = task_guids.get(sub_key.name, sub_key.name)
        values_dict = {}
        values_dict['Task: {0:s}'.format(name)] = '[ID: {0:s}]'.format(
            sub_key.name)

        event_data = windows_events.WindowsRegistryEventData()
        event_data.key_path = registry_key.path
        event_data.offset = registry_key.offset
        event_data.regvalue = values_dict

        event = time_events.DateTimeValuesEvent(
            registry_key.last_written_time,
            definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)

        event_data = TaskCacheEventData()
        event_data.task_name = name
        event_data.task_identifier = sub_key.name

        last_registered_time = dynamic_info_record.last_registered_time
        if last_registered_time:
            date_time = dfdatetime_filetime.Filetime(
                timestamp=last_registered_time)
            event = time_events.DateTimeValuesEvent(
                date_time, 'Last registered time')
            parser_mediator.ProduceEventWithEventData(event, event_data)

        launch_time = dynamic_info_record.launch_time
        if launch_time:
            date_time = dfdatetime_filetime.Filetime(timestamp=launch_time)
            event = time_events.DateTimeValuesEvent(date_time, 'Launch time')
            parser_mediator.ProduceEventWithEventData(event, event_data)

        unknown_time = getattr(dynamic_info_record, 'unknown_time', None)
        if unknown_time:
            date_time = dfdatetime_filetime.Filetime(timestamp=unknown_time)
            event = time_events.DateTimeValuesEvent(
                date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def create_app(*, debug=False, threads=1, bigchaindb_factory=None):
    """Return an instance of the Flask application.

    Args:
        debug (bool): a flag to activate the debug mode for the app
            (default: False).
        threads (int): number of threads to use.
        bigchaindb_factory: factory used to create BigchainDB instances;
            defaults to the BigchainDB class.

    Return:
        an instance of the Flask application.
    """
    factory = bigchaindb_factory or BigchainDB
    app = Flask(__name__)
    # Strip Content-Type headers on requests without a body.
    app.wsgi_app = StripContentTypeMiddleware(app.wsgi_app)
    CORS(app)
    app.debug = debug
    app.config['bigchain_pool'] = utils.pool(factory, size=threads)
    add_routes(app)
    return app
Return an instance of the Flask application. Args: debug (bool): a flag to activate the debug mode for the app (default: False). threads (int): number of threads to use Return: an instance of the Flask application.
codesearchnet
def is_storage(url, storage=None):
    """Check if a file is a local file or a storage file.

    File is considered local if:
        - URL is a local path.
        - URL starts with "file://".

    File is considered a storage file when a "storage" name is provided,
    or when the URL carries a non-"file" scheme.

    Args:
        url (str): file path or URL.
        storage (str): Storage name.

    Returns:
        bool: True if the file is a storage file, False if it is local.
    """
    if storage:
        return True
    # BUG FIX: the scheme separator literal was garbled in the source
    # (`url.split(': `); reconstructed as the standard "://" separator.
    split_url = url.split('://')
    if len(split_url) == 2 and split_url[0].lower() != 'file':
        return True
    return False
Check if file is a local file or a storage file. File is considered local if: - URL is a local path. - URL starts by "file://" - a "storage" is provided. Args: url (str): file path or URL storage (str): Storage name. Returns: bool: return True if file is local.
codesearchnet
def laid_out_slice_num(self, tensor_shape):
    """A LaidOutTensor with an int32 scalar, identical for identical slices.

    This is useful for synchronizing random operations.

    Args:
        tensor_shape: a TensorShape

    Returns:
        a LaidOutTensor where each slice is an integer scalar.
    """
    ret = self.slicewise(lambda: tf.to_int32(0))
    tensor_layout = self.tensor_layout(tensor_shape)
    for mesh_axis in tensor_layout.tensor_axis_to_mesh_axis:
        if mesh_axis is not None:
            # Fold this mesh axis' processor coordinate into the slice
            # number (mixed-radix accumulation over the split axes).
            def my_fn(x, pcoord, mesh_dim_size):
                return x * mesh_dim_size + pcoord
            ret = self.slicewise(
                my_fn, ret, self.laid_out_pcoord(mesh_axis),
                self.shape[mesh_axis].size)
    return ret
A LaidOutTensor with an int32 scalar, identical for identical slices. This is useful for synchronizing random operations. Args: tensor_shape: a TensorShape Returns: a LaidOutTensor where each slice is an integer scalar.
juraj-google-style
def get_configuration(variable, site_code=None):
    """Get a value from configuration.

    Retrieves the value corresponding to the given variable from the
    configuration module currently in use by the app. Specify a
    site_code value to check for a site-specific override.

    Arguments:
        variable (str): The name of a variable from the configuration
            module.

    Keyword Arguments:
        site_code (str): The SITE_OVERRIDES key to inspect for
            site-specific values.

    Returns:
        The value corresponding to the variable.

    Raises:
        RuntimeError: if the variable is unset in the configuration
            module.
    """
    module_name = os.environ.get(CONFIGURATION_MODULE)
    __import__(module_name)
    module = sys.modules[module_name]
    setting_value = getattr(module, variable, None)
    # Site-specific overrides take precedence over the module-level value.
    site_overrides = getattr(module, 'SITE_OVERRIDES', None)
    if site_overrides and site_code is not None:
        overrides_for_site = site_overrides.get(site_code)
        if overrides_for_site:
            override_value = overrides_for_site.get(variable)
            if override_value:
                setting_value = override_value
    if setting_value is None:
        raise RuntimeError('Worker is improperly configured: {} is unset in {}.'.format(variable, module))
    return setting_value
Get a value from configuration. Retrieves the value corresponding to the given variable from the configuration module currently in use by the app. Specify a site_code value to check for a site-specific override. Arguments: variable (str): The name of a variable from the configuration module. Keyword Arguments: site_code (str): The SITE_OVERRIDES key to inspect for site-specific values Returns: The value corresponding to the variable, or None if the variable is not found.
codesearchnet
def tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim):
    """How many ways does a tensor dimension get split.

    This is used to "cheat" when building the mtf graph and peek at how a
    tensor dimension will be split. Returns 1 if the tensor dimension is
    not split.

    Args:
        layout: an input to convert_to_layout_rules.
        mesh_shape: an input to convert_to_shape.
        tensor_dim: a Dimension.

    Returns:
        an integer.
    """
    rules = convert_to_layout_rules(layout)
    shape = convert_to_shape(mesh_shape)
    mesh_axis = rules.tensor_dimension_to_mesh_axis(tensor_dim, shape)
    # Unassigned dimensions are replicated, i.e. split one way.
    return 1 if mesh_axis is None else shape.dims[mesh_axis].size
How many ways does a tensor dimension get split. This is used to "cheat" when building the mtf graph and peek at how a tensor dimension will be split. Returns 1 if the tensor dimension is not split. Args: layout: an input to convert_to_layout_rules mesh_shape: an input to convert_to_shape tensor_dim: a Dimension Returns: an integer
codesearchnet
def fts_contrast2(self, fs, ft_name, inv):
    """Return `True` if there is a segment pair in `inv` that contrasts
    in feature `ft_name`.

    Args:
        fs (list): feature specifications used to filter `inv`.
        ft_name (str): name of the feature where contrast must be
            present.
        inv (list): collection of segments represented as Unicode
            segments.

    Returns:
        bool: `True` if two segments in `inv` are identical in features
        except for feature `ft_name`.
    """
    # Restrict the inventory to segments matching the filter features.
    matching = [self.fts(seg) for seg in inv if set(fs) <= self.fts(seg)]
    for left in matching:
        for right in matching:
            if left == right:
                continue
            diff = left ^ right
            # A minimal contrast differs in exactly one feature: two
            # (value, name) pairs sharing the same feature name.
            if len(diff) == 2 and all(name == ft_name for _, name in diff):
                return True
    return False
Return `True` if there is a segment in `inv` that contrasts in feature `ft_name`. Args: fs (list): feature specifications used to filter `inv`. ft_name (str): name of the feature where contrast must be present. inv (list): collection of segments represented as Unicode segments. Returns: bool: `True` if two segments in `inv` are identical in features except for feature `ft_name`
juraj-google-style
def extract_xml(input_):
    """Extract XML from a zip or gzip file at the given path, file-like
    object, or bytes.

    Args:
        input_: A path to a file, a file like object, or bytes.

    Returns:
        str: The extracted XML.

    Raises:
        InvalidAggregateReport: if the input is not a valid zip, gzip, or
            xml file, or was opened in text mode.
    """
    if type(input_) == str:
        file_object = open(input_, "rb")
    elif type(input_) == bytes:
        file_object = BytesIO(input_)
    else:
        file_object = input_
    try:
        # Sniff the magic bytes, then rewind for the real read.
        header = file_object.read(6)
        file_object.seek(0)
        if header.startswith(MAGIC_ZIP):
            _zip = zipfile.ZipFile(file_object)
            xml = _zip.open(_zip.namelist()[0]).read().decode()
        elif header.startswith(MAGIC_GZIP):
            xml = GzipFile(fileobj=file_object).read().decode()
        elif header.startswith(MAGIC_XML):
            xml = file_object.read().decode()
        else:
            file_object.close()
            raise InvalidAggregateReport("Not a valid zip, gzip, or xml file")
        file_object.close()
    except UnicodeDecodeError:
        raise InvalidAggregateReport("File objects must be opened in binary "
                                     "(rb) mode")
    except Exception as error:
        raise InvalidAggregateReport(
            "Invalid archive file: {0}".format(error.__str__()))
    return xml
Extracts xml from a zip or gzip file at the given path, file-like object, or bytes. Args: input_: A path to a file, a file like object, or bytes Returns: str: The extracted XML
juraj-google-style
def _VerifyOneType(self, pool_func, input_sizes, ksize, strides, padding, data_format, data_type, expected, use_gpu, v2, use_negative_input=False, bfloat16_rtol=0.01):
    """Verifies the output values of the pooling function.

    Args:
        pool_func: Function to be called, co.MaxPool, co.AvgPool, or the
            Lua version.
        input_sizes: Input tensor dimensions.
        ksize: The kernel size dimensions.
        strides: The stride dimensions.
        padding: Padding type.
        data_format: The data format we use to run the pooling operation.
        data_type: The data type to use to run the pooling operation.
        expected: An array containing the expected operation outputs.
        use_gpu: Whether we are running on GPU.
        v2: Whether to use the v2 version.
        use_negative_input: If the input values should be negative.
        bfloat16_rtol: relative tolerance for bfloat16.
    """
    # --- Skip configurations the current environment cannot run. ---
    if use_gpu and (not test.is_gpu_available()):
        self.skipTest('No GPU is available.')
    if use_gpu and data_type == dtypes.float64 and test.is_built_with_rocm():
        self.skipTest("ROCm pooling ops don't support float64.")
    if use_gpu and data_format == 'NCHW_VECT_C' and (not test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=(6, 1))):
        self.skipTest('NCHW_VECT_C requires sm61+.')
    if v2 and data_format != 'NHWC':
        self.skipTest('v2 not supported for %s' % data_format)
    if v2 and (not isinstance(padding, str)):
        self.skipTest('non-constant ksize/strides requires nonexplicit padding')
    if data_format == 'NCHW_VECT_C':
        if data_type != dtypes.float32:
            self.skipTest('quantization to qint8 not implemented for %r' % data_type)
        if input_sizes[-1] % 4 != 0:
            self.skipTest('Skipping test for depth %d' % input_sizes[-1])
    total_size = 1
    for s in input_sizes:
        total_size *= s
    # NOTE(review): this log call's format string was split mid-literal
    # in the source; reconstructed as a single format string.
    tf_logging.info('Running %s test. %r %r %d %r %r %r %s', data_format,
                    v2, input_sizes, total_size, pool_func, ksize, strides,
                    data_type)
    # Build deterministic input values in [-127, 127], optionally negated.
    y = -1 if use_negative_input else 1
    x = [((f + 128) % 255 - 127) * y for f in range(total_size)]
    with self.cached_session(use_gpu=use_gpu):
        t = constant_op.constant(x, shape=input_sizes, dtype=data_type)
        # Convert the NHWC reference layout into the requested layout
        # (quantizing to qint8 for NCHW_VECT_C).
        if data_format in ('NCHW', 'NCHW_VECT_C', 'NCW'):
            if data_format == 'NCHW_VECT_C':
                t = test_util.NHWCToNCHW_VECT_C(t)
                t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
            else:
                t = test_util.NHWCToNCHW(t)
            ksize = test_util.NHWCToNCHW(ksize)
            strides = test_util.NHWCToNCHW(strides)
            if isinstance(padding, list):
                padding = test_util.NHWCToNCHW(padding)
        ksize_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
        strides_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
        if v2:
            t = pool_func(t, ksize=ksize_placeholder, strides=strides_placeholder, padding=padding, data_format=data_format)
        else:
            t = pool_func(t, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
        # Convert the result back to NHWC before comparing.
        if data_format == 'NCHW_VECT_C':
            t = gen_array_ops.dequantize(t, -128, 127)
            t = test_util.NCHW_VECT_CToNHWC(t)
        elif data_format == 'NCHW':
            t = test_util.NCHWToNHWC(t)
        if v2:
            actual = t.eval(feed_dict={ksize_placeholder: ksize, strides_placeholder: strides})
        else:
            actual = self.evaluate(t)
        self.assertShapeEqual(actual, t)
        self.assertAllCloseAccordingToType(expected, actual.flatten(), bfloat16_rtol=bfloat16_rtol)
Verifies the output values of the pooling function. Args: pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua version. input_sizes: Input tensor dimensions. ksize: The kernel size dimensions strides: The stride dimensions padding: Padding type. data_format: The data format we use to run the pooling operation. data_type: The data type to use to run the pooling operation. expected: An array containing the expected operation outputs. use_gpu: Whether we are running on GPU. v2: Whether to use v2 version. use_negative_input: If the input values should be negative. bfloat16_rtol: relative tolerance for bfloat16.
github-repos
def compile_date(self):
    """Return a string specifying the date and time at which the DLL was
    translated.

    Args:
        self (JLink): the ``JLink`` instance.

    Returns:
        Datetime string decoded from the DLL's C string.
    """
    raw = self._dll.JLINKARM_GetCompileDateTime()
    # The DLL returns a C string pointer; decode it to a Python str.
    c_string = ctypes.cast(raw, ctypes.c_char_p)
    return c_string.value.decode()
Returns a string specifying the date and time at which the DLL was translated. Args: self (JLink): the ``JLink`` instance Returns: Datetime string.
codesearchnet
def thread_exists(self, thread_id):
    """Check if a thread exists or has 404'd.

    Args:
        thread_id (int): Thread ID.

    Returns:
        bool: Whether the given thread exists on this board.
    """
    # A lightweight HEAD request is enough to detect a 404.
    url = self._url.thread_api_url(thread_id=thread_id)
    response = self._requests_session.head(url)
    return response.ok
Check if a thread exists or has 404'd. Args: thread_id (int): Thread ID Returns: bool: Whether the given thread exists on this board.
codesearchnet
def resolve_attr(obj, path):
    """Navigate a dotted attribute path starting from *obj*.

    Args:
        obj: An object for which we want to retrieve a nested attribute.
        path: A dot-separated string of zero or more attribute names.

    Returns:
        The attribute referred to by ``obj.a1.a2.a3...``; *obj* itself when
        *path* is empty.

    Raises:
        AttributeError: If there is no such attribute.
    """
    current = obj
    remaining = path
    # Peel one name off the front of the path per iteration; an empty
    # remainder terminates the walk, matching the recursive formulation.
    while remaining:
        name, _, remaining = remaining.partition('.')
        current = getattr(current, name)
    return current
A recursive version of getattr for navigating dotted paths. Args: obj: An object for which we want to retrieve a nested attribute. path: A dot separated string containing zero or more attribute names. Returns: The attribute referred to by obj.a1.a2.a3... Raises: AttributeError: If there is no such attribute.
codesearchnet
def setup(self, universe):
    """Setup Security with universe. Speeds up future runs.

    Args:
        universe (pd.DataFrame): DataFrame of prices with this security's
            name as one of the columns.
    """
    try:
        prices = universe[self.name]
    except KeyError:
        # Security not present in the universe; prices will be fed in later.
        prices = None
    if (prices is not None):
        self._prices = prices
        self.data = pd.DataFrame(index=universe.index, columns=['value', 'position'], data=0.0)
        self._prices_set = True
    else:
        # No external prices: allocate a price column and track it ourselves.
        self.data = pd.DataFrame(index=universe.index, columns=['price', 'value', 'position'])
        self._prices = self.data['price']
        self._prices_set = False
    self._values = self.data['value']
    self._positions = self.data['position']
    self.data['outlay'] = 0.0
    self._outlays = self.data['outlay']
Setup Security with universe. Speeds up future runs. Args: * universe (DataFrame): DataFrame of prices with security's name as one of the columns.
codesearchnet
def sh(self, cmd, ignore_error=False, cwd=None, shell=False, **kwargs):
    """Run *cmd* with the working directory defaulting to ``self.fpath``.

    Args:
        cmd (str or tuple): command string or list-like argv.
        ignore_error (bool): if False, the wrapped call raises when the
            process exits non-zero.
        cwd (str): working directory for the command (defaults to
            ``self.fpath``).
        shell (bool): passed through as ``subprocess.Popen``'s ``shell``.

    Returns:
        str: stdout of the wrapped call to the module-level ``sh`` helper.
    """
    kwargs.update({'shell': shell, 'cwd': (cwd or self.fpath), 'stderr': subprocess.STDOUT, 'stdout': subprocess.PIPE, 'ignore_error': ignore_error})
    log.debug((('cmd', cmd), ('kwargs', kwargs)))
    # NOTE: this delegates to the module-level ``sh`` function, which this
    # method name shadows inside the class.
    return sh(cmd, **kwargs)
Run a command with the current working directory set to self.fpath Args: cmd (str or tuple): cmdstring or listlike Keyword Arguments: ignore_error (bool): if False, raise an Exception if p.returncode is not 0 cwd (str): current working dir to run cmd with shell (bool): subprocess.Popen ``shell`` kwarg Returns: str: stdout output of wrapped call to ``sh`` (``subprocess.Popen``)
codesearchnet
def filter_single_value(cls, part_info, error_msg=None):
    """Filter *part_info* expecting exactly one instance of this class.

    Args:
        part_info (dict): ``{part_name: [Info] or None}`` as returned from
            ``Controller.run_hook()``.
        error_msg (str, optional): specific message to raise when there is
            not exactly one value.

    Returns:
        The single Info instance of type *cls*.

    Raises:
        BadValueError: If zero or more than one matching value is found.
    """
    filtered = cls.filter_values(part_info)
    if len(filtered) != 1:
        if error_msg is None:
            error_msg = "Expected a single %s, got %s of them" % \
                (cls.__name__, len(filtered))
        raise BadValueError(error_msg)
    return filtered[0]
Filter the part_info dict list looking for a single instance of our class Args: part_info (dict): {part_name: [Info] or None} as returned from Controller.run_hook() error_msg (str, optional): Specific error message to show if there isn't a single value Returns: info subclass of cls
juraj-google-style
def port(alias_name, default=None, allow_none=False):
    """Get the port from the docker link alias or return the default.

    Args:
        alias_name: The docker link alias.
        default: The default value if the link isn't available.
        allow_none: If the return value can be ``None`` (i.e. optional).

    Returns:
        int: the port from the link, or ``default`` when the link's
        environment variable is missing and a default is allowed.

    Raises:
        KeyError: If the link isn't available and no default applies.
    """
    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
    try:
        return int(_split_docker_link(alias_name)[2])
    except KeyError:
        # Compare against None explicitly: the old truthiness test
        # (``if default or allow_none``) re-raised when a caller supplied a
        # falsy-but-real default such as 0.
        if default is not None or allow_none:
            return default
        # Bare raise preserves the original traceback.
        raise
Get the port from the docker link alias or return the default. Args: alias_name: The docker link alias default: The default value if the link isn't available allow_none: If the return value can be `None` (i.e. optional) Examples: Assuming a Docker link was created with ``docker --link postgres:db`` and the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``. >>> envitro.docker.port('DB') 5432
juraj-google-style
def datetimeobj(value, fmt=None):
    """Parse a datetime string to a datetime object.

    Tries fast custom parsers for common formats (RFC-822-ish dates,
    ``YmdHMS``, epoch seconds) before falling back to the general parser.

    Args:
        value: A string representing a datetime.
        fmt: Optional explicit format; dispatched through
            ``_datetimeobj_formats`` when given.

    Returns:
        A datetime object.
    """
    if fmt:
        return _datetimeobj_formats.get(fmt, (lambda v: datetimeobj_fmt(v, fmt)))(value)
    l = len(value)
    # Length/shape heuristics pick the most likely fast parser; each attempt
    # falls through to the next on failure.
    if ((19 <= l <= 24) and (value[3] == ' ')):
        try:
            return datetimeobj_d_b_Y_H_M_S(value)
        except (KeyError, ValueError):
            pass
    if (30 <= l <= 31):
        try:
            return datetimeobj_a__d_b_Y_H_M_S_z(value)
        except (KeyError, ValueError):
            pass
    if (l == 14):
        try:
            return datetimeobj_YmdHMS(value)
        except ValueError:
            pass
    try:
        return datetimeobj_epoch(value)
    except ValueError:
        pass
    # Last resort: slow general-purpose parser.
    return datetimeobj_any(value)
Parse a datetime to a datetime object. Uses fast custom parsing for common datetime formats or the slow dateutil parser for other formats. This is a trade off between ease of use and speed and is very useful for fast parsing of timestamp strings whose format may standard but varied or unknown prior to parsing. Common formats include: 1 Feb 2010 12:00:00 GMT Mon, 1 Feb 2010 22:00:00 +1000 20100201120000 1383470155 (seconds since epoch) See the other datetimeobj_*() functions for more details. Args: value: A string representing a datetime. Returns: A datetime object.
codesearchnet
def RegisterDefinition(self, artifact_definition):
    """Registers an artifact definition.

    Artifact definitions are identified by their lower-case name.

    Args:
        artifact_definition (ArtifactDefinition): an artifact definition.

    Raises:
        KeyError: if an artifact definition is already set for the name.
    """
    artifact_definition_name = artifact_definition.name.lower()
    if (artifact_definition_name in self._artifact_definitions):
        raise KeyError('Artifact definition already set for name: {0:s}.'.format(artifact_definition.name))
    self._artifact_definitions[artifact_definition_name] = artifact_definition
    self._defined_artifact_names.add(artifact_definition.name)
    # Track names referenced by artifact-group sources so dangling references
    # can be detected elsewhere.
    for source in artifact_definition.sources:
        if (source.type_indicator == definitions.TYPE_INDICATOR_ARTIFACT_GROUP):
            self._artifact_name_references.update(source.names)
Registers an artifact definition. Artifact definitions are identified based on their lower case name. Args: artifact_definition (ArtifactDefinition): an artifact definition. Raises: KeyError: if artifact definition is already set for the corresponding name.
codesearchnet
def _psd_mask(x):
    """Computes whether each square matrix in the input is positive
    semi-definite.

    Args:
        x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.

    Returns:
        mask: A floating-point `Tensor` of shape `[B1, ..., Bn]`; 1 where the
        corresponding matrix is PSD, otherwise 0.
    """
    # A symmetric matrix is PSD iff its smallest eigenvalue is >= 0.
    (eigenvalues, _) = tf.linalg.eigh(x)
    return tf.cast((tf.reduce_min(input_tensor=eigenvalues, axis=(- 1)) >= 0), dtype=x.dtype)
Computes whether each square matrix in the input is positive semi-definite. Args: x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`. Returns: mask: A floating-point `Tensor` of shape `[B1, ... Bn]`. Each scalar is 1 if the corresponding matrix was PSD, otherwise 0.
codesearchnet
def _GetTable(self):
    """Returns the table, including column headers and separators.

    Returns:
        str: the whole table; each row joined by a newline and each entry by
        ``self.separator``.
    """
    result = []
    # Bind str locally to avoid a global lookup per cell in large tables.
    lstr = str
    for row in self._table:
        result.append(('%s\n' % self.separator.join((lstr(v) for v in row))))
    return ''.join(result)
Returns table, with column headers and separators. Returns: The whole table including headers as a string. Each row is joined by a newline and each entry by self.separator.
codesearchnet
def load_case(adapter, case_obj, update=False):
    """Load a case into the database.

    Args:
        adapter: Connection to the database.
        case_obj (dict): the case to insert.
        update (bool): whether an existing case may be updated.

    Returns:
        dict: the (possibly merged) case object.

    Raises:
        CaseError: if the case exists and ``update`` is False, or the adapter
            rejects the insert.
    """
    existing_case = adapter.case(case_obj)
    if existing_case:
        if not update:
            raise CaseError("Case {0} already exists in database".format(case_obj['case_id']))
        # Merge the new data into the stored case before re-inserting.
        case_obj = update_case(case_obj, existing_case)
    try:
        adapter.add_case(case_obj, update=update)
    except CaseError as err:
        raise err
    return case_obj
Load a case to the database Args: adapter: Connection to database case_obj: dict update(bool): If existing case should be updated Returns: case_obj(models.Case)
juraj-google-style
def __init__(self, context, request):
    """Constructor.

    Args:
        context: A backend_context.Context instance.
        request: A ListSessionGroupsRequest protobuf.
    """
    self._context = context
    self._request = request
    # Extractors/filters are derived once from the request's column params.
    self._extractors = _create_extractors(request.col_params)
    self._filters = _create_filters(request.col_params, self._extractors)
    self._experiment = context.experiment()
Constructor. Args: context: A backend_context.Context instance. request: A ListSessionGroupsRequest protobuf.
juraj-google-style
def __init__(self, source_urn=None, token=None):
    """OutputPlugin constructor.

    Args:
        source_urn: URN identifying the source of the data (hunt or flow).
        token: Security token.

    Raises:
        ValueError: If one of the keyword arguments is empty.
    """
    super(InstantOutputPlugin, self).__init__()
    if not source_urn:
        raise ValueError("source_urn can't be empty.")
    if not token:
        raise ValueError("token can't be empty.")
    self.source_urn = source_urn
    self.token = token
OutputPlugin constructor. Args: source_urn: URN identifying source of the data (hunt or flow). token: Security token. Raises: ValueError: If one of the keyword arguments is empty.
juraj-google-style
def inspect_node(self, node_id):
    """Retrieve low-level information about a swarm node.

    Args:
        node_id (string): ID of the node to be inspected.

    Returns:
        dict: data about the node.

    Raises:
        docker.errors.APIError: If the server returns an error.
    """
    url = self._url('/nodes/{0}', node_id)
    return self._result(self._get(url), True)
Retrieve low-level information about a swarm node Args: node_id (string): ID of the node to be inspected. Returns: A dictionary containing data about this node. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def _GetIntegerValue(self, row, value_name):
    """Converts a specific value of the row to an integer.

    Args:
        row (dict[str, str]): fields of a single row.
        value_name (str): name of the value within the row.

    Returns:
        int: value, or None if missing or not a base-10 integer string.
    """
    value = row.get(value_name, None)
    try:
        return int(value, 10)
    except (TypeError, ValueError):
        # TypeError covers a missing (None) value; ValueError covers
        # non-numeric text.
        return None
Converts a specific value of the row to an integer. Args: row (dict[str, str]): fields of a single row, as specified in COLUMNS. value_name (str): name of the value within the row. Returns: int: value or None if the value cannot be converted.
codesearchnet
def _maybe_extract(compressed_filename, directory, extension=None):
    """Extract a compressed file to ``directory``.

    Args:
        compressed_filename (str): Compressed file.
        directory (str): Extract to directory.
        extension (str, optional): Extension of the file; otherwise the
            extension is inferred from the filename (split on the first dot
            so multi-part extensions like ``tar.gz`` survive).

    Raises:
        ValueError: If no extension was given and the filename has none, or
            if the extension is not a recognised archive type.
    """
    logger.info('Extracting {}'.format(compressed_filename))
    if extension is None:
        basename = os.path.basename(compressed_filename)
        if '.' not in basename:
            # Previously this raised an opaque IndexError; fail clearly.
            raise ValueError('Could not infer extension from {}'.format(compressed_filename))
        extension = basename.split('.', 1)[1]
    if 'zip' in extension:
        with zipfile.ZipFile(compressed_filename, "r") as zip_:
            zip_.extractall(directory)
    elif 'tar' in extension or 'tgz' in extension:
        with tarfile.open(compressed_filename, mode='r') as tar:
            tar.extractall(path=directory)
    else:
        # Previously an unknown extension fell through and logged
        # "Extracted" without doing anything; make the failure explicit.
        raise ValueError('Unsupported archive extension: {}'.format(extension))
    logger.info('Extracted {}'.format(compressed_filename))
Extract a compressed file to ``directory``. Args: compressed_filename (str): Compressed file. directory (str): Extract to directory. extension (str, optional): Extension of the file; Otherwise, attempts to extract extension from the filename.
juraj-google-style
def get(logdir):
    """Returns the (cached) FileWriter for the specified directory.

    Args:
        logdir: str, name of the directory.

    Returns:
        A `FileWriter`; one instance per logdir, created on first use.
    """
    with FileWriterCache._lock:
        if logdir not in FileWriterCache._cache:
            # Lazily create the writer bound to the current default graph.
            FileWriterCache._cache[logdir] = FileWriter(logdir, graph=ops.get_default_graph())
        return FileWriterCache._cache[logdir]
Returns the FileWriter for the specified directory. Args: logdir: str, name of the directory. Returns: A `FileWriter`.
github-repos
def also_run_as_tf_function(f: Callable[..., Any]) -> Callable[..., None]:
    """Runs the decorated test twice--once as is, once inside a tf.function.

    Args:
        f: the test method to be decorated.

    Returns:
        The decorated test method, which runs both eagerly and inside a
        tf.function (with autograph disabled).
    """
    def decorated(*args, **kwds) -> None:
        def bound_f() -> None:
            f(*args, **kwds)
        with context.eager_mode():
            # First run eagerly, then trace and run the same body as a
            # concrete tf.function.
            bound_f()
            def_function.function(bound_f, autograph=False)()
    return decorated
Runs the decorated test twice--once as is, once inside a tf.function. This allows you to run a test both in eager execution and inside a tf.function, exercising the two execution modes supported in tf 2.0. The test assertions are automatically done inside tf.py_funcs, and tf.function ensures that they run in the proper order and with the proper side effects. Currently variable creation is not supported in tests annotated with this decorator since it's tricky to ensure the variable doesn't get repeatedly created when retracing the tf.function. Args: f: the test method to be decorated Returns: The decorated test method, which will run both in eager and inside a tf.function.
github-repos
def Add(self, path, age=None):
    """Add a relative stem to the current value and return a new RDFURN.

    Args:
        path: A string containing a relative path.
        age: The age of the object. If None, the current time is used.

    Returns:
        A new RDFURN that can be chained.

    Raises:
        ValueError: if the path component is not a string.
    """
    if (not isinstance(path, string_types)):
        raise ValueError('Only strings should be added to a URN.')
    result = rdfvalue.RDFURN(self.Copy(age))
    result.Update(path=utils.JoinPath(self._string_urn, path))
    return result
Add a relative stem to the current value and return a new RDFURN. Note that this returns an RDFURN, not a ClientURN since the resulting object would not pass validation. Args: path: A string containing a relative path. age: The age of the object. If None set to current time. Returns: A new RDFURN that can be chained. Raises: ValueError: if the path component is not a string.
codesearchnet
def add_sched_block_instance(self, config_dict):
    """Add a Scheduling Block Instance (SBI) to the database.

    Validates the configuration against the schema, stamps it with status,
    splits it into scheduling-block and processing-block records, then
    stores each and publishes the corresponding events.

    Args:
        config_dict (dict): SBI configuration.
    """
    schema = self._get_schema()
    LOG.debug('Adding SBI with config: %s', config_dict)
    validate(config_dict, schema)
    updated_block = self._add_status(config_dict)
    (scheduling_block_data, processing_block_data) = self._split_sched_block_instance(updated_block)
    name = ('scheduling_block:' + updated_block['id'])
    self._db.set_specified_values(name, scheduling_block_data)
    self._db.push_event(self.scheduling_event_name, updated_block['status'], updated_block['id'])
    # Each processing block is stored under a key nested beneath its SBI.
    for value in processing_block_data:
        name = ((('scheduling_block:' + updated_block['id']) + ':processing_block:') + value['id'])
        self._db.set_specified_values(name, value)
        self._db.push_event(self.processing_event_name, value['status'], value['id'])
Add Scheduling Block to the database. Args: config_dict (dict): SBI configuration
codesearchnet
def from_chars(chars):
    """Make a Pauli term from a string of "X", "Y", "Z" or "I" characters.

    e.g. ``"XZIY" => X(0) * Z(1) * Y(3)`` — the character's position is the
    qubit index; "I" contributes nothing.

    Args:
        chars (str): Written in "X", "Y", "Z" or "I".

    Returns:
        Term: the product of the per-qubit Pauli operators.
    """
    paulis = [pauli_from_char(c, n) for n, c in enumerate(chars) if c != "I"]
    if not paulis:
        # All identity: return the scalar identity term.
        return 1.0 * I
    if len(paulis) == 1:
        return 1.0 * paulis[0]
    return reduce(lambda a, b: a * b, paulis)
Make a Pauli Term from a string of the characters "X", "Y", "Z" or "I". e.g. "XZIY" => X(0) * Z(1) * Y(3) Args: chars (str): Written in "X", "Y", "Z" or "I". Returns: Term: A `Term` object. Raises: ValueError: When chars contains a character other than "X", "Y", "Z" or "I".
juraj-google-style
def Search(self, artifact=None, os_name=None, cpe=None, label=None):
    """Find the host attributes that trigger data collection.

    Args:
        artifact: An artifact name.
        os_name: An OS string.
        cpe: A CPE string.
        label: A label string.

    Returns:
        A list of conditions that contain the specified attributes.
    """
    matched = []
    for condition in self.conditions:
        if condition.Search(artifact, os_name, cpe, label):
            matched.append(condition)
    return matched
Find the host attributes that trigger data collection. Args: artifact: An artifact name. os_name: An OS string. cpe: A CPE string. label: A label string. Returns: A list of conditions that contain the specified attributes.
codesearchnet
def add_data(self, data):
    """Add data to the stream, emitting reports as each new one is seen.

    Args:
        data (bytearray): a chunk of new data to append to the raw buffer.
    """
    # Once the parser has hit an error state, further input is discarded.
    if self.state == self.ErrorState:
        return
    self.raw_data += bytearray(data)
    # Keep consuming until process_data() reports no further progress.
    still_processing = True
    while still_processing:
        still_processing = self.process_data()
Add data to our stream, emitting reports as each new one is seen Args: data (bytearray): A chunk of new data to add
juraj-google-style
def get_directory_list_doc(self, configs):
    """JSON dict description of a protorpc.remote.Service in list format.

    Args:
        configs: Either a single dict or a list of dicts of service
            configurations to list.

    Returns:
        dict: The directory list document as a JSON dict.
    """
    # Normalize a single config to a one-element list before validation.
    if (not isinstance(configs, (tuple, list))):
        configs = [configs]
    util.check_list_type(configs, dict, 'configs', allow_none=False)
    return self.__directory_list_descriptor(configs)
JSON dict description of a protorpc.remote.Service in list format. Args: configs: Either a single dict or a list of dicts containing the service configurations to list. Returns: dict, The directory list document as a JSON dict.
codesearchnet
def local_hardware_info():
    """Basic hardware information about the local machine.

    Reports the physical CPU count (hyperthreading excluded); defaults to 1
    when the true count can't be determined. Memory is reported in GiB.

    Returns:
        dict: keys ``os``, ``memory`` and ``cpus``.
    """
    results = {'os': platform.system(), 'memory': (psutil.virtual_memory().total / (1024 ** 3)), 'cpus': (psutil.cpu_count(logical=False) or 1)}
    return results
Basic hardware information about the local machine. Gives the actual number of CPUs in the machine, even when hyperthreading is turned on. CPU count defaults to 1 when the true count can't be determined. Returns: dict: The hardware information.
codesearchnet
def to_json_str(self):
    """Convert this object's JSON data to its string representation.

    Returns:
        str: JSON representation with sorted keys, encoded via
        ``JsonEncoder``.

    Raises:
        Exception: re-raises whatever ``json.dumps`` raised (typically
            ``TypeError`` for unserializable values) after logging it.
    """
    _json = self.to_json()
    try:
        return json.dumps(_json, sort_keys=True, cls=JsonEncoder)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not intercepted merely to be logged.
        logging.exception('Could not serialize JSON: %r', _json)
        raise
Convert data to json string representation. Returns: json representation as string.
codesearchnet
def latest_db_file(paths: List[str]) -> Optional[str]:
    """Returns the path with the highest version number, or None.

    Args:
        paths: A list of file names whose basenames match ``VERSION_RE``.

    Raises:
        AssertionError: If any of the ``paths`` in the list is an invalid
            name (documented contract of this helper).
    """
    dbs = {}
    for db_path in paths:
        matches = VERSION_RE.match(os.path.basename(db_path))
        assert matches, f'Invalid path name {db_path}'
        try:
            version = int(matches.group(1))
        except ValueError:
            # Non-numeric version component: skip this candidate.
            continue
        dbs[version] = db_path
    if dbs:
        # max() finds the highest version in O(n); the previous
        # sorted(dbs)[-1] performed a full O(n log n) sort.
        return dbs[max(dbs)]
    return None
Returns the path with the highest `version` number. Raises: AssertionError: If any of the `paths` in the list is an invalid name. Args: paths: A list of file names.
juraj-google-style
def to_api_repr(self):
    """Build an API representation of this object.

    Returns:
        Dict[str, Any]: A dictionary in the format used by the BigQuery API.
    """
    config = copy.deepcopy(self._properties)
    if self.options is None:
        return config
    resource = self.options.to_api_repr()
    # Only attach the options sub-resource when it is non-empty.
    if resource != {}:
        config[self.options._RESOURCE_NAME] = resource
    return config
Build an API representation of this object. Returns: Dict[str, Any]: A dictionary in the format used by the BigQuery API.
codesearchnet
def get(self, path):
    """Get the H5 entry group, switching shards when necessary.

    Only available in read mode. If *path* lives in a different shard file
    than the one currently open, the current file is closed and the correct
    shard is opened before delegating to the parent ``get``.

    Args:
        path: `str`. The variable path.
    """
    if not path:
        parsed_path = '/vars'
    else:
        parsed_path = path
    weight_map = self.sharding_config['weight_map']
    # The weight map may key either the bare path or the '/.../vars' form.
    filenames = weight_map.get(parsed_path) or weight_map.get('/' + parsed_path + '/vars')
    if filenames is not None:
        if not isinstance(filenames, list):
            filenames = [filenames]
        self.current_shard_filenames = filenames
        filename = filenames[0]
    else:
        self.current_shard_filenames = []
        filename = None
    # Switch shards only when the target differs from the open file.
    if filename is not None and filename != self.current_shard_path.name:
        self.close()
        self.h5_file = self._get_h5_file(self.path.with_name(filename))
    return super().get(path)
Get the H5 entry group. This method is only available in read mode. If the path is not found in the current shard, it will switch to the correct shard. Args: path: `str`. The variable path.
github-repos
def mount(dmg):
    """Mount a dmg file read-only at a temporary location.

    Args:
        dmg (str): The location of the dmg file to mount.

    Returns:
        tuple: (output of the ``hdiutil attach`` command, mount point path).
    """
    temp_dir = __salt__['temp.dir'](prefix='dmg-')
    cmd = 'hdiutil attach -readonly -nobrowse -mountpoint {0} "{1}"'.format(temp_dir, dmg)
    return (__salt__['cmd.run'](cmd), temp_dir)
Attempt to mount a dmg file to a temporary location and return the location of the pkg file inside Args: dmg (str): The location of the dmg file to mount Returns: tuple: Tuple containing the results of the command along with the mount point CLI Example: .. code-block:: bash salt '*' macpackage.mount /tmp/software.dmg
codesearchnet
def interpolate_to_timestep(self, timestep, cumulative=None):
    """Interpolate data to a finer timestep using linear interpolation.

    Args:
        timestep: Target timestep as an integer; must be divisible by the
            collection's current timestep.
        cumulative: Optional boolean; when True (or when None and the data
            type is natively cumulative) each interpolated value is divided
            by the timestep so values remain totals over each sub-step.

    Returns:
        A continuous hourly data collection at the requested timestep.
    """
    assert ((timestep % self.header.analysis_period.timestep) == 0), 'Target timestep({}) must be divisable by current timestep({})'.format(timestep, self.header.analysis_period.timestep)
    if (cumulative is not None):
        assert isinstance(cumulative, bool), 'Expected Boolean. Got {}'.format(type(cumulative))
    # Linearly interpolate between each consecutive pair of values; the
    # modulo wraps the final interval back to the first value.
    _new_values = []
    _data_length = len(self._values)
    for d in xrange(_data_length):
        for _v in self._xxrange(self[d], self[((d + 1) % _data_length)], timestep):
            _new_values.append(_v)
    native_cumulative = self.header.data_type.cumulative
    if ((cumulative is True) or ((cumulative is None) and native_cumulative)):
        # Spread cumulative totals evenly over the finer steps.
        for (i, d) in enumerate(_new_values):
            _new_values[i] = (d / timestep)
    if (self.header.data_type.point_in_time is False):
        # Shift interval data by half a step so values stay centred.
        shift_dist = int((timestep / 2))
        _new_values = (_new_values[(- shift_dist):] + _new_values[:(- shift_dist)])
    a_per = self.header.analysis_period
    _new_a_per = AnalysisPeriod(a_per.st_month, a_per.st_day, a_per.st_hour, a_per.end_month, a_per.end_day, a_per.end_hour, timestep, a_per.is_leap_year)
    _new_header = self.header.duplicate()
    _new_header._analysis_period = _new_a_per
    return HourlyContinuousCollection(_new_header, _new_values)
Interpolate data for a finer timestep using a linear interpolation. Args: timestep: Target timestep as an integer. Target timestep must be divisable by current timestep. cumulative: A boolean that sets whether the interpolation should treat the data colection values as cumulative, in which case the value at each timestep is the value over that timestep (instead of over the hour). The default will check the DataType to see if this type of data is typically cumulative over time. Return: A continuous hourly data collection with data interpolated to the input timestep.
codesearchnet
def __init__(self, matrix):
    """Initialise a Cell object.

    Args:
        matrix (np.ndarray): 3x3 numpy array containing the cell matrix.

    Raises:
        TypeError: If ``matrix`` is not a numpy array.
        ValueError: If ``matrix`` does not have shape (3, 3).
    """
    # Real validation instead of asserts: asserts are stripped when Python
    # runs with -O, silently disabling these checks.
    if not isinstance(matrix, np.ndarray):
        raise TypeError('matrix must be a numpy ndarray')
    if matrix.shape != (3, 3):
        raise ValueError('matrix must have shape (3, 3)')
    self.matrix = matrix
    # Cache the inverse for repeated fractional/cartesian conversions.
    self.inv_matrix = np.linalg.inv(matrix)
Initialise a Cell object. Args: matrix (np.array): 3x3 numpy array containing the cell matrix. Returns: None
juraj-google-style
def timed_operation(msg, log_start=False):
    """Surround a context with a timer (generator for a context manager).

    Args:
        msg (str): the log message to print.
        log_start (bool): whether to also log at the beginning.

    Example:
        with timed_operation('Good Stuff'):
            time.sleep(1)
        # prints: Good stuff finished, time:1sec.
    """
    assert len(msg)
    if log_start:
        logger.info('Start {} ...'.format(msg))
    start = timer()
    yield
    # Capitalize the first letter for the closing log line.
    msg = msg[0].upper() + msg[1:]
    logger.info('{} finished, time:{:.4f} sec.'.format(
        msg, timer() - start))
Surround a context with a timer. Args: msg(str): the log to print. log_start(bool): whether to print also at the beginning. Example: .. code-block:: python with timed_operation('Good Stuff'): time.sleep(1) Will print: .. code-block:: python Good stuff finished, time:1sec.
juraj-google-style
def format_returnvalue(self, value):
    """Format the return value of this function as a string.

    Args:
        value (object): The return value to format.

    Returns:
        str: The formatted return value, or None if this function declares
        that it does not return data.
    """
    self._ensure_loaded()
    if (not self.return_info.is_data):
        return None
    # A declared type name routes through the shared type system formatter;
    # otherwise the raw formatter callable is used directly.
    if (self.return_info.type_name is not None):
        return typeinfo.type_system.format_value(value, self.return_info.type_name, self.return_info.formatter)
    return self.return_info.formatter(value)
Format the return value of this function as a string. Args: value (object): The return value that we are supposed to format. Returns: str: The formatted return value, or None if this function indicates that it does not return data
codesearchnet
def calc_sha(self, checksum):
    """Calculate a checksum of the exported disk and record it.

    Writes the hash to ``<dst>.hash`` and updates this manager's
    ``exported_metadata``.

    Args:
        checksum (str): The type of the checksum (e.g. a hash algorithm name).
    """
    with LogTask('Calculating {}'.format(checksum)):
        with open(self.dst + '.hash', 'wt') as f:
            sha = utils.get_hash(self.dst, checksum)
            f.write(sha)
        self.exported_metadata[checksum] = sha
Calculate the checksum of the newly exported disk, write it to a file, and update this manager's 'exported_metadata'. Args: checksum(str): The type of the checksum
juraj-google-style
def evaluate(condition):
    """Evaluate a simple Python condition string.

    An empty condition evaluates to True. Otherwise the condition is matched
    against the known rules and, when a rule matches, its evaluate function
    is applied to the parsed AST tokens.

    Args:
        condition (str): Python condition as a string.

    Returns:
        bool: True when the condition evaluates to True.
    """
    success = False
    if (len(condition) > 0):
        try:
            (rule_name, ast_tokens, evaluate_function) = Condition.find_rule(condition)
            if (not (rule_name == 'undefined')):
                success = evaluate_function(ast_tokens)
        except AttributeError as exception:
            # Malformed conditions surface as AttributeError; log and report
            # failure rather than propagate.
            Logger.get_logger(__name__).error('Attribute error: %s', exception)
    else:
        success = True
    return success
Evaluate simple condition. >>> Condition.evaluate(' 2 == 2 ') True >>> Condition.evaluate(' not 2 == 2 ') False >>> Condition.evaluate(' not "abc" == "xyz" ') True >>> Condition.evaluate('2 in [2, 4, 6, 8, 10]') True >>> Condition.evaluate('5 in [2, 4, 6, 8, 10]') False >>> Condition.evaluate('"apple" in ["apple", "kiwi", "orange"]') True >>> Condition.evaluate('5 not in [2, 4, 6, 8, 10]') True >>> Condition.evaluate('"apple" not in ["kiwi", "orange"]') True Args: condition (str): Python condition as string. Returns: bool: True when condition evaluates to True.
codesearchnet
def _IsComparable(target):
    """Returns True if the target is comparable.

    Numeric values are always comparable; anything else must expose every
    attribute listed in ``_COMPARABLE_ATTRS``. (None is handled by
    _NoneSubject elsewhere, so its result here is irrelevant.)

    Args:
        target: any object whatsoever.

    Returns:
        True if the target is comparable, otherwise False.
    """
    if _IsNumeric(target):
        return True
    for attr in _COMPARABLE_ATTRS:
        if not hasattr(target, attr):
            return False
    return True
Returns True if the target is comparable. Many things are considered comparable. An important exception is None, which in Python 2 compares less than anything besides None. None is a special case handled by _NoneSubject, so it's irrelevant what this returns for None. Args: target: any object whatsoever. Returns: True if the target is comparable, otherwise False.
github-repos
def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):
    """Compress all PDF files in *source_directory* into *output_directory*.

    Generator: first yields the number of files to be compressed, then yields
    the output path of each compressed file as it completes.

    Args:
        source_directory (str): Filepath to the source directory.
        output_directory (str): Filepath to the output directory.
        ghostscript_binary (str): Name of the Ghostscript binary.
    """
    source_paths = _get_pdf_filenames_at(source_directory)
    # First yielded value is the total count, so callers can show progress.
    yield len(source_paths)
    for source_path in source_paths:
        output = os.path.join(output_directory, os.path.basename(source_path))
        compress_pdf(source_path, output, ghostscript_binary)
        yield output
Compress all PDF files in the current directory and place the output in the given output directory. This is a generator function that first yields the amount of files to be compressed, and then yields the output path of each file. Args: source_directory (str): Filepath to the source directory. output_directory (str): Filepath to the output directory. ghostscript_binary (str): Name of the Ghostscript binary. Returns: list(str): paths to outputs.
juraj-google-style
def _get_log_file(self, handler):
    """Generate a log file path for a given handler.

    The filename pattern is expanded first with ``strftime`` (UTC) and then
    with the handler's own keys via ``str.format``.

    Args:
        handler: The handler configuration dictionary for which a log file
            path should be generated.

    Returns:
        str: the fully expanded log file path.
    """
    if 'file_name_pattern' not in handler:
        filename = '%Y-%m-%d-%H-%M-%S-{name}.pcap'
    else:
        filename = handler['file_name_pattern']
    log_file = handler['log_dir']
    if 'path' in handler:
        log_file = os.path.join(log_file, handler['path'], filename)
    else:
        log_file = os.path.join(log_file, filename)
    # strftime expands the %-directives; format fills {name}-style keys.
    log_file = time.strftime(log_file, time.gmtime())
    log_file = log_file.format(**handler)
    return log_file
Generate log file path for a given handler Args: handler: The handler configuration dictionary for which a log file path should be generated.
juraj-google-style
def run(self, test_names=None):
    """Runs tests within a test class.

    Test selection priority: (1) the ``test_names`` argument from the cmd
    line, (2) ``self.tests`` defined on the class, (3) every method matching
    the test naming convention.

    Args:
        test_names: A list of test method names requested on the cmd line.

    Returns:
        The test results object of this class.

    Raises:
        signals.TestAbortAll: re-raised (with results attached) so the whole
            run is aborted.
    """
    logging.log_path = self.log_path
    if not self._pre_run():
        return self.results
    logging.info('==========> %s <==========', self.TAG)
    if not test_names:
        if self.tests:
            test_names = list(self.tests)
        else:
            test_names = self.get_existing_test_names()
    self.results.requested = test_names
    self.summary_writer.dump(self.results.requested_test_names_dict(), records.TestSummaryEntryType.TEST_NAME_LIST)
    tests = self._get_test_methods(test_names)
    try:
        setup_class_result = self._setup_class()
        if setup_class_result:
            return setup_class_result
        for test_name, test_method in tests:
            # Per-test execution attributes control retry/repeat behavior.
            max_consecutive_error = getattr(test_method, ATTR_MAX_CONSEC_ERROR, 0)
            repeat_count = getattr(test_method, ATTR_REPEAT_CNT, 0)
            max_retry_count = getattr(test_method, ATTR_MAX_RETRY_CNT, 0)
            if max_retry_count:
                self._exec_one_test_with_retry(test_name, test_method, max_retry_count)
            elif repeat_count:
                self._exec_one_test_with_repeat(test_name, test_method, repeat_count, max_consecutive_error)
            else:
                self.exec_one_test(test_name, test_method)
        return self.results
    except signals.TestAbortClass as e:
        # Abort only this class: skip its remaining tests, keep the run going.
        e.details = 'Test class aborted due to: %s' % e.details
        self._skip_remaining_tests(e)
        return self.results
    except signals.TestAbortAll as e:
        # Abort everything: attach results and propagate to the runner.
        e.details = 'All remaining tests aborted due to: %s' % e.details
        self._skip_remaining_tests(e)
        setattr(e, 'results', self.results)
        raise e
    finally:
        self._teardown_class()
        logging.info('Summary for test class %s: %s', self.TAG, self.results.summary_str())
Runs tests within a test class. One of these test method lists will be executed, shown here in priority order: 1. The test_names list, which is passed from cmd line. Invalid names are guarded by cmd line arg parsing. 2. The self.tests list defined in test class. Invalid names are ignored. 3. All function that matches test method naming convention in the test class. Args: test_names: A list of string that are test method names requested in cmd line. Returns: The test results object of this class.
github-repos
def hstack(tup):
    """Stack arrays in sequence horizontally (column wise), handling
    ``RemoteArray`` and ``DistArray`` without moving data.

    Args:
        tup (sequence of array_like)

    Returns:
        res: `ndarray`, if inputs were all local; `RemoteArray`, if inputs
        were all on the same remote engine; `DistArray`, if inputs were
        already scattered on different engines.
    """
    # `ndim is 1` compared identity, which only worked because CPython
    # caches small ints; use a real equality comparison.
    if all(ar.ndim == 1 for ar in tup):
        return concatenate(tup, axis=0)
    else:
        return concatenate(tup, axis=1)
Stack arrays in sequence horizontally (column wise), handling ``RemoteArray`` and ``DistArray`` without moving data. Args: tup (sequence of array_like) Returns: res: `ndarray`, if inputs were all local `RemoteArray`, if inputs were all on the same remote engine `DistArray`, if inputs were already scattered on different engines
juraj-google-style
def set_work_request(self, worker_name, sample_set, subkeys=None):
    """Make a work request against a stored sample set (generator).

    Args:
        worker_name: worker to run, e.g. 'strings' or 'pe_features'.
        sample_set: the md5 of a sample_set in the Workbench data store.
        subkeys: optional subkey(s) of the output, e.g. 'foo' or 'foo.bar'
            (None for all).

    Yields:
        Worker output for the sample set (or per-sample when the worker does
        not accept sample-set input).
    """
    if self.plugin_meta[worker_name]['sample_set_input']:
        # Worker consumes the whole sample set in one request.
        yield self.work_request(worker_name, sample_set, subkeys)
    else:
        # Fan out one request per md5 in the set.
        md5_list = self.get_sample_set(sample_set)
        for md5 in md5_list:
            if subkeys:
                yield self.work_request(worker_name, md5, subkeys)
            else:
                yield self.work_request(worker_name, md5)[worker_name]
Make a work request for an existing stored sample (or sample_set). Args: worker_name: 'strings', 'pe_features', whatever sample_set: the md5 of a sample_set in the Workbench data store subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all) Returns: The output is a generator of the results of the worker output for the sample_set
juraj-google-style
def cycle_find(key, width=4):
    """Given an element of a de Bruijn sequence, find its index in that
    sequence.

    Args:
        key (str): The piece of the de Bruijn sequence to find.
        width (int): The width of each element in the sequence.

    Returns:
        int: The index of ``key`` in the de Bruijn sequence, or -1 if not
        found.
    """
    key_len = len(key)
    buf = ''
    it = deBruijn(width, 26)
    # Prime the window with the first key_len symbols of the sequence.
    for i in range(key_len):
        buf += chr(ord('A') + next(it))
    if buf == key:
        return 0
    # Slide the window one symbol at a time until it matches.
    for i, c in enumerate(it):
        buf = buf[1:] + chr(ord('A') + c)
        if buf == key:
            return i + 1
    return -1
Given an element of a de Bruijn sequence, find its index in that sequence. Args: key(str): The piece of the de Bruijn sequence to find. width(int): The width of each element in the sequence. Returns: int: The index of ``key`` in the de Bruijn sequence.
juraj-google-style
def tokeninfo(self, jwt): warnings.warn("/tokeninfo will be deprecated in future releases", DeprecationWarning) return self.post( url='https: data={'id_token': jwt}, headers={'Content-Type': 'application/json'} )
Returns user profile based on the user's jwt Validates a JSON Web Token (signature and expiration) and returns the user information associated with the user id (sub property) of the token. Args: jwt (str): User's jwt Returns: The user profile.
juraj-google-style
def grid(self, dimensions=None, **kwargs):
    """Group data by the supplied dimension(s), laid out in a GridSpace.

    Args:
        dimensions: Dimension/str or list of dimensions to group by.

    Returns:
        GridSpace: grid with the supplied dimensions.
    """
    return self.groupby(dimensions, container_type=GridSpace, **kwargs)
Groups data by supplied dimension(s) laying the groups along the dimension(s) out in a GridSpace. Args: dimensions: Dimension/str or list Dimension or list of dimensions to group by Returns: grid: GridSpace GridSpace with supplied dimensions
juraj-google-style
def _rowwise_unsorted_segment_sum(values, indices, n):
    """UnsortedSegmentSum applied independently to each row.

    Args:
        values: a `Tensor` with shape `[batch_size, k]`.
        indices: an integer `Tensor` with shape `[batch_size, k]`.
        n: an integer (number of segments per row).

    Returns:
        A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
    """
    batch, k = tf.unstack(tf.shape(indices), num=2)
    # Offset each row's indices by row*n so one flat segment-sum keeps rows
    # separate.
    indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n
    ret_flat = tf.unsorted_segment_sum(
        tf.reshape(values, [-1]), indices_flat, batch * n)
    return tf.reshape(ret_flat, [batch, n])
UnsortedSegmentSum on each row. Args: values: a `Tensor` with shape `[batch_size, k]`. indices: an integer `Tensor` with shape `[batch_size, k]`. n: an integer. Returns: A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
juraj-google-style
def write_plot(plot, filename, width=DEFAULT_PAGE_WIDTH, height=DEFAULT_PAGE_HEIGHT, unit=DEFAULT_PAGE_UNIT):
    """Write a plot SVG to a file.

    Args:
        plot (list): a list of layers to plot.
        filename (str): the name of the file to write.
        width (float): the width of the output SVG.
        height (float): the height of the output SVG.
        unit (str): the unit of the height and width.
    """
    svg = plot_to_svg(plot, width, height, unit)
    with open(filename, 'w') as outfile:
        outfile.write(svg)
Writes a plot SVG to a file. Args: plot (list): a list of layers to plot filename (str): the name of the file to write width (float): the width of the output SVG height (float): the height of the output SVG unit (str): the unit of the height and width
codesearchnet
def chunk_embedding_fn(chunk: Chunk) -> str:
    """Convert a chunk's dense embedding to a PostgreSQL array string.

    Example: ``[1.0, 2.0] -> '{1.0,2.0}'``

    Args:
        chunk: Input Chunk object.

    Returns:
        str: PostgreSQL array string representation of the embedding.

    Raises:
        ValueError: If the chunk has no dense embedding.
    """
    embedding = chunk.embedding
    if embedding is None or embedding.dense_embedding is None:
        raise ValueError(f'Expected chunk to contain embedding. {chunk}')
    body = ','.join(str(component) for component in embedding.dense_embedding)
    return '{' + body + '}'
Convert embedding to PostgreSQL array string. Formats dense embedding as a PostgreSQL-compatible array string. Example: [1.0, 2.0] -> '{1.0,2.0}' Args: chunk: Input Chunk object. Returns: str: PostgreSQL array string representation of the embedding. Raises: ValueError: If chunk has no dense embedding.
github-repos
def take_shas_of_all_files(G, settings):
    """Take the sha1 hash of all dependencies and outputs of all targets.

    Args:
        G: the dependency graph to be built.
        settings: the settings dictionary (provides ``sprint``/``error``).

    Returns:
        dict: ``{'files': {filename: {'sha': sha1}}}`` covering every extant
        dependency/output file, or None when there are no files at all.
    """
    global ERROR_FN
    sprint = settings["sprint"]
    error = settings["error"]
    ERROR_FN = error
    sha_dict = {}
    all_files = []
    for target in G.nodes(data=True):
        sprint("About to take shas of files in target '{}'".format(target[0]), level="verbose")
        if 'dependencies' in target[1]:
            sprint("It has dependencies", level="verbose")
            # Expand glob patterns in-place; non-matching patterns are kept
            # verbatim.
            deplist = []
            for dep in target[1]['dependencies']:
                glist = glob.glob(dep)
                if glist:
                    for oneglob in glist:
                        deplist.append(oneglob)
                else:
                    deplist.append(dep)
            target[1]['dependencies'] = list(deplist)
            for dep in target[1]['dependencies']:
                sprint(" - {}".format(dep), level="verbose")
                all_files.append(dep)
        if 'output' in target[1]:
            sprint("It has outputs", level="verbose")
            for out in acts.get_all_outputs(target[1]):
                sprint(" - {}".format(out), level="verbose")
                all_files.append(out)
    if len(all_files):
        sha_dict['files'] = {}
        # De-duplicate while keeping only files that actually exist.
        extant_files = []
        for item in all_files:
            if item not in extant_files and os.path.isfile(item):
                extant_files.append(item)
        # Hash files in parallel across a process pool.
        pool = Pool()
        results = pool.map(get_sha, extant_files)
        pool.close()
        pool.join()
        for fn, sha in zip(extant_files, results):
            sha_dict['files'][fn] = {'sha': sha}
        return sha_dict
    # No files anywhere: implicitly returns None.
    sprint("No dependencies", level="verbose")
Takes the sha1 hash of all dependencies and outputs of all targets Args: G: The graph we are going to build settings: The settings dictionary Returns: A dictionary where the keys are the filenames and the values are the sha1 hashes
juraj-google-style
def _update(self, item, feed_item):
    """Updates an existing item in CM.

    Args:
        item: The CM object to update.
        feed_item: The feed item from the Bulkdozer feed representing the
            item to update (unused here; kept for the common handler
            signature).
    """
    self._api().update(profileId=self.profile_id, body=item).execute()
Updates a new item in CM. Args: item: The CM object to update. feed_item: The feed item from the Bulkdozer feed representing the item to update.
github-repos
def get_sym_eq_kpoints(self, kpoint, cartesian=False, tol=1e-2):
    """Returns a list of unique symmetrically equivalent k-points.

    Args:
        kpoint (1x3 array): coordinate of the k-point.
        cartesian (bool): whether the k-point is in cartesian or fractional
            coordinates.
        tol (float): tolerance below which coordinates are considered equal.

    Returns:
        ([1x3 array] or None): None if no structure is available.
    """
    if not self.structure:
        return None
    sg = SpacegroupAnalyzer(self.structure)
    symmops = sg.get_point_group_operations(cartesian=cartesian)
    # Apply every point-group rotation to the k-point.
    points = np.dot(kpoint, [m.rotation_matrix for m in symmops])
    rm_list = []
    # Mark earlier duplicates (equal up to a lattice translation) for removal.
    for i in range(len(points) - 1):
        for j in range(i + 1, len(points)):
            if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0], tol):
                rm_list.append(i)
                break
    return np.delete(points, rm_list, axis=0)
Returns a list of unique symmetrically equivalent k-points. Args: kpoint (1x3 array): coordinate of the k-point cartesian (bool): kpoint is in cartesian or fractional coordinates tol (float): tolerance below which coordinates are considered equal Returns: ([1x3 array] or None): if structure is not available returns None
juraj-google-style
def read_vocab(args, column_name):
    """Reads a vocab file if it exists.

    Args:
        args: command line flags (provides the analysis directory).
        column_name: name of the column that has a vocab file.

    Returns:
        List of vocab words, or [] if the vocab file is not found.
    """
    vocab_path = os.path.join(args.analysis, (feature_transforms.VOCAB_ANALYSIS_FILE % column_name))
    if (not file_io.file_exists(vocab_path)):
        return []
    (vocab, _) = feature_transforms.read_vocab_file(vocab_path)
    return vocab
Reads a vocab file if it exists. Args: args: command line flags column_name: name of column to that has a vocab file. Returns: List of vocab words or [] if the vocab file is not found.
codesearchnet
def GetTypeChecker(field):
    """Return a type checker for a message field of the specified type.

    Args:
      field: FieldDescriptor object for this field.

    Returns:
      An instance of TypeChecker which can be used to verify the types of
      values assigned to a field of the specified type.
    """
    is_proto_string = (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING
                       and field.type == _FieldDescriptor.TYPE_STRING)
    if is_proto_string:
        return UnicodeValueChecker()
    if field.cpp_type != _FieldDescriptor.CPPTYPE_ENUM:
        return _VALUE_CHECKERS[field.cpp_type]
    # Open enums accept any int32 value; closed enums are range-checked.
    if SupportsOpenEnums(field):
        return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
    return EnumValueChecker(field.enum_type)
Returns a type checker for a message field of the specified types. Args: field: FieldDescriptor object for this field. Returns: An instance of TypeChecker which can be used to verify the types of values assigned to a field of the specified type.
juraj-google-style
def preface_inference(f):
    """Decorate a method so pre-inference setup runs before every call.

    Args:
      f: The method of `EnergyInference` to wrap.

    Returns:
      The wrapped method, which calls `self._preface_inference()` first.
    """
    def _inner(self, *args, **kwargs):
        self._preface_inference()
        return f(self, *args, **kwargs)
    return functools.wraps(f)(_inner)
Wraps given function with things to run before every inference call. Args: f: The method of `EnergyInference` to wrap. Returns: wrapper: The wrapped function.
github-repos
def get_drives(self, id_or_uri):
    """Get the list of drives allocated to this SAS logical JBOD.

    Args:
        id_or_uri: Either the SAS logical JBOD ID or the SAS logical
            JBOD URI.

    Returns:
        list: A list of drives.
    """
    base_uri = self._client.build_uri(id_or_uri=id_or_uri)
    return self._client.get(id_or_uri=base_uri + self.DRIVES_PATH)
Gets the list of drives allocated to this SAS logical JBOD. Args: id_or_uri: Can be either the SAS logical JBOD ID or the SAS logical JBOD URI. Returns: list: A list of Drives
juraj-google-style
def register_for_auto_class(cls, auto_class='AutoConfig'):
    """Register this class with a given auto class.

    This should only be used for custom configurations, as the ones in the
    library are already mapped with `AutoConfig`.

    Args:
        auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
            The auto class to register this new configuration with.
    """
    name = auto_class if isinstance(auto_class, str) else auto_class.__name__
    import transformers.models.auto as auto_module
    if not hasattr(auto_module, name):
        raise ValueError(f'{name} is not a valid auto class.')
    cls._auto_class = name
Register this class with a given auto class. This should only be used for custom configurations as the ones in the library are already mapped with `AutoConfig`. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`): The auto class to register this new configuration with.
github-repos
def _get_token(
    request=None,
    allowed_auth_schemes=('OAuth', 'Bearer'),
    allowed_query_keys=('bearer_token', 'access_token')):
    """Get the auth token for this request.

    The token may come from the Authorization header or a query param
    (access_token or bearer_token), checked in this order:
    1. Authorization header.
    2. bearer_token query param.
    3. access_token query param.

    Args:
      request: The current request, or None.
      allowed_auth_schemes: Auth schemes accepted in the header.
      allowed_query_keys: Query keys accepted as token carriers.

    Returns:
      The token in the request or None.
    """
    schemes = _listlike_guard(
        allowed_auth_schemes, 'allowed_auth_schemes', iterable_only=True)
    header = os.environ.get('HTTP_AUTHORIZATION')
    if header:
        for scheme in schemes:
            if header.startswith(scheme):
                # Skip the scheme plus the separating space.
                return header[len(scheme) + 1:]
        # A header was present but no scheme matched: do not fall through
        # to query params.
        return None
    if not request:
        return None
    keys = _listlike_guard(
        allowed_query_keys, 'allowed_query_keys', iterable_only=True)
    for key in keys:
        token, _ = request.get_unrecognized_field_info(key)
        if token:
            return token
Get the auth token for this request. Auth token may be specified in either the Authorization header or as a query param (either access_token or bearer_token). We'll check in this order: 1. Authorization header. 2. bearer_token query param. 3. access_token query param. Args: request: The current request, or None. Returns: The token in the request or None.
juraj-google-style
def conv2d_bn(x, filters, kernel_size, strides=1, padding='same', activation='relu', use_bias=False, name=None):
    """Apply Conv2D, then (when bias-free) BatchNormalization, then activation.

    Args:
        x: input tensor.
        filters: filters in `Conv2D`.
        kernel_size: kernel size as in `Conv2D`.
        strides: strides in `Conv2D`.
        padding: padding mode in `Conv2D`.
        activation: activation applied after the conv (None to skip).
        use_bias: whether to use a bias in `Conv2D`.
        name: name of the ops; becomes `name + '_bn'` for the batch norm
            layer and `name + '_ac'` for the activation.

    Returns:
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    x = layers.Conv2D(filters, kernel_size, strides=strides, padding=padding,
                      use_bias=use_bias, name=name)(x)
    if not use_bias:
        # The channel axis depends on the backend's image data format.
        channels_first = backend.image_data_format() == 'channels_first'
        x = layers.BatchNormalization(
            axis=1 if channels_first else 3,
            scale=False,
            name=name + '_bn' if name is not None else None)(x)
    if activation is not None:
        x = layers.Activation(
            activation,
            name=name + '_ac' if name is not None else None)(x)
    return x
Utility function to apply conv + BN. Args: x: input tensor. filters: filters in `Conv2D`. kernel_size: kernel size as in `Conv2D`. strides: strides in `Conv2D`. padding: padding mode in `Conv2D`. activation: activation in `Conv2D`. use_bias: whether to use a bias in `Conv2D`. name: name of the ops; will become `name + '_ac'` for the activation and `name + '_bn'` for the batch norm layer. Returns: Output tensor after applying `Conv2D` and `BatchNormalization`.
github-repos
def return_selected_form_items(form_info):
    """Extract the chosen keys and names from serialized form data.

    Args:
        form_info: serialized list of dicts of form data.

    Returns:
        tuple: (selected_keys, selected_names) drawn from the entries
        whose 'choice' value is truthy.
    """
    picked = [(entry['key'], entry['name'])
              for entry in form_info if entry['choice']]
    selected_keys = [key for key, _ in picked]
    selected_names = [name for _, name in picked]
    return selected_keys, selected_names
Returns the chosen keys and names from a given form.

Args:
    form_info: serialized list of dict form data

Returns:
    selected_keys(list): Chosen keys list
    selected_names(list): Chosen channels' or subscribers' names list
codesearchnet
def string_to_scopes(scopes):
    """Convert a stringified scope value to a list.

    A list passes through unchanged; a string is split on single spaces;
    any falsy value yields an empty list.

    Args:
        scopes: a string or iterable of strings, the scopes.

    Returns:
        The scopes in a list.
    """
    if not scopes:
        return []
    if isinstance(scopes, six.string_types):
        return scopes.split(' ')
    return scopes
Converts a stringified scope value to a list.

If scopes is a list then it is simply passed through. If scopes is a
string then a list of each individual scope is returned.

Args:
    scopes: a string or iterable of strings, the scopes.

Returns:
    The scopes in a list.
codesearchnet
def WriteToPath(obj, filepath):
    """Serialize the given Python object and write it to a YAML file.

    Args:
      obj: A Python object to serialize.
      filepath: A path to the file into which the object is to be written.
    """
    filedesc = io.open(filepath, mode="w", encoding="utf-8")
    try:
        WriteToFile(obj, filedesc)
    finally:
        filedesc.close()
Serializes and writes given Python object to the specified YAML file. Args: obj: A Python object to serialize. filepath: A path to the file into which the object is to be written.
juraj-google-style
def isplaybook(obj):
    """Inspect the object and report whether it is a playbook.

    A playbook is any iterable that is neither a string nor a mapping.

    Args:
        obj (object): The object to be inspected by this function.

    Returns:
        boolean: True if the object is a playbook-style list, False otherwise.
    """
    if not isinstance(obj, Iterable):
        return False
    return not isinstance(obj, (string_types, Mapping))
Inspects the object and returns if it is a playbook Args: obj (object): The object to be inspected by this function Returns: boolean: True if the object is a list and False if it is not
codesearchnet
def pil_image(self, fill_value=None, compute=True):
    """Return a PIL image built from the current image.

    Args:
        fill_value (int or float): Value to use for NaN null values. See
            :meth:`~trollimage.xrimage.XRImage.finalize` for more info.
        compute (bool): Whether to return a fully computed PIL.Image
            object (True, the default) or a dask Delayed object
            representing the Image (False).
    """
    channels, mode = self.finalize(fill_value)
    arranged = channels.transpose('y', 'x', 'bands')
    # Build the PIL image lazily; only materialize it when requested.
    delayed_img = dask.delayed(PILImage.fromarray)(
        np.squeeze(arranged.data), mode)
    return delayed_img.compute() if compute else delayed_img
Return a PIL image from the current image. Args: fill_value (int or float): Value to use for NaN null values. See :meth:`~trollimage.xrimage.XRImage.finalize` for more info. compute (bool): Whether to return a fully computed PIL.Image object (True) or return a dask Delayed object representing the Image (False). This is True by default.
juraj-google-style
def simple_value(self, value: Any, *, name: Optional[str]=None, parent: Any=None, root_path: Optional[KeyPath]=None, css_classes: Optional[Sequence[str]]=None, max_summary_len_for_str: int=80) -> Html:
    """Renders a simple (leaf) value as a styled HTML <span>.

    Args:
      value: The value to render.
      name: The name of the value (unused here; part of the render hook
        interface).
      parent: The parent of the value (unused here).
      root_path: The root path of the value (unused here).
      css_classes: CSS classes to add to the HTML element.
      max_summary_len_for_str: The maximum string length rendered via repr();
        longer strings are emitted verbatim.

    Returns:
      The rendered HTML for the simple value.
    """
    # These are part of the shared renderer signature but not used by this
    # renderer.
    del name, parent, root_path

    def value_repr() -> str:
        # Short strings are shown quoted (repr); long strings verbatim to
        # avoid doubling escape noise on large text.
        if isinstance(value, str):
            if len(value) < max_summary_len_for_str:
                return repr(value)
            else:
                return value
        # Non-strings go through the project formatter in Python format with
        # byte values truncated at 64.
        return utils.format(value, compact=False, verbose=False, hide_default_values=True, python_format=True, use_inferred=True, max_bytes_len=64)
    # The css_class_name of the value (e.g. 'str', 'int') drives the
    # type-specific coloring defined in the inline style below.
    return Html.element('span', [Html.escape(value_repr)], css_classes=['simple-value', self.css_class_name(value), css_classes]).add_style('\n \n .simple-value {\n color: blue;\n display: inline-block;\n white-space: pre-wrap;\n padding: 0.2em;\n margin-top: 0.15em;\n }\n .simple-value.str {\n color: darkred;\n font-style: italic;\n }\n .simple-value.int, .simple-value.float {\n color: darkblue;\n }\n ')
Renders a simple value. Args: value: The value to render. name: The name of the value. parent: The parent of the value. root_path: The root path of the value. css_classes: CSS classes to add to the HTML element. max_summary_len_for_str: The maximum length of the string to display. Returns: The rendered HTML as the simple value.
github-repos
def delete_edge(self, ind_node, dep_node):
    """Delete an edge from the graph.

    Args:
        ind_node (str): The independent node to delete an edge from.
        dep_node (str): The dependent node that has a dependency on the
            ind_node.

    Raises:
        KeyError: Raised when the edge doesn't already exist.
    """
    dependents = self.graph.get(ind_node, [])
    if dep_node not in dependents:
        raise KeyError(
            "No edge exists between %s and %s." % (ind_node, dep_node)
        )
    dependents.remove(dep_node)
Delete an edge from the graph. Args: ind_node (str): The independent node to delete an edge from. dep_node (str): The dependent node that has a dependency on the ind_node. Raises: KeyError: Raised when the edge doesn't already exist.
juraj-google-style