code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # One unary-unary stub per Agents RPC; request serializers and response
    # deserializers come from the generated protobuf modules.
    self.GetAgent = channel.unary_unary(
        '/google.cloud.dialogflow.v2.Agents/GetAgent',
        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.GetAgentRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.Agent.FromString,
    )
    self.SearchAgents = channel.unary_unary(
        '/google.cloud.dialogflow.v2.Agents/SearchAgents',
        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.SearchAgentsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.SearchAgentsResponse.FromString,
    )
    # The following RPCs are long-running and return an Operation.
    self.TrainAgent = channel.unary_unary(
        '/google.cloud.dialogflow.v2.Agents/TrainAgent',
        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.TrainAgentRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
    self.ExportAgent = channel.unary_unary(
        '/google.cloud.dialogflow.v2.Agents/ExportAgent',
        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.ExportAgentRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
    self.ImportAgent = channel.unary_unary(
        '/google.cloud.dialogflow.v2.Agents/ImportAgent',
        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.ImportAgentRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
    self.RestoreAgent = channel.unary_unary(
        '/google.cloud.dialogflow.v2.Agents/RestoreAgent',
        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.RestoreAgentRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def add_spin_by_element(self, spins):
    """Add spin states to a structure.

    Args:
        spins (dict): Dict of spins associated with elements or species,
            e.g. {"Ni": +5} or {"Ni2+": 5}.
    """
    for site in self.sites:
        updated_species = {}
        for species, occupancy in site.species.items():
            symbol = species.symbol
            oxidation = getattr(species, "oxi_state", None)
            # Prefer an exact species match (e.g. "Ni2+"), fall back to the
            # bare element symbol (e.g. "Ni").
            spin = spins.get(str(species), spins.get(symbol, None))
            new_specie = Specie(symbol, oxidation_state=oxidation,
                                properties={'spin': spin})
            updated_species[new_specie] = occupancy
        site.species = updated_species
Add spin states to a structure. Args: spins (dict): Dict of spins associated with elements or species, e.g. {"Ni":+5} or {"Ni2+":5}
juraj-google-style
def delete_dict_keys(dict_, key_list):
    r"""Removes items from a dictionary in place.

    Keys that do not exist are silently ignored.

    Args:
        dict_ (dict): dict-like object supporting ``del``.
        key_list (list): keys specifying the items to remove.

    Returns:
        dict: the same (mutated) dictionary, for convenience.
    """
    # Only attempt deletion of keys that actually exist.
    for key in set(key_list) & set(dict_.keys()):
        del dict_[key]
    return dict_
r""" Removes items from a dictionary inplace. Keys that do not exist are ignored. Args: dict_ (dict): dict like object with a __del__ attribute key_list (list): list of keys that specify the items to remove CommandLine: python -m utool.util_dict --test-delete_dict_keys Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> dict_ = {'bread': 1, 'churches': 1, 'cider': 2, 'very small rocks': 2} >>> key_list = ['duck', 'bread', 'cider'] >>> delete_dict_keys(dict_, key_list) >>> result = ut.repr4(dict_, nl=False) >>> print(result) {'churches': 1, 'very small rocks': 2}
codesearchnet
def read_graph_op_creation_stack_trace(self, graph_op_creation_digest):
    """Read the stack trace of a given graph op creation object.

    Args:
        graph_op_creation_digest: The GraphOpCreationDigest object of interest.

    Returns:
        A tuple consisting of:
        1. The host name.
        2. The stack trace, as a list of (file_path, lineno, func) tuples.
    """
    frames = []
    for frame_id in graph_op_creation_digest.stack_frame_ids:
        # Drop the leading element of the stored record (the frame id itself).
        frames.append(self._stack_frame_by_id[frame_id][1:])
    return (graph_op_creation_digest.host_name, frames)
Read the stack trace of a given graph op creation object. Args: graph_op_creation_digest: The GraphOpCreationDigest object of interest. Returns: A tuple consisting of: 1. The host name. 2. The stack trace, as a list of (file_path, lineno, func) tuples.
github-repos
def describe_images(self, idaho_image_results):
    """Describe the result set of a catalog search for IDAHO images.

    Args:
        idaho_image_results (dict): Result set of a catalog search.

    Returns:
        dict: Mapping from catalog ID to sensor platform name and a 'parts'
            dict keyed by part number and color interpretation, holding the
            image id, tile bucket and footprint WKT.
    """
    results = idaho_image_results['results']
    # Keep IDAHO image records only.
    results = [r for r in results if 'IDAHOImage' in r['type']]
    self.logger.debug('Describing %s IDAHO images.' % len(results))

    catids = set([r['properties']['catalogID'] for r in results])
    description = {}
    for catid in catids:
        description[catid] = {'parts': {}}
        images = [r for r in results if r['properties']['catalogID'] == catid]
        for image in images:
            props = image['properties']
            description[catid]['sensorPlatformName'] = props['sensorPlatformName']
            # Part number is encoded in the last 3 characters of the vendor
            # dataset identifier's second field.
            part = int(props['vendorDatasetIdentifier'].split(':')[1][-3:])
            color = props['colorInterpretation']
            # setdefault replaces the original bare try/except existence probe.
            part_entry = description[catid]['parts'].setdefault(part, {})
            part_entry[color] = {
                'id': image['identifier'],
                'bucket': props['tileBucketName'],
                'boundstr': props['footprintWkt'],
            }
    return description
Describe the result set of a catalog search for IDAHO images. Args: idaho_image_results (dict): Result set of catalog search. Returns: description (dict): Mapping from catalog ID to the sensor platform name and a 'parts' dict keyed by part number and color interpretation, each holding the image id, tile bucket and footprint WKT.
codesearchnet
def GetExtractionStatusUpdateCallback(self):
    """Retrieves the extraction status update callback function.

    Returns:
        function: status update callback function, or None if not available.
    """
    callback = None
    if self._mode == self.MODE_LINEAR:
        callback = self._PrintExtractionStatusUpdateLinear
    elif self._mode == self.MODE_WINDOW:
        callback = self._PrintExtractionStatusUpdateWindow
    return callback
Retrieves the extraction status update callback function. Returns: function: status update callback function or None if not available.
codesearchnet
def _getFuncArgs(func):
    r"""Gives the details on the args of the given func.

    Args:
        func (function): The function to get details on.

    Returns:
        OrderedDict: arg name -> {} for args without defaults,
            {'default': value} for args with defaults, in declaration order.
    """
    # ``__code__``/``__defaults__`` work on Python 2.6+ and Python 3, unlike
    # the legacy ``func_code``/``func_defaults`` attributes used before.
    code = func.__code__
    defaults = func.__defaults__
    nargs = code.co_argcount
    arg_names = code.co_varnames[:nargs]
    args = OrderedDict()
    n_defaults = len(defaults) if defaults else 0
    # Defaults align with the trailing arguments.
    n_without_defaults = nargs - n_defaults
    for i in range(n_without_defaults):
        args[arg_names[i]] = {}
    for i in range(n_without_defaults, nargs):
        args[arg_names[i]] = {'default': defaults[i - n_without_defaults]}
    return args
r"""Gives the details on the args of the given func. Args: func (function): The function to get details on.
codesearchnet
def Expand(self, macro_ref_str):
    """Expands the macro reference.

    Args:
        macro_ref_str: String of a macro reference (i.e. ``foo(a, b)``).

    Returns:
        The text from the expansion.

    Raises:
        PDDMError: if there are any issues.
    """
    match = _MACRO_RE.match(macro_ref_str)
    # The match must consume the whole string to be a valid reference.
    if match is None or match.group(0) != macro_ref_str:
        raise PDDMError('Failed to parse macro reference: "%s"' % macro_ref_str)
    name = match.group('name')
    if name not in self._macros:
        raise PDDMError('No macro named "%s".' % name)
    return self._Expand(match, [], macro_ref_str)
Expands the macro reference. Args: macro_ref_str: String of a macro reference (i.e. foo(a, b)). Returns: The text from the expansion. Raises: PDDMError if there are any issues.
codesearchnet
def connect(self, *args, auto_reconnect=False, **kwargs):
    """Connects to the given server.

    Args:
        auto_reconnect (bool): Automatically reconnect on disconnection.

    Other arguments to this function are as usually supplied to
    :meth:`asyncio.BaseEventLoop.create_connection`.
    """
    self.connect_info['connection'] = {
        'auto_reconnect': auto_reconnect,
        'args': args,
        'kwargs': kwargs,
    }
    if 'user' not in self.connect_info:
        raise Exception('`set_user_info` must be called before connecting to server.')
    # Schedule the connection attempt on the module-level event loop.
    coro = loop.create_connection(lambda: self, *args, **kwargs)
    asyncio.Task(coro)
Connects to the given server. Args: auto_reconnect (bool): Automatically reconnect on disconnection. Other arguments to this function are as usually supplied to :meth:`asyncio.BaseEventLoop.create_connection`.
juraj-google-style
def _CheckType(value, check_type, name, allow_none=True):
    """Check that the type of an object is acceptable.

    Args:
        value: The object whose type is to be checked.
        check_type: The type that the object must be an instance of.
        name: Name of the object, placed in any error messages.
        allow_none: True if value can be None, False if not.

    Raises:
        TypeError: If value is not an acceptable type.
    """
    if value is None and allow_none:
        return
    if not isinstance(value, check_type):
        raise TypeError("%s type doesn't match %s." % (name, check_type))
Check that the type of an object is acceptable. Args: value: The object whose type is to be checked. check_type: The type that the object must be an instance of. name: Name of the object, to be placed in any error messages. allow_none: True if value can be None, false if not. Raises: TypeError: If value is not an acceptable type.
codesearchnet
def __init__(self, name, aliases=None, description=None, urls=None):
    """Initializes a format data type definition.

    Args:
        name (str): name.
        aliases (Optional[list[str]]): aliases.
        description (Optional[str]): description.
        urls (Optional[list[str]]): URLs.
    """
    super(FormatDefinition, self).__init__(
        name, aliases=aliases, description=description, urls=urls)
    # Free-form metadata associated with this format definition.
    self.metadata = {}
Initializes a format data type definition. Args: name (str): name. aliases (Optional[list[str]]): aliases. description (Optional[str]): description. urls (Optional[list[str]]): URLs.
juraj-google-style
def generate_full_symmops(symmops, tol):
    """Recursive algorithm to permute through all possible combinations of the
    initially supplied symmetry operations to arrive at a complete set of
    operations mapping a single atom to all other equivalent atoms in the
    point group. This assumes that the initial number already uniquely
    identifies all operations.

    Args:
        symmops ([SymmOp]): Initial set of symmetry operations.
        tol (float): Tolerance for comparing affine matrices elementwise.

    Returns:
        Full set of symmetry operations.
    """
    UNIT = np.eye(4)
    generators = [op.affine_matrix for op in symmops
                  if not np.allclose(op.affine_matrix, UNIT)]
    if not generators:
        # Only identity operations were supplied; nothing to close over.
        return symmops
    full = list(generators)
    # Compose pairs until no new operation appears; ``full`` grows while
    # being iterated, which drives the closure.
    for g in full:
        for s in generators:
            op = np.dot(g, s)
            d = np.abs(full - op) < tol
            if not np.any(np.all(np.all(d, axis=2), axis=1)):
                full.append(op)
    # Ensure the identity is part of the returned set.
    d = np.abs(full - UNIT) < tol
    if not np.any(np.all(np.all(d, axis=2), axis=1)):
        full.append(UNIT)
    return [SymmOp(op) for op in full]
Recursive algorithm to permute through all possible combinations of the initially supplied symmetry operations to arrive at a complete set of operations mapping a single atom to all other equivalent atoms in the point group. This assumes that the initial number already uniquely identifies all operations. Args: symmops ([SymmOp]): Initial set of symmetry operations. Returns: Full set of symmetry operations.
juraj-google-style
def parse_args(args=None):
    """Handles the parsing of options for LIVVkit's command line interface.

    Args:
        args: The list of arguments, typically sys.argv[1:].
    """
    parser = argparse.ArgumentParser(
        description='Main script to run LIVVkit.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        fromfile_prefix_chars='@')
    parser.add_argument(
        '-o', '--out-dir',
        default=os.path.join(os.getcwd(), ('vv_' + time.strftime('%Y-%m-%d'))),
        help='Location to output the LIVVkit webpages.')
    parser.add_argument(
        '-v', '--verify', nargs=2, default=None,
        help=' '.join(['Specify the locations of the test and bench bundle to',
                       'compare (respectively).']))
    parser.add_argument(
        '-V', '--validate', action='store', nargs='+', default=None,
        help=' '.join(['Specify the location of the configuration files for',
                       'validation tests.']))
    # '-e' stores into the same destination as '--validate'.
    parser.add_argument(
        '-e', '--extension', action='store', nargs='+', default=None,
        dest='validate', metavar='EXTENSION',
        help=' '.join(['Specify the location of the configuration files for',
                       'LIVVkit extensions.']))
    parser.add_argument(
        '-p', '--publish', action='store_true',
        help=' '.join(['Also produce a publication quality copy of the figure in',
                       'the output directory (eps, 600d pi).']))
    parser.add_argument(
        '-s', '--serve', nargs='?', type=int, const=8000,
        help=' '.join(['Start a simple HTTP server for the output website specified',
                       'by OUT_DIR on port SERVE.']))
    parser.add_argument(
        '--version', action='version',
        version='LIVVkit {}'.format(livvkit.__version__),
        help="Show LIVVkit's version number and exit")
    return init(parser.parse_args(args))
Handles the parsing of options for LIVVkit's command line interface Args: args: The list of arguments, typically sys.argv[1:]
codesearchnet
def potcar_eatom_list_from_outcar(filename='OUTCAR'):
    """Returns a list of EATOM values for the pseudopotentials used.

    Args:
        filename (str, optional): OUTCAR filename. Defaults to 'OUTCAR'.

    Returns:
        list(float): EATOM values, in the order they appear in the OUTCAR.
    """
    with open(filename) as f:
        outcar = f.read()
    # Raw string: the original non-raw pattern relied on invalid escape
    # sequences (\s, \d), which warn on Python 3.6+ and error on 3.12+.
    eatom_re = re.compile(r"energy of atom\s+\d+\s+EATOM=\s*([-\d\.]+)")
    return [float(e) for e in eatom_re.findall(outcar)]
Returns a list of EATOM values for the pseudopotentials used. Args: filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'. Returns: (List(Float)): A list of EATOM values, in the order they appear in the OUTCAR.
juraj-google-style
def get_subclasses(self, t):
    """Get all classes derived from this type.

    Args:
        t: A pytd.Type.

    Returns:
        A list of pytd.Type.

    Raises:
        NotImplementedError: if ``t`` is not a pytd.ClassType.
    """
    if not isinstance(t, pytd.ClassType):
        raise NotImplementedError(f"Can't extract subclasses from {type(t)}")
    # Depth-first accumulation: the type itself, then each subclass subtree.
    result = [t]
    for c in self.direct_subclasses.get(t, []):
        result.extend(self.get_subclasses(pytd.ClassType(c.name, c)))
    return result
Get all classes derived from this type. Args: t: A pytd.Type Returns: A list of pytd.Type.
github-repos
def __parameter_descriptor(self, param):
    """Creates a descriptor for a parameter.

    Args:
        param: The parameter to be described.

    Returns:
        dict: a descriptor for the parameter.
    """
    descriptor = {}
    param_type, param_format = self.__field_to_parameter_type_and_format(param)

    if param.required:
        descriptor['required'] = True
    descriptor['type'] = param_type
    if param_format:
        descriptor['format'] = param_format

    default = self.__parameter_default(param)
    if default is not None:
        descriptor['default'] = default
    if param.repeated:
        descriptor['repeated'] = True

    enum_descriptor = self.__parameter_enum(param)
    if enum_descriptor is not None:
        descriptor['enum'] = enum_descriptor
        # Placeholder descriptions: one empty entry per enum value.
        descriptor['enumDescriptions'] = [''] * len(enum_descriptor)
    return descriptor
Creates descriptor for a parameter. Args: param: The parameter to be described. Returns: Dictionary containing a descriptor for the parameter.
juraj-google-style
def CleanClientVersions(clients=None, dry_run=True, token=None):
    """A script to remove excessive client versions.

    Heavily cloned clients can accumulate an excessive number of versions in
    a single database row (shown as a dropdown in the adminui), so this
    trims them to at most one version per hour.

    Args:
        clients: A list of ClientURN; if empty, cleans all clients.
        dry_run: whether this is a dry run.
        token: datastore token.
    """
    if not clients:
        index = client_index.CreateClientIndex(token=token)
        clients = index.LookupClients(['.'])
    clients.sort()
    with data_store.DB.GetMutationPool() as pool:
        logging.info('checking %d clients', len(clients))
        client_infos = data_store.DB.MultiResolvePrefix(
            clients, 'aff4:type', data_store.DB.ALL_TIMESTAMPS)
        for client, type_list in client_infos:
            logging.info('%s: has %d versions', client, len(type_list))
            cleared = 0
            kept = 1  # The newest version (first entry) is always kept.
            last_kept = type_list[0][2]
            for _, _, ts in type_list[1:]:
                # Timestamps are in microseconds; keep a version only when it
                # is more than one hour older than the last kept one.
                if last_kept - ts > 60 * 60 * 1000000:
                    last_kept = ts
                    kept += 1
                else:
                    if not dry_run:
                        pool.DeleteAttributes(
                            client, ['aff4:type'], start=ts, end=ts)
                    cleared += 1
                    if pool.Size() > 10000:
                        pool.Flush()
            logging.info('%s: kept %d and cleared %d', client, kept, cleared)
A script to remove excessive client versions. Especially when a client is heavily cloned, we sometimes write an excessive number of versions of it. Since these version all go into the same database row and are displayed as a dropdown list in the adminui, it is sometimes necessary to clear them out. This deletes version from clients so that we have at most one version per hour. Args: clients: A list of ClientURN, if empty cleans all clients. dry_run: whether this is a dry run token: datastore token.
codesearchnet
def create_new_username(ip, devicetype=None, timeout=_DEFAULT_TIMEOUT):
    """Interactive helper function to generate a new anonymous username.

    Args:
        ip: ip address of the bridge.
        devicetype (optional): devicetype to register with the bridge. If
            unprovided, generates a device type based on the local hostname.
        timeout (optional): request timeout in seconds.

    Raises:
        QhueException if something went wrong with username generation (for
        example, if the bridge button wasn't pressed).
    """
    res = Resource(_api_url(ip), timeout)
    prompt = 'Press the Bridge button, then press Return: '
    # ``raw_input`` on Python 2, ``input`` on Python 3.
    if sys.version_info.major == 2:
        _ = raw_input(prompt)
    else:
        _ = input(prompt)
    if devicetype is None:
        # BUGFIX: the original line was truncated to ``devicetype = 'qhue``
        # (a syntax error). Rebuilt from the documented behaviour: a device
        # type derived from the local hostname.
        import socket
        devicetype = 'qhue#{}'.format(socket.getfqdn())
    response = res(devicetype=devicetype, http_method='post')
    return response[0]['success']['username']
Interactive helper function to generate a new anonymous username. Args: ip: ip address of the bridge devicetype (optional): devicetype to register with the bridge. If unprovided, generates a device type based on the local hostname. timeout (optional, default=5): request timeout in seconds Raises: QhueException if something went wrong with username generation (for example, if the bridge button wasn't pressed).
codesearchnet
def load_extra(cls, filename):
    """Loads extra JSON configuration parameters from a file on the filesystem.

    Args:
        filename: str, the filename to open.

    Returns:
        bool: True if the extra configuration parameters were read.
    """
    try:
        with open(filename, 'rb') as configuration_file:
            cls.load_extra_data(configuration_file.read())
        sys.stderr.write("Config successfully loaded from {0:s}\n".format(
            filename))
        return True
    except IOError:
        # Missing/unreadable file: report failure rather than raising.
        return False
Loads extra JSON configuration parameters from a file on the filesystem. Args: filename: str, the filename to open. Returns: bool: True if the extra configuration parameters were read.
juraj-google-style
async def _pb_request(self, endpoint, request_pb, response_pb):
    """Send a Protocol Buffer formatted chat API request.

    Args:
        endpoint (str): The chat API endpoint to use.
        request_pb: The request body as a Protocol Buffer message.
        response_pb: The response body as a Protocol Buffer message.

    Raises:
        NetworkError: If the request fails.
    """
    logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint,
                 request_pb)
    # NOTE(review): the URL argument was truncated at "https:" in the
    # original source (everything after "//" was lost, likely by a
    # comment stripper). Restored to the chat API base URL this client
    # targets -- confirm against the service definition.
    res = await self._base_request(
        'https://clients6.google.com/chat/v1/{}'.format(endpoint),
        'application/x-protobuf',  # Request body is a Protocol Buffer.
        'proto',  # Response body is a Protocol Buffer.
        request_pb.SerializeToString()
    )
    try:
        response_pb.ParseFromString(base64.b64decode(res.body))
    except binascii.Error as e:
        raise exceptions.NetworkError(
            'Failed to decode base64 response: {}'.format(e)
        )
    except google.protobuf.message.DecodeError as e:
        raise exceptions.NetworkError(
            'Failed to decode Protocol Buffer response: {}'.format(e)
        )
    logger.debug('Received Protocol Buffer response:\n%s', response_pb)
    status = response_pb.response_header.status
    if status != hangouts_pb2.RESPONSE_STATUS_OK:
        description = response_pb.response_header.error_description
        raise exceptions.NetworkError(
            'Request failed with status {}: \'{}\''
            .format(status, description)
        )
Send a Protocol Buffer formatted chat API request. Args: endpoint (str): The chat API endpoint to use. request_pb: The request body as a Protocol Buffer message. response_pb: The response body as a Protocol Buffer message. Raises: NetworkError: If the request fails.
juraj-google-style
def set_contrast(self, contrast):
    """Adjusts the image contrast.

    Contrast refers to the rate of change of color with color level. At low
    contrast, color changes gradually over many intensity levels, while at
    high contrast it can change rapidly within a few levels.

    Args:
        contrast: float between 0 and 1 (the default on init is 0.5).

    Returns:
        void
    """
    self._contrast = contrast
    spread = 2 * (1.0 - contrast)
    self.x_spread = spread
    self.y_spread = 2.0 - spread
    # Rebuild the color dictionary to reflect the new spread values.
    self._build_cdict()
Adjusts the image contrast. Contrast refers to the rate of change of color with color level. At low contrast, color changes gradually over many intensity levels, while at high contrast it can change rapidly within a few levels Args: contrast: float A number between 0 and 1. Note that upon initialization the colormap has a default contrast value of 0.5. Returns: void
juraj-google-style
def delete(self):
    """Deletes this item from its bucket.

    Raises:
        Exception if there was an error deleting the item.
    """
    if self.exists():
        # Let any API error propagate directly; the original
        # ``except Exception as e: raise e`` added no handling and only
        # rewrote the traceback origin.
        self._api.objects_delete(self._bucket, self._key)
Deletes this item from its bucket. Raises: Exception if there was an error deleting the item.
codesearchnet
def set_requestable(self, requestable=True):
    """Set the dataset to be of type requestable or not.

    Args:
        requestable (bool): Whether dataset is requestable. Defaults to True.

    Returns:
        None
    """
    self.data['is_requestdata_type'] = requestable
    if requestable:
        # Requestable datasets are always public.
        self.data['private'] = False
Set the dataset to be of type requestable or not Args: requestable (bool): Set whether dataset is requestable. Defaults to True. Returns: None
codesearchnet
def _write_submit_script(self, script_string, script_filename):
    """Write the generated submit script to a submit script file.

    Args:
        script_string (str): The filled-in submit script contents.
        script_filename (str): Name of the submit script file.

    Returns:
        True on success.

    Raises:
        SchedulerMissingArgs: If the template is missing args.
        ScriptPathError: Unable to write the submit script out.
    """
    try:
        with open(script_filename, 'w') as script_file:
            script_file.write(script_string)
    except KeyError as e:
        logger.error('Missing keys for submit script : %s', e)
        raise ep_error.SchedulerMissingArgs(e.args, self.label)
    except IOError as e:
        logger.error('Failed writing to submit script: %s', script_filename)
        raise ep_error.ScriptPathError(script_filename, e)
    return True
Load the template string with config values and write the generated submit script to a submit script file. Args: - script_string (string) : The filled-in submit script contents to write - script_filename (string) : Name of the submit script Returns: - True: on success Raises: SchedulerMissingArgs : If template is missing args ScriptPathError : Unable to write submit script out
codesearchnet
def post_create_app(cls, app, **settings):
    """Automatically register and init the Flask Marshmallow extension.

    Args:
        app (flask.Flask): The application instance in which to initialize
            Flask Marshmallow.

    Kwargs:
        settings (dict): The settings passed to this method from the parent
            app.

    Returns:
        flask.Flask: The Flask application that was passed in.
    """
    # Let parent mixins run their own post-create hooks first.
    super(MarshmallowAwareApp, cls).post_create_app(app, **settings)
    marsh.init_app(app)
    return app
Automatically register and init the Flask Marshmallow extension. Args: app (flask.Flask): The application instance in which to initialize Flask Marshmallow upon. Kwargs: settings (dict): The settings passed to this method from the parent app. Returns: flask.Flask: The Flask application that was passed in.
juraj-google-style
def Map(self, function):
    """Applies the function to every row in the table.

    Args:
        function: A function applied to each row; rows mapping to a falsy
            value are dropped from the result.

    Returns:
        A new TextTable().

    Raises:
        TableError: When the transform is not a valid row entry. The
            transform must be compatible with Append().
    """
    result_table = self.__class__()
    result_table._table = [self.header]
    for row in self:
        transformed = function(row)
        if transformed:
            result_table.Append(transformed)
    return result_table
Applies the function to every row in the table. Args: function: A function applied to each row. Returns: A new TextTable() Raises: TableError: When transform is not invalid row entry. The transform must be compatible with Append().
juraj-google-style
def flag_is_related(self, flag):
    """Checks for relationship between a flag and this block.

    Returns:
        True if the flag is related to this block.
    """
    same_worksheet = flag.worksheet == self.worksheet
    if not isinstance(flag.location, (tuple, list)):
        # Without coordinates, sharing a worksheet is the only criterion.
        return same_worksheet
    row, col = flag.location[0], flag.location[1]
    # Half-open containment check: start inclusive, end exclusive.
    inside = (self.start[0] <= row < self.end[0]
              and self.start[1] <= col < self.end[1])
    return inside and same_worksheet
Checks for relationship between a flag and this block. Returns: True if the flag is related to this block.
codesearchnet
def _log_epoch_metrics(self, epoch, logs):
    """Writes epoch metrics out as scalar summaries.

    Args:
        epoch: Int. The global step to use for TensorBoard.
        logs: Dict. Keys are scalar summary names, values are scalars.
    """
    if not logs:
        return
    # Validation metrics carry a 'val_' prefix; split them out.
    train_logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
    val_logs = {k: v for k, v in logs.items() if k.startswith('val_')}
    train_logs = self._collect_learning_rate(train_logs)
    if self.write_steps_per_second:
        train_logs['steps_per_second'] = self._compute_steps_per_second()
    with summary_ops_v2.record_if(True):
        if train_logs:
            with self._train_writer.as_default():
                for name, value in train_logs.items():
                    summary_ops_v2.scalar('epoch_' + name, value, step=epoch)
        if val_logs:
            with self._val_writer.as_default():
                for name, value in val_logs.items():
                    # Strip the 'val_' prefix so train/val curves share names.
                    name = name[4:]
                    summary_ops_v2.scalar('epoch_' + name, value, step=epoch)
Writes epoch metrics out as scalar summaries. Args: epoch: Int. The global step to use for TensorBoard. logs: Dict. Keys are scalar summary names, values are scalars.
github-repos
def infer_edge(tpm, a, b, contexts):
    """Infer the presence or absence of an edge from node A to node B.

    There is an edge from |A| to |B| if there exists any context |C(A)| such
    that |Pr(B | C(A), A=0) != Pr(B | C(A), A=1)|.

    Args:
        tpm (np.ndarray): The TPM in state-by-node, multidimensional form.
        a (int): The index of the putative source node.
        b (int): The index of the putative sink node.
        contexts: The contexts of |A| to check.

    Returns:
        bool: ``True`` if the edge |A -> B| exists, ``False`` otherwise.
    """
    def a_in_context(context):
        """Given a context C(A), return the states of the full system with A
        OFF and ON, respectively.
        """
        a_off = context[:a] + OFF + context[a:]
        a_on = context[:a] + ON + context[a:]
        return (a_off, a_on)

    def a_affects_b_in_context(context):
        """Return ``True`` if A has an effect on B, given a context."""
        (a_off, a_on) = a_in_context(context)
        return tpm[a_off][b] != tpm[a_on][b]

    return any(a_affects_b_in_context(context) for context in contexts)
Infer the presence or absence of an edge from node A to node B. Let |S| be the set of all nodes in a network. Let |A' = S - {A}|. We call the state of |A'| the context |C| of |A|. There is an edge from |A| to |B| if there exists any context |C(A)| such that |Pr(B | C(A), A=0) != Pr(B | C(A), A=1)|. Args: tpm (np.ndarray): The TPM in state-by-node, multidimensional form. a (int): The index of the putative source node. b (int): The index of the putative sink node. Returns: bool: ``True`` if the edge |A -> B| exists, ``False`` otherwise.
codesearchnet
def request_file(link, outfile, force_rerun_flag=False):
    """Download a file given a URL if the outfile does not exist already.

    Args:
        link (str): Link to download file.
        outfile (str): Path to output file; a new file is made if it does
            not exist. Not downloaded again if it does exist, unless
            ``force_rerun_flag`` is True.
        force_rerun_flag (bool): Force re-downloading even if the file
            exists already.

    Returns:
        str: Path to downloaded file.
    """
    if force_rerun(flag=force_rerun_flag, outfile=outfile):
        req = requests.get(link)
        if req.status_code == 200:
            with open(outfile, 'w') as f:
                f.write(req.text)
            log.debug('Loaded and saved {} to {}'.format(link, outfile))
        else:
            log.error('{}: request error {}'.format(link, req.status_code))
    return outfile
Download a file given a URL if the outfile does not exist already. Args: link (str): Link to download file. outfile (str): Path to output file, will make a new file if it does not exist. Will not download if it does exist, unless force_rerun_flag is True. force_rerun_flag (bool): Flag to force re-downloading of the file if it exists already. Returns: str: Path to downloaded file.
codesearchnet
def add_dataset(self, dataset, datasets_to_check=None):
    """Add a dataset.

    Args:
        dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset
            metadata either from a Dataset object or a dictionary.
        datasets_to_check (List[Dataset]): List of datasets against which to
            check existence of the dataset. Defaults to datasets in showcase.

    Returns:
        bool: True if the dataset was added, False if already present.
    """
    showcase_dataset = self._get_showcase_dataset_dict(dataset)
    if datasets_to_check is None:
        datasets_to_check = self.get_datasets()
    # NOTE: the original loop variable shadowed the ``dataset`` parameter;
    # renamed for clarity (behaviour unchanged).
    for existing_dataset in datasets_to_check:
        if showcase_dataset['package_id'] == existing_dataset['id']:
            return False
    self._write_to_hdx('associate', showcase_dataset, 'package_id')
    return True
Add a dataset Args: dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase. Returns: bool: True if the dataset was added, False if already present
codesearchnet
def has_valid_soma(data_wrapper):
    """Check if a data block has a valid soma.

    Returns:
        CheckResult with result.
    """
    try:
        make_soma(data_wrapper.soma_points())
    except SomaError:
        return CheckResult(False)
    else:
        return CheckResult(True)
Check if a data block has a valid soma Returns: CheckResult with result
codesearchnet
def get_job_results(self, job_resource_name: str) -> List[TrialResult]:
    """Returns the actual results (not metadata) of a completed job.

    Params:
        job_resource_name: A string of the form
            `projects/project_id/programs/program_id/jobs/job_id`.

    Returns:
        An iterable over the TrialResult, one per parameter in the
        parameter sweep.
    """
    response = self.service.projects().programs().jobs().getResult(
        parent=job_resource_name).execute()
    trial_results = []
    for sweep_result in response['result']['sweepResults']:
        repetitions = sweep_result['repetitions']
        # (measurement key, qubit count) pairs used to unpack packed bytes.
        key_sizes = [(m['key'], len(m['qubits']))
                     for m in sweep_result['measurementKeys']]
        for result in sweep_result['parameterizedResults']:
            data = base64.standard_b64decode(result['measurementResults'])
            measurements = unpack_results(data, repetitions, key_sizes)
            trial_results.append(TrialResult(
                params=ParamResolver(
                    result.get('params', {}).get('assignments', {})),
                repetitions=repetitions,
                measurements=measurements))
    return trial_results
Returns the actual results (not metadata) of a completed job. Params: job_resource_name: A string of the form `projects/project_id/programs/program_id/jobs/job_id`. Returns: An iterable over the TrialResult, one per parameter in the parameter sweep.
juraj-google-style
def up(name, debug=False):
    """Create servers and containers as required to meet the configuration
    specified in *name*.

    Args:
        name: The name of the yaml config file (you can omit the .yml
            extension for convenience).

    Example:
        fab ensemble.up:wordpress
    """
    if debug:
        env.ensemble_debug = True
    candidate_filenames = [name, '%s.yml' % name, '%s.yaml' % name]
    for filename in candidate_filenames:
        if os.path.exists(filename):
            with open(filename, 'r') as f:
                # safe_load: the manifest needs no arbitrary-object tags, and
                # calling yaml.load without an explicit Loader is removed in
                # modern PyYAML (and was unsafe before that).
                config = yaml.safe_load(f)
            break
    else:
        abort('Ensemble manifest not found: %s' % name)
    uncache()
    try:
        do_up(config)
    # BUGFIX: the original used Python-2-only ``except X, e:`` syntax, which
    # is a SyntaxError on Python 3; ``as e`` works on both.
    except exceptions.ConfigException as e:
        abort('Config error: ' + str(e))
Create servers and containers as required to meet the configuration specified in _name_. Args: * name: The name of the yaml config file (you can omit the .yml extension for convenience) Example: fab ensemble.up:wordpress
juraj-google-style
def AddPath(self, path):
    """Adds a field path into the tree.

    If the field path to add is a sub-path of an existing leaf in the tree,
    the tree already matches it and nothing changes. Otherwise the path is
    added, and the final node's children are cleared because the new path
    covers them all.

    Args:
        path: The field path to add, e.g. ``"a.b.c"``.
    """
    node = self._root
    for name in path.split('.'):
        if name not in node:
            node[name] = {}
        elif not node[name]:
            # An existing leaf already covers this path; nothing to do.
            return
        node = node[name]
    # Turn the final node into a leaf: any children are now subsumed.
    node.clear()
Adds a field path into the tree. If the field path to add is a sub-path of an existing field path in the tree (i.e., a leaf node), it means the tree already matches the given path so nothing will be added to the tree. If the path matches an existing non-leaf node in the tree, that non-leaf node will be turned into a leaf node with all its children removed because the path matches all the node's children. Otherwise, a new path will be added. Args: path: The field path to add.
codesearchnet
def __init__(self, title, energy, stoichiometry):
    """Initialise a Calculation object.

    Args:
        title (str): The title string for this calculation.
        energy (float): Final energy in eV.
        stoichiometry (dict): The calculation stoichiometry,
            e.g. {'Ti': 1, 'O': 2}.

    Returns:
        None
    """
    self.title = title
    self.energy = energy
    # Counter supports arithmetic on stoichiometries (sums, comparisons).
    self.stoichiometry = Counter(stoichiometry)
Initialise a Calculation object Args: title (Str): The title string for this calculation. energy (Float): Final energy in eV. stoichiometry (Dict{Str:Int}): A dict desribing the calculation stoichiometry, e.g. { 'Ti': 1, 'O': 2 } Returns: None
juraj-google-style
def update_task_ids(self, encoder_vocab_size):
    """Generate task_ids for each problem.

    The ids correspond to the index of the task in the task_list, offset by
    the encoder vocabulary size.

    Args:
        encoder_vocab_size: the size of the vocab which is used to compute
            the index offset.
    """
    for idx, task in enumerate(self.task_list):
        task.set_task_id(idx + encoder_vocab_size)
        tf.logging.info('Task %d (%s) has id %d.' %
                        (idx, task.name, task.task_id))
Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset.
codesearchnet
def multiple_replace(string, replacements):
    """Simultaneously replace multiple strings in a string.

    Longer keys take precedence when replacements overlap.

    Args:
        string (str): Input string.
        replacements (Dict[str,str]): Replacements dictionary.

    Returns:
        str: String with replacements applied.
    """
    keys_longest_first = sorted(replacements, key=len, reverse=True)
    pattern = re.compile('|'.join(re.escape(key) for key in keys_longest_first),
                         flags=re.DOTALL)
    return pattern.sub(lambda match: replacements[match.group(0)], string)
Simultaneously replace multiple strings in a string Args: string (str): Input string replacements (Dict[str,str]): Replacements dictionary Returns: str: String with replacements
codesearchnet
def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
    """Log provided 'op_log', and add additional model information below.

    The API also assigns ops in tf.compat.v1.trainable_variables() an op
    type called '_trainable_variables', and logs 'flops' statistics for ops
    with op.RegisterStatistics() defined. flops calculation depends on
    Tensor shapes defined in 'graph', which might not be complete;
    'run_meta', if provided, completes the shape information with best
    effort.

    Args:
        graph: tf.Graph. If None and eager execution is not enabled, use
            default graph.
        log_dir: directory to write the log file.
        op_log: (Optional) OpLogProto proto to be written. If not provided,
            a new one is created.
        run_meta: (Optional) RunMetadata proto that helps flops computation
            using run time shape information.
        add_trace: Whether to add python code trace information. Used to
            support "code" view.
    """
    if not graph and (not context.executing_eagerly()):
        graph = ops.get_default_graph()
    # Merge caller-provided op_log with defaults (trainable vars, flops).
    op_log = merge_default_with_oplog(graph, op_log, run_meta, add_trace)
    with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'w') as log:
        log.write(op_log.SerializeToString())
Log provided 'op_log', and add additional model information below. The API also assigns ops in tf.compat.v1.trainable_variables() an op type called '_trainable_variables'. The API also logs 'flops' statistics for ops with op.RegisterStatistics() defined. flops calculation depends on Tensor shapes defined in 'graph', which might not be complete. 'run_meta', if provided, completes the shape information with best effort. Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. log_dir: directory to write the log file. op_log: (Optional) OpLogProto proto to be written. If not provided, an new one is created. run_meta: (Optional) RunMetadata proto that helps flops computation using run time shape information. add_trace: Whether to add python code trace information. Used to support "code" view.
github-repos
def build_deps(self):
    """Same as runtime_deps, but build dependencies.

    Test and install requires are included if the package contains a test
    suite, to prevent %check phase crashes because of missing dependencies.

    Returns:
        list: sorted build dependencies of the package.
    """
    # BUGFIX: copy the list. The original aliased
    # self.metadata['setup_requires'] directly, so the in-place ``+=`` and
    # ``append`` below mutated the package metadata on every call.
    build_requires = list(self.metadata['setup_requires'])
    if self.has_test_suite:
        build_requires += (self.metadata['tests_require'] +
                           self.metadata['install_requires'])
    if 'setuptools' not in build_requires:
        build_requires.append('setuptools')
    return sorted(self.name_convert_deps_list(
        deps_from_pyp_format(build_requires, runtime=False)))
Same as runtime_deps, but build dependencies. Test and install requires are included if package contains test suite to prevent %check phase crashes because of missing dependencies Returns: list of build dependencies of the package
codesearchnet
def unpack(self, gpsd_socket_response):
    """Sets new socket data as DataStream attributes in those initialised
    dictionaries.

    Arguments:
        gpsd_socket_response (json object): raw JSON string from gpsd.

    Provides:
        self attributes, e.g., self.lat, self.gdop.

    Handles internally (writing to stderr):
        AttributeError: when the device falls out of the system.
        ValueError, KeyError: extra or mangled JSON data.
    """
    try:
        fresh_data = json.loads(gpsd_socket_response)
        class_name = fresh_data.pop('class')
        for key in self.packages[class_name]:
            # BUGFIX: the original condition read
            # ``class_name == 'GST' and key == 'lat' or 'lon'``, which is
            # always truthy ('lon' is a non-empty string), so every key of
            # every class also got an 'sd'-prefixed attribute. Only GST
            # lat/lon carry standard deviations.
            if class_name == 'GST' and key in ('lat', 'lon'):
                setattr(self, 'sd' + key, fresh_data.get(key, 'n/a'))
            setattr(self, key, fresh_data.get(key, 'n/a'))
    except AttributeError:
        sys.stderr.write('There is an unexpected exception unpacking JSON object')
        return
    except (ValueError, KeyError) as error:
        sys.stderr.write(str(error))
        return
Sets new socket data as DataStream attributes in those initialised dictionaries Arguments: gpsd_socket_response (json object): Provides: self attributes, e.g., self.lat, self.gdop Raises: AttributeError: 'str' object has no attribute 'keys' when the device falls out of the system ValueError, KeyError: most likely extra, or mangled JSON data, should not happen, but that applies to a lot of things.
juraj-google-style
def DistFitDataset(Dat):
    """Given a data matrix, return the per-gene fit error for the Poisson,
    Normal, and Log-Normal distributions.

    Args:
        Dat (array): numpy array with shape (genes, cells).

    Returns:
        dict: 'poiss', 'norm', 'lognorm' give the fit error arrays, one
            entry per gene.
    """
    n_genes, _n_cells = Dat.shape
    poiss = np.zeros(n_genes)
    norm = np.zeros(n_genes)
    lognorm = np.zeros(n_genes)
    for i in range(n_genes):
        errors = GetDistFitError(Dat[i])
        poiss[i] = errors['poiss']
        norm[i] = errors['norm']
        lognorm[i] = errors['lognorm']
    return {'poiss': poiss, 'norm': norm, 'lognorm': lognorm}
Given a data matrix, this returns the per-gene fit error for the Poisson, Normal, and Log-Normal distributions. Args: Dat (array): numpy array with shape (genes, cells) Returns: d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution.
juraj-google-style
def read_config(config_path=CONFIG_PATH):
    """Read the config information from the config file.

    Args:
        config_path (str): Relative path to the email config file.

    Returns:
        defaultdict: A defaultdict with the config information.

    Raises:
        IOError: if no config file exists at ``config_path``.
    """
    if not os.path.isfile(config_path):
        raise IOError('No config file found at %s' % config_path)
    parser = configparser.ConfigParser()
    parser.read(config_path)
    return _config_parser_to_defaultdict(parser)
Read the config information from the config file. Args: config_path (str): Relative path to the email config file. Returns: defaultdict: A defaultdict with the config information. Raises: IOError
codesearchnet
def ensure_app_cache_dir(appname, *args):
    """Calls `get_app_cache_dir` but ensures the directory exists.

    Args:
        appname (str): the name of the application.
        *args: any other subdirectories may be specified.

    Returns:
        str: the ensured cache directory path.

    SeeAlso:
        get_app_cache_dir
    """
    from ubelt import util_path
    cache_dpath = get_app_cache_dir(appname, *args)
    util_path.ensuredir(cache_dpath)
    return cache_dpath
Calls `get_app_cache_dir` but ensures the directory exists. Args: appname (str): the name of the application *args: any other subdirectories may be specified SeeAlso: get_app_cache_dir Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> assert exists(dpath)
juraj-google-style
def _stop_profiler(self, save=True):
    """Stops the profiler if currently active.

    Args:
        save: Whether to save the profiler results to TensorBoard.
    """
    if not self._profiler_started:
        return
    try:
        backend.tensorboard.stop_trace(save=save)
    except Exception as e:  # noqa: broad catch mirrors best-effort shutdown
        logging.error('Failed to stop profiler: %s', e)
    finally:
        # Mark stopped even on failure so we do not try to stop twice.
        self._profiler_started = False
Stops the profiler if currently active. Args: save: Whether to save the profiler results to TensorBoard.
github-repos
def write_index_and_rst_files(self, overwrite: bool = False,
                              mock: bool = False) -> None:
    """Writes both the individual RST files and the index.

    Args:
        overwrite: allow existing files to be overwritten?
        mock: pretend to write, but don't.
    """
    for item in self.files_to_index:
        if isinstance(item, FileToAutodocument):
            item.write_rst(
                prefix=self.rst_prefix,
                suffix=self.rst_suffix,
                heading_underline_char=self.source_rst_heading_underline_char,
                overwrite=overwrite,
                mock=mock,
            )
        elif isinstance(item, AutodocIndex):
            # Recurse into nested sub-indexes.
            item.write_index_and_rst_files(overwrite=overwrite, mock=mock)
        else:
            fail("Unknown thing in files_to_index: {!r}".format(item))
    self.write_index(overwrite=overwrite, mock=mock)
Writes both the individual RST files and the index. Args: overwrite: allow existing files to be overwritten? mock: pretend to write, but don't
juraj-google-style
def __and__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Returns a TensorFluent for the and logical operator.

    Args:
        self: The first operand.
        other: The second operand.

    Returns:
        A TensorFluent wrapping the operator's output.
    """
    # Delegate to the shared binary-operator helper with a boolean dtype.
    return self._binary_op(self, other, tf.logical_and, tf.bool)
Returns a TensorFluent for the and logical operator. Args: self: The first operand. other: The second operand. Returns: A TensorFluent wrapping the operator's output.
juraj-google-style
def _handle_location(self, location): if (not isinstance(location, ElementTree.Element)): element = self.find(location) if (element is None): raise ValueError('Invalid path!') else: element = location return element
Return an element located at location with flexible args. Args: location: String xpath to use in an Element.find search OR an Element (which is simply returned). Returns: The found Element. Raises: ValueError if the location is a string that results in a find of None.
codesearchnet
def create(self, value):
    """Create the troposphere type(s) from ``value``.

    Args:
        value (Union[dict, list]): A dict (or list of dicts) of
            parameters passed to the type's ``from_dict`` method.

    Returns:
        Union[list, type]: The instantiated troposphere object(s); a list
        only when this variable allows many.
    """
    # Optional variables treat None/empty input as "not provided".
    if (self._optional and ((value is None) or (len(value) == 0))):
        return None
    if hasattr(self._type, 'resource_type'):
        # Resources are keyed by title -> parameter dict.
        if (not isinstance(value, dict)):
            raise ValueError('Resources must be specified as a dict of title to parameters')
        if ((not self._many) and (len(value) > 1)):
            raise ValueError('Only one resource can be provided for this TroposphereType variable')
        result = [self._type.from_dict(title, v) for (title, v) in value.items()]
    elif self._many:
        result = [self._type.from_dict(None, v) for v in value]
    elif (not isinstance(value, dict)):
        raise ValueError('TroposphereType for a single non-resourcetype must be specified as a dict of parameters')
    else:
        result = [self._type.from_dict(None, value)]
    if self._validate:
        # Eagerly validate required properties so errors surface here.
        for v in result:
            v._validate_props()
    return (result[0] if (not self._many) else result)
Create the troposphere type from the value. Args: value (Union[dict, list]): A dictionary or list of dictionaries (see class documentation for details) to use as parameters to create the Troposphere type instance. Each dictionary will be passed to the `from_dict` method of the type. Returns: Union[list, type]: Returns the value converted to the troposphere type
codesearchnet
def click_exists(self, timeout=0):
    """Wait for the element and click it if it appears.

    Args:
        timeout (float): how long to wait for the element

    Returns:
        bool: True if the element was found and clicked, False otherwise.
    """
    element = self.get(timeout=timeout, raise_error=False)
    if element is None:
        return False
    element.click()
    return True
Wait for the element and click it if it appears Args: timeout (float): timeout for the wait Returns: bool: whether the element was found and successfully clicked
codesearchnet
def has_arg(fn, arg_name):
    """Check whether callable *fn* accepts a keyword argument *arg_name*.

    Args:
        fn: callable to inspect
        arg_name: string, keyword argument name to check

    Returns:
        bool: whether ``fn`` accepts an ``arg_name`` keyword argument.
    """
    if sys.version_info < (3,):
        # Python 2: getargspec only works on plain functions/methods;
        # other callables are inspected through their __call__.
        if isinstance(fn, (types.FunctionType, types.MethodType)):
            arg_spec = inspect.getargspec(fn)
        else:
            try:
                arg_spec = inspect.getargspec(fn.__call__)
            except AttributeError:
                return False
        return arg_name in arg_spec.args
    if sys.version_info < (3, 6):
        # Python 3.0-3.5: keyword-only args live in a separate list.
        arg_spec = inspect.getfullargspec(fn)
        return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs
    # Python 3.6+: use the signature API; fall back to __call__ for
    # callables signature() rejects directly.
    try:
        signature = inspect.signature(fn)
    except ValueError:
        signature = inspect.signature(fn.__call__)
    parameter = signature.parameters.get(arg_name)
    if parameter is None:
        return False
    return parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,
                              inspect.Parameter.KEYWORD_ONLY)
Checks if a callable accepts a given keyword argument. Args: fn: callable to inspect arg_name: string, keyword argument name to check Returns: bool, whether `fn` accepts a `arg_name` keyword argument.
juraj-google-style
def read_from_hdx(identifier, configuration=None):
    """Read the user given by ``identifier`` from HDX.

    Args:
        identifier (str): Identifier of user.
        configuration (Optional[Configuration]): HDX configuration.
            Defaults to the global configuration.

    Returns:
        Optional[User]: User object on a successful read, else None.
    """
    user = User(configuration=configuration)
    result = user._load_from_hdx('user', identifier)
    if result:
        return user
    # Lookup failed (e.g. unknown identifier); signal with None.
    return None
Reads the user given by identifier from HDX and returns User object Args: identifier (str): Identifier of user configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[User]: User object if successful read, None if not
juraj-google-style
def __init__(self, boundaries, values, name=None):
    """Piecewise constant schedule from boundaries and interval values.

    Args:
        boundaries: A list of `Tensor`s/`int`s/`float`s with strictly
            increasing entries, with the same type as the optimizer step.
        values: Values for the intervals defined by `boundaries`; must
            have exactly one more element than `boundaries`.
        name: Optional name for the operation. Defaults to
            'PiecewiseConstant'.

    Raises:
        ValueError: if ``len(values) != len(boundaries) + 1``.
    """
    super(PiecewiseConstantDecay, self).__init__()
    # Each boundary splits the step axis, so values needs one extra entry.
    if len(boundaries) != len(values) - 1:
        raise ValueError('The length of boundaries should be 1 less than the length of values')
    self.boundaries = boundaries
    self.values = values
    self.name = name
Piecewise constant from boundaries and interval values. Args: boundaries: A list of `Tensor`s or `int`s or `float`s with strictly increasing entries, and with all elements having the same type as the optimizer step. values: A list of `Tensor`s or `float`s or `int`s that specifies the values for the intervals defined by `boundaries`. It should have one more element than `boundaries`, and all elements should have the same type. name: A string. Optional name of the operation. Defaults to 'PiecewiseConstant'. Raises: ValueError: if the number of elements in the lists do not match.
github-repos
def indentjoin(strlist, indent='\n ', suffix=''):
    """Join strings like ``indent.join(strlist)``, but with the separator
    also prefixed to the first element.

    Improvements over the previous version: the pointless ``indent_``
    alias is removed and ``six.text_type`` is replaced by the builtin
    ``str`` (identical on Python 3), dropping the six dependency.

    Args:
        strlist (Iterable): items to join (each converted with ``str``)
        indent (str): separator placed before every item
        suffix (str): appended to every item

    Returns:
        str: the joined string, or ``''`` for an empty input
    """
    strlist = list(strlist)
    if not strlist:
        return ''
    return indent + indent.join(str(item) + suffix for item in strlist)
r""" Convenience indentjoin similar to '\n '.join(strlist) but the indent is also prefixed to the first element Args: strlist (Iterable): strings to join indent (str): separator prefixed to each item suffix (str): appended to each item Returns: str: joined list
juraj-google-style
def get_equiv_transformations(self, transformation_sets, film_vectors, substrate_vectors):
    """Apply transformation sets to the film/substrate vectors and yield
    the super-lattice vector pairs that match.

    Args:
        transformation_sets (array): sets of (film, substrate)
            super-lattice transformation matrices to try, grouped by the
            film/substrate area multiples they correspond to.
        film_vectors (array): film vectors to generate super lattices.
        substrate_vectors (array): substrate vectors to generate super
            lattices.

    Yields:
        ``[film_sl_vectors, substrate_sl_vectors]`` pairs whose reduced
        super-lattice vectors match according to ``self.is_same_vectors``.
    """
    for (film_transformations, substrate_transformations) in transformation_sets:
        # Build all candidate super-lattices, reduced to a canonical basis.
        films = [reduce_vectors(*np.dot(f, film_vectors)) for f in film_transformations]
        substrates = [reduce_vectors(*np.dot(s, substrate_vectors)) for s in substrate_transformations]
        # Compare every film candidate against every substrate candidate.
        for (f, s) in product(films, substrates):
            if self.is_same_vectors(f, s):
                (yield [f, s])
Applies the transformation_sets to the film and substrate vectors to generate super-lattices and checks if they matches. Returns all matching vectors sets. Args: transformation_sets(array): an array of transformation sets: each transformation set is an array with the (i,j) indicating the area multipes of the film and subtrate it corresponds to, an array with all possible transformations for the film area multiple i and another array for the substrate area multiple j. film_vectors(array): film vectors to generate super lattices substrate_vectors(array): substrate vectors to generate super lattices
codesearchnet
def classify_coupling(coupling):
    """Return the :class:`.CouplingClass` constant for a coupling pair.

    Args:
        coupling: Tuple of (minimum, maximum) flux ratio; either end may
            be None to indicate an unbounded direction.
    """
    low, high = coupling
    if low is None and high is None:
        return CouplingClass.Uncoupled
    if low is None or high is None:
        return CouplingClass.DirectionalReverse
    if low == 0.0 and high == 0.0:
        return CouplingClass.Inconsistent
    if low <= 0.0 and high >= 0.0:
        return CouplingClass.DirectionalForward
    if abs(low - high) < 1e-6:
        return CouplingClass.Full
    return CouplingClass.Partial
Return a constant indicating the type of coupling. Depending on the type of coupling, one of the constants from :class:`.CouplingClass` is returned. Args: coupling: Tuple of minimum and maximum flux ratio
juraj-google-style
def _determine_and_instrument_traced_tensors(self, graph_order, ops_in_exec_path, tensor_trace_points, report_handler):
    """Determine which tensors to trace and record the trace details.

    Args:
        graph_order: graph_order tuple (graph, operations, op_to_idx,
            tensors, tensor_to_idx, contains_cycle,
            topological_order_or_cycle).
        ops_in_exec_path: Set of ops in the execution path.
        tensor_trace_points: Collection of programmatic tensor trace
            points.
        report_handler: A tensor_tracer_report.TTReportHandle instance.

    Returns:
        List of tensors to be traced.
    """
    traced_tensors = []
    checkpoint_operations = set([tensor.op for tensor, _ in tensor_trace_points])
    for op_id, op in enumerate(graph_order.operations):
        # When explicit trace points exist, restrict tracing to them.
        if checkpoint_operations and op not in checkpoint_operations:
            continue
        # Ops may be skipped by user config or because they never execute.
        if self._skip_op(op_id, op, ops_in_exec_path, report_handler):
            continue
        for i in range(len(op.outputs)):
            out_tensor = op.outputs[i]
            if not self._skip_tensor(op_id, out_tensor, report_handler):
                traced_tensors.append(out_tensor)
    return traced_tensors
Determines the tensors to trace and instruments the trace details. Args: graph_order: graph_order tuple containing graph (tf.graph), operations (list of operations), op_to_idx (op id mapping), (tensors) list of tensors, tensor_to_idx (tensor id mapping), contains_cycle (whether there is a cycle in the graph), topological_order_or_cycle (list of ops in topological order or list of ops creating a cycle). ops_in_exec_path: Set of ops in the execution path. tensor_trace_points: Collection of programatic tensor trace points. report_handler: An instance of tensor_tracer_report.TTReportHandle. Returns: List of tensors to be traced.
github-repos
def stats(self):
    """Return statistics calculated over all features in the container.

    Note:
        The feature container has to be opened in advance.

    Returns:
        DataStats: Statistics over all data points of all features.
    """
    self.raise_error_if_not_open()
    per_key_stats = self.stats_per_key()
    # Merge the per-key statistics into one aggregate over the container.
    return stats.DataStats.concatenate(per_key_stats.values())
Return statistics calculated overall features in the container. Note: The feature container has to be opened in advance. Returns: DataStats: Statistics overall data points of all features.
codesearchnet
class PatchTSMixerForPreTrainingOutput(ModelOutput):
    """Output type of [`PatchTSMixerForPreTraining`].

    Fields (all optional, populated by the model's forward pass):
        loss: total pretraining loss; only set when targets are provided.
        prediction_outputs: reconstruction from the pretrain head, shape
            (batch_size, num_input_channels, num_patches, patch_length).
        last_hidden_state: backbone embeddings before the head, shape
            (batch_size, num_input_channels, num_patches, d_model).
        hidden_states: hidden states at the output of each layer, when
            requested.
    """

    # Total pretraining loss; None unless targets (`y`) were given.
    loss: Optional[torch.FloatTensor] = None
    # Pretrain-head reconstruction of the input patches.
    prediction_outputs: Optional[torch.FloatTensor] = None
    # Backbone embeddings before passing through the head.
    last_hidden_state: Optional[torch.FloatTensor] = None
    # Per-layer hidden states (tuple), when output_hidden_states=True.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
Output type of [`PatchTSMixerForPreTrainingOutput`]. Args: prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, patch_length)`): Prediction output from the pretrain head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss
github-repos
def activate(fn=None):
    """Enable the HTTP traffic interceptors.

    Can be used either as a plain call or as a function decorator that
    activates interception only for the duration of the wrapped call.

    Arguments:
        fn (function|coroutinefunction): optional function when used as a
            decorator.

    Returns:
        function: the decorator wrapper when called with a function,
        otherwise ``None``.
    """
    # Plain call (or non-callable argument): just switch the engine on.
    if (not isfunction(fn)):
        _engine.activate()
        return None
    # Async functions get a dedicated wrapper that awaits them.
    if ((iscoroutinefunction is not None) and iscoroutinefunction(fn)):
        return activate_async(fn, _engine)

    @functools.wraps(fn)
    def wrapper(*args, **kw):
        _engine.activate()
        try:
            # NOTE(review): the wrapped function's return value is
            # discarded here -- confirm callers don't rely on it.
            fn(*args, **kw)
        finally:
            # Always disable so one failing call cannot leak interception.
            _engine.disable()
    return wrapper
Enables the HTTP traffic interceptors. This function can be used as decorator. Arguments: fn (function|coroutinefunction): Optional function argument if used as decorator. Returns: function: decorator wrapper function, only if called as decorator, otherwise ``None``. Example:: # Standard use case pook.activate() pook.mock('server.com/foo').reply(404) res = requests.get('server.com/foo') assert res.status_code == 404 pook.disable() # Decorator use case @pook.activate def test_request(): pook.mock('server.com/foo').reply(404) res = requests.get('server.com/foo') assert res.status_code == 404
codesearchnet
def repertoire(self, direction, mechanism, purview):
    """Return the cause or effect repertoire based on a direction.

    Args:
        direction (Direction): |CAUSE| or |EFFECT|.
        mechanism (tuple[int]): The mechanism for which to calculate the
            repertoire.
        purview (tuple[int]): The purview over which to calculate the
            repertoire.

    Returns:
        np.ndarray: The cause or effect repertoire of the mechanism over
        the purview.

    Raises:
        ValueError: If ``direction`` is invalid (raised by
            ``validate.direction``).
    """
    if (direction == Direction.CAUSE):
        return self.cause_repertoire(mechanism, purview)
    elif (direction == Direction.EFFECT):
        return self.effect_repertoire(mechanism, purview)
    # validate.direction raises for any other value.
    return validate.direction(direction)
Return the cause or effect repertoire based on a direction. Args: direction (Direction): |CAUSE| or |EFFECT|. mechanism (tuple[int]): The mechanism for which to calculate the repertoire. purview (tuple[int]): The purview over which to calculate the repertoire. Returns: np.ndarray: The cause or effect repertoire of the mechanism over the purview. Raises: ValueError: If ``direction`` is invalid.
codesearchnet
def build_docs(output_dir, code_url_prefix, search_hints):
    """Build API docs for TensorFlow 2.

    Args:
        output_dir: A string path, where to put the files.
        code_url_prefix: prefix for "Defined in" links.
        search_hints: Bool. Include meta-data search hints at the top of
            each file.

    Raises:
        ValueError: if the generator produced fewer files than expected.
    """
    output_dir = pathlib.Path(output_dir)
    site_path = pathlib.Path('/', FLAGS.site_path)
    # Mark deprecated / internal namespaces so they render accordingly.
    doc_controls.set_deprecated(tf.compat.v1)
    try:
        doc_controls.set_deprecated(tf.estimator)
    except AttributeError:
        # tf.estimator is absent in newer TF builds.
        pass
    doc_controls.set_deprecated(tf.feature_column)
    doc_controls.set_deprecated(tf.keras.preprocessing)
    doc_controls.set_custom_page_builder_cls(tf.raw_ops, RawOpsPageInfo)
    # raw_ops symbols are numerous; keep them out of search indexing.
    for name, obj in tf_inspect.getmembers(tf.raw_ops):
        if not name.startswith('_'):
            doc_controls.hide_from_search(obj)
    for cls in [tf.Module, tf.keras.layers.Layer, tf.keras.optimizers.Optimizer]:
        doc_controls.decorate_all_class_attributes(decorator=doc_controls.do_not_doc_in_subclasses, cls=cls, skip=['__init__'])
    do_not_document = ['tf.__internal__', 'tf.keras.__internal__', 'tf.keras.wrappers', 'tf.__operators__', 'tf.tools', 'tf.compat.v1.pywrap_tensorflow', 'tf.pywrap_tensorflow', 'tf.flags', 'tf.batch_mat_mul_v3', 'tf.sparse_segment_sum_grad']
    for path in do_not_document:
        item = tf
        # Walk the dotted path; names missing from this build resolve to
        # None (getattr on None keeps returning None) and are skipped.
        for part in path.split('.')[1:]:
            item = getattr(item, part, None)
        if item is None:
            continue
        doc_controls.do_not_generate_docs(item)
    base_dirs, code_url_prefixes = base_dir.get_base_dirs_and_prefixes(code_url_prefix)
    doc_generator = generate_lib.DocGenerator(root_title='TensorFlow 2', py_modules=[('tf', tf)], base_dir=base_dirs, search_hints=search_hints, code_url_prefix=code_url_prefixes, site_path=site_path, visitor_cls=TfExportAwareVisitor, private_map=_PRIVATE_MAP, extra_docs=_EXTRA_DOCS, callbacks=base_dir.get_callbacks())
    doc_generator.build(output_dir)

    @contextlib.contextmanager
    def edit_yaml_file(path):
        # Load, hand the parsed YAML to the caller, then write it back.
        content = yaml.safe_load(path.read_text())
        yield content
        with path.open('w') as f:
            yaml.dump(content, f, default_flow_style=False)

    # Post-process generated YAML: point the ToC at the overview page and
    # redirect /tf_overview back to /tf.
    toc_path = output_dir / 'tf/_toc.yaml'
    with edit_yaml_file(toc_path) as toc:
        toc['toc'][0]['section'][0]['path'] = str(site_path / 'tf_overview')
    redirects_path = output_dir / 'tf/_redirects.yaml'
    with edit_yaml_file(redirects_path) as redirects:
        redirects['redirects'].append({'from': str(site_path / 'tf_overview'), 'to': str(site_path / 'tf')})
    # Sanity check: a tiny output usually means generation silently failed.
    num_files = len(list(output_dir.rglob('*')))
    if num_files < MIN_NUM_FILES_EXPECTED:
        raise ValueError(f'The TensorFlow api should be more than {MIN_NUM_FILES_EXPECTED} files(found {num_files}).')
Build api docs for tensorflow v2. Args: output_dir: A string path, where to put the files. code_url_prefix: prefix for "Defined in" links. search_hints: Bool. Include meta-data search hints at the top of each file.
github-repos
def macro_state(self, micro_state):
    """Compute the macro-state of this blackbox.

    This is just the state of the blackbox's output indices.

    Args:
        micro_state (tuple[int]): The state of the micro-elements in the
            blackbox.

    Returns:
        tuple[int]: The state of the output indices.
    """
    assert len(micro_state) == len(self.micro_indices)
    # Reindex so output indices refer to positions within micro_state.
    reindexed = self.reindex()
    return utils.state_of(reindexed.output_indices, micro_state)
Compute the macro-state of this blackbox. This is just the state of the blackbox's output indices. Args: micro_state (tuple[int]): The state of the micro-elements in the blackbox. Returns: tuple[int]: The state of the output indices.
juraj-google-style
def FixedUnPooling(x, shape, unpool_mat=None, data_format='channels_last'):
    """Unpool the input with a fixed matrix to perform a Kronecker product with.

    Args:
        x (tf.Tensor): a 4D image tensor
        shape: int or (h, w) tuple
        unpool_mat: a tf.Tensor or np.ndarray 2D matrix with size=shape.
            If None, a matrix with 1 at the top-left corner is used.
        data_format (str): 'channels_last' or 'channels_first'.

    Returns:
        tf.Tensor: a 4D image tensor.
    """
    data_format = get_data_format(data_format, keras_mode=False)
    shape = shape2d(shape)
    # Track both static and dynamic output shapes (H*sh, W*sw).
    output_shape = StaticDynamicShape(x)
    output_shape.apply((1 if (data_format == 'NHWC') else 2), (lambda x: (x * shape[0])))
    output_shape.apply((2 if (data_format == 'NHWC') else 3), (lambda x: (x * shape[1])))
    if ((shape[0] == 2) and (shape[1] == 2) and (unpool_mat is None) and (data_format == 'NHWC')):
        # Fast path: 2x2 zero-filled unpooling has a dedicated implementation.
        ret = UnPooling2x2ZeroFilled(x)
    else:
        if (unpool_mat is None):
            # Default matrix: place each value at the top-left of its cell.
            mat = np.zeros(shape, dtype='float32')
            mat[0][0] = 1
            unpool_mat = tf.constant(mat, name='unpool_mat')
        elif isinstance(unpool_mat, np.ndarray):
            unpool_mat = tf.constant(unpool_mat, name='unpool_mat')
        assert (unpool_mat.shape.as_list() == list(shape))
        # Compute the Kronecker product in NCHW, then restore the layout.
        if (data_format == 'NHWC'):
            x = tf.transpose(x, [0, 3, 1, 2])
        x = tf.expand_dims(x, (- 1))
        mat = tf.expand_dims(unpool_mat, 0)
        ret = tf.tensordot(x, mat, axes=1)
        if (data_format == 'NHWC'):
            ret = tf.transpose(ret, [0, 2, 4, 3, 5, 1])
        else:
            ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5])
        # Collapse the interleaved axes into the final spatial dims.
        shape3_dyn = [output_shape.get_dynamic(k) for k in range(1, 4)]
        ret = tf.reshape(ret, tf.stack(([(- 1)] + shape3_dyn)))
    ret.set_shape(tf.TensorShape(output_shape.get_static()))
    return ret
Unpool the input with a fixed matrix to perform kronecker product with. Args: x (tf.Tensor): a 4D image tensor shape: int or (h, w) tuple unpool_mat: a tf.Tensor or np.ndarray 2D matrix with size=shape. If is None, will use a matrix with 1 at top-left corner. Returns: tf.Tensor: a 4D image tensor.
codesearchnet
def bounding_box(locations):
    """Compute the bounding box of an iterable of (x, y) coordinates.

    Args:
        locations: iterable of (x, y) tuples.

    Returns:
        `Rect`: Coordinates of the bounding box.
    """
    xs = [loc[0] for loc in locations]
    ys = [loc[1] for loc in locations]
    left, top = min(xs), min(ys)
    return Rect(left, top, max(xs) - left, max(ys) - top)
Computes the bounding box of an iterable of (x, y) coordinates. Args: locations: iterable of (x, y) tuples. Returns: `Rect`: Coordinates of the bounding box.
juraj-google-style
def _recommend_command(command, description, indent=2, create_link=False):
    """Generate a RichTextLines object that describes a recommended command.

    Args:
        command: (str) The command to recommend.
        description: (str) A description of what the command does.
        indent: (int) How many spaces to indent in the beginning.
        create_link: (bool) Whether a command link is to be applied to the
            command string.

    Returns:
        (RichTextLines) Formatted text (with font attributes) recommending
        the command.
    """
    indent_str = ' ' * indent
    if create_link:
        # A MenuItem makes the command clickable in the CLI.
        font_attr = [debugger_cli_common.MenuItem('', command), 'bold']
    else:
        font_attr = 'bold'
    # Line 1: the command itself; line 2: its indented description.
    lines = [RL(indent_str) + RL(command, font_attr) + ':', indent_str + ' ' + description]
    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
Generate a RichTextLines object that describes a recommended command. Args: command: (str) The command to recommend. description: (str) A description of what the command does. indent: (int) How many spaces to indent in the beginning. create_link: (bool) Whether a command link is to be applied to the command string. Returns: (RichTextLines) Formatted text (with font attributes) for recommending the command.
github-repos
def ssh(container, cmd='', user='root', password='root'):
    """SSH into a running container, using the host as a jump host.

    Requires the container to have a running sshd process.

    Args:
        container: Container name or ID.
        cmd: Command to run in the container.
        user: SSH username.
        password: SSH password.
    """
    ip = get_ip(container)
    # Inner hop: host -> container, authenticated with sshpass.
    ssh_cmd = ("sshpass -p '%s' ssh -A -t -o StrictHostKeyChecking=no '%s'@%s" % (password, user, ip))
    # Outer hop: local machine -> fabric host, then run the inner ssh there.
    local(('ssh -A -t -o StrictHostKeyChecking=no -i "%s" %s@%s %s %s' % (env.key_filename, env.user, env.host, ssh_cmd, cmd)))
SSH into a running container, using the host as a jump host. This requires the container to have a running sshd process. Args: * container: Container name or ID * cmd='': Command to run in the container * user='root': SSH username * password='root': SSH password
codesearchnet
def AddSymbolicLink(self, path, linked_path):
    """Add a symbolic link to the fake file system.

    Args:
        path (str): path of the symbolic link within the fake file system.
        linked_path (str): path that is linked.

    Raises:
        ValueError: if the path is already set.
    """
    if self.file_system.FileEntryExistsByPath(path):
        raise ValueError('Path: {0:s} already set.'.format(path))
    # Ensure all intermediate directories exist before adding the link.
    self._AddParentDirectories(path)
    self.file_system.AddFileEntry(path, file_entry_type=definitions.FILE_ENTRY_TYPE_LINK, link_data=linked_path)
Adds a symbolic link to the fake file system. Args: path (str): path of the symbolic link within the fake file system. linked_path (str): path that is linked. Raises: ValueError: if the path is already set.
codesearchnet
def _compile_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:
    """Compile the expression ``expr`` into a TensorFluent.

    Args:
        expr: A RDDL expression.
        scope: A fluent scope mapping names to TensorFluents.
        batch_size: Optional batch size.
        noise: Optional list of noise tensors for reparameterization.

    Returns:
        TensorFluent: the compiled fluent.

    Raises:
        ValueError: if the expression type is unknown.
    """
    # Dispatch table: expression kind -> specialized compiler method.
    etype2compiler = {'constant': self._compile_constant_expression, 'pvar': self._compile_pvariable_expression, 'randomvar': self._compile_random_variable_expression, 'arithmetic': self._compile_arithmetic_expression, 'boolean': self._compile_boolean_expression, 'relational': self._compile_relational_expression, 'func': self._compile_function_expression, 'control': self._compile_control_flow_expression, 'aggregation': self._compile_aggregation_expression}
    etype = expr.etype
    if (etype[0] not in etype2compiler):
        raise ValueError('Expression type unknown: {}'.format(etype))
    # All ops must be created in the compiler's graph.
    with self.graph.as_default():
        compiler_fn = etype2compiler[etype[0]]
        return compiler_fn(expr, scope, batch_size, noise)
Compile the expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent.
codesearchnet
def from_path(cls, path, format=None):
    """Load a developer package (e.g. a package.yaml/package.py in a
    user's source directory).

    Args:
        path: Directory containing the package definition file, or the
            file path of the package file itself.
        format: which FileFormat to use, or None to check both .py and
            .yaml.

    Returns:
        `Package` object.

    Raises:
        PackageMetadataError: if no definition file is found or the data
            is malformed.
    """
    name = None
    data = None
    # Try both formats unless the caller pinned one.
    if (format is None):
        formats = (FileFormat.py, FileFormat.yaml)
    else:
        formats = (format,)
    try:
        mode = os.stat(path).st_mode
    except (IOError, OSError):
        raise PackageMetadataError(('Path %r did not exist, or was not accessible' % path))
    is_dir = stat.S_ISDIR(mode)
    for name_ in config.plugins.package_repository.filesystem.package_filenames:
        for format_ in formats:
            if is_dir:
                filepath = os.path.join(path, ('%s.%s' % (name_, format_.extension)))
                exists = os.path.isfile(filepath)
            else:
                # A concrete file was given: only accept a matching extension.
                if (format is None):
                    if (os.path.splitext(path)[1] != format_.extension):
                        continue
                filepath = path
                exists = True
            if exists:
                data = load_from_file(filepath, format_, disable_memcache=True)
                break
        if data:
            name = data.get('name')
            # NOTE(review): the `or isinstance(...)` clause is dead --
            # when name is None the isinstance is False too; likely
            # intended as `and`. Confirm before changing.
            if ((name is not None) or isinstance(name, basestring)):
                break
    if (data is None):
        raise PackageMetadataError(('No package definition file found at %s' % path))
    if ((name is None) or (not isinstance(name, basestring))):
        raise PackageMetadataError(("Error in %r - missing or non-string field 'name'" % filepath))
    package = create_package(name, data, package_cls=cls)
    # Give the package a chance to preprocess/replace its own data.
    result = package._get_preprocessed(data)
    if result:
        (package, data) = result
    package.filepath = filepath
    package.includes = set()

    # Collect source-code include names from anywhere in the data tree.
    # (Python 2 code: iteritems/basestring.)
    def visit(d):
        for (k, v) in d.iteritems():
            if isinstance(v, SourceCode):
                package.includes |= (v.includes or set())
            elif isinstance(v, dict):
                visit(v)
    visit(data)
    package._validate_includes()
    return package
Load a developer package. A developer package may for example be a package.yaml or package.py in a user's source directory. Args: path: Directory containing the package definition file, or file path for the package file itself format: which FileFormat to use, or None to check both .py and .yaml Returns: `Package` object.
codesearchnet
class CSVLogger(Callback):
    """Callback that streams epoch results to a CSV file.

    Supports all values that can be represented as a string, including 1D
    iterables such as `np.ndarray`.

    Args:
        filename: Filename of the CSV file, e.g. `'run/log.csv'`.
        separator: String used to separate elements in the CSV file.
        append: Boolean. True: append if file exists (useful for
            continuing training). False: overwrite existing file.

    Example:

    ```python
    csv_logger = CSVLogger('training.log')
    model.fit(X_train, Y_train, callbacks=[csv_logger])
    ```
    """

    def __init__(self, filename, separator=',', append=False):
        super().__init__()
        self.sep = separator
        self.filename = file_utils.path_to_string(filename)
        self.append = append
        self.writer = None  # csv.DictWriter, created lazily on first epoch
        self.keys = None  # sorted metric names, fixed after first epoch
        self.append_header = True
        self.csv_file = None

    def on_train_begin(self, logs=None):
        """Open the CSV file, appending or truncating per configuration."""
        if self.append:
            # Only write a header if the existing file is empty.
            if file_utils.exists(self.filename):
                with file_utils.File(self.filename, 'r') as f:
                    self.append_header = not bool(len(f.readline()))
            mode = 'a'
        else:
            mode = 'w'
        # Close any handle left over from a previous fit() call.
        if self.csv_file and (not self.csv_file.closed):
            self.csv_file.close()
        self.csv_file = file_utils.File(self.filename, mode)
        self.writer = None
        self.keys = None

    def on_epoch_end(self, epoch, logs=None):
        """Write one CSV row containing the epoch's metrics."""
        logs = logs or {}

        def handle_value(k):
            # 0-d arrays must be treated as scalars, not iterables.
            is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
            if isinstance(k, str):
                return k
            elif isinstance(k, collections.abc.Iterable) and (not is_zero_dim_ndarray):
                return f'"[{', '.join(map(str, k))}]"'
            else:
                return k

        if self.keys is None:
            self.keys = sorted(logs.keys())
            # If validation metrics are absent on the first epoch (e.g.
            # validation_freq > 1), reserve their columns up front so the
            # header stays consistent across epochs.
            val_keys_found = False
            for key in self.keys:
                if key.startswith('val_'):
                    val_keys_found = True
                    break
            if not val_keys_found and self.keys:
                self.keys.extend(['val_' + k for k in self.keys])
        if not self.writer:

            class CustomDialect(csv.excel):
                delimiter = self.sep

            fieldnames = ['epoch'] + (self.keys or [])
            self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames, dialect=CustomDialect)
            if self.append_header:
                self.writer.writeheader()
        row_dict = collections.OrderedDict({'epoch': epoch})
        # Missing metrics (e.g. skipped validation) are logged as 'NA'.
        row_dict.update(((key, handle_value(logs.get(key, 'NA'))) for key in self.keys))
        self.writer.writerow(row_dict)
        self.csv_file.flush()

    def on_train_end(self, logs=None):
        """Close the CSV file and drop the writer."""
        if self.csv_file and (not self.csv_file.closed):
            self.csv_file.close()
        self.writer = None
Callback that streams epoch results to a CSV file. Supports all values that can be represented as a string, including 1D iterables such as `np.ndarray`. Args: filename: Filename of the CSV file, e.g. `'run/log.csv'`. separator: String used to separate elements in the CSV file. append: Boolean. True: append if file exists (useful for continuing training). False: overwrite existing file. Example: ```python csv_logger = CSVLogger('training.log') model.fit(X_train, Y_train, callbacks=[csv_logger]) ```
github-repos
def __init__(self, size, dropout=None, named_tensors=None, scope='lstm', summary_labels=(), return_final_state=True):
    """LSTM layer.

    Args:
        size: LSTM size (number of units).
        dropout: Dropout rate, or None to disable dropout.
        named_tensors: Optional dict of named tensors shared with the
            parent network.
        scope: TensorFlow scope name for this layer.
        summary_labels: Labels of summaries to record.
        return_final_state: Whether to return the final state rather than
            the full output sequence.
    """
    self.size = size
    self.dropout = dropout
    self.return_final_state = return_final_state
    super(Lstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
LSTM layer. Args: size: LSTM size. dropout: Dropout rate.
juraj-google-style
def make_batches(size, batch_size):
    """Return a list of (start, end) batch index tuples covering ``size``.

    Args:
        size: Integer, total size of the data to slice into batches.
        batch_size: Integer, batch size.

    Returns:
        A list of tuples of array indices; the last batch may be short.
    """
    num_batches = int(np.ceil(size / float(batch_size)))
    batches = []
    for idx in range(num_batches):
        start = idx * batch_size
        batches.append((start, min(size, start + batch_size)))
    return batches
Returns a list of batch indices (tuples of indices). Args: size: Integer, total size of the data to slice into batches. batch_size: Integer, batch size. Returns: A list of tuples of array indices.
github-repos
def assignSeasonSchedule(self, season, month, day, schedule):
    """Define a single season and assign a schedule.

    Args:
        season (int): A :class:`~ekmmeters.Seasons` value or in
            range(Extent.Seasons).
        month (int): Month 1-12.
        day (int): Day 1-31.
        schedule (int): A schedule value or in range(Extent.Schedules).

    Returns:
        bool: True on success; False on out-of-bounds input or a missing
        parameter-table index.
    """
    # Meter registers are 1-based; the public API is 0-based.
    season += 1
    schedule += 1
    if ((season < 1) or (season > Extents.Seasons) or (schedule < 1) or (schedule > Extents.Schedules) or (month > 12) or (month < 0) or (day < 0) or (day > 31)):
        ekm_log(((((((('Out of bounds: month ' + str(month)) + ' day ') + str(day)) + ' schedule ') + str(schedule)) + ' season ') + str(season)))
        return False
    # NOTE(review): idx_mon is built from '_Start_Day' and idx_day from
    # '_Start_Month', so month is stored under the *_Start_Day key and
    # day under *_Start_Month. This looks swapped -- confirm against the
    # meter register map before changing, since reader code may
    # compensate for the same swap.
    idx_mon = (('Season_' + str(season)) + '_Start_Day')
    idx_day = (('Season_' + str(season)) + '_Start_Month')
    idx_schedule = (('Season_' + str(season)) + '_Schedule')
    if (idx_mon not in self.m_seasons_sched_params):
        ekm_log(('Incorrect index: ' + idx_mon))
        return False
    if (idx_day not in self.m_seasons_sched_params):
        ekm_log(('Incorrect index: ' + idx_day))
        return False
    if (idx_schedule not in self.m_seasons_sched_params):
        ekm_log(('Incorrect index: ' + idx_schedule))
        return False
    self.m_seasons_sched_params[idx_mon] = month
    self.m_seasons_sched_params[idx_day] = day
    self.m_seasons_sched_params[idx_schedule] = schedule
    return True
Define a single season and assign a schedule Args: season (int): A :class:`~ekmmeters.Seasons` value or in range(Extent.Seasons). month (int): Month 1-12. day (int): Day 1-31. schedule (int): A :class:`~ekmmeters.LCDItems` value or in range(Extent.Schedules). Returns: bool: True on completion and ACK.
codesearchnet
def convex_hull_collide(nodes1, nodes2):
    """Determine if the convex hulls of two curves collide.

    .. note:: This is a helper for :func:`from_linearized`.

    Args:
        nodes1 (numpy.ndarray): Control points of a first curve.
        nodes2 (numpy.ndarray): Control points of a second curve.

    Returns:
        bool: Indicating if the convex hulls collide.
    """
    polygon1 = _helpers.simple_convex_hull(nodes1)
    (_, polygon_size1) = polygon1.shape
    polygon2 = _helpers.simple_convex_hull(nodes2)
    (_, polygon_size2) = polygon2.shape
    # Two-point "hulls" are degenerate (line segments): use the cheaper
    # segment-segment intersection test.
    if ((polygon_size1 == 2) and (polygon_size2 == 2)):
        return line_line_collide(polygon1, polygon2)
    else:
        return _helpers.polygon_collide(polygon1, polygon2)
Determine if the convex hulls of two curves collide. .. note:: This is a helper for :func:`from_linearized`. Args: nodes1 (numpy.ndarray): Control points of a first curve. nodes2 (numpy.ndarray): Control points of a second curve. Returns: bool: Indicating if the convex hulls collide.
codesearchnet
def __update_cleanup_paths(new_path):
    """Register ``new_path`` in the list of paths cleaned up afterwards.

    Args:
        new_path: Path to the directory that needs to be cleaned up.
    """
    # Round-trip through a set to deduplicate before storing back.
    paths = set(settings.CFG["cleanup_paths"].value)
    paths.add(new_path)
    settings.CFG["cleanup_paths"] = list(paths)
Add the new path to the list of paths to clean up afterwards. Args: new_path: Path to the directory that need to be cleaned up.
juraj-google-style
def download_image(self, handle, dest):
    """Copy the image identified by ``handle`` to ``dest``.

    Args:
        handle (str): path (resolved via this store's prefix) to copy from
        dest (str): destination path to copy to

    Returns:
        None
    """
    shutil.copyfile(self._prefixed(handle), dest)
Copies over the handl to the destination Args: handle (str): path to copy over dest (str): path to copy to Returns: None
juraj-google-style
def tool(name):
    """Decorator that registers a lint tool under *name* in ``g_tools``.

    Args:
        name (str): The name used to identify the tool in `pelconf.yaml`.
    """
    global g_tools

    def register(fn):
        # Record the tool in the module registry; return fn unchanged so
        # the decorated function stays directly callable.
        g_tools[name] = fn
        return fn

    return register
Decorator for defining lint tools. Args: name (str): The name of the tool. This name will be used to identify the tool in `pelconf.yaml`.
codesearchnet
def get_all_instances(include_fastboot=False):
    """Create AndroidDevice instances for all attached android devices.

    Args:
        include_fastboot: Whether to include devices in bootloader mode.

    Returns:
        A list of AndroidDevice objects, one per attached device.
    """
    if include_fastboot:
        # Fastboot devices are enumerated separately from adb devices.
        serial_list = list_adb_devices() + list_fastboot_devices()
        return get_instances(serial_list)
    return get_instances(list_adb_devices())
Create AndroidDevice instances for all attached android devices. Args: include_fastboot: Whether to include devices in bootloader mode or not. Returns: A list of AndroidDevice objects each representing an android device attached to the computer.
github-repos
def _produce_posterior_estimate(posterior_dist, posterior_estimate_mode, raw_var_name):
    """Create a tensor representing an estimate of the posterior.

    Args:
        posterior_dist: A `tfp.distributions.Distribution` -- the
            variational posterior to estimate from.
        posterior_estimate_mode: A string `Tensor` selecting the inference
            mode (sample / mean / last_sample).
        raw_var_name: The name of the variable over which inference is
            done.

    Returns:
        `z_sample`, a `Tensor` estimate derived from the posterior.
    """
    # One condition tensor per supported estimator mode.
    conds = [tf.equal(posterior_estimate_mode, tf.constant(EstimatorModes.sample), name='equal_sample_mode'), tf.equal(posterior_estimate_mode, tf.constant(EstimatorModes.mean), name='equal_mean_mode'), tf.equal(posterior_estimate_mode, tf.constant(EstimatorModes.last_sample), name='equal_last_sample_mode')]
    results = [(lambda : posterior_dist.sample()), (lambda : posterior_dist.mean()), (lambda : posterior_dist.last_sample())]

    def default_case_branch_raising_error():
        # tf.case requires a default branch; make it fail loudly at runtime.
        err_msg = 'Invalid posterior estimate mode.'
        raise_err = tf.Assert(tf.constant(False), data=[tf.constant(err_msg)])
        with tf.control_dependencies([raise_err]):
            return posterior_dist.mean()

    # Only offer 'last_sample' when the distribution supports it.
    if hasattr(posterior_dist, 'last_sample'):
        cases = {conds[0]: results[0], conds[1]: results[1], conds[2]: results[2]}
    else:
        cases = {conds[0]: results[0], conds[1]: results[1]}
    z_sample = tf.case(cases, exclusive=True, default=default_case_branch_raising_error, name='{}_posterior_estimate'.format(raw_var_name))
    return z_sample
Create tensor representing estimate of posterior. Args: posterior_dist: An instance of `tfp.distributions.Distribution`. The variational posterior from which to produce an estimate of the variable in question. posterior_estimate_mode: A `Tensor` of dtype `tf.string`, which determines the inference mode. raw_var_name: The name of the variable over which inference is done. Returns: `z_sample`, a `Tensor` representing an estimate derived from the posterior distribution.
codesearchnet
def datetime_string(day, month, year, hour, minute):
    """Build a datetime string from the provided components.

    Single-digit month/day/hour/minute values are zero-padded. An
    out-of-range ``hour`` or ``minute`` is silently replaced with 0,
    preserving the original best-effort behavior.

    Args:
        day (int): Day number.
        month (int): Month number.
        year (int): Year number.
        hour (int): Hour of the day in 24h format (0-23).
        minute (int): Minute of the hour (0-59).

    Returns:
        str: Date in the format ``YYYY-MM-DDThh:mm:00``.
    """
    if hour < 0 or hour > 23:
        hour = 0
    # Bug fix: minute == 60 is not a valid minute. The previous check
    # used `minute > 60`, letting ':60:00' slip through.
    if minute < 0 or minute >= 60:
        minute = 0
    return '%d-%02d-%02dT%02d:%02d:00' % (year, month, day, hour, minute)
Build a date string using the provided day, month, year numbers. Automatically adds a leading zero to ``day`` and ``month`` if they only have one digit. Args: day (int): Day number. month(int): Month number. year(int): Year number. hour (int): Hour of the day in 24h format. minute (int): Minute of the hour. Returns: str: Date in the format *YYYY-MM-DDThh:mm:ss*.
codesearchnet
def process_entry(self, entry):
    """Process a single entry with the chosen Corrections.

    Args:
        entry: A ComputedEntry object.

    Returns:
        The adjusted entry if it is compatible, otherwise None.
    """
    try:
        corrections = self.get_corrections_dict(entry)
    except CompatibilityError:
        # Incompatible entries are dropped rather than corrected.
        return None
    entry.correction = sum(corrections.values())
    return entry
Process a single entry with the chosen Corrections. Args: entry: A ComputedEntry object. Returns: An adjusted entry if entry is compatible, otherwise None is returned.
juraj-google-style
def trace_min_buffer_capacity(self):
    """Retrieve the minimum capacity the trace buffer can be configured with.

    Returns:
        The minimum configurable capacity for the trace buffer.

    Raises:
        JLinkException: if the DLL call fails.
    """
    cmd = enums.JLinkTraceCommand.GET_MIN_CAPACITY
    data = ctypes.c_uint32(0)
    # The DLL writes the capacity into `data` and returns 1 on failure.
    res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
    if (res == 1):
        raise errors.JLinkException('Failed to get min trace buffer size.')
    return data.value
Retrieves the minimum capacity the trace buffer can be configured with. Args: self (JLink): the ``JLink`` instance. Returns: The minimum configurable capacity for the trace buffer.
juraj-google-style
def contains_method(self, method):
    """Check whether at least one route handles *method*.

    Arguments:
        method (str): HTTP method name, e.g. GET, POST, etc.

    Returns:
        bool: ``True`` if any literal, wildcard or regex route is defined
        for *method*, ``False`` otherwise.
    """
    registered = itertools.chain(self._literal, self._wildcard, self._regex)
    return method in registered
Check if there is at least one handler for *method*. Arguments: method (str): HTTP method name, e.g. GET, POST, etc. Returns: ``True`` if there is at least one route defined for *method*, ``False`` otherwise
juraj-google-style
def until(coro, coro_test, assert_coro=None, *args, **kw):
    """Repeatedly call `coro` until `coro_test` yields a truthy value.

    This is the inverse of `paco.whilst()`: iteration continues while the
    (asserted) test result indicates "not done".

    Arguments:
        coro (coroutinefunction): coroutine function to execute.
        coro_test (coroutinefunction): coroutine function to test.
        assert_coro (coroutinefunction): optional assertion coroutine used
            to decide whether the test passed; defaults to negation.
        *args (mixed): optional variadic arguments passed to `coro`.

    Raises:
        TypeError: if input arguments are invalid.

    Returns:
        list: result values returned by `coro`.
    """
    if assert_coro is None:
        # Bug fix: the default negation coroutine previously shadowed the
        # `assert_coro` parameter unconditionally, so a caller-supplied
        # assertion was silently ignored. Only install the default when
        # the caller did not provide one.
        # NOTE: asyncio.coroutine was removed in Python 3.11; this module
        # targets legacy generator-based coroutines.
        @asyncio.coroutine
        def assert_coro(value):
            return (not value)
    return (yield from whilst(coro, coro_test, *args, assert_coro=assert_coro, **kw))
Repeatedly call `coro` coroutine function until `coro_test` returns `True`. This function is the inverse of `paco.whilst()`. This function is a coroutine. Arguments: coro (coroutinefunction): coroutine function to execute. coro_test (coroutinefunction): coroutine function to test. assert_coro (coroutinefunction): optional assertion coroutine used to determine if the test passed or not. *args (mixed): optional variadic arguments to pass to `coro` function. Raises: TypeError: if input arguments are invalid. Returns: list: result values returned by `coro`. Usage:: calls = 0 async def task(): nonlocal calls calls += 1 return calls async def calls_gt_4(): return calls > 4 await paco.until(task, calls_gt_4) # => [1, 2, 3, 4, 5]
codesearchnet
def from_bigquery(sql):
    """Create a Metrics instance from a BigQuery query or table.

    Args:
        sql: A BigQuery table name, a SQL query string, or a bq.Query.

    Returns:
        a Metrics instance.
    """
    if isinstance(sql, bq.Query):
        sql = sql._expanded_sql()
    parts = sql.split('.')
    # Heuristic: 2-3 dot-separated parts with no spaces is a table name
    # (quote with backticks); anything else is treated as a query and
    # wrapped in parentheses so it can be used as a subquery.
    if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):
        sql = '(' + sql + ')'
    else:
        sql = '`' + sql + '`'
    metrics = Metrics(bigquery=sql)
    return metrics
Create a Metrics instance from a bigquery query or table. Returns: a Metrics instance. Args: sql: A BigQuery table name or a query.
juraj-google-style
def invalid_fields(self, data, original_data):
    """Validator that rejects keys not declared in the schema.

    Args:
        data: the loaded/validated data (unused here).
        original_data: the raw input whose keys are checked against
            ``self.fields``.

    Raises:
        marshmallow.ValidationError: if extra keys exist in the input.
    """
    errors = []
    for field in original_data:
        # Container "keys" can't be schema field names; skip them.
        if isinstance(field, (set, list, tuple, dict)):
            continue
        if (field not in self.fields.keys()):
            errors.append(field)
    if errors:
        raise ValidationError('Invalid field', field_names=errors)
Validator that checks if any keys provided aren't in the schema. Say your schema has support for keys ``a`` and ``b`` and the data provided has keys ``a``, ``b``, and ``c``. When the data is loaded into the schema, a :class:`marshmallow.ValidationError` will be raised informing the developer that excess keys have been provided. Raises: marshmallow.ValidationError: Raised if extra keys exist in the passed in data.
codesearchnet
def abs(cls, x: 'TensorFluent') -> 'TensorFluent':
    """Returns a TensorFluent for the abs function.

    Args:
        x: The input fluent.

    Returns:
        A TensorFluent wrapping the abs function.
    """
    # Delegates to the generic unary-op builder with tf.abs; tf.float32 is
    # passed as the dtype argument — presumably the output dtype used by
    # _unary_op (confirm against its definition).
    return cls._unary_op(x, tf.abs, tf.float32)
Returns a TensorFluent for the abs function. Args: x: The input fluent. Returns: A TensorFluent wrapping the abs function.
juraj-google-style
def absl_to_cpp(level):
    """Converts an absl log level to a cpp log level.

    Args:
        level: int, an absl.logging level.

    Raises:
        TypeError: Raised when level is not an integer.

    Returns:
        The corresponding integer level for use in Abseil C++.
    """
    if not isinstance(level, int):
        raise TypeError('Expect an int level, found {}'.format(type(level)))
    # Every non-negative absl level maps to cpp level 0; negative (verbose)
    # levels map to their magnitude.
    return max(0, -level)
Converts an absl log level to a cpp log level. Args: level: int, an absl.logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in Abseil C++.
codesearchnet
def get_timestamped_export_dir(export_dir_base):
    """Builds a path to a new subdirectory within the base directory.

    Each export is written into a new subdirectory named using the current
    time; the timestamp is the number of seconds since epoch UTC, which
    yields monotonically increasing version numbers across pipeline runs.

    Args:
        export_dir_base: A string containing a directory to write the
            exported graph and checkpoints.

    Returns:
        The full path of the new subdirectory (which is not actually
        created yet).

    Raises:
        RuntimeError: if repeated attempts fail to obtain a unique
            timestamped directory name.
    """
    attempts = 0
    while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
        export_timestamp = int(time.time())
        export_dir = os.path.join(
            tf.compat.as_bytes(export_dir_base),
            tf.compat.as_bytes(str(export_timestamp)))
        if not tf_v1.gfile.Exists(export_dir):
            # NOTE(review): a race is still possible if another worker
            # creates this directory between the check and the caller's
            # mkdir; retrying each second makes a collision unlikely.
            return export_dir
        time.sleep(1)
        attempts += 1
        logging.warn('Export directory %s already exists; retrying (attempt %d/%d)',
                     export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS)
    # Bug fix: the original wrote `'...'.MAX_DIRECTORY_CREATION_ATTEMPTS`
    # (attribute access instead of the % formatting operator), which raised
    # AttributeError instead of the documented RuntimeError.
    raise RuntimeError(
        'Failed to obtain a unique export directory name after %d attempts.'
        % MAX_DIRECTORY_CREATION_ATTEMPTS)
Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name.
codesearchnet
def get_datasets(self):
    """Get any datasets in the showcase.

    Returns:
        List[Dataset]: List of datasets
    """
    assoc_result, datasets_dicts = self._read_from_hdx(
        'showcase', self.data['id'], fieldname='showcase_id',
        action=self.actions()['list_datasets'])
    if not assoc_result:
        return []
    # Wrap each raw dict in a Dataset bound to this object's configuration.
    return [hdx.data.dataset.Dataset(dataset_dict, configuration=self.configuration)
            for dataset_dict in datasets_dicts]
Get any datasets in the showcase Returns: List[Dataset]: List of datasets
codesearchnet
def unique():
    """Creates a `Dataset` from another `Dataset`, discarding duplicates.

    Use this transformation to produce a dataset that contains one
    instance of each unique element in the input, e.g.:

    ```python
    dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
    dataset = dataset.apply(tf.data.experimental.unique())  # ==> { 1, 37, 2 }
    ```

    Returns:
        A `Dataset` transformation function, which can be passed to
        `tf.data.Dataset.apply`.
    """
    # The transformation simply forwards to the dataset's own unique().
    return lambda dataset: dataset.unique()
Creates a `Dataset` from another `Dataset`, discarding duplicates. Use this transformation to produce a dataset that contains one instance of each unique element in the input. For example: ```python dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1]) # Using `unique()` will drop the duplicate elements. dataset = dataset.apply(tf.data.experimental.unique()) # ==> { 1, 37, 2 } ``` Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`.
github-repos
def compute_capability_from_device_desc(device_attrs):
    """Returns the GpuInfo given a DeviceAttributes proto.

    Args:
        device_attrs: A DeviceAttributes proto.

    Returns:
        A GpuInfo tuple. Both fields are None if `device_attrs` does not
        have a valid physical_device_desc field.
    """
    match = _PHYSICAL_DEVICE_DESCRIPTION_REGEX.search(
        device_attrs.physical_device_desc)
    if match is None:
        return GpuInfo(None, None)
    # Groups 2/3 carry the compute capability major/minor when present.
    if match.group(2):
        compute_capability = (int(match.group(2)), int(match.group(3)))
    else:
        compute_capability = None
    return GpuInfo(match.group(1), compute_capability)
Returns the GpuInfo given a DeviceAttributes proto. Args: device_attrs: A DeviceAttributes proto. Returns: A GpuInfo tuple. Both fields are None if `device_attrs` does not have a valid physical_device_desc field.
github-repos
def __extend_with_api_ref(raw_testinfo):
    """Extend the raw test info with the referenced API definition.

    Args:
        raw_testinfo: mapping holding an 'api' reference; extended in place
            with an 'api_def' deep copy of the referenced definition block.

    Raises:
        exceptions.ApiNotFound: api not found
    """
    api_name = raw_testinfo['api']
    # A relative reference may also name a file under the project root.
    if not os.path.isabs(api_name):
        candidate = os.path.join(tests_def_mapping['PWD'], *api_name.split('/'))
        if os.path.isfile(candidate):
            api_name = candidate
    try:
        block = tests_def_mapping['api'][api_name]
        raw_testinfo['api_def'] = utils.deepcopy_dict(block)
    except KeyError:
        raise exceptions.ApiNotFound('{} not found!'.format(api_name))
Extend the raw test info with the referenced API definition. Raises: exceptions.ApiNotFound: if the referenced API cannot be found.
codesearchnet
def install_hook(self, hook_name, hook_content):
    """Install the repository hook for this repo.

    Args:
        hook_name (str): file name of the hook, e.g. 'pre-commit'.
        hook_content (str): full text written to the hook file.
    """
    target = os.path.join(self.path, '.git/hooks', hook_name)
    with open(target, 'w') as hook_file:
        hook_file.write(hook_content)
    # Owner read/write plus execute, so git will actually run the hook.
    os.chmod(target, stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE)
Install the repository hook for this repo. Args: hook_name (str) hook_content (str)
juraj-google-style
def reveal_undocumented(symbol_name, target_module=None):
    """Reveals a symbol that was previously removed by `remove_undocumented`.

    This should be used by tensorflow internal tests only. It explicitly
    defeats the encapsulation afforded by `remove_undocumented`. It throws
    an exception when the symbol was not hidden in the first place.

    Args:
        symbol_name: a string representing the full absolute path of the
            symbol.
        target_module: if specified, the module in which to restore the
            symbol.
    """
    try:
        original_module, attr_value = _HIDDEN_ATTRIBUTES[symbol_name]
    except KeyError:
        raise LookupError('Symbol %s is not a hidden symbol' % symbol_name)
    basename = symbol_name.split('.')[-1]
    # By default, restore the symbol on the module it was hidden from.
    setattr(target_module or original_module, basename, attr_value)
Reveals a symbol that was previously removed by `remove_undocumented`. This should be used by tensorflow internal tests only. It explicitly defeats the encapsulation afforded by `remove_undocumented`. It throws an exception when the symbol was not hidden in the first place. Args: symbol_name: a string representing the full absolute path of the symbol. target_module: if specified, the module in which to restore the symbol.
github-repos
def show_bokehjs(bokehjs_action, develop=False):
    """Print a useful report after setuptools output describing where and how
    BokehJS is installed.

    Args:
        bokehjs_action (str) :
            one of 'built', 'installed', or 'packaged'
            how (or if) BokehJS was installed into the python source tree

        develop (bool, optional) :
            whether the command was for "develop" mode (default: False)

    Returns:
        None
    """
    print()
    print("Installed Bokeh for DEVELOPMENT:" if develop else "Installed Bokeh:")
    if bokehjs_action in ('built', 'installed'):
        freshness = bright(yellow("NEWLY")) if bokehjs_action == 'built' else bright(yellow("PREVIOUSLY"))
        print(" - using %s built BokehJS from bokehjs/build\n" % freshness)
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
    print()
Print a useful report after setuptools output describing where and how BokehJS is installed. Args: bokehjs_action (str) : one of 'built', 'installed', or 'packaged' how (or if) BokehJS was installed into the python source tree develop (bool, optional) : whether the command was for "develop" mode (default: False) Returns: None
juraj-google-style
def set_position_i(self, ivalue):
    """Set the I gain of the position PID.

    Args:
        ivalue (int): I value
    """
    value = int(ivalue)
    low_byte = value & 255
    high_byte = value >> 8
    # RAM write request: packet size, servo id, command, target register,
    # byte count, then the gain value low byte first.
    packet = [11, self.servoid, RAM_WRITE_REQ, POSITION_KI_RAM,
              BYTE2, low_byte, high_byte]
    send_data(packet)
Set the I gain of the position PID Args: ivalue (int): I value
codesearchnet
def chosen_angle_to_half_turns(
        half_turns: Optional[Union[sympy.Basic, float]] = None,
        rads: Optional[float] = None,
        degs: Optional[float] = None,
        default: float = 1.0,
) -> Union[sympy.Basic, float]:
    """Returns a half_turns value based on the given arguments.

    At most one of half_turns, rads, degs must be specified. If none are
    specified, the output defaults to half_turns=1.

    Args:
        half_turns: The number of half turns to rotate by.
        rads: The number of radians to rotate by.
        degs: The number of degrees to rotate by.
        default: The half turns angle to use if nothing else is specified.

    Returns:
        A number of half turns.
    """
    given = [value for value in (half_turns, rads, degs) if value is not None]
    if len(given) > 1:
        raise ValueError('Redundant angle specification. '
                         'Use ONE of half_turns, rads, or degs.')
    # Convert whichever representation was supplied into half turns.
    if rads is not None:
        return rads / np.pi
    if degs is not None:
        return degs / 180
    return default if half_turns is None else half_turns
Returns a half_turns value based on the given arguments. At most one of half_turns, rads, degs must be specified. If none are specified, the output defaults to half_turns=1. Args: half_turns: The number of half turns to rotate by. rads: The number of radians to rotate by. degs: The number of degrees to rotate by default: The half turns angle to use if nothing else is specified. Returns: A number of half turns.
juraj-google-style
def probability_density(self, X):
    """Compute probability density function for given copula family.

    Args:
        X: `np.ndarray`

    Returns:
        np.array: Probability density for the input values.
    """
    self.check_fit()
    U, V = self.split_matrix(X)
    theta = self.theta
    # Density factors: product term, survival-style base, and its exponent.
    product_term = (theta + 1) * np.power(np.multiply(U, V), -(theta + 1))
    base = np.power(U, -theta) + np.power(V, -theta) - 1
    exponent = -(2 * theta + 1) / theta
    return product_term * np.power(base, exponent)
Compute probability density function for given copula family. Args: X: `np.ndarray` Returns: np.array: Probability density for the input values.
juraj-google-style