code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def pcoll_to_pcoll_id(pipeline, original_context):
    """Build a mapping from str(PCollection) to PCollection ID.

    Visits every node in the pipeline and records the ID of each output
    PCollection; the mapping is later used to query cached PCollections.

    Args:
        pipeline: The pipeline whose transforms are visited.
        original_context: Pipeline context used to look up PCollection IDs.

    Returns:
        dict: maps str(pcoll) to pcoll_id.
    """
    class PCollVisitor(PipelineVisitor):

        def __init__(self):
            self.pcoll_to_pcoll_id = {}

        def enter_composite_transform(self, transform_node):
            self.visit_transform(transform_node)

        def visit_transform(self, transform_node):
            for pcoll in transform_node.outputs.values():
                self.pcoll_to_pcoll_id[str(pcoll)] = (
                    original_context.pcollections.get_id(pcoll))

    visitor = PCollVisitor()
    pipeline.visit(visitor)
    return visitor.pcoll_to_pcoll_id
Returns a dict mapping PCollections string to PCollection IDs. Using a PipelineVisitor to iterate over every node in the pipeline, records the mapping from PCollections to PCollections IDs. This mapping will be used to query cached PCollections. Returns: (dict from str to str) a dict mapping str(pcoll) to pcoll_id.
github-repos
def get_single_upstream_artifact_full_path(context, task_id, path):
    """Return the absolute path where an upstream artifact should be located.

    The artifact may not actually exist on disk; use
    ``get_and_check_single_upstream_artifact_full_path()`` to verify
    existence. This function is mainly used to move artifacts to the
    expected location.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        task_id (str): the task id of the task that published the artifact.
        path (str): the relative path of the artifact.

    Returns:
        str: absolute path where the artifact should be.
    """
    cot_dir = os.path.join(context.config['work_dir'], 'cot', task_id)
    return os.path.abspath(os.path.join(cot_dir, path))
Return the full path where an upstream artifact should be located. Artifact may not exist. If you want to be sure if does, use ``get_and_check_single_upstream_artifact_full_path()`` instead. This function is mainly used to move artifacts to the expected location. Args: context (scriptworker.context.Context): the scriptworker context. task_id (str): the task id of the task that published the artifact path (str): the relative path of the artifact Returns: str: absolute path to the artifact should be.
codesearchnet
def is_lambda(fun):
    """Check whether the given function is a lambda function.

    Args:
        fun (function): the object to check.

    Returns:
        bool: True if ``fun`` is a lambda function, False otherwise.
    """
    import types
    # types.LambdaType is an alias of FunctionType; lambdas are only
    # distinguishable from regular functions by their fixed __name__.
    # This replaces the previous comparison against a module-level LAMBDA
    # sample object with the stdlib equivalent.
    return isinstance(fun, types.LambdaType) and fun.__name__ == '<lambda>'
Check whether the given function is a lambda function. .. testsetup:: from proso.func import is_lambda .. testcode:: def not_lambda_fun(): return 1 lambda_fun = lambda: 1 print( is_lambda(not_lambda_fun), is_lambda(lambda_fun) ) .. testoutput:: False True Args: fun (function) Returns: bool: True if the given function is a lambda function, False otherwise
codesearchnet
def upload_files(tree, file_diff):
    """Upload new files to Kolibri Studio and retry any failed uploads.

    Args:
        tree (ChannelManager): manager handling communication to Kolibri
            Studio.
        file_diff ([str]): list of files to upload.

    Returns:
        [str]: the ``file_diff`` list that was passed in.
    """
    config.LOGGER.info("\nUploading {0} new file(s) to Kolibri Studio...".format(len(file_diff)))
    tree.upload_files(file_diff)
    tree.reattempt_upload_fails()
    return file_diff
upload_files: Upload files to Kolibri Studio Args: tree (ChannelManager): manager to handle communication to Kolibri Studio file_diff ([str]): list of files to upload Returns: [str]: the list of files that was uploaded
juraj-google-style
def validate_task_schema(context, schema_key='schema_file'):
    """Validate the task definition against a JSON schema.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
            It must contain a task and the config pointing to the schema
            file.
        schema_key (str): the key in ``context.config`` where the path to
            the schema file is. May contain dots (e.g. 'schema_files.file_a')
            to address nested config entries.

    Raises:
        TaskVerificationError: if the task doesn't match the schema.
    """
    # Walk dotted keys down into the (possibly nested) config dict.
    schema_path = context.config
    for key in schema_key.split('.'):
        schema_path = schema_path[key]
    task_schema = load_json_or_yaml(schema_path, is_path=True)
    log.debug('Task is validated against this schema: {}'.format(task_schema))
    try:
        validate_json_schema(context.task, task_schema)
    except ScriptWorkerTaskException as e:
        raise TaskVerificationError(
            'Cannot validate task against schema. Task: {}.'.format(context.task)
        ) from e
Validate the task definition. Args: context (scriptworker.context.Context): the scriptworker context. It must contain a task and the config pointing to the schema file schema_key: the key in `context.config` where the path to the schema file is. Key can contain dots (e.g.: 'schema_files.file_a'), in which case the nested config entries are looked up in order. Raises: TaskVerificationError: if the task doesn't match the schema
juraj-google-style
def _philox_scramble_seed(seed):
    """Determines the key and counter for Philox PRNG with the given seed.

    Args:
        seed: An integer tensor of shape [2]. The seed to calculate the key
            and counter from.

    Returns:
        A pair (key, counter) suitable for V2 stateless RNG ops like
        `StatelessRandomUniformV2`.
    """
    mix_key = constant_op.constant([163851598941452064], dtypes.uint64)
    mix_counter = math_ops.cast(seed, dtypes.uint64)
    # Draw 4 uniform uint32s from Philox and fold them into a fresh
    # key/counter pair.
    mix = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(
        [4], key=mix_key, counter=mix_counter, dtype=dtypes.uint32,
        alg=Algorithm.PHILOX.value)
    key = array_ops.reshape(_uint32s_to_uint64(mix[:2]), [1])
    counter = array_ops_stack.stack([0, _uint32s_to_uint64(mix[2:])], axis=0)
    return (key, counter)
Determines the key and counter for Philox PRNG with the given seed. Args: seed: An integer tensor of shape [2]. The seed to calculate the key and counter from. Returns: A pair (key, counter) suitable for V2 stateless RNG ops like `StatelessRandomUniformV2`.
github-repos
def getenv(key, value=None):
    """Like `os.getenv` but returns unicode under Windows + Python 2.

    Args:
        key (pathlike): The env var to get.
        value (object): The value to return if the env var does not exist.

    Returns:
        `fsnative` or `object`: The env var, or the passed default if it
        doesn't exist.
    """
    key = path2fsn(key)
    # On Windows + Python 2, os.getenv returns bytes; go through the
    # unicode-aware environ wrapper instead.
    if is_win and PY2:
        return environ.get(key, value)
    return os.getenv(key, value)
Like `os.getenv` but returns unicode under Windows + Python 2 Args: key (pathlike): The env var to get value (object): The value to return if the env var does not exist Returns: `fsnative` or `object`: The env var or the passed value if it doesn't exist
codesearchnet
def composition_prediction(self, composition, to_this_composition=True):
    """Return charge-balanced substitutions from a starting or ending composition.

    Args:
        composition: starting or ending composition.
        to_this_composition: If True, substitutions with this as a final
            composition will be found. If False, substitutions with this as
            a starting composition will be found (these are slightly
            different).

    Returns:
        List of predictions in the form of dictionaries. If
        to_this_composition is True, the values of the dictionary will be
        from the list species; if False, the keys will be from that list.
    """
    predictions = self.list_prediction(list(composition.keys()),
                                       to_this_composition)
    output = []
    for prediction in predictions:
        if to_this_composition:
            # Invert the substitution map so lookups below are keyed on the
            # species of the input composition.
            subs = {v: k for k, v in prediction['substitutions'].items()}
        else:
            subs = prediction['substitutions']
        charge = sum(subs[k].oxi_state * v for k, v in composition.items())
        # Keep only (numerically) charge-neutral substitutions.
        if abs(charge) < 1e-08:
            output.append(prediction)
    logging.info('{} charge balanced substitutions found'.format(len(output)))
    return output
Returns charged balanced substitutions from a starting or ending composition. Args: composition: starting or ending composition to_this_composition: If true, substitutions with this as a final composition will be found. If false, substitutions with this as a starting composition will be found (these are slightly different) Returns: List of predictions in the form of dictionaries. If to_this_composition is true, the values of the dictionary will be from the list species. If false, the keys will be from that list.
codesearchnet
def GenerateMemoryReport(metagraph, detailed_report=True, cluster=None):
    """Analyze the peak memory usage for the provided metagraph.

    Args:
        metagraph: A TensorFlow MetaGraphDef.
        detailed_report: print the live tensors in addition to the peak
            memory usage.
        cluster: Analyze the memory using the specified cluster, or the
            local machine if no cluster was specified.

    Returns:
        A string with the formatted memory usage.
    """
    if cluster is None:
        cluster = gcluster.Cluster(disable_detailed_stats=True,
                                   disable_timeline=True)
    item = gitem.Item(metagraph)
    peak_usage = cluster.DeterminePeakMemoryUsage(item)
    # Collect report lines and join once at the end instead of repeated
    # string concatenation.
    lines = []
    for device, snapshot in peak_usage.items():
        # NOTE: the original rebound the name `peak_usage` here, shadowing
        # the dict being iterated; use a distinct name.
        device_peak = snapshot[0]
        lines.append('Peak usage for device ' + device + ': ' +
                     str(device_peak) + ' bytes\n')
        if detailed_report:
            live_tensors = snapshot[1]
            for tensor in live_tensors:
                op_name = tensor[0]
                output_id = tensor[1]
                mem_used = tensor[2]
                lines.append(' ' + str(op_name) + ':' + str(output_id) +
                             ' uses ' + str(mem_used) + ' bytes\n')
    return ''.join(lines)
Analyze the peak memory usage for the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. detailed_report: print the live tensors in addition to the peak memory usage. cluster: Analyze the memory using the specified cluster, or the local machine if no cluster was specified. Returns: A string with the formatted memory usage.
github-repos
def match_tokens(ast_tokens, ast_types):
    """Verify that each token in order matches the expected AST types.

    The token list produced by `get_tokens` starts with wrapper nodes that
    are always the same for a condition (Module and Expr); these are
    prepended to the expected types automatically so callers do not have to
    list them each time.

    Args:
        ast_tokens (list): list of AST tokens parsed previously.
        ast_types (list): list of expected AST types.

    Returns:
        bool: True when all tokens match the expected types.
    """
    expected = [ast.Module, ast.Expr] + ast_types
    # NOTE: zip() stops at the shorter sequence, so surplus tokens or types
    # are silently ignored.
    return all(isinstance(token, ast_type)
               for token, ast_type in zip(ast_tokens, expected))
Verify that each token in order does match the expected types. The list provided by `get_tokens` does have two more elements at the beginning of the list which should be always the same for a condition (Module and Expr). Those are automatically added first to the final list of expected types so you don't have to specify it yourself each time. >>> tokens = Condition.get_tokens('2 == 3') >>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num]) True Args: ast_tokens (list): list of AST tokens parsed previously. ast_types (list): list of expected AST types. Returns: bool: when all tokens match the expected types
codesearchnet
def remove_from_queue(self, index):
    """Remove a track from the queue by index.

    Args:
        index (int): The (0-based) index of the track to remove.
    """
    # The AVTransport service addresses queue items with 1-based object ids.
    objid = 'Q:0/' + str(index + 1)
    self.avTransport.RemoveTrackFromQueue([
        ('InstanceID', 0),
        ('ObjectID', objid),
        ('UpdateID', '0'),
    ])
Remove a track from the queue by index. The index number is required as an argument, where the first index is 0. Args: index (int): The (0-based) index of the track to remove
juraj-google-style
def _check_stop(self):
    """Hook for subclasses to provide their own stop condition.

    Returns:
        True if the session should stop, False otherwise.
    """
    # Base implementation never requests a stop.
    return False
Hook for subclasses to provide their own stop condition. Returns: True if the session should stop, False otherwise.
github-repos
def encode_csv(data_dict, column_names):
    """Build a single CSV line from selected columns of a dict.

    Args:
        data_dict: dict of {column_name: single value}.
        column_names: list of column names; determines field order.

    Returns:
        A csv string version of data_dict (no trailing line terminator).
    """
    import csv
    import io

    values = [str(data_dict[x]) for x in column_names]
    # io.StringIO replaces the previous six.StringIO: on Python 3 they are
    # equivalent and this drops the third-party `six` dependency.
    str_buff = io.StringIO()
    writer = csv.writer(str_buff, lineterminator='')
    writer.writerow(values)
    return str_buff.getvalue()
Builds a csv string. Args: data_dict: dict of {column_name: 1 value} column_names: list of column names Returns: A csv string version of data_dict
codesearchnet
def delete(self, response_choice=1, async=False, callback=None):
    """Delete this object on the server, optionally asynchronously.

    NOTE(review): ``async`` became a reserved keyword in Python 3.7, so this
    signature is a SyntaxError on modern interpreters. Renaming it would
    break keyword callers, so it is only flagged here.

    Args:
        response_choice (int): Automatically send a response choice when
            confirmation is needed.
        async (bool): Boolean to make an asynchronous call. Default is
            False.
        callback (function): Callback method that will be triggered in case
            of asynchronous call.

    Example:
        >>> entity.delete()  # will delete the enterprise from the server
    """
    return self._manage_child_object(nurest_object=self, method=HTTP_METHOD_DELETE, async=async, callback=callback, response_choice=response_choice)
Delete object and call given callback in case of asynchronous call. Args: response_choice (int): Automatically send a response choice when confirmation is needed async (bool): Boolean to make an asynchronous call. Default is False callback (function): Callback method that will be triggered in case of asynchronous call Example: >>> entity.delete() # will delete the enterprise from the server
juraj-google-style
def __get_distribution_tags(self, client, arn):
    """Return a dict containing the tags for a CloudFront distribution.

    Args:
        client (botocore.client.CloudFront): Boto3 CloudFront client object.
        arn (str): ARN of the distribution to get tags for.

    Returns:
        `dict`: tag key mapped to tag value.
    """
    tag_items = client.list_tags_for_resource(Resource=arn)['Tags']['Items']
    return {tag['Key']: tag['Value'] for tag in tag_items}
Returns a dict containing the tags for a CloudFront distribution Args: client (botocore.client.CloudFront): Boto3 CloudFront client object arn (str): ARN of the distribution to get tags for Returns: `dict`
juraj-google-style
def strip_el_text(el, max_depth=0, cur_depth=0):
    """Recursively strip the plain text out of the given XML etree element.

    Args:
        el: The etree element to scan.
        max_depth: The depth to which to recursively strip text (default: 0).
        cur_depth: The current recursive depth to which we've scanned so far.

    Returns:
        The stripped, plain text from within the element.
    """
    el_text = strip_str(el.text if el.text is not None else "")
    if cur_depth < max_depth:
        for child in el:
            el_text += " " + strip_el_text(child, max_depth=max_depth,
                                           cur_depth=cur_depth + 1)
    else:
        # Depth limit reached: still pick up the tail text following the
        # last child, since it renders inside this element.
        children = list(el)
        # list(el) is never None, so the original `is not None` guard was
        # redundant; a truthiness check suffices.
        if children:
            if children[-1].tail is not None:
                el_text += " " + strip_str(children[-1].tail)
    if cur_depth > 0:
        if el.tail is not None:
            el_text += " " + strip_str(el.tail)
    return strip_str(el_text)
Recursively strips the plain text out of the given XML etree element up to the desired depth. Args: el: The etree element to scan. max_depth: The depth to which to recursively strip text (default: 0). cur_depth: The current recursive depth to which we've scanned so far. Returns: The stripped, plain text from within the element.
juraj-google-style
def save_pretrained(self, save_directory: Union[str, os.PathLike], config_file_name: Optional[Union[str, os.PathLike]]=None, push_to_hub: bool=False, **kwargs):
    """Save a generation configuration object to `save_directory`, so that
    it can be re-loaded using the [`~GenerationConfig.from_pretrained`]
    class method.

    Args:
        save_directory (`str` or `os.PathLike`):
            Directory where the configuration JSON file will be saved (will
            be created if it does not exist).
        config_file_name (`str` or `os.PathLike`, *optional*, defaults to
            `"generation_config.json"`):
            Name of the generation configuration JSON file to be saved in
            `save_directory`.
        push_to_hub (`bool`, *optional*, defaults to `False`):
            Whether or not to push your model to the Hugging Face model hub
            after saving it.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional key word arguments passed along to the
            [`~utils.PushToHubMixin.push_to_hub`] method.
    """
    # Refuse to save an invalid configuration.
    try:
        self.validate(strict=True)
    except ValueError as exc:
        raise ValueError(str(exc) + '\n\nFix these issues to save the configuration.')
    # Legacy `use_auth_token` support: map it onto `token`.
    use_auth_token = kwargs.pop('use_auth_token', None)
    if use_auth_token is not None:
        warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
        if kwargs.get('token', None) is not None:
            raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
        kwargs['token'] = use_auth_token
    config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
    if os.path.isfile(save_directory):
        raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')
    os.makedirs(save_directory, exist_ok=True)
    if push_to_hub:
        # Create (or reuse) the target repo and snapshot file timestamps so
        # only modified files are uploaded afterwards.
        commit_message = kwargs.pop('commit_message', None)
        repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])
        repo_id = self._create_repo(repo_id, **kwargs)
        files_timestamps = self._get_files_timestamps(save_directory)
    output_config_file = os.path.join(save_directory, config_file_name)
    # use_diff=True writes only values that differ from the defaults.
    self.to_json_file(output_config_file, use_diff=True)
    logger.info(f'Configuration saved in {output_config_file}')
    if push_to_hub:
        self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))
Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the [`~GenerationConfig.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the configuration JSON file will be saved (will be created if it does not exist). config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`): Name of the generation configuration JSON file to be saved in `save_directory`. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
github-repos
def StartsWith(this, that):
    """Checks whether the items of `that` form a prefix of `this`.

    Args:
        this: An iterable that needs to be checked.
        that: An iterable of which items must match the prefix of `this`.

    Returns:
        `True` if `that` is a prefix of `this`, `False` otherwise.
    """
    this_iter = iter(this)
    that_iter = iter(that)
    # NOTE: the original bound these to swapped names; the logic is
    # unchanged: `that` exhausting first confirms the prefix.
    while True:
        try:
            expected = next(that_iter)
        except StopIteration:
            return True
        try:
            actual = next(this_iter)
        except StopIteration:
            # `this` ran out while `that` still has items.
            return False
        if expected != actual:
            return False
Checks whether the items of one iterable are a prefix of another. Args: this: An iterable that needs to be checked. that: An iterable of which items must match the prefix of `this`. Returns: `True` if `that` is a prefix of `this`, `False` otherwise.
codesearchnet
def draw(self):
    """Draws the tree, one branch level per age listed in ``self.ages``.

    Thickness and color of each level are derived from its age; levels whose
    age is not in ``self.ages`` are skipped.
    """
    for age, level in enumerate(self.tree.get_branches()):
        if age not in self.ages:
            continue
        thickness = self._get_thickness(age)
        color = self._get_color(age)
        for branch in level:
            self._draw_branch(branch, color, thickness, age)
Draws the tree. The ages to draw are read from the instance attribute ``self.ages`` (array): contains the ages you want to draw.
codesearchnet
def get_user_information(self):
    """Gets the current user information, including sensor ID.

    Returns:
        dict: information about the current user, parsed from the JSON
        response.
    """
    # NOTE(review): the URL literal below was truncated in this copy of the
    # source ("https:) and does not parse; restore the full endpoint URL
    # from the original before use.
    url = "https:
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    r = requests.get(url, headers=headers)
    return r.json()
Gets the current user information, including sensor ID Args: None Returns: dictionary object containing information about the current user
juraj-google-style
def save(obj, filename, protocol=4):
    """Serialize an object to disk using the pickle protocol.

    Args:
        obj: The object to serialize.
        filename: Path to the output file.
        protocol: Version of the pickle protocol (default 4).
    """
    with open(filename, 'wb') as handle:
        pickle.dump(obj, handle, protocol=protocol)
Serialize an object to disk using pickle protocol. Args: obj: The object to serialize. filename: Path to the output file. protocol: Version of the pickle protocol.
juraj-google-style
def bin(values, bins, labels=None):
    """Bins data into declared bins.

    By default each bin is labelled with its center value, but an explicit
    list of bin labels may be defined.

    Args:
        values: Array of values to be binned.
        bins: List or array containing the bin boundaries.
        labels: List of labels to assign to each bin. If the bins are
            length N the labels should be length N-1.

    Returns:
        Array of binned values; entries outside all bins keep the fill
        value.
    """
    # NOTE: shadows the builtin `bin`; kept for API compatibility.
    bins = np.asarray(bins)
    if labels is None:
        # Default labels are the bin centers.
        labels = (bins[:-1] + np.diff(bins)/2.)
    else:
        labels = np.asarray(labels)
    dtype = 'float' if labels.dtype.kind == 'f' else 'O'
    # NOTE(review): `dtype` is 'float' or 'O', never 'f', so the fill value
    # below is always None -- confirm whether `dtype == 'f'` was meant to
    # be `dtype == 'float'`.
    binned = np.full_like(values, (np.nan if dtype == 'f' else None), dtype=dtype)
    # Each bin is half-open: (lower, upper].
    for lower, upper, label in zip(bins[:-1], bins[1:], labels):
        condition = (values > lower) & (values <= upper)
        binned[np.where(condition)[0]] = label
    return binned
Bins data into declared bins Bins data into declared bins. By default each bin is labelled with bin center values but an explicit list of bin labels may be defined. Args: values: Array of values to be binned bins: List or array containing the bin boundaries labels: List of labels to assign to each bin If the bins are length N the labels should be length N-1 Returns: Array of binned values
juraj-google-style
def __init__(self, module, dropout, weights=None):
    """Default constructor for the WeightDrop module.

    Args:
        module (torch.nn.Module): A pytorch layer being wrapped.
        dropout (float): a dropout value to apply.
        weights (list(str)): the parameters of the wrapped **module** which
            should be fractionally dropped. Defaults to ['weight_hh_l0'].
    """
    super().__init__()
    # Avoid a mutable default argument: the original shared one list
    # object across every instance of the class.
    if weights is None:
        weights = ['weight_hh_l0']
    self.module, self.weights, self.dropout = module, weights, dropout
    self._setup()
Default constructor for the WeightDrop module Args: module (torch.nn.Module): A pytorch layer being wrapped dropout (float): a dropout value to apply weights (list(str)): the parameters of the wrapped **module** which should be fractionally dropped.
juraj-google-style
def has_permission(self, perm):
    """Checks if the current user (or role) has the given permission.

    Args:
        perm: Permission code or object. Depends on the
            :attr:`~zengine.auth.auth_backend.AuthBackend` implementation.

    Returns:
        Boolean.
    """
    # Superusers short-circuit the backend check.
    is_superuser = self.user.superuser
    return is_superuser or self.auth.has_permission(perm)
Checks if current user (or role) has the given permission. Args: perm: Permmission code or object. Depends on the :attr:`~zengine.auth.auth_backend.AuthBackend` implementation. Returns: Boolean.
juraj-google-style
def get_angle(self, i: int, j: int, k: int) -> float:
    """Returns the angle specified by three sites.

    Args:
        i: Index of first site.
        j: Index of second site (the vertex of the angle).
        k: Index of third site.

    Returns:
        Angle in degrees.
    """
    # Both vectors originate at site j, so the angle is measured at j.
    v1 = self[i].coords - self[j].coords
    v2 = self[k].coords - self[j].coords
    return get_angle(v1, v2, units='degrees')
Returns angle specified by three sites. Args: i: Index of first site. j: Index of second site. k: Index of third site. Returns: Angle in degrees.
codesearchnet
def __init__(self, data_type_definition):
    """Initializes a data type map.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.
    """
    super(DataTypeMap, self).__init__()
    # Keep the definition for later size/format calculations.
    self._data_type_definition = data_type_definition
Initializes a data type map. Args: data_type_definition (DataTypeDefinition): data type definition. Raises: FormatError: if the data type map cannot be determined from the data type definition.
juraj-google-style
def send_message_for_lane_change(sender, **kwargs):
    """Sends a message to possible owners of the current workflow's next lane.

    Args:
        sender (User): User object.
        **kwargs: ``current`` and ``possible_owners`` are required.
    """
    current = kwargs['current']
    owners = kwargs['possible_owners']
    if 'lane_change_invite' in current.task_data:
        msg_context = current.task_data.pop('lane_change_invite')
    else:
        msg_context = DEFAULT_LANE_CHANGE_INVITE_MSG
    wfi = WFCache(current).get_instance()
    # Remove stale invitations for this role before issuing new ones.
    TaskInvitation.objects.filter(instance=wfi, role=current.role,
                                  wf_name=wfi.wf.name).delete()
    today = datetime.today()
    for recipient in owners:
        inv = TaskInvitation(instance=wfi, role=recipient,
                             wf_name=wfi.wf.name, progress=30,
                             start_date=today,
                             finish_date=(today + timedelta(15)))
        inv.title = (current.task_data.get('INVITATION_TITLE') or wfi.wf.title)
        inv.save()
        # Notification delivery is best-effort; a failure for one recipient
        # must not abort invitations for the rest.
        try:
            recipient.send_notification(
                title=msg_context['title'],
                message=('%s %s' % (wfi.wf.title, msg_context['body'])),
                typ=1, url='', sender=sender)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate.
            pass
Sends a message to possible owners of the current workflows next lane. Args: **kwargs: ``current`` and ``possible_owners`` are required. sender (User): User object
codesearchnet
def add_case(self, case, update=False):
    """Add a case to the case collection.

    If the case exists and update is False, raise an error.

    Args:
        case (dict): A case dictionary.
        update (bool): If an existing case should be updated.

    Returns:
        dict: the case that was inserted or used as replacement.

    Raises:
        CaseError: when the case already exists and update is False.
    """
    existing_case = self.case(case)
    if existing_case and not update:
        raise CaseError('Case {} already exists'.format(case['case_id']))
    if existing_case:
        # Replace the stored document wholesale with the new case.
        self.db.case.find_one_and_replace({'case_id': case['case_id']}, case)
    else:
        self.db.case.insert_one(case)
    return case
Add a case to the case collection If the case exists and update is False raise error. Args: db (MongoClient): A connection to the mongodb case (dict): A case dictionary update(bool): If existing case should be updated Returns: mongo_case_id(ObjectId)
codesearchnet
def as_list(self): if (self._dims is None): raise ValueError('as_list() is not defined on an unknown TensorShape.') return [dim.value for dim in self._dims]
Returns a list of integers or `None` for each dimension. Returns: A list of integers or `None` for each dimension. Raises: ValueError: If `self` is an unknown shape with an unknown rank.
codesearchnet
def show_plot(plot, width=PREVIEW_WIDTH, height=PREVIEW_HEIGHT):
    """Preview a plot in a jupyter notebook.

    Args:
        plot (list): the plot to display (list of layers).
        width (int): the width of the preview.
        height (int): the height of the preview.

    Returns:
        An object that renders in Jupyter as the provided plot.
    """
    svg_markup = plot_to_svg(plot, width, height)
    return SVG(data=svg_markup)
Preview a plot in a jupyter notebook. Args: plot (list): the plot to display (list of layers) width (int): the width of the preview height (int): the height of the preview Returns: An object that renders in Jupyter as the provided plot
juraj-google-style
def convert_compartment_entry(self, compartment, adjacencies):
    """Convert compartment entry to YAML dict.

    Args:
        compartment: :class:`psamm.datasource.entry.CompartmentEntry`.
        adjacencies: Sequence of IDs or a single ID of adjacent
            compartments (or None).

    Returns:
        collections.OrderedDict: 'id', then optional 'adjacent_to', then
        the non-None properties with 'name' first and the rest sorted
        alphabetically.
    """
    result = OrderedDict()
    result['id'] = compartment.id
    if adjacencies is not None:
        result['adjacent_to'] = adjacencies
    # 'name' sorts first; all other properties follow alphabetically.
    priority = {'name': 0}
    for prop in sorted(set(compartment.properties),
                       key=lambda x: (priority.get(x, 1000), x)):
        if compartment.properties[prop] is not None:
            result[prop] = compartment.properties[prop]
    return result
Convert compartment entry to YAML dict. Args: compartment: :class:`psamm.datasource.entry.CompartmentEntry`. adjacencies: Sequence of IDs or a single ID of adjacent compartments (or None).
codesearchnet
def job_tasks(self, job_name):
    """Returns a mapping from task ID to address in the given job.

    NOTE: For backwards compatibility, this method returns a list. If the
    given job was defined with a sparse set of task indices, the length of
    this list may not reflect the number of tasks defined in this job. Use
    the `tf.train.ClusterSpec.num_tasks` method to find the number of tasks
    defined in a particular job.

    Args:
        job_name: The string name of a job in this cluster.

    Returns:
        A list of task addresses, where the index in the list corresponds
        to the task index of each task. The list may contain `None` if the
        job was defined with a sparse set of task indices.

    Raises:
        ValueError: If `job_name` does not name a job in this cluster.
    """
    try:
        job = self._cluster_spec[job_name]
    except KeyError:
        raise ValueError('No such job in cluster: %r' % job_name)
    # Size the list to the largest task index; unassigned slots stay None.
    addresses = [None] * (max(job.keys()) + 1)
    for index, address in job.items():
        addresses[index] = address
    return addresses
Returns a mapping from task ID to address in the given job. NOTE: For backwards compatibility, this method returns a list. If the given job was defined with a sparse set of task indices, the length of this list may not reflect the number of tasks defined in this job. Use the `tf.train.ClusterSpec.num_tasks` method to find the number of tasks defined in a particular job. Args: job_name: The string name of a job in this cluster. Returns: A list of task addresses, where the index in the list corresponds to the task index of each task. The list may contain `None` if the job was defined with a sparse set of task indices. Raises: ValueError: If `job_name` does not name a job in this cluster.
github-repos
def AFF4Path(self, client_urn):
    """Returns the AFF4 URN this pathspec will be stored under.

    Args:
        client_urn: A ClientURN.

    Returns:
        A urn that corresponds to this pathspec.

    Raises:
        ValueError: If pathspec is not of the correct type.
    """
    # NOTE(review): this copy of the source is garbled -- the two
    # `.format((...offset` expressions below were truncated during
    # extraction (the offset-formatting calls are unterminated), and
    # `self[start]` looks like it was `self[start:]`. Restore from the
    # upstream source before use; the code below does not parse as-is.
    if (not self.HasField('pathtype')):
        raise ValueError("Can't determine AFF4 path without a valid pathtype.")
    first_component = self[0]
    dev = first_component.path
    if first_component.HasField('offset'):
        dev += ':{}'.format((first_component.offset
    if ((len(self) > 1) and (first_component.pathtype == PathSpec.PathType.OS) and (self[1].pathtype == PathSpec.PathType.TSK)):
        result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
        start = 1
    else:
        result = [self.AFF4_PREFIXES[first_component.pathtype]]
        start = 0
    for p in self[start]:
        component = p.path
        if p.HasField('offset'):
            component += ':{}'.format((p.offset
        if p.HasField('stream_name'):
            component += (':' + p.stream_name)
        result.append(component)
    return client_urn.Add('/'.join(result))
Returns the AFF4 URN this pathspec will be stored under. Args: client_urn: A ClientURN. Returns: A urn that corresponds to this pathspec. Raises: ValueError: If pathspec is not of the correct type.
codesearchnet
def _determine_hpp_url(self, platform, action):
    """Return the Adyen HPP endpoint based on the platform and action.

    Args:
        platform (str): Adyen platform, ie 'live' or 'test'.
        action (str): the HPP action to perform. Possible actions: select,
            pay, skipDetails, directory.

    Returns:
        str: the full HPP URL.
    """
    base_uri = settings.BASE_HPP_URL.format(platform)
    service = action + '.shtml'
    return '/'.join([base_uri, service])
This returns the Adyen HPP endpoint based on the provided platform, and action. Args: platform (str): Adyen platform, ie 'live' or 'test'. action (str): the HPP action to perform. possible actions: select, pay, skipDetails, directory
juraj-google-style
def isfunc(x):
    """Returns `True` if the given value is a function or method object,
    excluding coroutine functions.

    Arguments:
        x (mixed): value to check.

    Returns:
        bool
    """
    # Equivalent to the original any([...]) expression:
    # (isfunction and not coro) or (ismethod and not coro)
    # factors to (isfunction or ismethod) and not coro.
    if asyncio.iscoroutinefunction(x):
        return False
    return inspect.isfunction(x) or inspect.ismethod(x)
Returns `True` if the given value is a function or method object. Arguments: x (mixed): value to check. Returns: bool
codesearchnet
def bruteVersionStr(self, valu):
    """Brute force the version out of a string.

    Args:
        valu (str): String to attempt to get version information for.

    Notes:
        This first attempts to parse strings using the it:semver
        normalization before attempting to extract version parts out of
        the string.

    Returns:
        int, dict: The system normalized version integer and a subs
        dictionary.
    """
    try:
        valu, info = self.core.model.type('it:semver').norm(valu)
        return valu, info.get('subs')
    except s_exc.BadTypeValu:
        # Fall back to loose part extraction when semver parsing fails.
        subs = s_version.parseVersionParts(valu)
        if subs is None:
            raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr', mesg='Unable to brute force version parts out of the string')
        if subs:
            valu = s_version.packVersion(subs.get('major'),
                                         subs.get('minor', 0),
                                         subs.get('patch', 0))
        return valu, subs
Brute force the version out of a string. Args: valu (str): String to attempt to get version information for. Notes: This first attempts to parse strings using the it:semver normalization before attempting to extract version parts out of the string. Returns: int, dict: The system normalized version integer and a subs dictionary.
codesearchnet
def download(url):
    """Download trained weights, config and preprocessor archives.

    Args:
        url (str): target url of the zip archive.

    Returns:
        tuple(str, str, str): paths to weights.h5, params.json and
        preprocessor.pickle inside the extracted archive.
    """
    filepath = get_file(fname='tmp.zip', origin=url, extract=True)
    base_dir = os.path.dirname(filepath)
    return (os.path.join(base_dir, 'weights.h5'),
            os.path.join(base_dir, 'params.json'),
            os.path.join(base_dir, 'preprocessor.pickle'))
Download a trained weights, config and preprocessor. Args: url (str): target url.
juraj-google-style
def supports_default_grad(t):
    """Whether tensor `t` supports creating a default gradient.

    This function assumes that `t` is of a trainable type.

    Args:
        t: Tensor.

    Returns:
        Bool.
    """
    if t.dtype != dtypes.resource:
        return True
    # Resource tensors only support a default gradient when their handle
    # data identifies exactly one shape/type.
    handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
    if handle_data is None or not handle_data.is_set:
        return False
    return len(handle_data.shape_and_type) == 1
Whether tensor `t` supports creating a default gradient. This function assumes that `t` is of a trainable type. Args: t: Tensor Returns: Bool
github-repos
def process_module(self, mod_info, mod_ast):
    """Create a module from a loaded ast and save it to the loader cache.

    Args:
        mod_info: The metadata of the module being imported.
        mod_ast: The pytd.TypeDeclUnit representing the module.

    Returns:
        The ast (pytd.TypeDeclUnit) as represented in this loader.
    """
    module_name = mod_info.module_name
    module = Module(module_name, mod_info.filename, mod_ast)
    # First pass: resolve builtins with singletons disallowed.
    self._resolver.allow_singletons = False
    module.ast = self._resolver.resolve_builtin_types(module.ast)
    # Register the module before resolution so self-references resolve.
    self._modules[module_name] = module
    try:
        self._resolver.allow_singletons = True
        module.ast = self._resolve_external_and_local_types(module.ast)
        module.ast = self._resolver.resolve_builtin_types(module.ast)
        self._resolver.allow_singletons = False
        module.ast = module.ast.Visit(visitors.AdjustTypeParameters())
        # Point local references at this module's own definitions.
        module_map = {'': module.ast, module_name: module.ast}
        module.ast.Visit(visitors.FillInLocalPointers(module_map))
    except:
        # Roll back the cache entry on any failure so a broken module is
        # not left registered; the exception is re-raised, so the bare
        # except here is deliberate.
        del self._modules[module_name]
        raise
    if module_name:
        self.add_module_prefixes(module_name)
    return module.ast
Create a module from a loaded ast and save it to the loader cache. Args: mod_info: The metadata of the module being imported. mod_ast: The pytd.TypeDeclUnit representing the module. Returns: The ast (pytd.TypeDeclUnit) as represented in this loader.
github-repos
def round_to_nearest(dt, n_round_sec=1.0):
    """Round datetime up or down to the nearest divisor.

    Round datetime to the nearest number of seconds that divides evenly by
    the divisor. Any timezone is preserved but ignored in the rounding.

    Args:
        dt: datetime.
        n_round_sec (int or float): Divisor for rounding.

    Examples:
        - ``n_round_sec`` = 0.1: nearest 10th of a second.
        - ``n_round_sec`` = 1: nearest second.
        - ``n_round_sec`` = 30: nearest half minute.
    """
    # Adding half the divisor before truncating makes the truncation below
    # round to nearest rather than down.
    ts = ts_from_dt(strip_timezone(dt)) + (n_round_sec / 2.0)
    rounded = dt_from_ts(ts - (ts % n_round_sec))
    return rounded.replace(tzinfo=dt.tzinfo)
Round datetime up or down to nearest divisor. Round datetime up or down to nearest number of seconds that divides evenly by the divisor. Any timezone is preserved but ignored in the rounding. Args: dt: datetime n_round_sec : int or float Divisor for rounding Examples: - ``n_round_sec`` = 0.1: nearest 10th of a second. - ``n_round_sec`` = 1: nearest second. - ``n_round_sec`` = 30: nearest half minute.
codesearchnet
def simplify_exprs(exprs, result_type, stop_term, skip_term):
    """Simplify a set of subexpressions for a conjunction or disjunction.

    Args:
        exprs: An iterable. The subexpressions.
        result_type: _And or _Or. The type of result (unless it simplifies
            down to something simpler).
        stop_term: FALSE for _And, TRUE for _Or. If this term is
            encountered, it will be immediately returned.
        skip_term: TRUE for _And, FALSE for _Or. If this term is
            encountered, it will be ignored.

    Returns:
        A BooleanTerm.
    """
    collected = set()
    for expr in exprs:
        if expr is stop_term:
            # Short-circuit: the whole expression collapses to stop_term.
            return stop_term
        if expr is skip_term:
            continue
        if isinstance(expr, result_type):
            # Flatten nested terms of the same type.
            collected = collected.union(expr.exprs)
        else:
            collected.add(expr)
    if len(collected) > 1:
        return result_type(collected)
    if collected:
        return collected.pop()
    return skip_term
Simplify a set of subexpressions for a conjunction or disjunction. Args: exprs: An iterable. The subexpressions. result_type: _And or _Or. The type of result (unless it simplifies down to something simpler). stop_term: FALSE for _And, TRUE for _Or. If this term is encountered, it will be immediately returned. skip_term: TRUE for _And, FALSE for _Or. If this term is encountered, it will be ignored. Returns: A BooleanTerm.
github-repos
def add_bonds(self, neighbors, center, color=None, opacity=None, radius=0.1):
    """Adds bonds for a site.

    Args:
        neighbors: Neighbors of the site.
        center: The site in the center for all bonds.
        color: Color of the tubes representing the bonds.
        opacity: Opacity of the tubes representing the bonds.
        radius: Radius of tubes representing the bonds.
    """
    # Point 0 is the central site; points 1..n are the neighbors.
    points = vtk.vtkPoints()
    points.InsertPoint(0, center.x, center.y, center.z)
    n = len(neighbors)
    lines = vtk.vtkCellArray()
    for i in range(n):
        points.InsertPoint((i + 1), neighbors[i].coords)
        # Each bond is a 2-point line from the center to one neighbor.
        lines.InsertNextCell(2)
        lines.InsertCellPoint(0)
        lines.InsertCellPoint((i + 1))
    pd = vtk.vtkPolyData()
    pd.SetPoints(points)
    pd.SetLines(lines)
    # Render the lines as tubes of the requested radius.
    tube = vtk.vtkTubeFilter()
    if (vtk.VTK_MAJOR_VERSION <= 5):
        # VTK 5 uses the legacy producer-port pipeline API.
        tube.SetInputConnection(pd.GetProducerPort())
    else:
        tube.SetInputData(pd)
    tube.SetRadius(radius)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(tube.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    if (opacity is not None):
        actor.GetProperty().SetOpacity(opacity)
    if (color is not None):
        actor.GetProperty().SetColor(color)
    self.ren.AddActor(actor)
Adds bonds for a site. Args: neighbors: Neighbors of the site. center: The site in the center for all bonds. color: Color of the tubes representing the bonds opacity: Opacity of the tubes representing the bonds radius: Radius of tube s representing the bonds
codesearchnet
def _indexOfEndTag(istack):
    """Go through `istack` and search for the matching end tag.

    The element at the first index is considered the opening tag.

    Args:
        istack (list): List of :class:`.HTMLElement` objects.

    Returns:
        int: Index of the end tag or 0 if not found.
    """
    if len(istack) <= 0:
        return 0
    if not istack[0].isOpeningTag():
        return 0
    opener = istack[0]
    depth = 0  # number of nested openers with the same tag name
    for index, el in enumerate(istack[1:]):
        if (el.isOpeningTag()
                and el.getTagName().lower() == opener.getTagName().lower()):
            depth += 1
        elif el.isEndTagTo(opener):
            if depth == 0:
                return index + 1
            depth -= 1
    return 0
Go through `istack` and search endtag. Element at first index is considered as opening tag. Args: istack (list): List of :class:`.HTMLElement` objects. Returns: int: Index of end tag or 0 if not found.
juraj-google-style
def ListFileEntries(self, base_path_specs, output_writer):
    """Lists file entries in the base path specifications.

    Args:
        base_path_specs (list[dfvfs.PathSpec]): source path specifications.
        output_writer (StdoutWriter): output writer.
    """
    for base_path_spec in base_path_specs:
        file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
        file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
        if file_entry is None:
            # Abort entirely on the first unopenable path specification.
            logging.warning('Unable to open base path specification:\n{0:s}'.format(base_path_spec.comparable))
            return
        self._ListFileEntry(file_system, file_entry, '', output_writer)
Lists file entries in the base path specification. Args: base_path_specs (list[dfvfs.PathSpec]): source path specification. output_writer (StdoutWriter): output writer.
codesearchnet
def _kl_gamma_gamma(g0, g1, name=None):
    """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

    Args:
        g0: instance of a Gamma distribution object.
        g1: instance of a Gamma distribution object.
        name: (optional) Name to use for created operations.
            Default is "kl_gamma_gamma".

    Returns:
        kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
    """
    with ops.name_scope(name, 'kl_gamma_gamma', values=[g0.concentration, g0.rate, g1.concentration, g1.rate]):
        # Closed form with a = concentration, b = rate:
        # (a0 - a1) digamma(a0) + lgamma(a1) - lgamma(a0)
        #   + a1 (log b0 - log b1) + a0 (b1 / b0 - 1).
        return (g0.concentration - g1.concentration) * math_ops.digamma(g0.concentration) + math_ops.lgamma(g1.concentration) - math_ops.lgamma(g0.concentration) + g1.concentration * math_ops.log(g0.rate) - g1.concentration * math_ops.log(g1.rate) + g0.concentration * (g1.rate / g0.rate - 1.0)
Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma. Args: g0: instance of a Gamma distribution object. g1: instance of a Gamma distribution object. name: (optional) Name to use for created operations. Default is "kl_gamma_gamma". Returns: kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
github-repos
def long_description():
    """Reads and returns the contents of the README.

    On failure, returns the project's long description.

    Returns:
        The project's long description.
    """
    cwd = os.path.abspath(os.path.dirname(__file__))
    readme_path = os.path.join(cwd, 'README.md')
    if not os.path.exists(readme_path):
        return pylink.__long_description__
    # Prefer a reStructuredText conversion for PyPI when pypandoc is
    # available; fall back to the raw Markdown otherwise.
    try:
        import pypandoc
        return pypandoc.convert(readme_path, 'rst')
    except (IOError, ImportError):
        pass
    return open(readme_path, 'r').read()
Reads and returns the contents of the README. On failure, returns the project long description. Returns: The project's long description.
codesearchnet
def elevation(self, value=0.0):
    """Corresponds to IDD Field `elevation`.

    Args:
        value (float): value for IDD Field `elevation`. Unit: m.
            Default value: 0.0; value >= -1000.0; value < 9999.9.
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    # None means "missing value" and bypasses all validation.
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `elevation`'.format(value))
        if value < -1000.0:
            raise ValueError('value need to be greater or equal -1000.0 '
                             'for field `elevation`')
        if value >= 9999.9:
            raise ValueError('value need to be smaller 9999.9 '
                             'for field `elevation`')
    self._elevation = value
Corresponds to IDD Field `elevation` Args: value (float): value for IDD Field `elevation` Unit: m Default value: 0.0 value >= -1000.0 value < 9999.9 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def _format_line(headers, fields):
    """Format a line of a table.

    Arguments:
        headers: A list of strings that are used as the table headers.
        fields: A list of the same length as `headers` where `fields[i]` is
            the entry for `headers[i]` in this row. Elements can be of
            arbitrary types. Pass `headers` to print the header row.

    Returns:
        A pretty string.

    Raises:
        ValueError: if `fields` and `headers` differ in length.
    """
    if len(fields) != len(headers):
        # Raise instead of assert so the check survives `python -O`.
        raise ValueError('fields and headers must have the same length: '
                         '{} vs {}'.format(fields, headers))
    formatted = [('%2.4f' % field) if isinstance(field, float) else str(field)
                 for field in fields]
    # Right-pad each field to at least its header's width (never truncate).
    return ' '.join(field.rjust(len(header))
                    for header, field in zip(headers, formatted))
Format a line of a table. Arguments: headers: A list of strings that are used as the table headers. fields: A list of the same length as `headers` where `fields[i]` is the entry for `headers[i]` in this row. Elements can be of arbitrary types. Pass `headers` to print the header row. Returns: A pretty string.
codesearchnet
def __recognize_union(self, node: yaml.Node, expected_type: Type) -> RecResult:
    """Recognize a node that we expect to be one of a union of types.

    Args:
        node: The node to recognize.
        expected_type: Union[...something...]

    Returns:
        The specific type that was recognized, multiple, or none.
    """
    logger.debug('Recognizing as a union')
    recognized_types = []
    message = ''
    union_types = generic_type_args(expected_type)
    logger.debug('Union types {}'.format(union_types))
    # Try every member type of the union; collect all that match.
    for possible_type in union_types:
        (recognized_type, msg) = self.recognize(node, possible_type)
        if (len(recognized_type) == 0):
            message += msg
        recognized_types.extend(recognized_type)
    recognized_types = list(set(recognized_types))
    # bool_union_fix is a stand-in for bool; drop it when real bool also
    # matched so it does not count as an ambiguity.
    if ((bool in recognized_types) and (bool_union_fix in recognized_types)):
        recognized_types.remove(bool_union_fix)
    if (len(recognized_types) == 0):
        return (recognized_types, message)
    elif (len(recognized_types) > 1):
        # Ambiguous: more than one member type matched.
        message = '{}{}Could not determine which of the following types this is: {}'.format(node.start_mark, os.linesep, recognized_types)
        return (recognized_types, message)
    return (recognized_types, '')
Recognize a node that we expect to be one of a union of types. Args: node: The node to recognize. expected_type: Union[...something...] Returns: The specific type that was recognized, multiple, or none.
codesearchnet
def _postprocess_flat_outputs(outputs):
    """Validates flat outputs of an XLA computation and re-adds device placement.

    Args:
        outputs: Output from `computation` inside `xla.compile`.

    Returns:
        Tuple of (output tensors, output operations).

    Raises:
        ValueError: If an output is neither an Operation nor convertible to a
            Tensor, or if tensors are not listed before operations.
    """
    if outputs is None:
        outputs = tuple()
    # Normalize a single return value into a one-element sequence.
    if not isinstance(outputs, collections_abc.Sequence):
        outputs = (outputs,)
    # Always append a no-op so there is at least one operation to depend on.
    outputs += (control_flow_ops.no_op(),)
    try:
        outputs = [o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o) for o in outputs]
    except Exception as e:
        raise ValueError('XLA computation function return values must all either be Operations or convertible to Tensors. Got error: "%s"' % str(e))
    output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
    output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
    # Enforce "tensors first, then operations" ordering in the return value.
    if outputs != output_tensors + output_operations:
        raise ValueError('XLA computation function must return zero or more Tensor values followed by zero or more Operations.')
    new_output_tensors = []
    for t in output_tensors:
        # Re-attach each tensor to its original device via an identity op.
        with ops.device(t.device if t.device else ''):
            new_output_tensors.append(array_ops.identity(t))
    return (new_output_tensors, output_operations)
Validates flat outputs and adds back device assignments. Args: outputs: Output from `computation` inside `xla.compile`. Returns: Tensors and Operations extracted from outputs.
github-repos
def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows, window_cols, row_stride, col_stride, padding, v2):
    """Max Pooling Gradient.

    Args:
        orig_input: A float Tensor. The original input tensor.
        orig_output: A float Tensor. The original output tensor.
        grad: A float Tensor. The 4D (batch x rows x cols x depth) output backprop.
        window_rows: integer. Kernel size along rows dimension.
        window_cols: integer. Kernel size along cols dimension.
        row_stride: integer. Stride along rows dimension.
        col_stride: integer. Stride along cols dimension.
        padding: PoolingOpDef.Padding. Padding type.
        v2: Whether to use the MaxPoolGradV2 kernel.

    Returns:
        A Tensor.
    """
    pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops.max_pool_grad
    if v2:
        return pool_func(orig_input, orig_output, grad, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding)
    else:
        # The V1 kernel takes a normalized padding plus explicit paddings.
        padding, explicit_paddings = nn_ops.convert_padding(padding)
        return pool_func(orig_input, orig_output, grad, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding, explicit_paddings)
Max Pooling Gradient. Args: orig_input: A float Tensor. The original input tensor. orig_output: A float Tensor. The original output tensor. grad: A float Tensor. The 4D (batch x rows x cols x depth) output backprop. window_rows: integer. Kernel size along rows dimension. window_cols: integer. Kernel size along cols dimension. row_stride: integer. Stride along rows dimension col_stride: integer. Stride along cols dimension padding: PoolingOpDef.Padding. Padding type. Returns: A Tensor.
github-repos
def localize(dt, force_to_local=True):
    """Attach or convert a datetime to the local timezone.

    A naive datetime is stamped with the local timezone as-is; an aware one
    is wrapped as a datetime_tz and, when requested, converted.

    Args:
        dt: datetime object, naive or aware.
        force_to_local: When True, convert aware datetimes to local time.

    Returns:
        A datetime_tz object.
    """
    if not isinstance(dt, datetime_tz):
        if not dt.tzinfo:
            # Naive: assume it already represents local wall-clock time.
            return datetime_tz(dt, tzinfo=localtz())
        dt = datetime_tz(dt)
    return dt.astimezone(localtz()) if force_to_local else dt
Localize a datetime to the local timezone. If dt is naive, returns the same datetime with the local timezone, otherwise uses astimezone to convert. Args: dt: datetime object. force_to_local: Force all results to be in local time. Returns: A datetime_tz object.
juraj-google-style
def execute_tests(self):
    """Execute the notebook and run the test spec against its code cells.

    Returns:
        Tuple (test_count, error_count, errors) where errors maps
        (cell number, test class, expected output) to an error string.
    """
    with open(self.notebook_path, 'r') as nb_f:
        nb = nbformat.read(nb_f, as_version=4)
    ExecutePreprocessor.timeout = self.timeout_secs
    # allow_errors so execution continues past failing cells.
    ep = ExecutePreprocessor(allow_errors=True)
    exec_nb, _ = ep.preprocess(nb, {'metadata': {'path': self.dir + '/'}})
    test_count = 0
    error_count = 0
    errors = OrderedDict()
    code_cells = {}
    # Index executed code cells by their execution count.
    for cell in exec_nb['cells']:
        if cell['cell_type'] == 'code':
            code_cells[cell['execution_count']] = cell
    for cell_num in sorted(self.tests.keys()):
        if cell_num not in code_cells:
            test_count += 1
            error_count += 1
            errors[cell_num, '', ''] = 'Given cell does not exist.'
        else:
            cell = code_cells[cell_num]
            for test in self.tests[cell_num]:
                # Each test is a one-entry dict: {test class name: setup}.
                cls, setup = list(test.items())[0]
                test_count += 1
                try:
                    getattr(sys.modules['testlib'], cls)(setup).check(cell)
                except Exception as e:
                    error_count += 1
                    errors[cell_num, cls, setup] = str(e)
    return (test_count, error_count, errors)
Executes notebook and compares to test spec. Returns: # of tests, # of errors, error_dict where error_dict maps (cell number, test class, expected output) to string
github-repos
def lineitem_patch_v1(config, auth, patch, li):
    """Patch a DV360 line item.

    Args:
        config: StarThinker configuration object.
        auth: StarThinker authentication scheme.
        patch: Field mask naming the fields to update.
        li: Line item dict carrying advertiserId/lineItemId and the updates.

    Returns:
        The updated line item as returned by the DV360 API.
    """
    line_items = API_DV360(config, auth).advertisers().lineItems()
    request = line_items.patch(
        advertiserId=li['advertiserId'],
        lineItemId=li['lineItemId'],
        updateMask=patch,
        body=li,
    )
    return request.execute()
Patches a DV360 Line Item Args: auth: StarThinker authentication scheme patch: List of field names to patch li: Line item with updates to push Returns: Updated Line Item
github-repos
def update_nsval(
    self, *, nsval: str = None, ns: str = None, val: str = None
) -> None:
    """Update this entity's namespace and value.

    Accepts either a combined ``nsval`` ("NS:value", e.g. "HGNC:AKT1") or an
    explicit ``ns``/``val`` pair; the explicit pair wins when both are given.

    Args:
        nsval: Combined namespace:value string, split on the first colon.
        ns: Namespace, e.g. "HGNC".
        val: Value of the entity, e.g. "AKT1".
    """
    if not (ns and val):
        if nsval:
            (ns, val) = nsval.split(":", 1)
        else:
            # Bug fix: previously this fell through after logging and
            # clobbered the existing namespace/value with None.
            log.error("Did not update NSArg - no ns:val or nsval provided")
            return
    self.namespace = ns
    self.value = val
Update Namespace and value.

Args:
    nsval: e.g. HGNC:AKT1
    ns: namespace
    val: value of entity
juraj-google-style
def closure(self, rules):
    """Compute the full LR closure of an initial set of dotted rules.

    Args:
        rules: An iterable of DottedRule.

    Returns:
        frozenset of DottedRule forming the closure.
    """
    closure = set()
    todo = set(rules)
    # Worklist loop: expand each rule's next symbol until a fixed point.
    while todo:
        rule = todo.pop()
        closure.add(rule)
        # A completed rule (dot at the end) adds nothing new.
        if rule.at_end:
            continue
        symbol = rule.rhs[rule.pos]
        for production in self.nonterminals[symbol]:
            for first in self.first(rule.rest):
                # NOTE(review): epsilon productions start with the dot already
                # advanced past position 0 — confirm against DottedRule.
                if (EPSILON in production.rhs):
                    new_rule = DottedRule(production, 1, first)
                else:
                    new_rule = DottedRule(production, 0, first)
                if (new_rule not in closure):
                    todo.add(new_rule)
    return frozenset(closure)
Fills out the entire closure based on some initial dotted rules. Args: rules - an iterable of DottedRules Returns: frozenset of DottedRules
codesearchnet
def gradient(poly):
    """Gradient of a polynomial.

    Args:
        poly (Poly): Polynomial to take the gradient of.

    Returns:
        (Poly): Vector of first-order partial derivatives, one per dimension.
    """
    first_order_basis = chaospy.poly.collection.basis(1, 1, poly.dim)
    return differential(poly, first_order_basis)
Gradient of a polynomial. Args: poly (Poly) : polynomial to take gradient of. Returns: (Poly) : The resulting gradient. Examples: >>> q0, q1, q2 = chaospy.variable(3) >>> poly = 2*q0 + q1*q2 >>> print(chaospy.gradient(poly)) [2, q2, q1]
codesearchnet
def update_profiles(adapter):
    """Recompute and persist profile strings for all cases with a profile_path.

    Args:
        adapter (MongoAdapter): Adapter to mongodb.
    """
    for case in adapter.cases():
        if case.get('profile_path'):
            profiles = get_profiles(adapter, case['profile_path'])
            # Work on a copy so the original case dict stays untouched.
            profiled_individuals = deepcopy(case['individuals'])
            for individual in profiled_individuals:
                ind_id = individual['ind_id']
                try:
                    profile = profiles[ind_id]
                    individual['profile'] = profile
                except KeyError:
                    # Sample ids in the vcf may not match the case individuals.
                    LOG.warning(f"sample IDs in vcf does not match for case {case['case_id']}")
            updated_case = deepcopy(case)
            updated_case['individuals'] = profiled_individuals
            adapter.add_case(updated_case, update=True)
For all cases having vcf_path, update the profile string for the samples Args: adapter (MongoAdapter): Adapter to mongodb
juraj-google-style
def plot_pie(self, key="wall_time", minfract=0.05, **kwargs):
    """Draw one pie chart per timer, stacked vertically in the current figure.

    Args:
        key: Keyword used to extract data from each timer.
        minfract: Sections whose relative weight is below this are hidden.
        **kwargs: Accepted for call-site compatibility; not used here.

    Returns:
        The `matplotlib` figure the charts were drawn on.
    """
    all_timers = self.timers()
    import matplotlib.pyplot as plt
    from matplotlib.gridspec import GridSpec
    figure = plt.gcf()
    grid = GridSpec(len(all_timers), 1)
    for row, timer in enumerate(all_timers):
        axes = plt.subplot(grid[row, 0])
        axes.set_title(str(timer))
        timer.pie(ax=axes, key=key, minfract=minfract, show=False)
    return figure
Plot pie charts of the different timers. Args: key: Keyword used to extract data from timers. minfract: Don't show sections whose relative weight is less that minfract. Returns: `matplotlib` figure
juraj-google-style
def _raise_if_annotated(self, func):
    """Reject callables that have already been wrapped by Annotate.

    Annotate must wrap Animate, not the other way around; a truthy
    ANNOTATED attribute on ``func`` indicates the wrong decorator order.

    Args:
        func (function): Any callable.

    Raises:
        TypeError: If ``func`` was already decorated with Annotate.
    """
    if not getattr(func, ANNOTATED, False):
        return
    msg = (
        'Functions decorated with {!r} should not be decorated with {!r}.\n'
        'Please reverse the order of the decorators!'
    ).format(self.__class__.__name__, Annotate.__name__)
    raise TypeError(msg)
Raise TypeError if a function is decorated with Annotate, as such functions cause visual bugs when decorated with Animate. Animate should be wrapped by Annotate instead. Args: func (function): Any callable. Raises: TypeError
codesearchnet
def __init__(self, mol):
    """Initialize from a pymatgen Molecule or OpenBabel's OBMol.

    Args:
        mol: pymatgen Molecule (must be ordered) or openbabel OBMol.

    Raises:
        ValueError: If a pymatgen Molecule is not ordered.
    """
    if isinstance(mol, Molecule):
        if not mol.is_ordered:
            raise ValueError("OpenBabel Molecule only supports ordered "
                             "molecules.")
        # Build an OBMol atom by atom, then let OpenBabel infer bonds.
        obmol = ob.OBMol()
        obmol.BeginModify()
        for site in mol:
            coords = [c for c in site.coords]
            atomno = site.specie.Z
            obatom = ob.OBAtom()
            # thisown=0 hands ownership to the OBMol (SWIG memory management).
            obatom.thisown = 0
            obatom.SetAtomicNum(atomno)
            obatom.SetVector(*coords)
            obmol.AddAtom(obatom)
            del obatom
        obmol.ConnectTheDots()
        obmol.PerceiveBondOrders()
        obmol.SetTotalSpinMultiplicity(mol.spin_multiplicity)
        obmol.SetTotalCharge(mol.charge)
        obmol.Center()
        obmol.Kekulize()
        obmol.EndModify()
        self._obmol = obmol
    elif isinstance(mol, ob.OBMol):
        self._obmol = mol
Initializes with pymatgen Molecule or OpenBabel"s OBMol. Args: mol: pymatgen's Molecule or OpenBabel OBMol
juraj-google-style
def find_elements(driver, elem_path, by=CSS, timeout=TIMEOUT, poll_frequency=0.5):
    """Wait for and return every element matching ``elem_path``.

    Polls the page until at least one matching element is present, then
    returns all matches.

    Args:
        driver: Selenium webdriver or element to search under.
        elem_path (str): Locator string for the elements.
        by (selenium By): Strategy used to interpret ``elem_path``.
        timeout (int): Maximum seconds to wait.
        poll_frequency (float): Seconds between polls.

    Returns:
        list of elements: The located Selenium elements.

    Raises:
        TimeoutException: If no element is located within ``timeout``.
    """
    locator = (by, elem_path)
    waiter = WebDriverWait(driver, timeout, poll_frequency)
    return waiter.until(EC.presence_of_all_elements_located(locator))
Find and return all elements once located find_elements locates all elements on the page, waiting for up to timeout seconds. The elements, when located, are returned. If not located, a TimeoutException is raised. Args: driver (selenium webdriver or element): A driver or element elem_path (str): String used to located the element by (selenium By): Selenium By reference timeout (int): Selenium Wait timeout, in seconds poll_frequency (float): Selenium Wait polling frequency, in seconds Returns: list of elements: Selenium element Raises: TimeoutException: Raised when target element isn't located
codesearchnet
def verify_signature(amazon_cert: crypto.X509, signature: str, request_body: bytes) -> bool:
    """Check an Alexa request signature against Amazon's certificate.

    Args:
        amazon_cert: pyOpenSSL X509 Amazon certificate.
        signature: Base64-encoded signature from the Signature HTTP header.
        request_body: Full raw HTTPS request body.

    Returns:
        True if the SHA-1 signature verifies, False otherwise.
    """
    raw_signature = base64.b64decode(signature)
    try:
        crypto.verify(amazon_cert, raw_signature, request_body, 'sha1')
    except crypto.Error:
        return False
    return True
Verifies Alexa request signature. Args: amazon_cert: Pycrypto X509 Amazon certificate. signature: Base64 decoded Alexa request signature from Signature HTTP header. request_body: full HTTPS request body Returns: result: True if verification was successful, False if not.
codesearchnet
def get(self, name_or_uri):
    """Fetch the role identified by name or URI.

    Args:
        name_or_uri: Either the role's Name or its URI.

    Returns:
        dict: The role.
    """
    return self._client.get(quote(name_or_uri))
Get the role by its URI or Name. Args: name_or_uri: Can be either the Name or the URI. Returns: dict: Role
codesearchnet
def check_status(self, **kwargs):
    """Refresh the status of every work in this flow.

    Args:
        show: When truthy, print the flow status afterwards.
        kwargs: Remaining keyword arguments are forwarded to show_status.
    """
    for flow_work in self:
        flow_work.check_status()
    show = kwargs.pop("show", False)
    if show:
        self.show_status(**kwargs)
Check the status of the works in self. Args: show: True to show the status of the flow. kwargs: keyword arguments passed to show_status
juraj-google-style
def line_number_above():
    """Get the lineno of the AST node immediately above this call site.

    Assumes no blank line(s) between the call site and the preceding node.

    Returns:
        The lineno of the preceding AST node at the same level of the AST.
        For a multi-line node: the first line on Python 3.8+, the last line
        on older versions.
    """
    # Frame one level up: (frame, filename, lineno, ...).
    call_site_lineno = tf_inspect.stack()[1][2]
    if sys.version_info < (3, 8):
        # Pre-3.8 ast reports the last line of a multi-line node, so the
        # node above ends exactly one line up from the call site.
        return call_site_lineno - 1
    else:
        with open(__file__, 'rb') as f:
            source_text = f.read().decode('utf-8')
        source_tree = ast.parse(source_text)
        prev_node = _find_preceding_ast_node(source_tree, call_site_lineno)
        return prev_node.lineno
Get lineno of the AST node immediately above this function's call site. It is assumed that there is no empty line(s) between the call site and the preceding AST node. Returns: The lineno of the preceding AST node, at the same level of the AST. If the preceding AST spans multiple lines: - In Python 3.8+, the lineno of the first line is returned. - In older Python versions, the lineno of the last line is returned.
github-repos
def increment(self, counter_name, delta):
    """Add ``delta`` to a named counter, creating it at zero if absent.

    Args:
        counter_name: Counter name as a string.
        delta: Integer increment (may be negative).

    Returns:
        The counter's new value.
    """
    new_value = self.counters.get(counter_name, 0) + delta
    self.counters[counter_name] = new_value
    return new_value
Increment counter value. Args: counter_name: counter name as String. delta: increment delta as Integer. Returns: new counter value.
juraj-google-style
def synthesize(self, duration, freqs_in_hz=[440.0]):
    """Synthesize a mix of sine waves at the given frequencies.

    Args:
        duration (numpy.timedelta64): Duration of the sound to synthesize.
        freqs_in_hz (list of float): Frequencies in hz to synthesize.
            Amplitudes are scaled by 1/len(freqs) to keep the mix in range.

    Returns:
        AudioSamples at this synthesizer's samplerate.
    """
    # NOTE(review): mutable default argument; safe only while never mutated.
    freqs = np.array(freqs_in_hz)
    scaling = (1 / len(freqs))
    sr = int(self.samplerate)
    # Cycles per sample for each requested frequency.
    cps = (freqs / sr)
    # Total number of samples for the requested duration.
    ts = ((duration / Seconds(1)) * sr)
    # NOTE(review): float-step arange lengths may differ by one across
    # frequencies, which would make this array ragged — confirm.
    ranges = np.array([np.arange(0, (ts * c), c) for c in cps])
    raw = (np.sin((ranges * (2 * np.pi))) * scaling).sum(axis=0)
    return AudioSamples(raw, self.samplerate)
Synthesize one or more sine waves

Args:
    duration (numpy.timedelta64): The duration of the sound to be synthesized
    freqs_in_hz (list of float): Numbers representing the frequencies in hz
        that should be synthesized
codesearchnet
def _decontextualise_connection(self, connection):
    """Remove a connection from the current app context, if tracked there.

    Args:
        connection (ldap3.Connection): Connection to remove from the
            appcontext.
    """
    app_ctx = stack.top
    if app_ctx is None:
        return
    if connection in app_ctx.ldap3_manager_connections:
        app_ctx.ldap3_manager_connections.remove(connection)
Remove a connection from the appcontext. Args: connection (ldap3.Connection): connection to remove from the appcontext
juraj-google-style
def ajax(cls, url, param=None, method='get'):
    """Fetch a JSON endpoint via GET or POST and decode the response.

    Args:
        url: Endpoint URL string.
        param: Mapping of query/form parameters (optional).
        method: 'get' or 'post', case-insensitive.

    Returns:
        dict: JSON decoded into a dict, or None if there was no response.

    Raises:
        Exception: If ``method`` is neither GET nor POST.
    """
    # Bug fix: avoid the shared mutable default argument `param={}`;
    # an absent/empty mapping encodes to '' either way.
    encoded = urllib.parse.urlencode(param or {})
    verb = method.lower()
    if verb == 'get':
        req = urllib.request.Request(url + '?' + encoded)
    elif verb == 'post':
        req = urllib.request.Request(url, data=encoded.encode('utf-8'))
    else:
        raise Exception("invalid method '{}' (GET/POST)".format(method))
    rsp = urllib.request.urlopen(req)
    if rsp:
        return json.loads(rsp.read().decode('utf-8'))
    return None
Get info by ajax Args: url: string Returns: dict: json decoded into a dict
juraj-google-style
def _ParseMRUListExEntryValue(
    self, parser_mediator, registry_key, entry_index, entry_number,
    codepage='cp1252', **kwargs):
    """Parses one numbered MRUListEx entry value into a descriptive string.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key that
          contains the MRUListEx value.
      entry_index (int): MRUListEx entry index.
      entry_number (int): entry number.
      codepage (Optional[str]): extended ASCII string codepage.

    Returns:
      str: MRUListEx entry value; empty when the value is missing or
          unparsable.
    """
    value_string = ''

    value = registry_key.GetValueByName('{0:d}'.format(entry_number))
    if value is None:
      parser_mediator.ProduceExtractionWarning(
          'missing MRUListEx value: {0:d} in key: {1:s}.'.format(
              entry_number, registry_key.path))

    elif not value.DataIsBinaryData():
      logger.debug((
          '[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
          '{2:s}.').format(self.NAME, entry_number, registry_key.path))

    elif value.data:
      utf16le_string_map = self._GetDataTypeMap('utf16le_string')

      context = dtfabric_data_maps.DataTypeMapContext()

      try:
        # The value data starts with a NUL-terminated UTF-16-LE path string.
        path = self._ReadStructureFromByteStream(
            value.data, 0, utf16le_string_map, context=context)
      except (ValueError, errors.ParseError) as exception:
        parser_mediator.ProduceExtractionWarning((
            'unable to parse MRUListEx entry value: {0:d} with error: '
            '{1!s}').format(entry_number, exception))
        return value_string

      path = path.rstrip('\x00')

      # Anything after the path string is shell item data.
      shell_item_data = value.data[context.byte_size:]
      if not shell_item_data:
        parser_mediator.ProduceExtractionWarning((
            'missing shell item in MRUListEx value: {0:d} in key: '
            '{1:s}.').format(entry_number, registry_key.path))
        value_string = 'Path: {0:s}'.format(path)

      else:
        shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
        shell_items_parser.ParseByteStream(
            parser_mediator, shell_item_data, codepage=codepage)

        value_string = 'Path: {0:s}, Shell item: [{1:s}]'.format(
            path, shell_items_parser.CopyToPath())

    return value_string
Parses the MRUListEx entry value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains the MRUListEx value. entry_index (int): MRUListEx entry index. entry_number (int): entry number. codepage (Optional[str]): extended ASCII string codepage. Returns: str: MRUList entry value.
juraj-google-style
def _parse_line(line): line, timestamp = line.rsplit(",", 1) line, command = line.rsplit(",", 1) path, username = line.rsplit(",", 1) return { "timestamp": timestamp.strip(), "command": command.strip(), "username": username.strip(), "path": path, }
Convert one line from the extended log to dict. Args: line (str): Line which will be converted. Returns: dict: dict with ``timestamp``, ``command``, ``username`` and ``path`` \ keys. Note: Typical line looks like this:: /home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777 Filename may contain ``,`` character, so I am ``rsplitting`` the line from the end to the beginning.
juraj-google-style
def get_geostationary_bounding_box(geos_area, nb_points=50):
    """Get the bbox in lon/lats of the valid pixels inside *geos_area*.

    Args:
        geos_area: Geostationary area definition (provides ``proj_dict`` and
            ``area_extent``).
        nb_points: Number of points on the polygon.

    Returns:
        Longitudes and latitudes of the bounding polygon.
    """
    xmax, ymax = get_geostationary_angle_extent(geos_area)
    # Bug fix: `nb_points / 2` is a float in Python 3 and np.linspace
    # requires an integer sample count; use floor division.
    half = nb_points 
    # Trace half the full-disk limb, slightly inside the edge (the 0.001
    # margin keeps points within the valid disk).
    x = np.cos(np.linspace(-np.pi, 0, half)) * (xmax - 0.001)
    y = -np.sin(np.linspace(-np.pi, 0, half)) * (ymax - 0.001)
    ll_x, ll_y, ur_x, ur_y = (np.array(geos_area.area_extent) /
                              geos_area.proj_dict['h'])
    # Mirror to the full circle and clip to the area extent.
    x = np.clip(np.concatenate([x, x[::-1]]), min(ll_x, ur_x), max(ll_x, ur_x))
    y = np.clip(np.concatenate([y, -y]), min(ll_y, ur_y), max(ll_y, ur_y))
    return _lonlat_from_geos_angle(x, y, geos_area)
Get the bbox in lon/lats of the valid pixels inside *geos_area*. Args: nb_points: Number of points on the polygon
juraj-google-style
def FromString(cls, desc):
    """Create a new stimulus from a description string.

    The string must have the format::

        [time: ][system ]input X = Y

    where X and Y are integers and time, if given, is an integer followed
    by a time unit such as second(s), minute(s), etc.

    Args:
        desc (str): A string description of the stimulus.

    Returns:
        SimulationStimulus: The parsed stimulus object.

    Raises:
        ArgumentError: If the descriptor cannot be parsed.
    """
    if language.stream is None:
        # Lazily initialize the shared grammar definitions.
        language.get_language()

    parse_exp = Optional(time_interval('time') - Literal(':').suppress()) - language.stream('stream') - Literal('=').suppress() - number('value')

    try:
        data = parse_exp.parseString(desc)
        time = 0

        if 'time' in data:
            time = data['time'][0]

        return SimulationStimulus(time, data['stream'][0], data['value'])
    except (ParseException, ParseSyntaxException):
        raise ArgumentError("Could not parse stimulus descriptor", descriptor=desc)
Create a new stimulus from a description string. The string must have the format: [time: ][system ]input X = Y where X and Y are integers. The time, if given must be a time_interval, which is an integer followed by a time unit such as second(s), minute(s), etc. Args: desc (str): A string description of the stimulus. Returns: SimulationStimulus: The parsed stimulus object.
juraj-google-style
def _write_input(self, input_dir='.'):
    """Write the packmol input file to the input directory.

    Args:
        input_dir (string): path to the input directory.
    """
    with open(os.path.join(input_dir, self.input_file), 'wt', encoding='utf-8') as inp:
        # Global control parameters first.
        for (k, v) in self.control_params.items():
            inp.write('{} {}\n'.format(k, self._format_param_val(v)))
        # Then one structure section per molecule, after writing the
        # molecule itself to disk in the configured file format.
        for (idx, mol) in enumerate(self.mols):
            filename = os.path.join(input_dir, '{}.{}'.format(idx, self.control_params['filetype'])).encode('ascii')
            if (self.control_params['filetype'] == 'pdb'):
                self.write_pdb(mol, filename, num=(idx + 1))
            else:
                a = BabelMolAdaptor(mol)
                pm = pb.Molecule(a.openbabel_mol)
                pm.write(self.control_params['filetype'], filename=filename, overwrite=True)
            inp.write('\n')
            inp.write('structure {}.{}\n'.format(os.path.join(input_dir, str(idx)), self.control_params['filetype']))
            for (k, v) in self.param_list[idx].items():
                inp.write(' {} {}\n'.format(k, self._format_param_val(v)))
            inp.write('end structure\n')
Write the packmol input file to the input directory. Args: input_dir (string): path to the input directory
codesearchnet
async def get_all(self, direction: msg.StreamDirection=msg.StreamDirection.Forward, from_position: Optional[Union[(msg.Position, msg._PositionSentinel)]]=None, max_count: int=100, resolve_links: bool=True, require_master: bool=False, correlation_id: uuid.UUID=None):
    """Read a range of events from the whole database.

    Args:
        direction: Whether to read events Forward or Backward; defaults to
            Forward.
        from_position: Position to read from; defaults to the start of the
            database when reading forward and the end when reading backward.
        max_count: Maximum number of events to return.
        resolve_links: True to automatically resolve Link Events.
        require_master: True to send the command direct to the master node.
        correlation_id: Unique identifier for this command.
    """
    # NOTE(review): this self-assignment is a no-op; generating a fresh
    # correlation id by default may have been intended — confirm.
    correlation_id = correlation_id
    cmd = convo.ReadAllEvents(msg.Position.for_direction(direction, from_position), max_count, resolve_links, require_master, direction=direction, credentials=self.credential)
    result = (await self.dispatcher.start_conversation(cmd))
    return (await result)
Read a range of events from the whole database. Args: direction (optional): Controls whether to read events forward or backward. defaults to Forward. from_position (optional): The position to read from. defaults to the beginning of the stream when direction is forward and the end of the stream if direction is backward. max_count (optional): The maximum number of events to return. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Read 5 events >>> async for event in conn.get_all(max_count=5): >>> print(event) Read 10 most recent events in reverse order >>> async for event in conn.get_all( max_count=10, direction=StreamDirection.Backward ): >>> print(event)
codesearchnet
def diff(self, **kwargs):
    """Generate the commit diff.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabGetError: If the diff could not be retrieved.

    Returns:
        list: The changes done in this commit.
    """
    diff_path = '{}/{}/diff'.format(self.manager.path, self.get_id())
    return self.manager.gitlab.http_get(diff_path, **kwargs)
Generate the commit diff. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the diff could not be retrieved Returns: list: The changes done in this commit
juraj-google-style
def get_colorscale(cmap, levels=None, cmin=None, cmax=None):
    """Converts a cmap spec to a plotly colorscale.

    Args:
        cmap: A recognized colormap by name or list of colors.
        levels: A list or integer declaring the color-levels.
        cmin: The lower bound of the color range.
        cmax: The upper bound of the color range.

    Returns:
        A valid plotly colorscale.
    """
    ncolors = (levels if isinstance(levels, int) else None)
    if isinstance(levels, list):
        ncolors = (len(levels) - 1)
        if (isinstance(cmap, list) and (len(cmap) != ncolors)):
            raise ValueError(('The number of colors in the colormap must match the intervals defined in the color_levels, expected %d colors found %d.' % (ncolors, len(cmap))))
    try:
        palette = process_cmap(cmap, ncolors)
    except Exception as e:
        # Fall back to plotly's named colorscales before giving up.
        colorscale = colors.PLOTLY_SCALES.get(cmap)
        if (colorscale is None):
            raise e
        return colorscale
    if isinstance(levels, int):
        # Build a stepped (discrete) colorscale: interior scale points are
        # duplicated with the colors on either side of the boundary.
        colorscale = []
        scale = np.linspace(0, 1, (levels + 1))
        for i in range((levels + 1)):
            if (i == 0):
                colorscale.append((scale[0], palette[i]))
            elif (i == levels):
                colorscale.append((scale[(- 1)], palette[(- 1)]))
            else:
                colorscale.append((scale[i], palette[(i - 1)]))
                colorscale.append((scale[i], palette[i]))
        return colorscale
    elif isinstance(levels, list):
        (palette, (cmin, cmax)) = color_intervals(palette, levels, clip=(cmin, cmax))
    return colors.make_colorscale(palette)
Converts a cmap spec to a plotly colorscale Args: cmap: A recognized colormap by name or list of colors levels: A list or integer declaring the color-levels cmin: The lower bound of the color range cmax: The upper bound of the color range Returns: A valid plotly colorscale
codesearchnet
def _on_change(self, field_updates: Dict[utils.KeyPath, FieldUpdate]):
    """Event hook triggered when field values in this subtree are updated.

    Called per-field when the object is modified via attribute assignment,
    and once in batch when multiple fields change via `rebind`. The base
    implementation intentionally does nothing.

    Args:
        field_updates: Updates made to the subtree, keyed by paths relative
            to this object.
    """
Event that is triggered when field values in the subtree are updated. This event will be called * On per-field basis when object is modified via attribute. * In batch when multiple fields are modified via `rebind` method. When a field in an object tree is updated, all ancestors' `_on_change` event will be triggered in order, from the nearest one to furthest one. Args: field_updates: Updates made to the subtree. Key path is relative to current object.
github-repos
def setObsoletedBy(self, pid, obsoletedByPid, serialVersion, vendorSpecific=None):
    """Thin wrapper around setObsoletedByResponse().

    Issues the request and parses the response into a boolean. See
    setObsoletedByResponse() for the meaning of the arguments.

    Returns:
        bool: The parsed boolean response.
    """
    return self._read_boolean_response(
        self.setObsoletedByResponse(pid, obsoletedByPid, serialVersion, vendorSpecific)
    )
See Also: setObsoletedByResponse() Args: pid: obsoletedByPid: serialVersion: vendorSpecific: Returns:
juraj-google-style
def add_value(self, value_type, value_min, value_max):
    """Register a tunable value for the ABC to optimize.

    The fitness function must be configured to handle the value. Each added
    value recomputes the limit as num_employers * number_of_values.

    Args:
        value_type (string): Type of the value, 'int' or 'float'.
        value_min (int or float): Minimum bound for the value.
        value_max (int or float): Maximum bound for the value.

    Returns:
        None
    """
    if self._employers:
        self._logger.log(
            'warn',
            'Adding a value after employers have been created'
        )
    self._value_ranges.append((value_type, (value_min, value_max)))
    self._limit = self._num_employers * len(self._value_ranges)
    self._logger.log(
        'debug',
        'Limit set to {}'.format(self._limit)
    )
Add a tunable value to the ABC (fitness function must be configured to handle it) Args: value_type (string): type of the value, 'int' or 'float' value_min (int or float): minimum bound for the value value_max (int or float): maximum bound for the value Returns: None
juraj-google-style
def Verify(self, mempool):
    """Verify the transaction.

    Every attached descriptor must verify before the base transaction
    checks are run.

    Args:
        mempool: Current memory pool of transactions.

    Returns:
        bool: True if verified, False otherwise.
    """
    if not all(descriptor.Verify() for descriptor in self.Descriptors):
        return False
    return super(StateTransaction, self).Verify(mempool)
Verify the transaction. Args: mempool: Returns: bool: True if verified. False otherwise.
juraj-google-style
def stat_float_times(cls, newvalue=None):
    """Get or set whether file time stamps are reported as floats or ints.

    The setting is shared by all instances of FakeOsModule. Calling without
    arguments returns the current value.

    Args:
        newvalue: If given, coerced to bool and stored. True means mtime,
            ctime and atime are reported as floats; otherwise they are
            returned as ints (rounding down).

    Returns:
        The current (possibly just-updated) setting.
    """
    if newvalue is not None:
        cls._stat_float_times = bool(newvalue)
    return cls._stat_float_times
Determine whether a file's time stamps are reported as floats or ints. Calling without arguments returns the current value. The value is shared by all instances of FakeOsModule. Args: newvalue: If `True`, mtime, ctime, atime are reported as floats. Otherwise, they are returned as ints (rounding down).
juraj-google-style
def CollectFromKnowledgeBase(cls, knowledge_base):
    """Run every registered knowledge-base preprocessor plugin.

    Plugin failures are logged as warnings and do not abort the run.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing
            information.
    """
    for plugin in cls._knowledge_base_plugins.values():
        logger.debug('Running knowledge base preprocessor plugin: {0:s}'.format(
            plugin.__class__.__name__))
        try:
            plugin.Collect(knowledge_base)
        except errors.PreProcessFail as exception:
            logger.warning(
                'Unable to collect knowledge base value with error: {0!s}'.format(
                    exception))
Collects values from knowledge base values. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information.
codesearchnet
def find_iteration(
    url: Union[methods, str],
    itermode: Optional[str] = None,
    iterkey: Optional[str] = None,
) -> Tuple[str, str]:
    """Find iteration mode and iteration key for a given :class:`slack.methods`.

    Args:
        url: :class:`slack.methods` member or string url.
        itermode: Custom iteration mode.
        iterkey: Custom iteration key.

    Returns:
        :py:class:`tuple` (itermode, iterkey).

    Raises:
        ValueError: If iteration is not supported for the given url or
            itermode.
    """
    if isinstance(url, methods):
        # Defaults for itermode/iterkey come from the enum's value tuple.
        if not itermode:
            itermode = url.value[1]
        if not iterkey:
            iterkey = url.value[2]
        if not iterkey or not itermode:
            raise ValueError("Iteration not supported for: {}".format(url))
    elif itermode not in ITERMODE:
        raise ValueError("Iteration not supported for: {}".format(itermode))

    return itermode, iterkey
Find iteration mode and iteration key for a given :class:`slack.methods` Args: url: :class:`slack.methods` or string url itermode: Custom iteration mode iterkey: Custom iteration key Returns: :py:class:`tuple` (itermode, iterkey)
juraj-google-style
def init_from_storage_write_to_datastore(self, batch_size=100, allowed_epsilon=None, skip_image_ids=None, max_num_images=None):
    """Initializes dataset batches from the stored image list and persists them.

    Args:
        batch_size: batch size.
        allowed_epsilon: list of allowed epsilon or None to use default.
        skip_image_ids: list of image ids to skip.
        max_num_images: maximum number of images to read.
    """
    if allowed_epsilon is None:
        allowed_epsilon = copy.copy(DEFAULT_EPSILON)
    self._dataset_batches = {}
    images = self._read_image_list(skip_image_ids)
    if max_num_images:
        images = images[:max_num_images]
    for batch_idx, batch_start in enumerate(range(0, len(images), batch_size)):
        batch = images[batch_start:batch_start+batch_size]
        batch_id = DATASET_BATCH_ID_PATTERN.format(batch_idx)
        # Epsilons are assigned round-robin across batches.
        batch_epsilon = allowed_epsilon[batch_idx % len(allowed_epsilon)]
        self.add_batch(batch_id, {'epsilon': batch_epsilon})
        for image_id, image_path in batch:
            # [:-4] strips the file extension from the basename.
            self.add_image(batch_id, image_id,
                           {'dataset_image_id': os.path.basename(image_path)[:-4],
                            'image_path': image_path})
    self.write_to_datastore()
Initializes dataset batches from the list of images in the datastore. Args: batch_size: batch size allowed_epsilon: list of allowed epsilon or None to use default skip_image_ids: list of image ids to skip max_num_images: maximum number of images to read
juraj-google-style
def orient_directed_graph(self, data, graph):
    """Run the algorithm on a directed graph's skeleton.

    Args:
        data (pandas.DataFrame): DataFrame containing the data.
        graph (networkx.DiGraph): Directed graph whose skeleton is oriented.

    Returns:
        networkx.DiGraph: Solution on the given skeleton.

    .. warning:: The algorithm is ran on the skeleton of the given graph.
    """
    warnings.warn("The algorithm is ran on the skeleton of the given graph.")
    skeleton = nx.Graph(graph)
    return self.orient_undirected_graph(data, skeleton)
Run the algorithm on a directed_graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.DiGraph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution on the given skeleton. .. warning:: The algorithm is ran on the skeleton of the given graph.
juraj-google-style
def write_serializable_array(self, array):
    """Serialize a list of serializable objects to the stream.

    Writes a single zero byte for None; otherwise a var-int element count
    followed by each item's own serialization.

    Args:
        array (list): Serializable objects, i.e. objects extending
            neo.IO.Mixins.SerializableMixin.
    """
    if array is None:
        self.write_byte(0)
        return
    self.write_var_int(len(array))
    for element in array:
        element.Serialize(self)
Write an array of serializable objects to the stream. Args: array(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin
juraj-google-style
def get_splitter_instance(split_type):
    """Build the :class:`sagemaker.local.data.Splitter` for ``split_type``.

    Args:
        split_type (str): Either 'Line' or 'RecordIO'. May be None to signal
            that no data split will happen.

    Returns:
        :class:`sagemaker.local.data.Splitter`: An instance of the matching
        Splitter.

    Raises:
        ValueError: If ``split_type`` is not a recognized value.
    """
    if split_type is None:
        return NoneSplitter()
    if split_type == 'Line':
        return LineSplitter()
    if split_type == 'RecordIO':
        return RecordIOSplitter()
    raise ValueError('Invalid Split Type: %s' % split_type)
Return an Instance of :class:`sagemaker.local.data.Splitter` according to the specified `split_type`. Args: split_type (str): either 'Line' or 'RecordIO'. Can be left as None to signal no data split will happen. Returns :class:`sagemaker.local.data.Splitter`: an Instance of a Splitter
juraj-google-style
def GetArtifacts(self, os_name=None, name_list=None, source_type=None, exclude_dependents=False, provides=None, reload_datastore_artifacts=False):
    """Retrieve artifact classes with optional filtering.

    All filters must match for the artifact to be returned.

    Args:
        os_name: string to match against supported_os.
        name_list: list of strings to match against artifact names.
        source_type: rdf_artifacts.ArtifactSource.SourceType to match against
            source_type.
        exclude_dependents: if true only artifacts with no dependencies will
            be returned.
        provides: return the artifacts that provide these dependencies.
        reload_datastore_artifacts: If true, the data store sources are
            queried for new artifacts.

    Returns:
        set of artifacts matching filter criteria.
    """
    self._CheckDirty(reload_datastore_artifacts=reload_datastore_artifacts)
    results = set()
    for artifact in itervalues(self._artifacts):
        if (os_name and artifact.supported_os and (os_name not in artifact.supported_os)):
            continue
        if (name_list and (artifact.name not in name_list)):
            continue
        if source_type:
            source_types = [c.type for c in artifact.sources]
            if (source_type not in source_types):
                continue
        if (exclude_dependents and GetArtifactPathDependencies(artifact)):
            continue
        if (not provides):
            results.add(artifact)
        else:
            # Keep the artifact as soon as any provided dependency matches.
            for provide_string in artifact.provides:
                if (provide_string in provides):
                    results.add(artifact)
                    break
    return results
Retrieve artifact classes with optional filtering. All filters must match for the artifact to be returned. Args: os_name: string to match against supported_os name_list: list of strings to match against artifact names source_type: rdf_artifacts.ArtifactSource.SourceType to match against source_type exclude_dependents: if true only artifacts with no dependencies will be returned provides: return the artifacts that provide these dependencies reload_datastore_artifacts: If true, the data store sources are queried for new artifacts. Returns: set of artifacts matching filter criteria
codesearchnet
def is_time_included(self, time):
    """Check whether a time falls inside this analysis period.

    Args:
        time: A DateTime to be tested.

    Returns:
        bool: True if ``time`` is included in the analysis period, otherwise
        False.
    """
    if self._timestamps_data is None:
        # Timestamps are computed lazily on first use.
        self._calculate_timestamps()
    return time.moy in self._timestamps_data
Check if time is included in analysis period. Return True if time is inside this analysis period, otherwise return False Args: time: A DateTime to be tested Returns: A boolean. True if time is included in analysis period
codesearchnet
def add_message(self, message_type):
    """Parse and register a message, returning its JSON Schema id.

    Args:
        message_type: protorpc.message.Message class to be parsed.

    Returns:
        string: The JSON Schema id.
    """
    name = self.__normalized_name(message_type)
    if (name not in self.__schemas):
        # Reserve the slot before parsing — presumably so recursive
        # message references see the name and terminate; TODO confirm.
        self.__schemas[name] = None
        schema = self.__message_to_schema(message_type)
        self.__schemas[name] = schema
    return name
Add a new message. Args: message_type: protorpc.message.Message class to be parsed. Returns: string, The JSON Schema id. Raises: KeyError if the Schema id for this message_type would collide with the Schema id of a different message_type that was already added.
codesearchnet
def create_vpc_flow_logs(self, account, region, vpc_id, iam_role_arn):
    """Create a new VPC Flow Log and mark the VPC as active.

    On success the VPC's vpc_flow_logs_status property is set to ACTIVE and
    an audit log entry is emitted. Failures are logged and swallowed.

    Args:
        account (:obj:`Account`): Account to create the flow in.
        region (`str`): Region to create the flow in.
        vpc_id (`str`): ID of the VPC to create the flow for.
        iam_role_arn (`str`): ARN of the IAM role used to post logs to the
            log group.

    Returns:
        `None`
    """
    try:
        flow = self.session.client('ec2', region)
        flow.create_flow_logs(
            ResourceIds=[vpc_id],
            ResourceType='VPC',
            TrafficType='ALL',
            LogGroupName=vpc_id,
            DeliverLogsPermissionArn=iam_role_arn
        )
        fvpc = VPC.get(vpc_id)
        fvpc.set_property('vpc_flow_logs_status', 'ACTIVE')

        self.log.info('Enabled VPC Logging {}/{}/{}'.format(account, region, vpc_id))
        auditlog(
            event='vpc_flow_logs.create_vpc_flow',
            actor=self.ns,
            data={
                'account': account.account_name,
                'region': region,
                'vpcId': vpc_id,
                'arn': iam_role_arn
            }
        )
    except Exception:
        # Best-effort: failures are logged, not propagated.
        self.log.exception('Failed creating VPC Flow Logs for {}/{}/{}.'.format(
            account, region, vpc_id
        ))
Create a new VPC Flow log Args: account (:obj:`Account`): Account to create the flow in region (`str`): Region to create the flow in vpc_id (`str`): ID of the VPC to create the flow for iam_role_arn (`str`): ARN of the IAM role used to post logs to the log group Returns: `None`
juraj-google-style
def _process_counter_example(self, mma, w_string):
    """Process a counterexample in the Rivest-Schapire way.

    Binary-searches for the transition where the hypothesis diverges from
    the target, then extends the observation table accordingly.

    Args:
        mma (DFA): The hypothesis automaton.
        w_string (str): The counterexample string to be consumed.

    Returns:
        None
    """
    if len(w_string) == 1:
        self.observation_table.smi_vector.append(w_string)
        for exp in self.observation_table.em_vector:
            self._fill_table_entry(w_string, exp)

    diff = len(w_string)
    same = 0
    membership_answer = self._membership_query(w_string)
    while True:
        # Bug fix: use floor division; '/' yields a float in Python 3 and
        # floats are invalid as string indices/slice bounds.
        i = (same + diff) 
        access_string = self._run_in_hypothesis(mma, w_string, i)
        if membership_answer != self._membership_query(access_string + w_string[i:]):
            diff = i
        else:
            same = i
        if diff - same == 1:
            break

    access_string = self._run_in_hypothesis(mma, w_string, diff - 1)
    wrong_transition = access_string + w_string[diff - 1]
    if wrong_transition not in self.observation_table.smi_vector:
        self.observation_table.smi_vector.append(wrong_transition)
        for exp in self.observation_table.em_vector:
            self._fill_table_entry(wrong_transition, exp)
        return

    # The transition is already known: add a distinguishing experiment.
    exp = w_string[diff:]
    self.observation_table.em_vector.append(exp)
    for row in self.observation_table.sm_vector + self.observation_table.smi_vector:
        self._fill_table_entry(row, exp)
Process a counterexample in the Rivest-Schapire way. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed Return: None
juraj-google-style
def set_unrecognized_field(self, key, value, variant):
    """Record an unrecognized field encountered while decoding a message.

    Args:
        key: The name or number used to refer to this unknown value.
        value: The value of the field.
        variant: Type information needed to interpret the value or
            re-encode it.

    Raises:
        TypeError: If the variant is not an instance of messages.Variant.
    """
    if not isinstance(variant, Variant):
        raise TypeError('Variant type %s is not valid.' % variant)
    self.__unrecognized_fields[key] = (value, variant)
Set an unrecognized field, used when decoding a message. Args: key: The name or number used to refer to this unknown value. value: The value of the field. variant: Type information needed to interpret the value or re-encode it. Raises: TypeError: If the variant is not an instance of messages.Variant.
juraj-google-style
def post(self, url, headers=None, params=None, **kwargs):
    """Send a JSON POST request.

    The request carries the given headers, extra URL query parameters
    (merged with any already in the URL), and a body supplied via exactly
    one of the 'json' or 'data' kwargs.

    Args:
        url (str): URL to retrieve.
        headers (dict): Any other headers to be added to the request.
        params: dictionary or bytes to be sent in the query string for
            the request. (optional)
        json: JSON-serializable object to send in the request body. (optional)
        data: raw request body data. (optional)

    Returns:
        The parsed JSON response body.
    """
    extra_count = len(kwargs)
    if extra_count > 1:
        raise InvalidArgumentsError('Too many extra args ({} > 1)'.format(extra_count))
    # At most one kwarg remains here; reject anything but 'json'/'data'.
    for name in kwargs:
        if name not in ('json', 'data'):
            raise InvalidArgumentsError('Invalid kwarg: ' + name)
    response = self.session.post(url, headers=headers, params=params, **kwargs)
    response.raise_for_status()
    return _to_json(response)
Send a JSON POST request with the given request headers, additional URL query parameters, and the given JSON in the request body. The extra query parameters are merged with any which already exist in the URL. The 'json' and 'data' parameters may not both be given. Args: url (str): URL to retrieve headers (dict): Any other headers to be added to the request. params: dictionary or bytes to be sent in the query string for the request. (optional) json: json to send in the body of the Request. This must be a JSON-serializable object. (optional) data: raw request body data. May be a dictionary, list of tuples, bytes, or file-like object to send in the body of the Request. (optional)
codesearchnet
def _get_key_counter(seed, alg):
    """Calculates the key and counter to pass to raw RNG ops.

    Derives the key/counter pair consumed by raw V2 stateless RNG ops such
    as `StatelessRandomUniformV2`. Depending on `alg`, the pair is either
    resolved at runtime, scrambled, or packed directly from `seed`.

    Args:
        seed: An integer tensor of shape [2] to derive the key and counter
            from.
        alg: The RNG algorithm. See `tf.random.stateless_uniform` for an
            explanation.

    Returns:
        A pair (key, counter) suitable for V2 stateless RNG ops.

    Raises:
        ValueError: If `alg` is not a supported algorithm.
    """
    if alg == Algorithm.AUTO_SELECT.value:
        # Defer the choice to runtime; the op selects per device type.
        k, c = gen_stateless_random_ops_v2.stateless_random_get_key_counter(seed)
        return (k, c)
    if alg == Algorithm.PHILOX.value:
        k, c = _philox_scramble_seed(seed)
        return (k, c)
    if alg == Algorithm.THREEFRY.value:
        # Pack the two uint32 seed words into one uint64 key; counter is 0.
        packed = _uint32s_to_uint64(math_ops.cast(seed, dtypes.uint32))
        return (array_ops.reshape(packed, [1]), array_ops.zeros([1], dtypes.uint64))
    raise ValueError(unsupported_alg_error_msg(alg))
Calculates the key and counter to pass to raw RNG ops. This function calculates the key and counter that will be passed to the raw RNG ops like `StatelessRandomUniformV2`. Depending on the input `alg`, the key and counter may be scrambled or copied from `seed`. If `alg` is `"auto_select"`, the key and counter will be determined at runtime based on device type. Args: seed: An integer tensor of shape [2]. The seed to calculate the key and counter from. alg: The RNG algorithm. See `tf.random.stateless_uniform` for an explanation. Returns: A pair (key, counter) suitable for V2 stateless RNG ops like `StatelessRandomUniformV2`.
github-repos
def export_kml_file(self):
    """Generate KML element tree from ``Placemarks``.

    Returns:
        etree.ElementTree: KML element tree depicting ``Placemarks``.
    """
    root = create_elem('kml')
    root.Document = create_elem('Document')
    # Emit placemarks in a stable, name-sorted order.
    for placemark in sorted(self.values(), key=lambda p: p.name):
        root.Document.append(placemark.tokml())
    return etree.ElementTree(root)
Generate KML element tree from ``Placemarks``. Returns: etree.ElementTree: KML element tree depicting ``Placemarks``
codesearchnet
def reviews(self, **kwargs):
    """Get the reviews for a particular movie id.

    Args:
        page: (optional) Minimum value of 1. Expected value is an integer.
        language: (optional) ISO 639-1 code.
        append_to_response: (optional) Comma separated, any movie method.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_id_path('reviews'), kwargs)
    self._set_attrs_to_values(response)
    return response
Get the reviews for a particular movie id. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def render_template(cmd_derived_from_alias, pos_args_table):
    """Render cmd_derived_from_alias as a Jinja template with pos_args_table
    as the arguments.

    Args:
        cmd_derived_from_alias: The string to be injected with positional
            arguments.
        pos_args_table: The dictionary used to render the template.

    Returns:
        The list of command tokens (from shlex.split) with positional
        arguments injected.

    Raises:
        CLIError: If the template fails to render or the rendered command
            is invalid.
    """
    try:
        cmd_derived_from_alias = normalize_placeholders(cmd_derived_from_alias, inject_quotes=True)
        template = jinja.Template(cmd_derived_from_alias)
        rendered = shlex.split(template.render(pos_args_table))
        if '' in rendered:
            # An empty token means a placeholder rendered to nothing;
            # surface the underlying runtime error explicitly.
            check_runtime_errors(cmd_derived_from_alias, pos_args_table)
        return rendered
    except Exception as exception:
        if isinstance(exception, CLIError):
            raise
        split_exception_message = str(exception).split()
        error_index = split_exception_message[-1]
        if error_index.isdigit():
            # The exception message ends with a character index; point a
            # caret at the offending position in the alias command.
            split_exception_message.insert(-1, 'index')
            error_msg = RENDER_TEMPLATE_ERROR.format(' '.join(split_exception_message), cmd_derived_from_alias)
            error_msg += '\n{}^'.format(' ' * (len(error_msg) - len(cmd_derived_from_alias) + int(error_index) - 1))
        else:
            # Strip the quotes injected by normalize_placeholders so the
            # message shows the placeholder as the user wrote it.
            # Fix: '"{{' must map to '{{' (the original mapped it to '}}',
            # which corrupted the placeholder in the error message).
            exception_str = str(exception).replace('"{{', '{{').replace('}}"', '}}')
            error_msg = RENDER_TEMPLATE_ERROR.format(cmd_derived_from_alias, exception_str)
        raise CLIError(error_msg)
Render cmd_derived_from_alias as a Jinja template with pos_args_table as the arguments. Args: cmd_derived_from_alias: The string to be injected with positional arguments. pos_args_table: The dictionary used to render the template. Returns: A processed list of command tokens with positional arguments injected.
juraj-google-style