code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _read_from_hdx(self, object_type, value, fieldname='id', action=None, **kwargs):
    """Make a read call to HDX for the object matching fieldname=value.

    Args:
        object_type (str): Description of HDX object type (for messages).
        value (str): Value of HDX field.
        fieldname (str): HDX field name. Defaults to 'id'.
        action (Optional[str]): Replacement CKAN action url to use. Defaults to None.
        **kwargs: Other fields to pass to CKAN.

    Returns:
        Tuple[bool, Union[Dict, str]]: (True, HDX object metadata) on success,
        (False, error message) when the object is not found.

    Raises:
        HDXError: If fieldname is empty or the remote call fails.
    """
    if not fieldname:
        raise HDXError('Empty %s field name!' % object_type)
    if action is None:
        action = self.actions()['show']
    data = {fieldname: value}
    data.update(kwargs)
    try:
        return True, self.configuration.call_remoteckan(action, data)
    except NotFound:
        return False, '%s=%s: not found!' % (fieldname, value)
    except Exception as e:
        raisefrom(HDXError, 'Failed when trying to read: %s=%s! (POST)' % (fieldname, value), e)
Makes a read call to HDX passing in given parameter. Args: object_type (str): Description of HDX object type (for messages) value (str): Value of HDX field fieldname (str): HDX field name. Defaults to id. action (Optional[str]): Replacement CKAN action url to use. Defaults to None. **kwargs: Other fields to pass to CKAN. Returns: Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)
codesearchnet
def pprint_table(table, out=sys.stdout, rstrip=False):
    """Print a table of data, padded for column alignment.

    Each row must have the same number of columns.

    Args:
        table: The table to print. A list of lists of strings.
        out: Output stream (file-like object).
        rstrip: If True, trailing whitespace is stripped from each entry
            (the table is modified in place).
    """
    if rstrip:
        # Mutate the caller's table, matching the documented behavior.
        for idx, row in enumerate(table):
            table[idx] = [cell.rstrip() for cell in row]
    ncols = len(table[0])
    # Width of each column is the widest cell it contains.
    widths = [max(len(row[col]) for row in table) for col in range(ncols)]
    for row in table:
        out.write(row[0].ljust(widths[0] + 1))
        for cell, width in zip(row[1:], widths[1:]):
            out.write(cell.rjust(width + 2))
        out.write('\n')
Prints out a table of data, padded for alignment Each row must have the same number of columns. Args: table: The table to print. A list of lists. out: Output stream (file-like object) rstrip: if True, trailing whitespace is removed from the entries.
codesearchnet
def confirm(prompt='Really?', color='warning', yes_values=('y', 'yes'), abort_on_unconfirmed=False, abort_options=None):
    """Prompt the user for confirmation.

    Confirmation can be refused by typing anything other than one of the
    yes values, or aborted with Ctrl-C.

    Args:
        prompt (str): Prompt to present to the user ["Really?"].
        color: Color to print the prompt with; falsy to print plain.
        yes_values: Value(s) the user must type to confirm [("y", "yes")].
        abort_on_unconfirmed (bool|int|str): When the user does NOT confirm:
            an int sets the abort return code, a str sets the abort message,
            any other truthy value aborts with return code 0.
        abort_options (dict): Options passed to abort() when not confirmed;
            these override anything derived from abort_on_unconfirmed.

    Returns:
        bool: True when the user confirmed.
    """
    if isinstance(yes_values, str):
        yes_values = (yes_values,)
    prompt = '{prompt} [{yes_value}/N] '.format(prompt=prompt, yes_value=yes_values[0])
    if color:
        prompt = printer.colorize(prompt, color=color)
    try:
        answer = input(prompt)
    except KeyboardInterrupt:
        print()
        confirmed = False
    else:
        confirmed = answer.strip().lower() in yes_values
    # Abort when the flag is truthy, OR when it is integer zero (0 means
    # "abort with return code 0"); a plain False never aborts.
    should_abort = not confirmed and (
        bool(abort_on_unconfirmed)
        or (abort_on_unconfirmed == 0 and abort_on_unconfirmed is not False)
    )
    if should_abort:
        options = {} if abort_options is None else abort_options
        if abort_on_unconfirmed is True:
            options.setdefault('return_code', 0)
        elif isinstance(abort_on_unconfirmed, int):
            options.setdefault('return_code', abort_on_unconfirmed)
        elif isinstance(abort_on_unconfirmed, str):
            options.setdefault('message', abort_on_unconfirmed)
        else:
            options.setdefault('return_code', 0)
        abort(**options)
    return confirmed
Prompt for confirmation. Confirmation can be aborted by typing in a no value instead of one of the yes values or with Ctrl-C. Args: prompt (str): Prompt to present user ["Really?"] color (string|Color|bool) Color to print prompt string; can be ``False`` or ``None`` to print without color ["yellow"] yes_values (list[str]): Values user must type in to confirm [("y", "yes")] abort_on_unconfirmed (bool|int|str): When user does *not* confirm: - If this is an integer, print "Aborted" to stdout if it's 0 or to stderr if it's not 0 and then exit with this code - If this is a string, print it to stdout and exit with code 0 - If this is ``True`` (or any other truthy value), print "Aborted" to stdout and exit with code 0 abort_options (dict): Options to pass to :func:`abort` when not confirmed (these options will override any options set via ``abort_on_unconfirmed``)
codesearchnet
def run_processes(self, procdetails: List[ProcessDetails], subproc_run_timeout_sec: float=1, stop_event_timeout_ms: int=1000, kill_timeout_sec: float=5) -> None:
    """Run multiple child processes and supervise them until stop/failure.

    Args:
        procdetails: list of :class:`ProcessDetails` objects (q.v.).
        subproc_run_timeout_sec: time (in seconds) to wait for each process
            when polling child processes (default ``1``).
        stop_event_timeout_ms: time to wait (in ms) while checking the
            Windows stop event for this service (default ``1000``).
        kill_timeout_sec: how long (in seconds) to wait for the
            subprocesses to end peacefully before trying to kill them.
    """
    def cleanup():
        # Ensure children are stopped even on abnormal interpreter exit.
        self.debug('atexit function called: cleaning up')
        for pmgr_ in self.process_managers:
            pmgr_.stop()
    atexit.register(cleanup)
    self.process_managers = []
    n = len(procdetails)
    for (i, details) in enumerate(procdetails):
        pmgr = ProcessManager(details, (i + 1), n, kill_timeout_sec=kill_timeout_sec, debugging=self.debugging)
        self.process_managers.append(pmgr)
    for pmgr in self.process_managers:
        pmgr.start()
    self.info('All started')
    something_running = True
    stop_requested = False
    subproc_failed = False
    # Supervision loop: exit when all children have stopped, a stop was
    # requested via the Windows event, or any child exited nonzero.
    while (something_running and (not stop_requested) and (not subproc_failed)):
        if (win32event.WaitForSingleObject(self.h_stop_event, stop_event_timeout_ms) == win32event.WAIT_OBJECT_0):
            stop_requested = True
            self.info('Stop requested; stopping')
        else:
            something_running = False
            for pmgr in self.process_managers:
                if subproc_failed:
                    break
                try:
                    retcode = pmgr.wait(timeout_s=subproc_run_timeout_sec)
                    if (retcode != 0):
                        subproc_failed = True
                except subprocess.TimeoutExpired:
                    # Timeout means this child is still running.
                    something_running = True
    for pmgr in self.process_managers:
        pmgr.stop()
    self.info('All stopped')
Run multiple child processes. Args: procdetails: list of :class:`ProcessDetails` objects (q.v.) subproc_run_timeout_sec: time (in seconds) to wait for each process when polling child processes to see how they're getting on (default ``1``) stop_event_timeout_ms: time to wait (in ms) while checking the Windows stop event for this service (default ``1000``) kill_timeout_sec: how long (in seconds) will we wait for the subprocesses to end peacefully, before we try to kill them? .. todo:: cardinal_pythonlib.winservice.WindowsService: NOT YET IMPLEMENTED: Windows service autorestart
codesearchnet
def CheckTestDependencies(self, verbose_output=True):
    """Checks the availability of the dependencies needed to run tests.

    Args:
        verbose_output (Optional[bool]): True if output should be verbose.

    Returns:
        bool: True if all test dependencies are available, False otherwise.
    """
    # Test dependencies only matter if the runtime dependencies are met.
    if not self.CheckDependencies(verbose_output=verbose_output):
        return False
    print('Checking availability and versions of test dependencies.')
    dependencies = sorted(
        self._test_dependencies.values(), key=lambda dep: dep.name)
    all_available = True
    for dependency in dependencies:
        result, status_message = self._CheckPythonModule(dependency)
        if not result:
            all_available = False
        self._PrintCheckDependencyStatus(
            dependency, result, status_message, verbose_output=verbose_output)
    if all_available and not verbose_output:
        print('[OK]')
    print('')
    return all_available
Checks the availability of the dependencies when running tests. Args: verbose_output (Optional[bool]): True if output should be verbose. Returns: bool: True if the dependencies are available, False otherwise.
juraj-google-style
def test_on_batch(model, inputs, targets, sample_weights=None, output_loss_metrics=None):
    """Calculates the loss and metrics for one input batch (eager, no training).

    Args:
        model: Model whose loss has to be calculated.
        inputs: Input batch data.
        targets: Target batch data.
        sample_weights: Sample weight batch data.
        output_loss_metrics: List of metrics used to aggregate output loss
            values.

    Returns:
        Dict with 'total_loss' (flattened loss tensors), 'output_losses'
        (per-output loss tensors; may be empty for single-output models) and
        'metrics' (metric result tensors).
    """
    inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)
    # Learning phase 0 == inference mode for layers like dropout/batchnorm.
    with backend.eager_learning_phase_scope(0):
        outs, total_loss, output_losses, masks = _model_loss(
            model, inputs, targets, sample_weights=sample_weights,
            training=False, output_loss_metrics=output_loss_metrics)
    outs = outs if isinstance(outs, list) else [outs]
    metrics_results = _eager_metrics_fn(
        model, outs, targets, sample_weights=sample_weights, masks=masks)
    return {
        'total_loss': nest.flatten(total_loss),
        'output_losses': output_losses,
        'metrics': metrics_results,
    }
Calculates the loss for one input batch. Args: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. sample_weights: Sample weight batch data. output_loss_metrics: List of metrics that are used to aggregated output loss values. Returns: Dict with three items: 'total_loss': single tensor for overall loss, 'output_losses': list of tensors for loss corresponding to each of the model output. Could be a empty list when model has only one output. 'metrics': list of tensors for metric specified.
github-repos
def init(scope):
    """Copy every plain function in *scope* onto a fresh SinonGlobals object.

    The container is also stored in the module-global CPSCOPE.

    Args:
        scope: A mapping such as locals() or globals().

    Returns:
        The SinonGlobals instance holding the functions as attributes.
    """
    class SinonGlobals(object):
        pass

    global CPSCOPE
    CPSCOPE = SinonGlobals()
    # Only plain functions are copied; other values are ignored.
    for func in (v for v in scope.values() if isinstance(v, FunctionType)):
        setattr(CPSCOPE, func.__name__, func)
    return CPSCOPE
Copy all values of scope into the class SinonGlobals Args: scope (eg. locals() or globals()) Return: SinonGlobals instance
juraj-google-style
def find_id_in_folder(self, name, parent_folder_id=0):
    """Find a folder or file ID from its name, inside a given folder.

    Args:
        name (str): Name of the folder or the file to find.
        parent_folder_id (int): ID of the folder in which to search.

    Returns:
        int: ID of the file or folder found, or None if not found.
        When name is empty/None, parent_folder_id itself is returned.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    if name is None or len(name) == 0:
        return parent_folder_id
    offset = 0
    page = self.get_folder_items(parent_folder_id, limit=1000, offset=offset, fields_list=['name'])
    total = int(page['total_count'])
    # Page through the folder contents until a match is found.
    while offset < total:
        match = self.__find_name(page, name)
        if match is not None:
            return match
        offset += len(page['entries'])
        page = self.get_folder_items(parent_folder_id, limit=1000, offset=offset, fields_list=['name'])
    return None
Find a folder or a file ID from its name, inside a given folder. Args: name (str): Name of the folder or the file to find. parent_folder_id (int): ID of the folder where to search. Returns: int. ID of the file or folder found. None if not found. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
codesearchnet
def _render_text(self, text, preformatted=False):
    """Append an HTML block for the given text to the segment list.

    Args:
        text: the text to render.
        preformatted: whether the text should be rendered as preformatted
            (<pre>) instead of a <div>.
    """
    tag = 'pre' if preformatted else 'div'
    self._segments.append('<{0}>{1}</{0}>'.format(tag, HtmlBuilder._format(text)))
Renders an HTML formatted text block with the specified text. Args: text: the text to render preformatted: whether the text should be rendered as preformatted
juraj-google-style
def FindByName(cls, name):
    """Find an installed VirtualTile by name.

    Searches installed virtual tiles via the pkg_resources entry point
    ``iotile.virtual_tile``.  If name is a path ending in .py it is loaded
    directly from disk instead.

    Args:
        name (str): The name of the tile to search for.

    Returns:
        VirtualTile class: A virtual tile subclass that can be instantiated
        to create a virtual tile.

    Raises:
        ArgumentError: If no matching tile is installed.
    """
    if name.endswith('.py'):
        return cls.LoadFromFile(name)
    registry = ComponentRegistry()
    # Return the first registered extension that matches the name filter.
    for _, tile in registry.load_extensions('iotile.virtual_tile', name_filter=name, class_filter=VirtualTile):
        return tile
    raise ArgumentError('VirtualTile could not be found by name', name=name)
Find an installed VirtualTile by name. This function searches for installed virtual tiles using the pkg_resources entry_point `iotile.virtual_tile`. If name is a path ending in .py, it is assumed to point to a module on disk and loaded directly rather than using pkg_resources. Args: name (str): The name of the tile to search for. Returns: VirtualTile class: A virtual tile subclass that can be instantiated to create a virtual tile.
codesearchnet
def _find_path_between(self, p: GridQubit, q: GridQubit, used: Set[GridQubit]) -> Optional[List[GridQubit]]:
    """Searches for a continuous sequence of qubits between p and q.

    Runs two BFS searches in parallel, one rooted at p and one at q,
    alternating which frontier is expanded.  When a qubit reached from one
    side appears in the visited set of the other side, the two partial
    paths are joined and returned.

    Args:
        p: The first qubit, start of the sequence.
        q: The second qubit, end of the sequence.
        used: Set of forbidden qubits which cannot appear on the sequence.

    Returns:
        Continuous sequence of qubits forming a path between p and q, or
        None if no path was found.
    """
    def assemble_path(n: GridQubit, parent: Dict[(GridQubit, GridQubit)]):
        # Walk parent links from n back to the BFS root (inclusive).
        path = [n]
        while (n in parent):
            n = parent[n]
            path.append(n)
        return path
    other = {p: q, q: p}  # maps each BFS root to the opposite root
    parents = {p: dict(), q: dict()}  # per-root BFS parent links
    visited = {p: set(), q: set()}  # per-root sets of reached qubits
    queue = collections.deque([(p, p), (q, q)])  # (node, owning-root) pairs
    while queue:
        (n, s) = queue.popleft()
        for n_adj in self._c_adj[n]:
            if (n_adj in visited[other[s]]):
                # Frontiers met: join the two partial paths.
                # NOTE(review): the slices orient/trim the assembled halves
                # so endpoints are not duplicated — verify against
                # assemble_path's root-last ordering.
                path_s = assemble_path(n, parents[s])[(- 2)::(- 1)]
                path_other = assemble_path(n_adj, parents[other[s]])[:(- 1)]
                path = (path_s + path_other)
                if (s == q):
                    path.reverse()  # normalize direction to run p -> q
                return path
            elif ((n_adj not in used) and (n_adj not in visited[s])):
                queue.append((n_adj, s))
                visited[s].add(n_adj)
                parents[s][n_adj] = n
    return None
Searches for continuous sequence between two qubits. This method runs two BFS algorithms in parallel (alternating variable s in each iteration); the first one starting from qubit p, and the second one starting from qubit q. If at some point a qubit reachable from p is found to be on the set of qubits already reached from q (or vice versa), the search is stopped and new path returned. Args: p: The first qubit, start of the sequence. q: The second qubit, end of the sequence. used: Set of forbidden qubits which cannot appear on the sequence. Returns: Continuous sequence of qubits forming a new path between p and q, or None if no path was found.
codesearchnet
def sync_ik_robot(self, joint_positions, simulate=False, sync_last=True):
    """Force the internal robot model to match the provided joint angles.

    Args:
        joint_positions (list): a list or flat numpy array of joint positions.
        simulate (bool): If True, drive the joints through physics
            simulation; otherwise write the physics state directly.
        sync_last (bool): If False, skip the last joint angle (useful for
            directly controlling the roll at the end effector).
    """
    num_joints = len(joint_positions) - (0 if sync_last else 1)
    for i in range(num_joints):
        joint_id = self.actual[i]
        if simulate:
            p.setJointMotorControl2(
                self.ik_robot,
                joint_id,
                p.POSITION_CONTROL,
                targetVelocity=0,
                targetPosition=joint_positions[i],
                force=500,
                positionGain=0.5,
                velocityGain=1.,
            )
        else:
            p.resetJointState(self.ik_robot, joint_id, joint_positions[i])
Force the internal robot model to match the provided joint angles. Args: joint_positions (list): a list or flat numpy array of joint positions. simulate (bool): If True, actually use physics simulation, else write to physics state directly. sync_last (bool): If False, don't sync the last joint angle. This is useful for directly controlling the roll at the end effector.
juraj-google-style
def Calls(self, conditions=None):
    """Find the methods that evaluate data meeting the given conditions.

    Args:
        conditions: An iterable of (artifact, os_name, cpe, label) tuples.

    Returns:
        set: The methods that evaluate the data.
    """
    if conditions is None:
        conditions = [None]
    results = set()
    for condition in conditions:
        for matched in self.Match(*condition):
            results.update(self._registry.get(matched, []))
    return results
Find the methods that evaluate data that meets this condition. Args: conditions: A tuple of (artifact, os_name, cpe, label) Returns: A list of methods that evaluate the data.
juraj-google-style
def render_text(text, preformatted=False):
    """Renders an HTML formatted text block with the specified text.

    Args:
        text: the text to render.
        preformatted: whether the text should be rendered as preformatted.

    Returns:
        The formatted HTML string.
    """
    html_builder = HtmlBuilder()
    html_builder._render_text(text, preformatted=preformatted)
    return html_builder._to_html()
Renders an HTML formatted text block with the specified text. Args: text: the text to render preformatted: whether the text should be rendered as preformatted Returns: The formatted HTML.
juraj-google-style
def describe_enum(enum_definition):
    """Build a descriptor for an Enum class.

    Args:
        enum_definition: Enum class to provide a descriptor for.

    Returns:
        Initialized EnumDescriptor instance describing the Enum class.
    """
    descriptor = EnumDescriptor()
    # Use only the last component of the dotted definition name.
    descriptor.name = enum_definition.definition_name().rsplit('.', 1)[-1]
    values = [
        describe_enum_value(enum_definition.lookup_by_number(number))
        for number in enum_definition.numbers()
    ]
    if values:
        descriptor.values = values
    return descriptor
Build descriptor for Enum class. Args: enum_definition: Enum class to provide descriptor for. Returns: Initialized EnumDescriptor instance describing the Enum class.
juraj-google-style
def are_you_sure(flag_changed, evt, parent=None, title="File has been changed", msg="Are you sure you want to exit?"):
    """Show an "are you sure you want to exit" question dialog.

    If flag_changed is set, shows the dialog; when the answer is not Yes,
    calls evt.ignore() to cancel the close event.

    Args:
        flag_changed: whether there are unsaved changes.
        evt: QCloseEvent instance.
        parent: parent form used to center the dialog.
        title: title for the question dialog.
        msg: text of the question dialog.
    """
    if not flag_changed:
        return
    answer = QMessageBox.question(
        parent, title, msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
    if answer != QMessageBox.Yes:
        evt.ignore()
"Are you sure you want to exit" question dialog. If flag_changed, shows the question dialog. If the answer is not yes, calls evt.ignore() Arguments: flag_changed evt -- QCloseEvent instance parent=None -- parent form, used to center the question dialog title -- title for question dialog msg -- text of question dialog Note: the function itself returns None; a "no" answer is signalled to the caller by evt.ignore() having been invoked on the event.
juraj-google-style
def train_async(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
    """Train a model, locally or in the cloud.

    The output can be used for batch prediction or online deployment.

    Args:
        input_dir: Directory containing preprocessed results (local or GCS).
        batch_size: size of batch used for training.
        max_steps: number of steps to train.
        output_dir: The output directory to use (local or GCS).
        checkpoint: the Inception checkpoint to use; None selects a default.
        cloud: a google.datalab.ml.CloudTrainingConfig object for cloud
            execution; None runs locally.

    Returns:
        A google.datalab.utils.Job object that can be used to query state
        from or wait.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        trainer_args = (input_dir, batch_size, max_steps, output_dir, checkpoint)
        if cloud is None:
            return _local.Local.train(*trainer_args)
        return _cloud.Cloud.train(*(trainer_args + (cloud,)))
Train model. The output can be used for batch prediction or online deployment. Args: input_dir: A directory path containing preprocessed results. Can be local or GCS path. batch_size: size of batch used for training. max_steps: number of steps to train. output_dir: The output directory to use. Can be local or GCS path. checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used. cloud: a google.datalab.ml.CloudTrainingConfig object to let it run in cloud. If None, it runs locally. Returns: A google.datalab.utils.Job object that can be used to query state from or wait.
codesearchnet
def _process_from_queue(self, queue):
    """Internal method to process a task batch from the given queue.

    Args:
        queue: Queue name to be processed.

    Returns:
        Tuple of (task IDs, processed count).  Task IDs is the list of
        tasks moved to the active set (even if an error occurred, so client
        code can assume the queue is empty if nothing was returned);
        processed count is the number of tasks attempted, or -1 when the
        queue lock could not be acquired.
    """
    now = time.time()
    log = self.log.bind(queue=queue)
    batch_size = self._get_queue_batch_size(queue)
    (queue_lock, failed_to_acquire) = self._get_queue_lock(queue, log)
    if failed_to_acquire:
        # Another worker holds this queue's lock.
        return ([], (- 1))
    # Tasks already present elsewhere are rescheduled for this later time.
    later = (time.time() + self.config['LOCK_RETRY'])
    task_ids = self.scripts.zpoppush(self._key(QUEUED, queue), self._key(ACTIVE, queue), batch_size, None, now, if_exists=('add', self._key(SCHEDULED, queue), later, 'min'), on_success=('update_sets', queue, self._key(QUEUED), self._key(ACTIVE), self._key(SCHEDULED)))
    log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE, qty=len(task_ids))
    processed_count = 0
    if task_ids:
        processed_count = self._process_queue_tasks(queue, queue_lock, task_ids, now, log)
    if queue_lock:
        queue_lock.release()
        log.debug('released swq lock')
    return (task_ids, processed_count)
Internal method to process a task batch from the given queue. Args: queue: Queue name to be processed Returns: Task IDs: List of tasks that were processed (even if there was an error so that client code can assume the queue is empty if nothing was returned) Count: The number of tasks that were attempted to be executed or -1 if the queue lock couldn't be acquired.
codesearchnet
def create_domain(provider, context, **kwargs):
    """Create a domain within route53.

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance.
        context (:class:`stacker.context.Context`): context instance.
        **kwargs: must include ``domain``, the domain name to create.

    Returns:
        dict with ``domain`` and ``zone_id`` on success, or False when no
        domain was provided.
    """
    session = get_session(provider.region)
    client = session.client('route53')
    domain = kwargs.get('domain')
    if not domain:
        logger.error('domain argument or BaseDomain variable not provided.')
        return False
    zone_id = create_route53_zone(client, domain)
    return {'domain': domain, 'zone_id': zone_id}
Create a domain within route53. Args: provider (:class:`stacker.providers.base.BaseProvider`): provider instance context (:class:`stacker.context.Context`): context instance Returns: boolean for whether or not the hook succeeded.
codesearchnet
def get_actions(self, issues):
    """Return the list of actions to execute for the given issues.

    Args:
        issues (`list` of :obj:`RequiredTagsIssue`): List of issues.

    Returns:
        `list` of `dict`: One action item per non-ignored issue, each with
        its owners attached.
    """
    actions = []
    try:
        for issue in issues:
            action_item = self.determine_action(issue)
            if action_item['action'] == AuditActions.IGNORE:
                continue
            action_item['owners'] = self.get_contacts(issue)
            actions.append(action_item)
    finally:
        # Release any read locks held by the lookups above.
        db.session.rollback()
    return actions
Returns a list of actions to executed Args: issues (`list` of :obj:`RequiredTagsIssue`): List of issues Returns: `list` of `dict`
codesearchnet
def __init__(self, key, attributes):
    """Initialize the request attribute container.

    Args:
        key: String name of the attribute key that uniquely identifies the
            request.
        attributes: Dictionary mapping request attribute names to values.
    """
    self._attributes_normalized = {}
    # Guard against None/empty attributes before normalizing.
    self._set_attributes(attributes or {})
    self._key_normalized = ''
    self._set_key(key)
Object initialization Args: key: String name of an attributes key that represents the unique identify of the request attributes: Dictionary whose keys match the string values of the request attribute's names and values correspond the the request attribute values
juraj-google-style
def FillDeviceAttributes(device, descriptor):
    """Fill out the attributes of a HID device.

    Queries the device's HidAttributes and product string and copies them
    into the descriptor.

    Args:
        device: A handle to the open device.
        descriptor: The DeviceDescriptor to populate with the attributes.

    Raises:
        WindowsError: when unable to obtain attributes or product string.
    """
    attributes = HidAttributes()
    if not hid.HidD_GetAttributes(device, ctypes.byref(attributes)):
        raise ctypes.WinError()
    product_buf = ctypes.create_string_buffer(1024)
    if not hid.HidD_GetProductString(device, product_buf, 1024):
        raise ctypes.WinError()
    descriptor.vendor_id = attributes.VendorID
    descriptor.product_id = attributes.ProductID
    descriptor.product_string = ctypes.wstring_at(product_buf)
Fill out the attributes of the device. Fills the devices HidAttributes and product string into the descriptor. Args: device: A handle to the open device descriptor: The DeviceDescriptor to populate with the attributes. Returns: None Raises: WindowsError when unable to obtain attributes or product string.
codesearchnet
def FromJsonString(self, value):
    """Convert a JSON duration string to this Duration.

    Args:
        value: A string to be converted. The string must end with 's'.
            Any fractional digits (or none) are accepted as long as they
            fit into nanosecond precision, e.g. "1s", "1.01s", "-3.100s".

    Raises:
        ParseError: On parsing problems.
    """
    if len(value) < 1 or value[-1] != 's':
        raise ParseError('Duration must end with letter "s": {0}.'.format(value))
    try:
        dot = value.find('.')
        if dot == -1:
            self.seconds = int(value[:-1])
            self.nanos = 0
        else:
            self.seconds = int(value[:dot])
            # Parse the fractional part with the overall sign applied,
            # scaled to nanoseconds.
            sign = '-' if value[0] == '-' else ''
            self.nanos = int(round(float('{0}0{1}'.format(sign, value[dot:-1])) * 1000000000.0))
    except ValueError:
        raise ParseError("Couldn't parse duration: {0}.".format(value))
Converts a string to Duration. Args: value: A string to be converted. The string must end with 's'. Any fractional digits (or none) are accepted as long as they fit into precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s". Raises: ParseError: On parsing problems.
codesearchnet
def update_score_summary(sender, **kwargs):
    """Listen for new Scores and update the relevant ScoreSummary.

    Args:
        sender: not used.

    Kwargs:
        instance (Score): The score model whose save triggered this receiver.
    """
    score = kwargs['instance']
    try:
        summary = ScoreSummary.objects.get(student_item=score.student_item)
        summary.latest = score
        # A reset score always becomes the new highest; otherwise only a
        # strictly higher score replaces it.
        if score.reset or score.to_float() > summary.highest.to_float():
            summary.highest = score
        summary.save()
    except ScoreSummary.DoesNotExist:
        ScoreSummary.objects.create(
            student_item=score.student_item,
            highest=score,
            latest=score,
        )
    except DatabaseError:
        logger.exception(
            u"Error while updating score summary for student item {}"
            .format(score.student_item)
        )
Listen for new Scores and update the relevant ScoreSummary. Args: sender: not used Kwargs: instance (Score): The score model whose save triggered this receiver.
juraj-google-style
def tseries_between(self, tstart=None, tend=None):
    """Return time series data between requested times.

    The source line was corrupted (both bisection midpoint expressions were
    truncated); the standard ``mid = lo + (hi - lo) // 2`` form is restored
    here.

    Args:
        tstart (float): starting time. Set to None to start at the
            beginning of available data.
        tend (float): ending time. Set to None to stop at the end of
            available data.

    Returns:
        :class:`pandas.DataFrame`: slice of :attr:`tseries`, or None when
        no time series data is available.
    """
    if self.tseries is None:
        return None
    ndat = self.tseries.shape[0]
    if tstart is None:
        istart = 0
    else:
        # Bisection for the first row with t >= tstart.
        igm = 0
        igp = ndat - 1
        while igp - igm > 1:
            istart = igm + (igp - igm) // 2
            if self.tseries.iloc[istart]['t'] >= tstart:
                igp = istart
            else:
                igm = istart
        istart = igp
    if tend is None:
        iend = None
    else:
        # Bisection for the last row with t <= tend; slice end is exclusive,
        # hence the final igm + 1.
        igm = 0
        igp = ndat - 1
        while igp - igm > 1:
            iend = igm + (igp - igm) // 2
            if self.tseries.iloc[iend]['t'] > tend:
                igp = iend
            else:
                igm = iend
        iend = igm + 1
    return self.tseries.iloc[istart:iend]
Return time series data between requested times. Args: tstart (float): starting time. Set to None to start at the beginning of available data. tend (float): ending time. Set to None to stop at the end of available data. Returns: :class:`pandas.DataFrame`: slice of :attr:`tseries`.
codesearchnet
def _get_val_list(obj, path_list, reverse=False): try: y = getattr(obj, path_list[0]) except AttributeError: return [] if (len(path_list) == 1): return [y] else: val_list = [x for a in y for x in _get_val_list(a, path_list[1:], reverse)] if reverse: val_list.reverse() return val_list
Extract values from nested objects by attribute names. Objects contain attributes which are named references to objects. This will descend down a tree of nested objects, starting at the given object, following the given path. Args: obj: object Any type of object path_list: list Attribute names reverse: bool Reverse the list of values before concatenation. Returns: list of objects
codesearchnet
def print_object_results(obj_result):
    """Print the results of validating an object.

    Args:
        obj_result: An ObjectValidationResults instance.
    """
    print_results_header(obj_result.object_id, obj_result.is_valid)
    # Emit warnings first, then schema errors, each at indent level 1.
    for findings, print_fn in ((obj_result.warnings, print_warning_results),
                               (obj_result.errors, print_schema_results)):
        if findings:
            print_fn(obj_result, 1)
Print the results of validating an object. Args: obj_result: An ObjectValidationResults instance.
codesearchnet
def SignFile(self, in_filename, out_filename=None):
    """Sign a file using osslsigncode.

    Args:
        in_filename: file to read from.
        out_filename: file to output to; if None we output to the same
            filename as the input with a .signed suffix.

    Returns:
        output filename string.

    Raises:
        pexpect.ExceptionPexpect: if the expect invocation of osslsigncode
            fails.
        SigningError: for signing failures.
    """
    if out_filename is None:
        out_filename = '%s.signed' % in_filename
    # NOTE(review): the argument list was truncated in the source at the
    # timestamp URL ('http:...'); restored to a standard Authenticode
    # timestamping endpoint plus in/out files — confirm against VCS history.
    args = [
        '-certs', self.cert, '-key', self.key, '-n', self.application, '-t',
        'http://timestamp.verisign.com/scripts/timestamp.dll',
        '-in', in_filename, '-out', out_filename
    ]
    try:
        output_log = io.StringIO()
        ossl = pexpect.spawn('osslsigncode', args)
        # Capture everything the child prints, for the failure log below.
        ossl.logfile_read = output_log
        ossl.expect('Enter PEM pass phrase')
        ossl.sendline(self.password)
        ossl.wait()
    except pexpect.ExceptionPexpect:
        output_log.seek(0)
        logging.exception(output_log.read())
        raise
    if not os.path.exists(out_filename):
        raise SigningError('Expected output %s not created' % out_filename)
    # Verify the signature we just produced before returning.
    try:
        subprocess.check_call(['osslsigncode', 'verify', '-in', out_filename])
    except subprocess.CalledProcessError:
        logging.exception('Bad signature verification on %s', out_filename)
        raise SigningError('Bad signature verification on %s' % out_filename)
    return out_filename
Sign a file using osslsigncode. Args: in_filename: file to read from out_filename: file to output to, if none we output to the same filename as the input with a .signed suffix. Returns: output filename string Raises: pexpect.ExceptionPexpect: if the expect invocation of osslsigncode fails. SigningError: for signing failures.
codesearchnet
def get_imap_capabilities(server):
    """Return a list of an IMAP server's capabilities.

    Args:
        server (imapclient.IMAPClient): An instance of imapclient.IMAPClient.

    Returns:
        list: A list of capability strings.
    """
    # Capabilities arrive as bytes; decode them directly instead of the
    # original approach of calling str() on a bytes object and scraping the
    # b'...' repr with string replacement (which would also mangle any
    # apostrophes in the value).
    capabilities = [
        c.decode('utf-8', errors='replace') if isinstance(c, (bytes, bytearray)) else str(c)
        for c in server.capabilities()
    ]
    logger.debug("IMAP server supports: {0}".format(capabilities))
    return capabilities
Returns a list of an IMAP server's capabilities Args: server (imapclient.IMAPClient): An instance of imapclient.IMAPClient Returns (list): A list of capabilities
juraj-google-style
def scheduler(self, sleep_time=0.2):
    """Run the scheduler loop, executing scheduled calls when due.

    Args:
        sleep_time (float): Seconds to wait between loop iterations so the
            scheduler does not consume 100% CPU. Defaults to 0.2.

    Returns:
        None
    """
    while self.listening:
        if self.scheduled_calls:
            now = time.time()
            # Keep only calls not yet due; time_reached executes due ones.
            self.scheduled_calls[:] = [
                call for call in self.scheduled_calls
                if not self.time_reached(now, call)
            ]
        time.sleep(sleep_time)
    logger.info("Shutting down the call scheduler...")
Starts the scheduler to check for scheduled calls and execute them at the correct time. Args: sleep_time (float): The amount of time to wait in seconds between each loop iteration. This prevents the scheduler from consuming 100% of the host's CPU. Defaults to 0.2 seconds. Returns: None
juraj-google-style
def clone_with_git(repo_uri, dest_path):
    """Create a clone by cloning a git repository.

    Args:
        repo_uri: The URI of the git repository to clone.
        dest_path: The location to clone to.
    """
    log.info('Cloning git repo %s to %s', repo_uri, dest_path)
    # Shallow clone (depth=1): only the most recent commit is fetched.
    git.Repo.clone_from(repo_uri, dest_path, depth=1)
Create a clone by cloning a git repository. Args: repo_uri: The URI of the git repository to clone. dest_path: The location to clone to.
codesearchnet
def incident(self, name, owner=None, **kwargs):
    """Create the Incident TI object.

    Args:
        name: The name of the incident.
        owner: The owner (organization) for the incident. Defaults to None.
        **kwargs: Additional keyword arguments forwarded to Incident.

    Returns:
        Incident: The created Incident object.
    """
    return Incident(self.tcex, name, owner=owner, **kwargs)
Create the Incident TI object. Args: owner: name: **kwargs: Return:
juraj-google-style
def get_component(self, component_name):
    """Looks up a component by its name.

    Args:
        component_name: The name of the component to look up.

    Returns:
        The component for the provided name, or None if there is no such
        component.
    """
    # Single dict lookup via .get instead of a contains-check plus index.
    return self.get_components().get(component_name)
Looks up a component by its name. Args: component_name: The name of the component to look up. Returns: The component for the provided name or None if there is no such component.
juraj-google-style
def on_message(self, event):
    """Handle a received message event.

    Args:
        event: RTM API event.

    Returns:
        dict: Legobot message dict with user and channel references
        substituted in the text.
    """
    metadata = self._parse_metadata(event)
    message = Message(text=metadata['text'], metadata=metadata).__dict__
    text = message.get('text')
    if text:
        text = self.find_and_replace_userids(text)
        message['text'] = self.find_and_replace_channel_refs(text)
    return message
Runs when a message event is received Args: event: RTM API event. Returns: Legobot.messge
codesearchnet
def _CheckAndCreateNewGroup(self, group_name, group_class):
    """Add this method to a matching existing group or create a new one.

    Checks whether the last queued item is a group of the given class and
    name; if so the method joins it, otherwise a fresh group is queued.

    Args:
        group_name: the name of the group.
        group_class: the class used to create an instance of this new group.

    Returns:
        self, to allow chaining.
    """
    candidate = self.GetPossibleGroup()
    if isinstance(candidate, group_class) and candidate.group_name() == group_name:
        candidate.AddMethod(self)
        return self
    new_group = group_class(group_name)
    new_group.AddMethod(self)
    self._call_queue.append(new_group)
    return self
Checks if the last method (a possible group) is an instance of our group_class. Adds the current method to this group or creates a new one. Args: group_name: the name of the group. group_class: the class used to create instance of this new group
juraj-google-style
def __schema_descriptor(self, services):
    """Build the descriptor for all the JSON Schemas used by the services.

    Args:
        services: List of protorpc.remote.Service instances implementing an
            api/version.

    Returns:
        dict: 'methods' mapping each rosy method name to its request and
        response schema refs, plus 'schemas' with all schema definitions.
    """
    methods_desc = {}
    for service in services:
        for method_name in service.all_remote_methods().iterkeys():
            rosy_method = '%s.%s' % (service.__name__, method_name)
            method_id = self.__id_from_name[rosy_method]
            request_response = {}
            request_schema_id = self.__request_schema.get(method_id)
            if request_schema_id:
                request_response['request'] = {'$ref': request_schema_id}
            response_schema_id = self.__response_schema.get(method_id)
            if response_schema_id:
                request_response['response'] = {'$ref': response_schema_id}
            methods_desc[rosy_method] = request_response
    return {'methods': methods_desc, 'schemas': self.__parser.schemas()}
Descriptor for the all the JSON Schema used. Args: services: List of protorpc.remote.Service instances implementing an api/version. Returns: Dictionary containing all the JSON Schema used in the service.
codesearchnet
def register(cls, type_name: str, subclass: Type['JSONConvertible'], override_existing: bool=False) -> None:
    """Registers a class with a type name.

    The type name will be used as the key for class lookup during
    deserialization. A class can be registered with multiple type names,
    but a type name should be used only for one class.

    Args:
        type_name: A globally unique string identifier for subclass.
        subclass: A subclass of JSONConvertible.
        override_existing: If True, override the class if the type name is
            already present in the registry. Otherwise an error will be
            raised.
    """
    cls._TYPE_REGISTRY.register(type_name, subclass, override_existing)
Registers a class with a type name. The type name will be used as the key for class lookup during deserialization. A class can be registered with multiple type names, but a type name should be uesd only for one class. Args: type_name: A global unique string identifier for subclass. subclass: A subclass of JSONConvertible. override_existing: If True, override the class if the type name is already present in the registry. Otherwise an error will be raised.
github-repos
def _tf_sess(self):
    """Return the underlying tf.compat.v1.Session object.

    Warning: accessing the returned object in user code is likely to cause
    races or "flaky tests".

    Returns:
        A tf.compat.v1.Session object.
    """
    return self._coordinated_creator.tf_sess
Return underlying tf.compat.v1.Session object. Warning: accessing the returned object in user code is likely to cause races or "flaky tests". Returns: A tf.compat.v1.Session object.
github-repos
def exception(self, timeout=None):
    """Return the exception raised by the publish call, if any.

    Blocks until the message has been published (or the timeout expires).
    Returns None when the call succeeded.

    Args:
        timeout (Union[int, float]): Seconds to wait before timing out.

    Returns:
        Exception: The exception raised by the call, or None on success.

    Raises:
        TimeoutError: If the request times out.
    """
    if not self._completed.wait(timeout=timeout):
        raise exceptions.TimeoutError("Timed out waiting for result.")
    # The sentinel result marks a failed call; anything else is success.
    if self._result == self._SENTINEL:
        return self._exception
    return None
Return the exception raised by the call, if any. This blocks until the message has successfully been published, and returns the exception. If the call succeeded, return None. Args: timeout (Union[int, float]): The number of seconds before this call times out and raises TimeoutError. Raises: TimeoutError: If the request times out. Returns: Exception: The exception raised by the call, if any.
juraj-google-style
def add(self, pattern: Pattern) -> int:
    """Add a pattern that will be recognized by the matcher.

    Args:
        pattern: The pattern to add.

    Returns:
        An internal index for the pattern.

    Raises:
        ValueError: If the pattern does not have enough operands.
        TypeError: If the pattern is not a non-commutative operation, or
            differs from the operation of previously added patterns.
    """
    inner = pattern.expression
    if self.operation is None:
        # The first pattern added fixes the operation type for this matcher.
        if not isinstance(inner, Operation) or isinstance(inner, CommutativeOperation):
            raise TypeError("Pattern must be a non-commutative operation.")
        self.operation = type(inner)
    elif not isinstance(inner, self.operation):
        raise TypeError(
            "All patterns must be the same operation, expected {} but got {}".format(self.operation, type(inner))
        )
    if op_len(inner) < 3:
        raise ValueError("Pattern has not enough operands.")
    operands = list(op_iter(inner))
    # The first and last operands must be wildcards; record their names.
    first_name = self._check_wildcard_and_get_name(operands[0])
    last_name = self._check_wildcard_and_get_name(operands[-1])
    index = len(self._patterns)
    self._patterns.append((pattern, first_name, last_name))
    # Only the middle operands are indexed in the discrimination net.
    flatterm = FlatTerm.merged(*(FlatTerm(o) for o in operands[1:-1]))
    self._net.add(flatterm, index)
    return index
Add a pattern that will be recognized by the matcher. Args: pattern: The pattern to add. Returns: An internal index for the pattern. Raises: ValueError: If the pattern does not have the correct form. TypeError: If the pattern is not a non-commutative operation.
juraj-google-style
def DeregisterPathSpec(cls, path_spec_type):
    """Deregisters a path specification.

    Args:
        path_spec_type (type): path specification type.

    Raises:
        KeyError: if the path specification is not registered.
    """
    indicator = path_spec_type.TYPE_INDICATOR
    if indicator not in cls._path_spec_types:
        raise KeyError(
            'Path specification type: {0:s} not set.'.format(indicator))
    del cls._path_spec_types[indicator]
    # Also drop the system-level marker when present.
    if indicator in cls._system_level_type_indicators:
        del cls._system_level_type_indicators[indicator]
Deregisters a path specification. Args: path_spec_type (type): path specification type. Raises: KeyError: if path specification is not registered.
juraj-google-style
def _is_closed(self):
    """Return True if the monitored session is closed (for tests only).

    Returns:
        A boolean.
    """
    return self._coordinated_creator.tf_sess is None
Return True if the monitored session is closed. For tests only. Returns: A boolean.
github-repos
async def msetup(self, text_channel):
    """Creates the music gui in the given channel.

    Args:
        text_channel (discord.Channel): The channel for the embed ui to run in.
    """
    if self.mready:
        logger.warning("Attempt to init music when already initialised")
        return
    if self.state != 'starting':
        logger.error("Attempt to init from wrong state ('{}'), must be 'starting'.".format(self.state))
        return
    # NOTE(review): mixes module-level `logger` above with `self.logger`
    # below — confirm which is intended.
    self.logger.debug("Setting up gui")
    self.mchannel = text_channel
    self.new_embed_ui()
    await self.embed.send()
    await self.embed.usend()
    await self.add_reactions()
    self.mready = True
Creates the gui Args: text_channel (discord.Channel): The channel for the embed ui to run in
juraj-google-style
def _create_make_unique(inputs):
    """Replaces the lower bits of each element with iota to make them unique.

    The iota is used to derive the index of each element and also breaks
    ties between equal values.

    Args:
        inputs: A tensor of rank 2 and dtype tf.float32,
            shaped [batch_size, original_size].

    Returns:
        A tensor of the same shape and dtype as `inputs` after the
        element-wise transformation.

    Raises:
        ValueError: If the rank of the input tensor does not equal 2.
    """
    if (inputs.shape.ndims != 2):
        raise ValueError(('Input of top_k_with_unique must be rank-2 but got: %s' % inputs.shape))
    height = inputs.shape[0]
    width = inputs.shape[1]
    zeros = tf.zeros([height, width], dtype=tf.int32)
    # Count of bits needed to index `width` elements; everything below this
    # in each float's bit pattern will be overwritten by the iota.
    log2_ceiling = int(math.ceil(math.log(int(width), 2)))
    next_power_of_two = (1 << log2_ceiling)
    count_mask = (~ (next_power_of_two - 1))
    count_mask_r0 = tf.constant(count_mask)
    count_mask_r2 = tf.fill([height, width], count_mask_r0)
    # Bit pattern of the smallest normal float32 (exponent LSB set).
    smallest_normal = (1 << 23)
    smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32)
    smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0)
    # Mask that clears the sign bit so we can test for +/-0.
    low_bit_mask = (~ (1 << 31))
    low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32)
    low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0)
    iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0), [height, 1])
    # Work on the raw float bit patterns as int32.
    input_r2 = tf.bitcast(inputs, tf.int32)
    abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2)
    if_zero_r2 = tf.equal(abs_r2, zeros)
    # Replace zeros with the smallest normal of matching sign, so masking the
    # low bits below cannot collapse them to identical patterns.
    smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or(input_r2, smallest_normal_r2)
    input_no_zeros_r2 = tf.where(if_zero_r2, smallest_normal_preserving_sign_r2, input_r2)
    # Clear the low bits and stamp the per-column iota into them.
    and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2)
    or_r2 = tf.bitwise.bitwise_or(and_r2, iota)
    return tf.bitcast(or_r2, tf.float32)
Replaces the lower bits of each element with iota. The iota is used to derive the index, and also serves the purpose to make each element unique to break ties. Args: inputs: A tensor with rank of 2 and dtype of tf.float32. [batch_size, original_size]. Returns: A tensor after element wise transformation, with dtype the same as inputs. [batch_size, original_size]. Raises: ValueError: If the rank of the input tensor does not equal 2.
codesearchnet
def measure_each(*qubits: raw_types.Qid,
                 key_func: Callable[[raw_types.Qid], str] = str
                 ) -> List[gate_operation.GateOperation]:
    """Returns a list of operations individually measuring the given qubits.

    The qubits are measured in the computational basis.

    Args:
        *qubits: The qubits to measure.
        key_func: Determines the key of the measurement for each qubit.
            Takes the qubit and returns the key. Defaults to ``str``.

    Returns:
        A list of operations individually measuring the given qubits.
    """
    operations = []
    for qubit in qubits:
        gate = MeasurementGate(1, key_func(qubit))
        operations.append(gate.on(qubit))
    return operations
Returns a list of operations individually measuring the given qubits. The qubits are measured in the computational basis. Args: *qubits: The qubits to measure. key_func: Determines the key of the measurements of each qubit. Takes the qubit and returns the key for that qubit. Defaults to str. Returns: A list of operations individually measuring the given qubits.
codesearchnet
def switches(self):
    """Gets the Switches API client, creating it lazily on first access.

    Returns:
        Switches: the cached client instance.
    """
    client = self.__switches
    if not client:
        client = Switches(self.__connection)
        self.__switches = client
    return client
Gets the Switches API client. Returns: Switches:
codesearchnet
def detach(self) -> Rotation:
    """Returns a copy of the Rotation whose underlying tensor has been
    detached from its torch graph.

    Returns:
        A Rotation backed by a detached tensor.

    Raises:
        ValueError: If neither rotation representation is set.
    """
    rot_mats = self._rot_mats
    if rot_mats is not None:
        return Rotation(rot_mats=rot_mats.detach(), quats=None)
    quats = self._quats
    if quats is not None:
        # Quaternions were already normalized, so skip re-normalization.
        return Rotation(rot_mats=None, quats=quats.detach(), normalize_quats=False)
    raise ValueError('Both rotations are None')
Returns a copy of the Rotation whose underlying Tensor has been detached from its torch graph. Returns: A copy of the Rotation whose underlying Tensor has been detached from its torch graph
github-repos
def load_disease_terms(adapter, genemap_lines, genes=None, hpo_disease_lines=None):
    """Load the OMIM phenotypes into the database.

    Parses the phenotypes from genemap2 lines and attaches the associated
    HPO terms found in the HPO phenotype-to-terms file before loading each
    disease term through the adapter.

    Args:
        adapter(MongoAdapter)
        genemap_lines(iterable(str))
        genes(dict): Dictionary with all genes found in database.
            Fetched from the adapter when not provided.
        hpo_disease_lines(iterable(str)): Fetched remotely when not provided.
    """
    if not genes:
        genes = adapter.genes_by_alias()
    disease_terms = get_mim_phenotypes(genemap_lines=genemap_lines)
    if not hpo_disease_lines:
        hpo_disease_lines = fetch_hpo_phenotype_to_terms()
    hpo_diseases = parse_hpo_diseases(hpo_disease_lines)
    start_time = datetime.now()
    nr_diseases = None
    LOG.info("Loading the hpo disease...")
    for nr_diseases, disease_number in enumerate(disease_terms):
        disease_info = disease_terms[disease_number]
        disease_id = "OMIM:{0}".format(disease_number)
        # Attach HPO terms when this OMIM disease is known to HPO.
        if disease_id in hpo_diseases:
            hpo_terms = hpo_diseases[disease_id]['hpo_terms']
            if hpo_terms:
                disease_info['hpo_terms'] = hpo_terms
        disease_obj = build_disease_term(disease_info, genes)
        adapter.load_disease_term(disease_obj)
    LOG.info("Loading done. Nr of diseases loaded {0}".format(nr_diseases))
    LOG.info("Time to load diseases: {0}".format(datetime.now() - start_time))
Load the omim phenotypes into the database Parse the phenotypes from genemap2.txt and find the associated hpo terms from ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt. Args: adapter(MongoAdapter) genemap_lines(iterable(str)) genes(dict): Dictionary with all genes found in database hpo_disease_lines(iterable(str))
juraj-google-style
def __init__(self, initial_op, kinds=None):
    """Constructor.

    If `kinds` is not specified, all operations will be merged assuming
    they are of the default kind.

    Args:
        initial_op (sc_messages.Operation): the initial version of the
            operation.
        kinds (dict[string, string]): specifies the metric kind for each
            metric name.
    """
    assert isinstance(initial_op, sc_messages.Operation)
    if kinds is None:
        kinds = {}
    self._kinds = kinds
    # Maps metric name -> sign -> accumulated metric values.
    self._metric_values_by_name_then_sign = collections.defaultdict(dict)
    # Work on a copy so the caller's operation is never mutated.
    our_op = encoding.CopyProtoMessage(initial_op)
    self._merge_metric_values(our_op)
    # Metric values now live in the accumulator, not on the stored op.
    our_op.metricValueSets = []
    self._op = our_op
Constructor. If kinds is not specifed, all operations will be merged assuming they are of Kind ``DEFAULT_KIND`` Args: initial_op ( :class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): the initial version of the operation kinds (dict[string,[string]]): specifies the metric kind for each metric name
juraj-google-style
def diff_cleanupSemantic(self, diffs):
    """Reduce the number of edits by eliminating semantically trivial
    equalities, then factor out overlaps between adjacent delete/insert pairs.

    Args:
        diffs: Array of diff tuples. Modified in place.
    """
    changes = False
    equalities = []  # Stack of indices where equalities are found.
    lastEquality = None  # Always equal to diffs[equalities[-1]][1].
    pointer = 0  # Index of current position.
    # Number of chars that changed prior to the equality.
    length_insertions1, length_deletions1 = 0, 0
    # Number of chars that changed after the equality.
    length_insertions2, length_deletions2 = 0, 0
    while pointer < len(diffs):
        if diffs[pointer][0] == self.DIFF_EQUAL:
            equalities.append(pointer)
            length_insertions1, length_insertions2 = length_insertions2, 0
            length_deletions1, length_deletions2 = length_deletions2, 0
            lastEquality = diffs[pointer][1]
        else:
            if diffs[pointer][0] == self.DIFF_INSERT:
                length_insertions2 += len(diffs[pointer][1])
            else:
                length_deletions2 += len(diffs[pointer][1])
            # Eliminate an equality that is smaller or equal to the edits on
            # both sides of it.
            if (lastEquality and (len(lastEquality) <= max(length_insertions1, length_deletions1)) and (len(lastEquality) <= max(length_insertions2, length_deletions2))):
                # Duplicate record: replace the equality with delete + insert.
                diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality))
                diffs[equalities[-1] + 1] = (self.DIFF_INSERT, diffs[equalities[-1] + 1][1])
                # Throw away the equality we just deleted.
                equalities.pop()
                # Throw away the previous equality (it needs to be reevaluated).
                if len(equalities):
                    equalities.pop()
                if len(equalities):
                    pointer = equalities[-1]
                else:
                    pointer = -1
                # Reset the counters.
                length_insertions1, length_deletions1 = 0, 0
                length_insertions2, length_deletions2 = 0, 0
                lastEquality = None
                changes = True
        pointer += 1
    if changes:
        self.diff_cleanupMerge(diffs)
    self.diff_cleanupSemanticLossless(diffs)
    # Find any overlaps between deletions and insertions, e.g.
    # <del>abcxxx</del><ins>xxxdef</ins> -> <del>abc</del>xxx<ins>def</ins>.
    pointer = 1
    while pointer < len(diffs):
        if (diffs[pointer - 1][0] == self.DIFF_DELETE and diffs[pointer][0] == self.DIFF_INSERT):
            deletion = diffs[pointer - 1][1]
            insertion = diffs[pointer][1]
            overlap_length1 = self.diff_commonOverlap(deletion, insertion)
            overlap_length2 = self.diff_commonOverlap(insertion, deletion)
            if overlap_length1 >= overlap_length2:
                # Overlap found; only act when the overlap is at least half of
                # either string.
                if (overlap_length1 >= len(deletion) / 2.0 or overlap_length1 >= len(insertion) / 2.0):
                    # Insert an equality and trim the surrounding edits.
                    diffs.insert(pointer, (self.DIFF_EQUAL, insertion[:overlap_length1]))
                    diffs[pointer - 1] = (self.DIFF_DELETE, deletion[:len(deletion) - overlap_length1])
                    diffs[pointer + 1] = (self.DIFF_INSERT, insertion[overlap_length1:])
                    pointer += 1
            else:
                if (overlap_length2 >= len(deletion) / 2.0 or overlap_length2 >= len(insertion) / 2.0):
                    # Reverse overlap: the end of the insertion matches the
                    # start of the deletion.
                    diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2]))
                    diffs[pointer - 1] = (self.DIFF_INSERT, insertion[:len(insertion) - overlap_length2])
                    diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:])
                    pointer += 1
            pointer += 1
        pointer += 1
Reduce the number of edits by eliminating semantically trivial equalities. Args: diffs: Array of diff tuples.
juraj-google-style
def parse_view(query):
    """Parses an asql query into a View object.

    Any WHERE clause is discarded before parsing, and a trailing
    semicolon is ensured.

    Args:
        query (str): asql query.

    Returns:
        View instance: parsed view.
    """
    lowered = query.lower()
    if 'where' in lowered:
        query = query[:lowered.index('where')]
    if not query.endswith(';'):
        query = query.strip() + ';'
    parsed = _view_stmt.parseString(query)
    return View(parsed)
Parses asql query to view object. Args: query (str): asql query Returns: View instance: parsed view.
juraj-google-style
def CheckCommaSpacing(filename, clean_lines, linenum, error):
    """Checks for horizontal spacing near commas and semicolons.

    Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        error: The function to call with any errors found.
    """
    raw_line = clean_lines.lines_without_raw_strings[linenum]
    line = clean_lines.elided[linenum]
    # Mask out `operator,(` so an overloaded comma operator is not flagged;
    # the raw line is checked too so commas inside raw strings don't trigger.
    masked = ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)
    if Search(r',[^,\s]', masked) and Search(r',[^,\s]', raw_line):
        error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,')
    if Search(r';[^\s};\\)/]', line):
        error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;')
Checks for horizontal spacing near commas and semicolons. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
juraj-google-style
def _FindAncestorAtIndent(node, indent):
    """Find an ancestor of node with the given indentation.

    Arguments:
        node: node to start from. This must not be the tree root.
        indent: indentation string for the ancestor we're looking for.

    Returns:
        An ancestor node with suitable indentation. If no suitable ancestor
        is found, the closest ancestor to the tree root is returned.
    """
    # Iterative form of the original recursion: climb until either the
    # parent's recorded child indent prefixes `indent`, or we reach the
    # child of the tree root.
    current = node
    while current.parent.parent is not None:
        parent_indent = pytree_utils.GetNodeAnnotation(
            current.parent, pytree_utils.Annotation.CHILD_INDENT)
        if parent_indent is not None and indent.startswith(parent_indent):
            break
        current = current.parent
    return current
Find an ancestor of node with the given indentation. Arguments: node: node to start from. This must not be the tree root. indent: indentation string for the ancestor we're looking for. See _AnnotateIndents for more details. Returns: An ancestor node with suitable indentation. If no suitable ancestor is found, the closest ancestor to the tree root is returned.
github-repos
def on_modified(self, event):
    """Called when a file or directory is modified.

    Args:
        event: Watchdog event, ``watchdog.events.DirModifiedEvent`` or
            ``watchdog.events.FileModifiedEvent``.
    """
    # Skip compilation while a previous event left us in an error state.
    if self._event_error:
        return
    path = event.src_path
    self.logger.info(u"Change detected from an edit on: %s", path)
    self.compile_dependencies(path)
Called when a file or directory is modified. Args: event: Watchdog event, ``watchdog.events.DirModifiedEvent`` or ``watchdog.events.FileModifiedEvent``.
juraj-google-style
def _solve(self, sense=None):
    """Remove old constraints and then solve the current problem.

    Args:
        sense: Minimize or maximize the objective (:class:`.lp.ObjectiveSense`).

    Returns:
        The Result object for the solved LP problem.

    Raises:
        MOMAError: chained from any solver error.
    """
    # Remove constraints from the previous solve before re-solving.
    while len(self._remove_constr) > 0:
        self._remove_constr.pop().delete()
    try:
        return self._prob.solve(sense=sense)
    except lp.SolverError as e:
        raise_from(MOMAError(text_type(e)), e)
    finally:
        # Always reset the pending-removal list, even on failure.
        self._remove_constr = []
Remove old constraints and then solve the current problem. Args: sense: Minimize or maximize the objective. (:class:`.lp.ObjectiveSense) Returns: The Result object for the solved LP problem
juraj-google-style
def read_dimvalue(self, dimname, path='/', default=NO_DEFAULT):
    """Returns the value (length) of a dimension.

    Args:
        dimname: Name of the dimension.
        path: path to the group.
        default: return `default` if `dimname` is not present and
            `default` is not `NO_DEFAULT`, else raise self.Error.
    """
    try:
        dimension = self._read_dimensions(dimname, path=path)[0]
        return len(dimension)
    except self.Error:
        if default is not NO_DEFAULT:
            return default
        raise
Returns the value of a dimension. Args: dimname: Name of the variable path: path to the group. default: return `default` if `dimname` is not present and `default` is not `NO_DEFAULT` else raise self.Error.
codesearchnet
def _CreateFeed(client):
    """Creates the feed for DSA page URLs.

    Args:
        client: an AdWordsClient instance.

    Returns:
        A _DSAFeedDetails instance containing details about the created feed.
    """
    feed_service = client.GetService('FeedService', version='v201809')
    # Feed with two attributes: the page URL list and its labels.
    operation = {'operand': {'name': ('DSA Feed %s' % uuid.uuid4()), 'attributes': [{'type': 'URL_LIST', 'name': 'Page URL'}, {'type': 'STRING_LIST', 'name': 'Label'}], 'origin': 'USER'}, 'operator': 'ADD'}
    feed = feed_service.mutate([operation])['value'][0]
    # Return the feed id plus the ids of the two attributes created above.
    return _DSAFeedDetails(feed['id'], feed['attributes'][0]['id'], feed['attributes'][1]['id'])
Creates the feed for DSA page URLs. Args: client: an AdWordsClient instance. Returns: A _DSAFeedDetails instance containing details about the created feed.
codesearchnet
def get_saver(scope, collections=(tf.GraphKeys.GLOBAL_VARIABLES,), context=None, **kwargs):
    """Builds a `tf.train.Saver` for the scope or module, with normalized names.

    The names of the variables are normalized to remove the scope prefix,
    which allows the same variables to be restored into another similar
    scope or module with a complementary saver.

    Args:
        scope: Scope or module. Variables within will be saved or restored.
        collections: Sequence of variable collections to restrict the saver to.
        context: Scope or module, identical to or parent of `scope`. If given,
            this will be used as the stripped prefix.
        **kwargs: Extra keyword arguments to pass to tf.train.Saver.

    Returns:
        A `tf.train.Saver` object for Variables in the scope or module.
    """
    var_list = {}
    for coll in collections:
        normalized = get_normalized_variable_map(scope, coll, context)
        var_list.update(normalized)
    return tf.train.Saver(var_list=var_list, **kwargs)
Builds a `tf.train.Saver` for the scope or module, with normalized names. The names of the variables are normalized to remove the scope prefix. This allows the same variables to be restored into another similar scope or module using a complementary `tf.train.Saver` object. Args: scope: Scope or module. Variables within will be saved or restored. collections: Sequence of collections of variables to restrict `tf.train.Saver` to. By default this is `tf.GraphKeys.GLOBAL_VARIABLES` which includes moving averages variables as well as trainable variables. context: Scope or module, identical to or parent of `scope`. If given, this will be used as the stripped prefix. **kwargs: Extra keyword arguments to pass to tf.train.Saver. Returns: A `tf.train.Saver` object for Variables in the scope or module.
codesearchnet
def set_ylim(self, ylim):
    """Set y-axis limits.

    Accepts a two-element sequence to set the y-axis limits.

    Args:
        ylim (list): lower and upper bounds.

    Raises:
        ValueError: ylim must contain two elements.
        ValueError: Min must be less than max.
    """
    if len(ylim) != 2:
        raise ValueError('ylim must contain two elements')
    lower, upper = ylim[0], ylim[1]
    if upper < lower:
        raise ValueError('Min must be less than Max')
    self.options['min_y'] = lower
    self.options['max_y'] = upper
Set y-axis limits. Accepts a two-element list to set the y-axis limits. Args: ylim (list): lower and upper bounds Raises: ValueError: ylim must contain two elements ValueError: Min must be less than max
codesearchnet
def forward(self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:
    """Pre-norm transformer encoder layer: attention then MLP, each with a
    residual connection.

    Args:
        hidden_states (`torch.FloatTensor`):
            Input of shape `(batch, seq_len, embed_dim)`.
        position_embeddings (`Tuple[torch.Tensor, torch.Tensor]`):
            Positional embeddings for the query and key in the attention
            mechanism.
        attention_mask (`torch.FloatTensor`, *optional*):
            Attention mask where padding elements are indicated by very
            large negative values.
        output_attentions (`bool`, *optional*, defaults to `False`):
            Whether to also return the attention weights.

    Returns:
        Tuple of the layer output, plus the attention weights when
        `output_attentions` is True.
    """
    # Self-attention sub-block with pre-layernorm and residual.
    residual = hidden_states
    hidden_states = self.layer_norm1(hidden_states)
    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, output_attentions=output_attentions)
    hidden_states = residual + hidden_states
    # MLP sub-block with pre-layernorm and residual.
    residual = hidden_states
    hidden_states = self.layer_norm2(hidden_states)
    hidden_states = self.mlp(hidden_states)
    hidden_states = residual + hidden_states
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (attn_weights,)
    return outputs
Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(batch, seq_len, embed_dim)`. Represents the hidden states from the previous layer or the input embeddings. position_embeddings (`Tuple[torch.Tensor, torch.Tensor]`): A tuple of two tensors, each of shape `(batch, seq_len, embed_dim)`. Represents absolute positional embeddings for the query and key in the attention mechanism. attention_mask (`torch.FloatTensor`): Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def get_container_service(access_token, subscription_id, resource_group, service_name):
    """Get details about an Azure Container Service.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        service_name (str): Name of container service.

    Returns:
        HTTP response. JSON model.
    """
    endpoint = (
        '{}/subscriptions/{}/resourcegroups/{}'
        '/providers/Microsoft.ContainerService/ContainerServices/{}'
        '?api-version={}'
    ).format(get_rm_endpoint(), subscription_id, resource_group,
             service_name, ACS_API)
    return do_get(endpoint, access_token)
Get details about an Azure Container Server Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. service_name (str): Name of container service. Returns: HTTP response. JSON model.
juraj-google-style
def csv_to_matrix(csv_file_path):
    """Load a CSV file into a Python matrix (list of rows) of strings.

    Uses the standard :mod:`csv` module so quoted fields containing commas
    are parsed correctly and line endings are not leaked into the last
    column (the previous hand-rolled ``row.split(',')`` kept the trailing
    newline attached to the final field).

    Args:
        csv_file_path: Full path to a valid CSV file
            (e.g. c:/ladybug/test.csv).

    Returns:
        list[list[str]]: One inner list of strings per CSV row.
    """
    import csv
    # newline='' is the documented way to open files for the csv module.
    with open(csv_file_path, newline='') as csv_data_file:
        return [row for row in csv.reader(csv_data_file)]
Load a CSV file into a Python matrix of strings. Args: csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
codesearchnet
def List(self, request, global_params=None):
    """Lists previously requested builds.

    Previously requested builds may still be in-progress, or may have
    finished successfully or unsuccessfully.

    Args:
        request: (CloudbuildProjectsLocationsBuildsListRequest) input message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (ListBuildsResponse) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('List'), request, global_params=global_params)
Lists previously requested builds. Previously requested builds may still be in-progress, or may have finished successfully or unsuccessfully. Args: request: (CloudbuildProjectsLocationsBuildsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListBuildsResponse) The response message.
github-repos
def value_to_string(self, obj):
    """Convert the field value from the provided model to a string.

    Used during model serialization.

    Args:
        obj: db.Model, model object.

    Returns:
        string, the serialized field value.
    """
    return self.get_prep_value(self._get_val_from_obj(obj))
Convert the field value from the provided model to a string. Used during model serialization. Args: obj: db.Model, model object Returns: string, the serialized field value
juraj-google-style
def download_image(self, handle, dest):
    """Copies the file referenced by the handle to the destination.

    Args:
        handle (str): path to copy over.
        dest (str): path to copy to.

    Returns:
        None
    """
    source_path = self._prefixed(handle)
    shutil.copyfile(source_path, dest)
Copies the file referenced by the handle over to the destination Args: handle (str): path to copy over dest (str): path to copy to Returns: None
codesearchnet
def create_sketch(self, name, description):
    """Create a new sketch with the specified name and description.

    Args:
        name (str): Title of sketch.
        description (str): Description of sketch.

    Returns:
        int: ID of created sketch.
    """
    resource_url = '{0:s}/sketches/'.format(self.api_base_url)
    payload = {'name': name, 'description': description}
    response = self.session.post(resource_url, json=payload)
    objects = response.json()['objects']
    return objects[0]['id']
Create a new sketch with the specified name and description. Args: name (str): Title of sketch description (str): Description of sketch Returns: int: ID of created sketch
juraj-google-style
def dismantle_func_graph(func_graph):
    """Removes reference cycles in a `FuncGraph`.

    Helpful for making sure the garbage collector doesn't need to run when
    the FuncGraph goes out of scope.

    Args:
        func_graph: A `FuncGraph` object to destroy. `func_graph` is
            unusable after this function.
    """
    captures = func_graph._function_captures
    captures.clear()
    ops.dismantle_graph(func_graph)
Removes reference cycles in `func_graph` FuncGraph. Helpful for making sure the garbage collector doesn't need to run when the FuncGraph goes out of scope, e.g. in tests using defun with @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True). Args: func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable after this function.
github-repos
def wait_for_interrupt(self, check_interval=1.0, max_time=None):
    """Run the event loop until a ctrl-c interrupt or `max_time` passes.

    Wakes up every `check_interval` seconds (clamped to at least 0.01) to
    check for interrupt signals or expiry of the maximum runtime.

    Args:
        check_interval (float): How often to wake up. Defaults to 1s;
            cannot go below 0.01s.
        max_time (float): Stop the loop after this many seconds, useful
            for testing. None means run until interrupted.
    """
    self.start()
    interval = max(check_interval, 0.01)
    elapsed = 0
    try:
        while max_time is None or elapsed < max_time:
            try:
                time.sleep(interval)
            except IOError:
                # Sleep can be interrupted by signals; just retry.
                pass
            elapsed += interval
    except KeyboardInterrupt:
        pass
Run the event loop until we receive a ctrl-c interrupt or max_time passes. This method will wake up every 1 second by default to check for any interrupt signals or if the maximum runtime has expired. This can be set lower for testing purpose to reduce latency but in production settings, this can cause increased CPU usage so 1 second is an appropriate value. Args: check_interval (float): How often to wake up and check for a SIGTERM. Defaults to 1s. Setting this faster is useful for unit testing. Cannot be < 0.01 s. max_time (float): Stop the event loop after max_time seconds. This is useful for testing purposes. Defaults to None, which means run forever until interrupt.
codesearchnet
def broadcast_impl(self, old_slices, old_shape, new_shape):
    """Implementation of a broadcast operation.

    Args:
        old_slices: LaidOutTensor.
        old_shape: Shape.
        new_shape: Shape.

    Returns:
        LaidOutTensor.
    """
    slice_shape = self.slice_shape(new_shape)
    def _broadcast_one(x):
        # Adding zeros of the target slice shape broadcasts the expanded
        # input up to the new shape.
        expanded = _expand_dims(x, old_shape, new_shape)
        return tf.zeros(slice_shape, dtype=x.dtype) + expanded
    return self.slicewise(_broadcast_one, old_slices)
Implementation of a broadcast operation. Args: old_slices: LaidOutTensor. old_shape: Shape. new_shape: Shape. Returns: LaidOutTensor.
codesearchnet
def get_complete_ph_dos(partial_dos_path, phonopy_yaml_path):
    """Creates a pymatgen CompletePhononDos from phonopy output files.

    Args:
        partial_dos_path: path to the partial_dos.dat file.
        phonopy_yaml_path: path to the phonopy.yaml file, needed to
            extract the structure.
    """
    dos_data = np.loadtxt(partial_dos_path).transpose()
    yaml_data = loadfn(phonopy_yaml_path)
    structure = get_structure_from_dict(yaml_data['primitive_cell'])
    # First row holds the frequencies; the rest are per-site densities.
    frequencies, site_doses = dos_data[0], dos_data[1:]
    total_dos = PhononDos(frequencies, site_doses.sum(axis=0))
    pdoss = {site: pdos.tolist() for site, pdos in zip(structure, site_doses)}
    return CompletePhononDos(structure, total_dos, pdoss)
Creates a pymatgen CompletePhononDos from a partial_dos.dat and phonopy.yaml files. The second is produced when generating a Dos and is needed to extract the structure. Args: partial_dos_path: path to the partial_dos.dat file. phonopy_yaml_path: path to the phonopy.yaml file.
juraj-google-style
def do_check_pep8(files, status):
    """Run the flake8 tool against the list of supplied files.

    Appends any linting errors to the returned status list.

    Args:
        files (str): list of files to run flake8 against.
        status (list): list of pre-receive check failures to eventually
            print to the user.

    Returns:
        status list of current pre-receive check failures. Might be empty.
    """
    for file_name in files:
        output = run('flake8', '--max-line-length=120', '{0}'.format(file_name))
        if output:
            message = "Python PEP8/Flake8: {0}: {1}".format(file_name, output)
            status.append(message)
    return status
Run the python pep8 tool against the list of supplied files. Append any linting errors to the returned status list Args: files (str): list of files to run pep8 against status (list): list of pre-receive check failures to eventually print to the user Returns: status list of current pre-receive check failures. Might be an empty list.
juraj-google-style
def activate_nsxcontroller(self, **kwargs):
    """Activate NSX Controller.

    Args:
        name (str): nsxcontroller name.

    Returns:
        Return value of the device callback.
    """
    name = kwargs.pop('name')
    activate = getattr(self._brocade_tunnels, 'nsx_controller_activate')
    config = activate(name=name)
    return self._callback(config)
Activate NSX Controller Args: name (str): nsxcontroller name callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
codesearchnet
def extract_header_comment_key_value_tuples_from_file(file_descriptor):
    """Extracts tuples representing comments and localization entries from a
    strings file.

    Args:
        file_descriptor (file): The file to read the tuples from.

    Returns:
        list: List of (header_comment, comments, key, value) tuples.
    """
    file_data = file_descriptor.read()
    matches = re.findall(HEADER_COMMENT_KEY_VALUE_TUPLES_REGEX, file_data,
                         re.MULTILINE | re.DOTALL)
    results = []
    for header_comment, _unused, raw_comments, key, value in matches:
        # An entry with no comments still yields a single empty comment.
        comments = re.findall('/\\* (.*?) \\*/', raw_comments) or [u'']
        results.append((header_comment, comments, key, value))
    return results
Extracts tuples representing comments and localization entries from strings file. Args: file_descriptor (file): The file to read the tuples from Returns: list : List of tuples representing the headers and localization entries.
codesearchnet
def make_per_replica_value(value, devices):
    """Creates a `PerReplica` object whose values reside in `devices`.

    Args:
        value: a tensor-convertible value or a `IndexedSlicesValue`, or a
            callable that takes one argument (`device_idx`) and should
            return the value to be created on devices[device_idx]. A list
            is indexed per device; any other value is replicated.
        devices: a list of device strings to create `PerReplica` values on.

    Returns:
        A `PerReplica` object.
    """
    values = []
    for device_idx, device in enumerate(devices):
        # Resolve the per-device value: callable > list > constant.
        if callable(value):
            v = value(device_idx)
        elif isinstance(value, list):
            v = value[device_idx]
        else:
            v = value
        if isinstance(v, IndexedSlicesValue):
            # Materialize each IndexedSlices component on the target device.
            with ops.device(device):
                values.append(IndexedSlices(values=array_ops.identity(v.values), indices=array_ops.identity(v.indices), dense_shape=array_ops.identity(v.dense_shape)))
        else:
            with ops.device(device):
                values.append(array_ops.identity(v))
    return value_lib.PerReplica(values)
Creates a `PerReplica` object whose values reside in `devices`. Args: value: a tensor-convertible value or a `IndexedSlicesValue`, or a callable that takes one argument (`device_idx`) and should return the value that is going to be created on devices[device_idx]. devices: a list of device strings to create `PerReplica` values on. Returns: A `PerReplica` object.
github-repos
def _VerifyValues(self, pool_func, pool_grad_func, input_sizes, ksize, strides, padding, pool_grad_grad_func=None):
    """Verifies the output values of the pooling function for every test
    data format.

    Args:
        pool_func: Pooling function to be called, e.g., tf.nn.max_pool2d.
        pool_grad_func: Corresponding pooling gradient function.
        input_sizes: Input tensor dimensions.
        ksize: The kernel size dimensions.
        strides: The stride dimensions.
        padding: Padding type.
        pool_grad_grad_func: Second-order gradient function, if available.
    """
    for fmt in GetTestConfigs():
        self._VerifyOneTest(pool_func, pool_grad_func, input_sizes, ksize,
                            strides, padding, fmt,
                            pool_grad_grad_func=pool_grad_grad_func)
Verifies the output values of the pooling function. Args: pool_func: Pooling function to be called, e.g., tf.nn.max_pool2d pool_grad_func: Corresponding pooling gradient function. input_sizes: Input tensor dimensions. ksize: The kernel size dimensions strides: The stride dimensions padding: Padding type. pool_grad_grad_func: Second-order gradient function, if available.
github-repos
def _value_and_batch_jacobian(f, x):
    """Enables uniform interface to value and batch jacobian calculation.

    Works in both eager and graph modes.

    Args:
        f: The scalar function to evaluate.
        x: The value at which to compute the value and the batch jacobian.

    Returns:
        A tuple (f(x), J(x)), where J(x) is the batch jacobian.
    """
    if tf.executing_eagerly():
        # Eager mode needs a tape to record the computation.
        with tf.GradientTape() as tape:
            tape.watch(x)
            value = f(x)
        batch_jacobian = tape.batch_jacobian(value, x)
    else:
        value = f(x)
        batch_jacobian = gradients.batch_jacobian(value, x)
    return value, batch_jacobian
Enables uniform interface to value and batch jacobian calculation. Works in both eager and graph modes. Arguments: f: The scalar function to evaluate. x: The value at which to compute the value and the batch jacobian. Returns: A tuple (f(x), J(x)), where J(x) is the batch jacobian.
juraj-google-style
def read_frames(file_path, frame_size, hop_size, start=0.0, end=float('inf'), buffer_size=5760000):
    """Read an audio file frame by frame, yielding the frames one after
    another.

    Args:
        file_path (str): Path to the file to read.
        frame_size (int): The number of samples per frame.
        hop_size (int): The number of samples between two frames.
        start (float): Start in seconds to read from.
        end (float): End in seconds to read to. ``inf`` means to the end
            of the file.
        buffer_size (int): Number of samples to load into memory at once.

    Returns:
        Generator: Yields ``(frame, is_last)`` tuples, where ``is_last``
        indicates the final (zero-padded) frame.
    """
    # Samples left over from the previous block, carried into the next one.
    rest_samples = np.array([], dtype=np.float32)
    for block in read_blocks(file_path, start=start, end=end, buffer_size=buffer_size):
        block = np.concatenate([rest_samples, block])
        current_sample = 0
        while ((current_sample + frame_size) < block.size):
            frame = block[current_sample:(current_sample + frame_size)]
            (yield (frame, False))
            current_sample += hop_size
        rest_samples = block[current_sample:]
    if (rest_samples.size > 0):
        # Zero-pad the final partial frame up to frame_size.
        rest_samples = np.pad(rest_samples, (0, (frame_size - rest_samples.size)), mode='constant', constant_values=0)
        (yield (rest_samples, True))
Read an audio file frame by frame. The frames are yielded one after another. Args: file_path (str): Path to the file to read. frame_size (int): The number of samples per frame. hop_size (int): The number of samples between two frames. start (float): Start in seconds to read from. end (float): End in seconds to read to. ``inf`` means to the end of the file. buffer_size (int): Number of samples to load into memory at once and return as a single block. The exact number of loaded samples depends on the block size of the audioread library, so the block can be up to x samples larger, where x is typically 1024 or 4096. Returns: Generator: A generator yielding a tuple for every frame. The first item is the frame and the second a boolean indicating if it is the last frame.
codesearchnet
class RealmForOpenQAOutput(ModelOutput):
    """
    Outputs of [`RealmForOpenQA`] models.

    Args:
        reader_output (`dict`):
            Reader output.
        predicted_answer_ids (`torch.LongTensor` of shape `(answer_sequence_length)`):
            Predicted answer ids.
    """

    reader_output: dict = None
    predicted_answer_ids: Optional[torch.LongTensor] = None
Outputs of [`RealmForOpenQA`] models. Args: reader_output (`dict`): Reader output. predicted_answer_ids (`torch.LongTensor` of shape `(answer_sequence_length)`): Predicted answer ids.
github-repos
def gcd_float(numbers, tol=1e-8):
    """Returns the greatest common divisor for a sequence of numbers.

    Uses a numerical tolerance, so can be used on floats.

    Args:
        numbers: Sequence of numbers.
        tol: Numerical tolerance.

    Returns:
        Greatest common divisor of numbers.
    """
    def _gcd_pair(a, b):
        # Euclid's algorithm, terminating once the remainder is within tol.
        while b > tol:
            a, b = b, a % b
        return a

    result = numbers[0]
    for number in numbers:
        result = _gcd_pair(result, number)
    return result
Returns the greatest common divisor for a sequence of numbers. Uses a numerical tolerance, so can be used on floats Args: numbers: Sequence of numbers. tol: Numerical tolerance Returns: (int) Greatest common divisor of numbers.
juraj-google-style
def copy_pkg(self, filename, id_=-1):
    """Copy a package to the distribution server.

    Bundle-style packages must be zipped prior to copying.

    Args:
        filename: Full path to file to upload.
        id_: ID of Package object to associate with, or -1 for new
            packages (default).
    """
    self._copy(filename, id_=id_, file_type=PKG_FILE_TYPE)
Copy a package to the distribution server. Bundle-style packages must be zipped prior to copying. Args: filename: Full path to file to upload. id_: ID of Package object to associate with, or -1 for new packages (default).
juraj-google-style
def noise_get_turbulence(n: tcod.noise.Noise, f: Sequence[float], oc: float, typ: int = NOISE_DEFAULT) -> float:
    """Return the turbulence noise sampled from the ``f`` coordinate.

    Args:
        n (Noise): A Noise instance.
        f (Sequence[float]): The point to sample the noise from.
        oc (float): The number of octaves. Should be more than 1.
        typ (int): The noise algorithm to use.

    Returns:
        float: The sampled noise value.
    """
    point = ffi.new('float[4]', f)
    raw = lib.TCOD_noise_get_turbulence_ex(n.noise_c, point, oc, typ)
    return float(raw)
Return the turbulence noise sampled from the ``f`` coordinate. Args: n (Noise): A Noise instance. f (Sequence[float]): The point to sample the noise from. typ (int): The noise algorithm to use. octaves (float): The number of octaves. Should be more than 1. Returns: float: The sampled noise value.
codesearchnet
def wait_until_finish(self, duration=None):
    """Waits until the pipeline finishes and returns the final status.

    Args:
        duration (int): The time to wait (in milliseconds) for the job to
            finish. :data:`None` waits indefinitely.

    Raises:
        NotImplementedError: If the runner does not support this operation
            (i.e. the pipeline is not already in a terminal state).
    """
    if PipelineState.is_terminal(self._state):
        return
    raise NotImplementedError()
Waits until the pipeline finishes and returns the final status. Args: duration (int): The time to wait (in milliseconds) for job to finish. If it is set to :data:`None`, it will wait indefinitely until the job is finished. Raises: IOError: If there is a persistent problem getting job information. NotImplementedError: If the runner does not support this operation. Returns: The final state of the pipeline, or :data:`None` on timeout.
github-repos
def normalize_docroot(app, root):
    """Creates a package-list URL and a link base from a docroot element.

    Args:
        app: the global app object.
        root: the docroot element [string or dictionary].
    """
    # NOTE: `basestring` means this module targets Python 2.
    srcdir = app.env.srcdir
    default_version = app.config.javalink_default_version
    if isinstance(root, basestring):
        # A bare string provides both the root URL and the link base.
        (url, base) = _parse_docroot_str(srcdir, root)
        return {'root': url, 'base': base, 'version': default_version}
    else:
        normalized = {}
        normalized['root'] = _parse_docroot_str(srcdir, root['root'])[0]
        # Fall back to the root for the base when no explicit base is given.
        if 'base' in root:
            normalized['base'] = _parse_docroot_str(srcdir, root['base'])[1]
        else:
            normalized['base'] = _parse_docroot_str(srcdir, root['root'])[1]
        if 'version' in root:
            normalized['version'] = root['version']
        else:
            normalized['version'] = default_version
        return normalized
Creates a package-list URL and a link base from a docroot element. Args: app: the global app object root: the docroot element [string or dictionary]
juraj-google-style
def plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:
    """Plot the state histogram from a single result with repetitions.

    States is a bitstring representation of all the qubit states in a
    single result. This function assumes each measurement gate applies to
    only a single qubit.

    Args:
        result: The trial results to plot.

    Returns:
        The histogram. A list of values plotted on the y-axis.
    """
    # Imported lazily so the module does not require matplotlib.
    import matplotlib.pyplot as plt
    num_qubits = len(result.measurements.keys())
    states = (2 ** num_qubits)
    values = np.zeros(states)
    # One row per repetition, one column per measured qubit.
    measurement_by_result = np.array([v.transpose()[0] for (k, v) in result.measurements.items()]).transpose()
    for meas in measurement_by_result:
        # Interpret the per-qubit bits as a binary state index.
        state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2)
        values[state_ind] += 1
    plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)]
    plt.bar(np.arange(states), values, tick_label=plot_labels)
    plt.xlabel('qubit state')
    plt.ylabel('result count')
    plt.show()
    return values
Plot the state histogram from a single result with repetitions. States is a bitstring representation of all the qubit states in a single result. Currently this function assumes each measurement gate applies to only a single qubit. Args: result: The trial results to plot. Returns: The histogram. A list of values plotted on the y-axis.
codesearchnet
def _maybe_download_corpus(tmp_dir):
    """Download and unpack the One Billion Word benchmark corpus.

    The download URL literal was garbled (only "http: " plus the file name
    remained); this restores the statmt.org address the LM1B benchmark is
    published at. The download is skipped when the tarball already exists.

    Args:
        tmp_dir: directory containing dataset; the archive is downloaded
            to and extracted into this directory.
    """
    corpus_url = ("http://www.statmt.org/lm-benchmark/"
                  "1-billion-word-language-modeling-benchmark-r13output.tar.gz")
    corpus_filename = os.path.basename(corpus_url)
    corpus_filepath = os.path.join(tmp_dir, corpus_filename)
    if not os.path.exists(corpus_filepath):
        generator_utils.maybe_download(tmp_dir, corpus_filename, corpus_url)
    with tarfile.open(corpus_filepath, "r:gz") as corpus_tar:
        corpus_tar.extractall(tmp_dir)
Download and unpack the corpus. Args: tmp_dir: directory containing dataset.
juraj-google-style
def __getattr__(self, name: str) -> np.ndarray:
    """Return the named attribute, loading and caching its values from file
    on first access.

    Args:
        name (str): Name of the attribute.

    Raises:
        AttributeError: if the attribute does not exist.
    """
    try:
        storage = self.__dict__["storage"]
        vals = storage[name]
        if vals is None:
            # Lazily materialize from the HDF5 group for this axis.
            group = ["/row_attrs/", "/col_attrs/"][self.axis]
            vals = loompy.materialize_attr_values(self.ds._file[group][name][:])
            storage[name] = vals
        return vals
    except KeyError:
        raise AttributeError(f"'{type(self)}' object has no attribute '{name}'")
Return the named attribute Args: name (str) Name of the attribute Remarks: The values will be loaded from file, and properly HTML unescaped
juraj-google-style
def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False):
    """Return whether a path is a subpath of another.

    Args:
        base_path: The base path.
        test_path: The path which we are testing.
        trailing_slash: If True, the trailing slash is treated with
            importance. For example, ``/images/`` is a directory while
            ``/images`` is a file.
        wildcards: If True, globbing wildcards are matched against paths.
    """
    def _as_dir(path):
        if trailing_slash:
            # Drop the final component; what remains is the directory.
            return path.rsplit('/', 1)[0] + '/'
        return path if path.endswith('/') else path + '/'

    base = _as_dir(base_path)
    candidate = _as_dir(test_path)
    if wildcards:
        return fnmatch.fnmatchcase(candidate, base)
    return candidate.startswith(base)
Return whether the a path is a subpath of another. Args: base_path: The base path test_path: The path which we are testing trailing_slash: If True, the trailing slash is treated with importance. For example, ``/images/`` is a directory while ``/images`` is a file. wildcards: If True, globbing wildcards are matched against paths
juraj-google-style
def load_kbs(kbs_files):
    """Load knowledge bases (without caching).

    Args:
        kbs_files: list of custom paths you can specify to override the
            default values. If a path starts with "kb:", the kb will be
            loaded from the database.
    """
    kbs = {}
    kbs['journals_re'] = build_journals_re_kb(kbs_files['journals-re'])
    kbs['journals'] = load_kb(kbs_files['journals'], build_journals_kb)
    kbs['report-numbers'] = build_reportnum_kb(kbs_files['report-numbers'])
    kbs['authors'] = build_authors_kb(kbs_files['authors'])
    kbs['books'] = build_books_kb(kbs_files['books'])
    kbs['publishers'] = load_kb(kbs_files['publishers'], build_publishers_kb)
    kbs['special_journals'] = build_special_journals_kb(kbs_files['special-journals'])
    kbs['collaborations'] = load_kb(kbs_files['collaborations'], build_collaborations_kb)
    return kbs
Load kbs (without caching) Args: - kb_files: list of custom paths you can specify to override the default values If path starts with "kb:", the kb will be loaded from the database
juraj-google-style
def disable_switchport(self, inter_type, inter):
    """Change an interface's operation to L3.

    Args:
        inter_type: The type of interface you want to configure.
            Ex. tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure. Ex. 1/0/1.

    Returns:
        True if command completes successfully or False if not.
    """
    config = ET.Element('config')
    interface = ET.SubElement(config, 'interface',
                              xmlns='urn:brocade.com:mgmt:brocade-interface')
    int_node = ET.SubElement(interface, inter_type)
    ET.SubElement(int_node, 'name').text = inter
    # operation='delete' removes the switchport config, making the port L3.
    ET.SubElement(int_node, 'switchport-basic', operation='delete')
    try:
        self._callback(config)
    except Exception as error:
        logging.error(error)
        return False
    return True
Change an interface's operation to L3. Args: inter_type: The type of interface you want to configure. Ex. tengigabitethernet, gigabitethernet, fortygigabitethernet. inter: The ID for the interface you want to configure. Ex. 1/0/1 Returns: True if command completes successfully or False if not. Raises: None
juraj-google-style
def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):
    """Print the input of the selected tasks to the given stream.

    Args:
        varnames: List of Abinit variables. If not None, only the variables
            in varnames are selected and printed.
        nids: List of node identifiers. By default all nodes are shown.
        wslice: Slice object used to select works.
        stream: File-like object. Default: sys.stdout.
    """
    if (varnames is not None):
        # Build {varname -> [(task, value), ...]} for the selected tasks.
        varnames = [s.strip() for s in list_strings(varnames)]
        dlist = collections.defaultdict(list)
        for task in self.select_tasks(nids=nids, wslice=wslice):
            dstruct = task.input.structure.as_dict(fmt='abivars')
            for vname in varnames:
                # Look the variable up in the input first, then fall back
                # to the structure's abivars representation.
                value = task.input.get(vname, None)
                if (value is None):
                    value = dstruct.get(vname, None)
                if (value is not None):
                    dlist[vname].append((task, value))
        # Report, per variable, which tasks define it and with what value.
        for vname in varnames:
            tv_list = dlist[vname]
            if (not tv_list):
                stream.write(('[%s]: Found 0 tasks with this variable\n' % vname))
            else:
                stream.write(('[%s]: Found %s tasks with this variable\n' % (vname, len(tv_list))))
                for (i, (task, value)) in enumerate(tv_list):
                    stream.write(('   %s --> %s\n' % (str(value), task)))
            stream.write('\n')
    else:
        # No variable filter: dump the full input (plus dependencies) of
        # each selected task, separated by a '=' ruler.
        lines = []
        for task in self.select_tasks(nids=nids, wslice=wslice):
            s = task.make_input(with_header=True)
            if task.deps:
                s += ('\n\nDependencies:\n' + '\n'.join((str(dep) for dep in task.deps)))
            else:
                s += '\n\nDependencies: None'
            lines.append((((((2 * '\n') + (80 * '=')) + '\n') + s) + (2 * '\n')))
        stream.writelines(lines)
Print the input of the tasks to the given stream. Args: varnames: List of Abinit variables. If not None, only the variables in varnames are selected and printed. nids: List of node identifiers. By default all nodes are shown wslice: Slice object used to select works. stream: File-like object, Default: sys.stdout
codesearchnet
def hist(hist_function, *, options={}, **interact_params):
    """Generate an interactive histogram driven by ``hist_function``.

    Args:
        hist_function: Array of numbers, or a function that takes the
            interact parameters and returns such an array; the numbers are
            plotted in the resulting histogram.
        options (dict): Mark/figure options (e.g. bins, normalized, scales).
            NOTE(review): mutable default argument, but it is only read
            here, never mutated.
        interact_params: Keyword arguments in the same format as
            `ipywidgets.interact`; one is required per argument of
            `hist_function`.

    Returns:
        VBox with two children: the interactive controls and the figure.
    """
    # Mark spec: the sample data plus bin/normalization options; scales are
    # resolved lazily from the figure's x/y scales.
    params = {'marks': [{'sample': _array_or_placeholder(hist_function), 'bins': _get_option('bins'), 'normalized': _get_option('normalized'), 'scales': (lambda opts: {'sample': opts['x_sc'], 'count': opts['y_sc']})}]}
    # Reuse a caller-provided figure ('_fig' option) or create a fresh one.
    fig = (options.get('_fig', False) or _create_fig(options=options))
    [hist] = _create_marks(fig=fig, marks=[bq.Hist], options=options, params=params)
    _add_marks(fig, [hist])

    def wrapped(**interact_params):
        # Recompute the sample whenever a widget value changes.
        hist.sample = util.maybe_call(hist_function, interact_params)
    controls = widgets.interactive(wrapped, **interact_params)
    return widgets.VBox([controls, fig])
Generates an interactive histogram that allows users to change the parameters of the input hist_function. Args: hist_function (Array | (*args -> Array int | Array float)): Function that takes in parameters to interact with and returns an array of numbers. These numbers will be plotted in the resulting histogram. Kwargs: {options} interact_params (dict): Keyword arguments in the same format as `ipywidgets.interact`. One argument is required for each argument of `hist_function`. Returns: VBox with two children: the interactive controls and the figure. >>> def gen_random(n_points): ... return np.random.normal(size=n_points) >>> hist(gen_random, n_points=(0, 1000, 10)) VBox(...)
codesearchnet
def WriteExecution(self, execution):
    """Write an Execution proto with the writer.

    Args:
        execution: An Execution proto, describing a TensorFlow op or graph
            execution event.
    """
    debug_event = debug_event_pb2.DebugEvent(execution=execution)
    # Stamp a wall time onto the event before handing it to the native writer.
    self._EnsureTimestampAdded(debug_event)
    _pywrap_debug_events_writer.WriteExecution(self._dump_root, debug_event)
Write a Execution proto with the writer. Args: execution: An Execution proto, describing a TensorFlow op or graph execution event.
github-repos
def process_obj(self, obj: Union[URIRef, Literal, str]) -> Union[URIRef, Literal]:
    """Convert ``obj`` into the appropriate rdflib node type.

    Args:
        obj: Entity to be converted. Already-typed ``URIRef``/``Literal``
            values pass through unchanged; strings are classified as URL,
            prefixed CURIE, or plain literal.

    Returns:
        URIRef or Literal for the object provided.

    Raises:
        SystemExit: If object is a dict or list (objects must come in one
            at a time; containers indicate broken upstream data).
    """
    if isinstance(obj, (dict, list)):
        exit(str(obj) + ': should be str or intended to be a URIRef or Literal.')
    if isinstance(obj, (Literal, URIRef)):
        # Already a node: just make sure its prefix is registered.
        prefix = self.find_prefix(obj)
        if prefix:
            self.process_prefix(prefix)
        return obj
    # NOTE(review): the original condition was truncated during extraction
    # ("if 'http' == obj[:4] and ..."); reconstructed as an http(s) URL test.
    if obj.startswith(('http://', 'https://')):
        prefix = self.find_prefix(obj)
        if prefix:
            self.process_prefix(prefix)
        return URIRef(obj)
    # "prefix:info" strings resolve through a registered namespace.
    if ':' in str(obj):
        presumed_prefix, info = obj.split(':', 1)
        namespace: Union[Namespace, None] = self.process_prefix(presumed_prefix)
        if namespace:
            return namespace[info]
    # Fallback: treat the value as a plain literal.
    return Literal(obj)
Gives component the proper node type Args: obj: Entity object to be converted to its appropriate node type Returns: URIRef or Literal type of the object provided. Raises: SystemExit: If object is a dict or list it becomes str with broken data. Needs to come in one object at a time.
juraj-google-style
def translate(self, entity, identifier):
    """Given an id, return its counterpart: ext id to CM id and vice versa.

    Args:
        entity: The name of the entity the ID relates to.
        identifier: Ext id or actual CM id to map.

    Returns:
        The mapped counterpart id, or None when no mapping is known.
    """
    mapping = self._id_map.get(entity)
    if mapping is None:
        return None
    return mapping.get(identifier)
Given an id, returns its counterpart. ext id to cm id and vice versa. Args: entity: The name of the entity for which the ID relates. identifier: Ext id or actual CM id to map.
github-repos
def decode_der(cert_der):
    """Decode a DER-encoded certificate string to a Certificate object.

    Args:
        cert_der: Certificate as a DER encoded byte string.

    Returns:
        cryptography.x509.Certificate instance.
    """
    backend = cryptography.hazmat.backends.default_backend()
    return cryptography.x509.load_der_x509_certificate(
        data=cert_der, backend=backend
    )
Decode cert DER string to Certificate object. Args: cert_der : Certificate as a DER encoded string Returns: cryptography.Certificate()
juraj-google-style
def build_srpm(specfile, save_dir):
    """Build a SRPM from the given specfile using rpmbuild.

    The generated SRPM is stored in the directory specified by ``save_dir``.

    Args:
        specfile: path to a specfile.
        save_dir: path to the source and build tree.

    Returns:
        rpmbuild's stripped stdout on success, or an error message string
        when rpmbuild could not be started.

    Raises:
        IOError: when ``save_dir`` is the default save path but the
            directory does not exist.
    """
    logger.info('Starting rpmbuild to build: {0} SRPM.'.format(specfile))
    if (save_dir != get_default_save_path()):
        # Custom save_dir: sources, build tree and results all live
        # directly in save_dir.
        macros = [('_sourcedir', save_dir),
                  ('_builddir', save_dir),
                  ('_srcrpmdir', save_dir),
                  ('_rpmdir', save_dir)]
    else:
        # Default (rpmdevtools-style) layout with SOURCES/BUILD/SRPMS/RPMS
        # subdirectories; the tree must already exist.
        if (not os.path.exists(save_dir)):
            raise IOError('Specify folder to store a file (SAVE_DIR) or install rpmdevtools.')
        macros = [('_sourcedir', save_dir + '/SOURCES'),
                  ('_builddir', save_dir + '/BUILD'),
                  ('_srcrpmdir', save_dir + '/SRPMS'),
                  ('_rpmdir', save_dir + '/RPMS')]
    cmd = ['rpmbuild']
    for name, path in macros:
        cmd.extend(['--define', '{0} {1}'.format(name, path)])
    cmd.extend(['-bs', specfile])
    try:
        msg = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].strip()
    except OSError:
        # rpmbuild binary missing or not executable.
        logger.error('Rpmbuild failed for specfile: {0} and save_dir: {1}'.format(specfile, save_dir), exc_info=True)
        msg = 'Rpmbuild failed. See log for more info.'
    return msg
Builds a srpm from given specfile using rpmbuild. Generated srpm is stored in directory specified by save_dir. Args: specfile: path to a specfile save_dir: path to source and build tree
codesearchnet
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10, address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):
    """Set the LE scan parameters on the controller.

    Args:
        scan_type: ScanType.(PASSIVE|ACTIVE)
        interval_ms: ms (as float) between scans (valid range 2.5ms - 10240ms).
            Note: when interval and window are equal, the scan runs
            continuously.
        window_ms: ms (as float) scan duration (valid range 2.5ms - 10240ms)
        address_type: Bluetooth address type
            BluetoothAddressType.(PUBLIC|RANDOM); RANDOM generates a random
            MAC address instead of using the device one.
        filter_type: ScanFilter.(ALL|WHITELIST_ONLY); only ALL is supported.

    Raises:
        ValueError: A value had an unexpected format or was not in range.
    """
    def to_fractions(value_ms, label):
        # The controller expects times in fractions of MS_FRACTION_DIVIDER ms.
        fractions = value_ms / MS_FRACTION_DIVIDER
        if fractions < 4 or fractions > 16384:
            raise ValueError('Invalid {} given {}, must be in range of 2.5ms to 10240ms!'.format(label, fractions))
        return fractions

    interval_fractions = to_fractions(interval_ms, 'interval')
    window_fractions = to_fractions(window_ms, 'window')
    payload = struct.pack('>BHHBB',
                          scan_type,
                          int(interval_fractions),
                          int(window_fractions),
                          address_type,
                          filter_type)
    self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS, payload)
sets the le scan parameters Args: scan_type: ScanType.(PASSIVE|ACTIVE) interval: ms (as float) between scans (valid range 2.5ms - 10240ms) ..note:: when interval and window are equal, the scan runs continuously window: ms (as float) scan duration (valid range 2.5ms - 10240ms) address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM) * PUBLIC = use device MAC address * RANDOM = generate a random MAC address and use that filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will return all fetched bluetooth packets (WHITELIST_ONLY is not supported, because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented) Raises: ValueError: A value had an unexpected format or was not in range
codesearchnet
def make_parser(parser_creator=None, **kwargs):
    """Returns a base argument parser for the ray.tune tool.

    Args:
        parser_creator: A constructor for the parser class.
        kwargs: Non-positional args to be passed into the parser class
            constructor.
    """
    if parser_creator:
        parser = parser_creator(**kwargs)
    else:
        parser = argparse.ArgumentParser(**kwargs)

    # NOTE(review): several help strings below were truncated during
    # extraction (URL scrubbing); they have been reconstructed.
    parser.add_argument(
        "--run",
        default=None,
        type=str,
        help="The algorithm or model to train. This may refer to the name "
        "of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
        "user-defined trainable function or class registered in the "
        "tune registry.")
    parser.add_argument(
        "--stop",
        default="{}",
        type=json.loads,
        help="The stopping criteria, specified in JSON. The keys may be any "
        "field returned by 'train()' e.g. "
        "'{\"time_total_s\": 600, \"training_iteration\": 100000}' to stop "
        "after 600 seconds or 100k iterations, whichever is reached first.")
    parser.add_argument(
        "--config",
        default="{}",
        type=json.loads,
        help="Algorithm-specific configuration (e.g. env, hyperparams), "
        "specified in JSON.")
    parser.add_argument(
        "--resources-per-trial",
        default=None,
        type=json_to_resources,
        help="Override the machine resources to allocate per trial, e.g. "
        "'{\"cpu\": 64, \"gpu\": 8}'. Note that GPUs will not be assigned "
        "unless you specify them here. For RLlib, you probably want to "
        "leave this alone and use RLlib configs to control parallelism.")
    parser.add_argument(
        "--num-samples",
        default=1,
        type=int,
        help="Number of times to repeat each trial.")
    parser.add_argument(
        "--local-dir",
        default=DEFAULT_RESULTS_DIR,
        type=str,
        help="Local dir to save training results to. Defaults to '{}'.".format(
            DEFAULT_RESULTS_DIR))
    parser.add_argument(
        "--upload-dir",
        default="",
        type=str,
        help="Optional URI to sync training results to (e.g. s3://bucket).")
    parser.add_argument(
        "--trial-name-creator",
        default=None,
        help="Optional creator function for the trial string, used in "
        "generating a trial directory.")
    parser.add_argument(
        "--sync-function",
        default=None,
        help="Function for syncing the local_dir to upload_dir. If string, "
        "then it must be a string template for syncer to run and needs to "
        "include replacement fields '{local_dir}' and '{remote_dir}'.")
    parser.add_argument(
        "--loggers",
        default=None,
        help="List of logger creators to be used with each Trial. "
        "Defaults to ray.tune.logger.DEFAULT_LOGGERS.")
    parser.add_argument(
        "--checkpoint-freq",
        default=0,
        type=int,
        help="How many training iterations between checkpoints. "
        "A value of 0 (default) disables checkpointing.")
    parser.add_argument(
        "--checkpoint-at-end",
        action="store_true",
        help="Whether to checkpoint at the end of the experiment. "
        "Default is False.")
    parser.add_argument(
        "--keep-checkpoints-num",
        default=None,
        type=int,
        help="Number of last checkpoints to keep. Others get "
        "deleted. Default (None) keeps all checkpoints.")
    parser.add_argument(
        "--checkpoint-score-attr",
        default="training_iteration",
        type=str,
        help="Specifies by which attribute to rank the best checkpoint. "
        "Default is increasing order. If attribute starts with min- it "
        "will rank attribute in decreasing order. Example: "
        "min-validation_loss")
    parser.add_argument(
        "--export-formats",
        default=None,
        help="List of formats that exported at the end of the experiment. "
        "Default is None. For RLlib, 'checkpoint' and 'model' are "
        "supported for TensorFlow policy graphs.")
    parser.add_argument(
        "--max-failures",
        default=3,
        type=int,
        help="Try to recover a trial from its last checkpoint at least this "
        "many times. Only applies if checkpointing is enabled.")
    parser.add_argument(
        "--scheduler",
        default="FIFO",
        type=str,
        help="FIFO (default), MedianStopping, AsyncHyperBand, "
        "HyperBand, or HyperOpt.")
    parser.add_argument(
        "--scheduler-config",
        default="{}",
        type=json.loads,
        help="Config options to pass to the scheduler.")
    parser.add_argument(
        "--restore",
        default=None,
        type=str,
        help="If specified, restore from this checkpoint.")
    return parser
Returns a base argument parser for the ray.tune tool. Args: parser_creator: A constructor for the parser class. kwargs: Non-positional args to be passed into the parser class constructor.
juraj-google-style
def sin(self: EventSetOrNode) -> EventSetOrNode:
    """Calculate the sine of the EventSet's features.

    Can only be used on floating point features.

    Returns:
        EventSetOrNode with the sine of the input features.
    """
    # Imported locally (presumably to avoid a circular import at module load).
    from temporian.core.operators.unary import sin as sin_op

    return sin_op(self)
Calculates the sine of an [`EventSet`][temporian.EventSet]'s features. Can only be used on floating point features. Example: ```python >>> a = tp.event_set( ... timestamps=[1, 2, 3, 4, 5], ... features={"M": [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]}, ... ) >>> a.sin() indexes: ... timestamps: [1. 2. 3. 4. 5.] 'M': [ 0.0000e+00 1.0000e+00 1.2246e-16 -1.0000e+00 -2.4493e-16] ... ``` Returns: EventSetOrNode with sine of input features.
github-repos
def _get_argspec_for_partial(obj):
    """Implements `getargspec` for `functools.partial` objects.

    Args:
        obj: The `functools.partial` object.

    Returns:
        An `inspect.ArgSpec`-style tuple for the partial.

    Raises:
        ValueError: When the callable's signature can not be expressed with
            ArgSpec (unknown keyword without **kwargs, or a defaulted
            argument followed by a non-defaulted one).
    """
    # Positional args already bound by the partial are pruned from the front.
    n_prune_args = len(obj.args)
    partial_keywords = obj.keywords or {}
    args, varargs, keywords, defaults = getargspec(obj.func)
    args = args[n_prune_args:]
    # Sentinel marking "no default" so that None remains a valid default.
    no_default = object()
    all_defaults = [no_default] * len(args)
    if defaults:
        # The wrapped function's defaults align with the tail of its args.
        all_defaults[-len(defaults):] = defaults
    # Keywords bound by the partial become defaults for matching args.
    for kw, default in partial_keywords.items():
        if kw in args:
            idx = args.index(kw)
            all_defaults[idx] = default
        elif not keywords:
            raise ValueError('Function does not have **kwargs parameter, but contains an unknown partial keyword.')
    # Index of the first argument that has a default, if any.
    first_default = next((idx for idx, x in enumerate(all_defaults) if x is not no_default), None)
    if first_default is None:
        return ArgSpec(args, varargs, keywords, None)
    # ArgSpec requires defaults to be a contiguous suffix: any arg without a
    # default positioned after a defaulted one is inexpressible.
    invalid_default_values = [args[i] for i, j in enumerate(all_defaults) if j is no_default and i > first_default]
    if invalid_default_values:
        raise ValueError('Some arguments %s do not have default value, but they are positioned after those with default values. This can not be expressed with ArgSpec.' % invalid_default_values)
    return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))
Implements `getargspec` for `functools.partial` objects. Args: obj: The `functools.partial` object Returns: An `inspect.ArgSpec` Raises: ValueError: When callable's signature can not be expressed with ArgSpec.
github-repos