code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def create_folder(self, name, parent_folder_id=0):
    """Create a folder via the Box API.

    If the folder already exists the server rejects the request and
    ``__request`` raises a BoxError (status_code >= 400).

    Args:
        name (str): Name of the folder to create.
        parent_folder_id (int): ID of the folder in which to create the
            new one; 0 is the root folder.

    Returns:
        dict: Response from Box.

    Raises:
        BoxError: An error response is returned from Box.
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    # Bug fix: `unicode` only exists on Python 2 and raises NameError on
    # Python 3; str() produces the same string payload on both.
    return self.__request('POST', 'folders',
                          data={'name': name,
                                'parent': {'id': str(parent_folder_id)}})
Create a folder If the folder exists, a BoxError will be raised. Args: name (str): Name of the folder. parent_folder_id (int): ID of the folder where to create the new one. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
codesearchnet
def draw_line(self, x1, y1, x2, y2):
    """Draw a line on the current rendering target.

    Args:
        x1 (int): X coordinate of the start point.
        y1 (int): Y coordinate of the start point.
        x2 (int): X coordinate of the end point.
        y2 (int): Y coordinate of the end point.

    Raises:
        SDLError: If an error is encountered.
    """
    status = lib.SDL_RenderDrawLine(self._ptr, x1, y1, x2, y2)
    check_int_err(status)
Draw a line on the current rendering target. Args: x1 (int): The x coordinate of the start point. y1 (int): The y coordinate of the start point. x2 (int): The x coordinate of the end point. y2 (int): The y coordinate of the end point. Raises: SDLError: If an error is encountered.
codesearchnet
def from_rtm(cls, raw_event: MutableMapping) -> 'Event':
    """Create an event from RTM API data.

    Any event whose type starts with 'message' is wrapped in
    :class:`slack.events.Message`; everything else in
    :class:`slack.events.Event`.

    Args:
        raw_event: JSON-decoded data from the RTM API.

    Returns:
        :class:`slack.events.Event` or :class:`slack.events.Message`
    """
    event_cls = Message if raw_event['type'].startswith('message') else Event
    return event_cls(raw_event)
Create an event with data coming from the RTM API. If the event type is a message a :class:`slack.events.Message` is returned. Args: raw_event: JSON decoded data from the RTM API Returns: :class:`slack.events.Event` or :class:`slack.events.Message`
codesearchnet
def describe_images(self, idaho_image_results):
    """Describe the result set of a catalog search for IDAHO images.

    Args:
        idaho_image_results (dict): Result set of a catalog search;
            expects a 'results' list of records carrying 'type',
            'identifier' and 'properties' fields.

    Returns:
        dict: Mapping of catalogID -> {'sensorPlatformName': ...,
        'parts': {part_number: {color: {'id', 'bucket', 'boundstr'}}}}.
    """
    results = [r for r in idaho_image_results['results']
               if 'IDAHOImage' in r['type']]
    self.logger.debug('Describing %s IDAHO images.' % len(results))
    catids = {r['properties']['catalogID'] for r in results}
    description = {}
    for catid in catids:
        description[catid] = {'parts': {}}
        images = [r for r in results
                  if r['properties']['catalogID'] == catid]
        for image in images:
            props = image['properties']
            description[catid]['sensorPlatformName'] = props['sensorPlatformName']
            # Part number is encoded in the last 3 characters of the
            # second ':'-separated field of the vendor identifier.
            part = int(props['vendorDatasetIdentifier'].split(':')[1][-3:])
            color = props['colorInterpretation']
            # Fix: the original used a bare try/except as a key-existence
            # probe, which also swallowed unrelated exceptions (including
            # KeyboardInterrupt). setdefault expresses the intent directly.
            part_entry = description[catid]['parts'].setdefault(part, {})
            part_entry[color] = {
                'id': image['identifier'],
                'bucket': props['tileBucketName'],
                'boundstr': props['footprintWkt'],
            }
    return description
Describe the result set of a catalog search for IDAHO images. Args: idaho_image_results (dict): Result set of catalog search. Returns: results (json): The full catalog-search response for IDAHO images corresponding to the given catID.
juraj-google-style
def inspect(logdir='', event_file='', tag=''):
    """Print a digest of event files.

    Args:
        logdir: A log directory that contains event files.
        event_file: Or, a particular event file path.
        tag: An optional tag name to query for.
    """
    banner = (PRINT_SEPARATOR
              + 'Processing event files... (this can take a few minutes)\n'
              + PRINT_SEPARATOR)
    print(banner)
    for unit in get_inspection_units(logdir, event_file, tag):
        if tag:
            print('Event statistics for tag {} in {}:'.format(tag, unit.name))
        else:
            # Without a tag filter, first list all tags, then full stats.
            print('These tags are in {}:'.format(unit.name))
            print_dict(get_unique_tags(unit.field_to_obs))
            print(PRINT_SEPARATOR)
            print('Event statistics for {}:'.format(unit.name))
        print_dict(get_dict_to_print(unit.field_to_obs),
                   show_missing=(not tag))
        print(PRINT_SEPARATOR)
Main function for inspector that prints out a digest of event files. Args: logdir: A log directory that contains event files. event_file: Or, a particular event file path. tag: An optional tag name to query for. Raises: ValueError: If neither logdir and event_file are given, or both are given.
juraj-google-style
def get_output_shape_at(self, node_index):
    """Retrieve the output shape(s) of a layer at a given node.

    Args:
        node_index: Integer, index of the node from which to retrieve the
            attribute; 0 corresponds to the first time the layer was
            called.

    Returns:
        A shape tuple, or a list of shape tuples if the layer has
        multiple outputs.

    Raises:
        RuntimeError: If called in Eager mode.
    """
    attribute = self._get_node_attribute_at_index(
        node_index, 'output_shapes', 'output shape')
    return attribute
Retrieves the output shape(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple outputs). Raises: RuntimeError: If called in Eager mode.
github-repos
def node_defs(self):
    """All the node defs in the graph to be converted.

    Returns:
        The mapping stored on ``self._node_defs`` — a map from node name
        to NodeDef (populated elsewhere on this object).
    """
    return self._node_defs
All the node defs in the graph to be converted. Returns: A map from node name to the NodeDef for all NodeDefs in the graph, as well as all control flow NodeDefs in the functions.
github-repos
def _get_local_folder(self, root=None):
    """Return the local NApp root folder.

    Searches for kytos.json in ``root`` itself and in
    ``root/<user>/<napp>``, and accepts it only when its metadata matches
    this NApp's user and name.

    Args:
        root (pathlib.Path): Where to begin searching; defaults to the
            current directory.

    Returns:
        pathlib.Path: NApp root folder (the parent of kytos.json).

    Raises:
        FileNotFoundError: If there is no such local NApp.
    """
    if root is None:
        root = Path()
    for parts in (['.'], [self.user, self.napp]):
        kytos_json = root / Path(*parts) / 'kytos.json'
        if not kytos_json.exists():
            continue
        with kytos_json.open() as file_descriptor:
            meta = json.load(file_descriptor)
        # Older metadata used 'author' instead of 'username'.
        username = meta.get('username', meta.get('author'))
        if username == self.user and meta.get('name') == self.napp:
            return kytos_json.parent
    raise FileNotFoundError('kytos.json not found.')
Return local NApp root folder. Search for kytos.json in _./_ folder and _./user/napp_. Args: root (pathlib.Path): Where to begin searching. Return: pathlib.Path: NApp root folder. Raises: FileNotFoundError: If there is no such local NApp.
codesearchnet
def assert_present(self, selector, testid=None, **kwargs):
    """Assert that the element is present in the DOM.

    Args:
        selector (str): the selector used to find the element.
        testid (str): the test id under which to record the result, or
            None to skip recording.

    Kwargs:
        wait_until_present (bool): whether to wait for the element before
            asserting; defaults to the proxy_driver configuration.

    Returns:
        bool: True if the assertion succeeded; False otherwise.
    """
    self.info_log('Assert present selector(%s) testid(%s)'
                  % (selector, testid))
    default_wait = BROME_CONFIG['proxy_driver'][
        'wait_until_present_before_assert_present']
    wait_until_present = kwargs.get('wait_until_present', default_wait)
    self.debug_log('effective wait_until_present: %s' % wait_until_present)
    if wait_until_present:
        element = self.wait_until_present(selector, raise_exception=False)
    else:
        element = self.is_present(selector)
    succeeded = True if element else False
    if testid is not None:
        self.create_test_result(testid, succeeded)
    return succeeded
Assert that the element is present in the dom Args: selector (str): the selector used to find the element test_id (str): the test_id or a str Kwargs: wait_until_present (bool) Returns: bool: True if the assertion succeeded; False otherwise.
codesearchnet
def open(self, mode='r', encoding=None):
    """Return a file-like object over the stored contents.

    Args:
        mode (str): access mode (only reading modes are supported).
        encoding (str): encoding used when converting between bytes and
            text; defaults to ``self.encoding``.

    Returns:
        io.BytesIO OR io.StringIO: buffer accessing the file as bytes or
        characters.
    """
    access_type = self._get_access_type(mode)
    if encoding is None:
        encoding = self.encoding
    if access_type == 'b':
        # Binary access: encode stored text on the fly if necessary.
        if self._isbytes:
            payload = self._contents
        else:
            payload = self._contents.encode(encoding)
        return io.BytesIO(payload)
    assert access_type == 't'
    # Python 2 historically served byte contents through a BytesIO even
    # for text mode.
    if PYVERSION == 2 and self._isbytes:
        return io.BytesIO(self._contents)
    if self._isbytes:
        return io.StringIO(self._contents.decode(encoding))
    return io.StringIO(self._contents)
Return file-like object Args: mode (str): access mode (only reading modes are supported) encoding (str): encoding type (only for binary access) Returns: io.BytesIO OR io.TextIOWrapper: buffer accessing the file as bytes or characters
juraj-google-style
def get_permissions(self, grp_name, resource):
    """Get permissions the group has on the given resource.

    Args:
        grp_name (string): Name of group.
        resource (intern.resource.boss.Resource): Identifies which data
            model object to operate on.

    Returns:
        (list): List of permissions.

    Raises:
        requests.HTTPError on failure.
    """
    # Re-authenticate the project service before delegating the call.
    self.project_service.set_auth(self._token_project)
    return self.project_service.get_permissions(grp_name, resource)
Get permissions associated the group has with the given resource. Args: grp_name (string): Name of group. resource (intern.resource.boss.Resource): Identifies which data model object to operate on. Returns: (list): List of permissions. Raises: requests.HTTPError on failure.
juraj-google-style
def patch_mask(patch: dict) -> dict:
    """Add an 'updateMask' to a patch based on the keys in its body.

    Operates under the assumption that all fields present in the body are
    being updated. Nested keys are joined with '.', except beneath
    'budgetSegments' and 'partnerCosts', which are masked at the parent
    level only.

    Args:
        patch: dict whose ['parameters']['body'] holds the update payload
            (other keys are ignored).

    Returns:
        The same patch, with ['parameters']['updateMask'] added when the
        body yields any mask entries.
    """
    def _collect(node: dict) -> list:
        entries = set()
        if isinstance(node, dict):
            for parent, value in node.items():
                children = _collect(value)
                if children and parent not in ('budgetSegments', 'partnerCosts'):
                    entries.update(parent + '.' + child for child in children)
                else:
                    entries.add(parent)
        elif isinstance(node, (list, tuple)):
            for item in node:
                entries.update(_collect(item))
        return list(entries)

    joined = ','.join(_collect(patch['parameters'].get('body')))
    if joined:
        patch['parameters']['updateMask'] = joined
    return patch
Adds an update mask to a patch based on keys in patch. Operates under the assumption that all fields present in the update will be updated. Immediately wraps a nested function to perform the actual patch logic using a generator. Args: patch: { "operation": IGNORED, "action": IGNORED, "partner": IGNORED, "advertiser": IGNORED, "campaign": IGNORED, "parameters": { "body": { PATCH IS CONSTRUCTED FROM THIS } } } Returns: Patch with ['parameters']['updateMask'] added.
github-repos
def break_before_sequence(chunks: typing.List[str], sequence: str) -> typing.List[str]:
    """Break chunks before a specified character sequence appears.

    Args:
        chunks (List[str]): Chunks to break.
        sequence (str): A character sequence to break chunks before.

    Returns:
        Processed chunks, with empty chunks removed.
    """
    joined = utils.SEP.join(chunks)
    rebroken = joined.replace(sequence, utils.SEP + sequence).split(utils.SEP)
    return [chunk for chunk in rebroken if chunk]
Breaks chunks before a specified character sequence appears. Args: chunks (List[str]): Chunks to break. sequence (str): A character sequence to break chunks before. Returns: Processed chunks.
github-repos
def from_structures(structures, transformations=None, extend_collection=0):
    """Alternative constructor from structures rather than TransformedStructures.

    Args:
        structures: Sequence of structures.
        transformations: New transformations to be applied to all
            structures.
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations; may be a number giving the
            maximum branching per transformation.

    Returns:
        StandardTransmuter
    """
    # Wrap each plain structure in a TransformedStructure with an empty
    # transformation history before delegating to the main constructor.
    tstruct = [TransformedStructure(s, []) for s in structures]
    return StandardTransmuter(tstruct, transformations, extend_collection)
Alternative constructor from structures rather than TransformedStructures. Args: structures: Sequence of structures transformations: New transformations to be applied to all structures extend_collection: Whether to use more than one output structure from one-to-many transformations. extend_collection can be a number, which determines the maximum branching for each transformation. Returns: StandardTransmuter
codesearchnet
def get_attribute_list(self, uid=None):
    """Send a GetAttributeList request to the server.

    Args:
        uid (string): The ID of the managed object with which the
            retrieved attribute names should be associated.

    Returns:
        GetAttributeListResult: results of the operation (first batch
        item only).
    """
    batch_item = self._build_get_attribute_list_batch_item(uid)
    message = self._build_request_message(None, [batch_item])
    reply = self._send_and_receive_message(message)
    return self._process_batch_items(reply)[0]
Send a GetAttributeList request to the server. Args: uid (string): The ID of the managed object with which the retrieved attribute names should be associated. Returns: result (GetAttributeListResult): A structure containing the results of the operation.
codesearchnet
def add_severity(self, name, value):
    """Add a severity annotation to the variant.

    Args:
        name (str): The name of the severity.
        value: The value of the severity.
    """
    logger.debug("Adding severity {0} with value {1} to variant {2}".format(
        name, value, self['variant_id']))
    entry = {name: value}
    self['severities'].append(entry)
Add a severity to the variant Args: name (str): The name of the severity value : The value of the severity
juraj-google-style
def center_of_mass(self, time):
    """Calculate the weighted centroid of the object at a given timestep.

    Args:
        time: Time at which the center of mass calculation is performed.

    Returns:
        tuple: (com_x, com_y); both None when `time` falls outside
        [start_time, end_time]. When no mask cells are active, falls back
        to the unweighted mean of the coordinates.
    """
    if not (self.start_time <= time <= self.end_time):
        return (None, None)
    step = time - self.start_time
    valid = np.flatnonzero(self.masks[step] != 0)
    if valid.size > 0:
        weights = self.timesteps[step].ravel()[valid]
        total = weights.sum()
        com_x = (1.0 / total) * np.sum(weights * self.x[step].ravel()[valid])
        com_y = (1.0 / total) * np.sum(weights * self.y[step].ravel()[valid])
    else:
        com_x = np.mean(self.x[step])
        com_y = np.mean(self.y[step])
    return (com_x, com_y)
Calculate the center of mass at a given timestep. Args: time: Time at which the center of mass calculation is performed Returns: The x- and y-coordinates of the center of mass.
codesearchnet
def get_block_iter(self, start_block=None, start_block_num=None, reverse=True):
    """Return an iterator traversing blocks in block-number order.

    Args:
        start_block (:obj:`BlockWrapper`): the block from which traversal
            begins.
        start_block_num (str): a '0x'-prefixed hex block number from
            where traversal begins; takes precedence over start_block
            when provided.
        reverse (bool): If True, traverse from most recent to oldest
            block; otherwise the opposite order.

    Returns:
        An iterator of block wrappers.

    Raises:
        ValueError: If start_block_num is not a valid hex block number.
    """
    start = None
    if start_block_num:
        # A valid number must be at least '0x' plus digits.
        if len(start_block_num) < 2 or start_block_num[:2] != '0x':
            raise ValueError('Invalid start block num')
        start = int(start_block_num, 16)
    elif start_block:
        start = start_block.block_num
    return _BlockStoreIter(self.pointer, start, reverse)
Returns an iterator that traverses blocks in block number order. Args: start_block (:obj:`BlockWrapper`): the block from which traversal begins start_block_num (str): a starting block number, in hex, from where traversal begins; takes precedence over start_block when provided reverse (bool): If True, traverse the blocks in from most recent to oldest block. Otherwise, it traverse the blocks in the opposite order. Returns: An iterator of block wrappers Raises: ValueError: If start_block_num does not specify a valid block
codesearchnet
def _infer_mutants_handler(self, request):
    """Returns JSON for the `vz-line-chart`s for a feature.

    Args:
        request: A request that should contain 'feature_name',
            'example_index', 'inference_address', 'model_name',
            'model_type', 'model_version', and 'model_signature'.

    Returns:
        An HTTP response: a list of JSON objects, one per chart, or a
        JSON error payload (405 for non-GET, 400 for invalid input).
    """
    try:
        if request.method != 'GET':
            logger.error('%s requests are forbidden.', request.method)
            return http_util.Respond(request, {'error': 'invalid non-GET request'}, 'application/json', code=405)
        example_index = int(request.args.get('example_index', '0'))
        feature_name = request.args.get('feature_name')
        # example_index == -1 means "use all loaded examples".
        examples = (self.examples if example_index == -1 else [self.examples[example_index]])
        (inference_addresses, model_names, model_versions, model_signatures) = self._parse_request_arguments(request)
        # Build one serving bundle per configured model.
        serving_bundles = []
        for model_num in xrange(len(inference_addresses)):
            serving_bundles.append(inference_utils.ServingBundle(
                inference_addresses[model_num], model_names[model_num], request.args.get('model_type'), model_versions[model_num], model_signatures[model_num], request.args.get('use_predict') == 'true', request.args.get('predict_input_tensor'), request.args.get('predict_output_tensor')))
        viz_params = inference_utils.VizParams(
            request.args.get('x_min'), request.args.get('x_max'), self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS, request.args.get('feature_index_pattern'))
        json_mapping = inference_utils.mutant_charts_for_feature(
            examples, feature_name, serving_bundles, viz_params)
        return http_util.Respond(request, json_mapping, 'application/json')
    except common_utils.InvalidUserInputError as e:
        # User-caused errors are reported back as HTTP 400.
        return http_util.Respond(request, {'error': e.message}, 'application/json', code=400)
Returns JSON for the `vz-line-chart`s for a feature. Args: request: A request that should contain 'feature_name', 'example_index', 'inference_address', 'model_name', 'model_type', 'model_version', and 'model_signature'. Returns: A list of JSON objects, one for each chart.
juraj-google-style
def persist_time(run, session, timings):
    """Persist timing results for a run in the database.

    Args:
        run: The run we attach these timing results to.
        session: The db transaction we belong to.
        timings: The timing measurements we want to store, as sequences
            of (user_s, system_s, real_s).
    """
    from benchbuild.utils import schema as s
    metric_names = ("time.user_s", "time.system_s", "time.real_s")
    for timing in timings:
        for index, metric_name in enumerate(metric_names):
            session.add(
                s.Metric(name=metric_name, value=timing[index],
                         run_id=run.id))
Persist the run results in the database. Args: run: The run we attach this timing results to. session: The db transaction we belong to. timings: The timing measurements we want to store.
juraj-google-style
def set_computer_desc(desc=None):
    """Set the Windows computer description.

    Args:
        desc (str): The computer description.

    Returns:
        dict: {'Computer Description': <current description>} if
        successful, otherwise ``False``.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    """
    if six.PY2:
        desc = _to_unicode(desc)
    # Fetch the current server info record (level 101) to update.
    system_info = win32net.NetServerGetInfo(None, 101)
    if desc is None:
        return False
    system_info['comment'] = desc
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        number, context, message = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return {'Computer Description': get_computer_desc()}
Set the Windows computer description Args: desc (str): The computer description Returns: str: Description if successful, otherwise ``False`` CLI Example: .. code-block:: bash salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
codesearchnet
def __init__(self, logger, gccxml_cvs_revision=None, castxml_format=None):
    """Create a new xml_generators object.

    Exactly one of ``gccxml_cvs_revision`` and ``castxml_format`` must be
    provided.

    Args:
        logger (logging.Logger): a logger for debugging output.
        gccxml_cvs_revision (str|None): the xml output version.
        castxml_format (str|None): the xml output version.

    Raises:
        RuntimeError: if both or neither version arguments are given.
    """
    if castxml_format is not None and gccxml_cvs_revision is not None:
        # Fix: the adjacent string literals previously concatenated
        # without a space ("...andcastxml_format...").
        raise RuntimeError(
            "Setting both gccxml_cvs_revision and "
            "castxml_format is not allowed!")
    self._is_castxml1 = False
    self._is_castxml = False
    self._is_gccxml = False
    if castxml_format is not None:
        self._xml_generator_version = self.__castxml
        self._xml_output_version = castxml_format
        self._is_castxml = True
        self._is_castxml1 = True
    elif gccxml_cvs_revision is not None:
        self._xml_generator_version, self._xml_output_version = \
            self.__extract_versions(logger, gccxml_cvs_revision)
        self._is_gccxml = "GCC-XML" in self._xml_generator_version
        self._is_castxml = "CastXML" in self._xml_generator_version
    else:
        # Fix: same missing-space bug in this message
        # ("...gccxml_cvs_revisionneed to be defined!").
        raise RuntimeError(
            "Either castxml_format or gccxml_cvs_revision "
            "need to be defined!")
Create a new xml_generators object. Args: logger (logging.Logger) : a logger for debugging output gccxml_cvs_revision (str|None): the xml output version castxml_format (str|None): the xml output version
juraj-google-style
def ParseFileDownloadedRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a file downloaded row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    event_data = ChromeHistoryFileDownloadedEventData()
    event_data.full_path = self._GetRowValue(query_hash, row, 'target_path')
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.received_bytes = self._GetRowValue(query_hash, row, 'received_bytes')
    event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    # The start_time column is interpreted as a WebKit timestamp (see
    # WebKitTime).
    timestamp = self._GetRowValue(query_hash, row, 'start_time')
    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a file downloaded row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys):
    """Create an Excel file containing data returned by the Analytics API.

    Args:
        data: Analytics API data as a list of dicts.
        output_file_name: File name for the output Excel file
            (use a .xlsx extension).
        result_info_key: Key of the result info within each record.
        identifier_keys: Keys identifying each record.
    """
    workbook = create_excel_workbook(data, result_info_key, identifier_keys)
    workbook.save(output_file_name)
    message = 'Saved Excel file to {}'.format(output_file_name)
    print(message)
Creates an Excel file containing data returned by the Analytics API Args: data: Analytics API data as a list of dicts output_file_name: File name for output Excel file (use .xlsx extension).
codesearchnet
def copy_script(self, filename, id_=-1):
    """Copy a script to all child repositories.

    Whether a JSS has been migrated is taken into account by the
    individual DistributionPoint types.

    Args:
        filename: String path to the local file to copy.
        id_: Integer ID to associate the script with, for a JDS or CDP
            only. Default is -1, which is used for creating a new script
            object in the database.
    """
    for child_repo in self._children:
        child_repo.copy_script(filename, id_)
Copy a script to all repositories. Takes into account whether a JSS has been migrated. See the individual DistributionPoint types for more information. Args: filename: String path to the local file to copy. id_: Integer ID you wish to associate script with for a JDS or CDP only. Default is -1, which is used for creating a new script object in the database.
codesearchnet
def check_integrity(sakefile, settings):
    """Check that the parsed Sakefile dict conforms to specification.

    Args:
        sakefile: dict, the parsed Sakefile (from sake.py).
        settings: dict providing the 'sprint' and 'error' print
            functions.

    Returns:
        True if the Sakefile is conformant, False if not.
    """
    sprint = settings['sprint']
    error = settings['error']
    sprint('Call to check_integrity issued', level='verbose')
    if (not sakefile):
        error('Sakefile is empty')
        return False
    # NOTE(review): keys of a plain dict are always unique, so this check
    # can never fire unless sakefile is a custom mapping — confirm intent.
    if (len(sakefile.keys()) != len(set(sakefile.keys()))):
        error('Sakefile contains duplicate targets')
        return False
    for target in sakefile:
        if (target == 'all'):
            # 'all' is a special aggregate target.
            if (not check_target_integrity(target, sakefile['all'], all=True)):
                error("Failed to accept target 'all'")
                return False
            continue
        # A target without a 'formula' is a meta-target containing
        # atomic sub-targets.
        if ('formula' not in sakefile[target]):
            if (not check_target_integrity(target, sakefile[target], meta=True)):
                errmes = "Failed to accept meta-target '{}'".format(target)
                error(errmes)
                return False
            for atom_target in sakefile[target]:
                if (atom_target == 'help'):
                    continue
                if (not check_target_integrity(atom_target, sakefile[target][atom_target], parent=target)):
                    errmes = "Failed to accept target '{}'\n".format(atom_target)
                    error(errmes)
                    return False
            continue
        if (not check_target_integrity(target, sakefile[target])):
            errmes = "Failed to accept target '{}'\n".format(target)
            error(errmes)
            return False
    return True
Checks the format of the sakefile dictionary to ensure it conforms to specification Args: A dictionary that is the parsed Sakefile (from sake.py) The setting dictionary (for print functions) Returns: True if the Sakefile is conformant False if not
codesearchnet
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
    """Prepare a batch of inputs (optionally paired) for the model.

    Adds special tokens, truncates sequences if overflowing while taking
    the special tokens into account, and manages a moving window (with
    user-defined stride) for overflowing tokens. Padding is deferred to a
    single `self.pad` call over the whole batch.

    Args:
        batch_text_or_text_pairs: list of tokenized input ids or input id
            pairs; each entry is zipped with its `boxes` entry.

    Returns:
        BatchEncoding with per-key lists (or tensors, per
        `return_tensors`) covering the whole batch.
    """
    batch_outputs = {}
    for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
        batch_text_or_text_pair, boxes_example = example
        # Each example is prepared WITHOUT padding; the batch is padded
        # in one pass afterwards.
        outputs = self.prepare_for_model(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)
        # Accumulate per-key lists across the batch.
        for key, value in outputs.items():
            if key not in batch_outputs:
                batch_outputs[key] = []
            batch_outputs[key].append(value)
    batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
    return batch_outputs
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs
github-repos
def do_reset_ids(concatenated_meta_df, data_df, concat_direction):
    """Reset ids in concatenated metadata and data dfs to unique integers,
    saving the old ids in a metadata column.

    Note that both dataframes are modified in-place.

    Args:
        concatenated_meta_df (pandas df)
        data_df (pandas df)
        concat_direction (string): 'horiz' or 'vert'

    Returns:
        None (dfs modified in-place)
    """
    if concat_direction == 'horiz':
        ids_agree = concatenated_meta_df.index.equals(data_df.columns)
        assert ids_agree, 'cids in concatenated_meta_df do not agree with cids in data_df.'
        reset_ids_in_meta_df(concatenated_meta_df)
        data_df.columns = pd.Index(concatenated_meta_df.index.values)
    elif concat_direction == 'vert':
        ids_agree = concatenated_meta_df.index.equals(data_df.index)
        assert ids_agree, 'rids in concatenated_meta_df do not agree with rids in data_df.'
        reset_ids_in_meta_df(concatenated_meta_df)
        data_df.index = pd.Index(concatenated_meta_df.index.values)
Reset ids in concatenated metadata and data dfs to unique integers and save the old ids in a metadata column. Note that the dataframes are modified in-place. Args: concatenated_meta_df (pandas df) data_df (pandas df) concat_direction (string): 'horiz' or 'vert' Returns: None (dfs modified in-place)
codesearchnet
def ws_db996(self, value=None):
    """Set IDD Field `ws_db996`.

    Mean wind speed coincident with the 99.6% dry-bulb temperature
    (unit: m/s). When `value` is None it is stored unchecked and treated
    as a missing value.

    Args:
        value (float): value for IDD Field `ws_db996`.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is None:
        self._ws_db996 = None
        return
    try:
        numeric = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float for field `ws_db996`'.format(value))
    self._ws_db996 = numeric
Corresponds to IDD Field `ws_db996` Mean wind speed coincident with 99.6% dry-bulb temperature Args: value (float): value for IDD Field `ws_db996` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def Mult(self, x, factor):
    """Scale the freq/prob associated with the value x.

    A value not yet present is treated as having frequency 0.

    Args:
        x: number value
        factor: how much to multiply by
    """
    current = self.d.get(x, 0)
    self.d[x] = current * factor
Scales the freq/prob associated with the value x. Args: x: number value factor: how much to multiply by
juraj-google-style
def _finish_disconnection_action(self, action):
    """Finish a disconnection attempt.

    There are two possible outcomes:
      - success: the connection records are removed entirely;
      - failure: the connection transitions back to Idle and the failure
        reason is reported to the caller.

    Args:
        action (ConnectionAction): the action object describing what we
            are disconnecting from and the result of the operation
            (data keys: 'success', 'id', and 'reason' on failure).
    """
    success = action.data['success']
    conn_key = action.data['id']
    # Guard: only a connection currently in Disconnecting may finish.
    if self._get_connection_state(conn_key) != self.Disconnecting:
        self._logger.error("Invalid finish_disconnection action on a connection whose state is not Disconnecting, conn_key=%s", str(conn_key))
        return
    data = self._get_connection(conn_key)
    callback = data['callback']
    conn_id = data['conn_id']
    int_id = data['int_id']
    if success is False:
        reason = action.data['reason']
        if reason is None:
            reason = "No reason was given"
        # Failure: revert to Idle, clear transient fields, notify caller.
        data['state'] = self.Idle
        data['microstate'] = None
        data['callback'] = None
        callback(conn_id, self.id, False, reason)
    else:
        # Success: drop both external and internal connection records.
        del self._connections[conn_id]
        del self._int_connections[int_id]
        callback(conn_id, self.id, True, None)
Finish a disconnection attempt There are two possible outcomes: - if we were successful at disconnecting, we transition to disconnected - if we failed at disconnecting, we transition back to idle Args: action (ConnectionAction): the action object describing what we are disconnecting from and what the result of the operation was
juraj-google-style
def load_case(self, config_data, update=False):
    """Load a case into the database.

    Checks that the owner institute exists and whether the case already
    exists (including under a legacy '<owner>-<display_name>' id), then
    loads the case's clinical VCF variants.

    Args:
        config_data(dict): A dictionary with all the necessary
            information.
        update(bool): If an existing case should be updated.

    Returns:
        case_obj(dict)

    Raises:
        IntegrityError: if the institute does not exist, or the case
            exists and `update` is False.
    """
    institute_obj = self.institute(config_data['owner'])
    if not institute_obj:
        raise IntegrityError("Institute '%s' does not exist in database" % config_data['owner'])
    parsed_case = parse_case(config=config_data)
    case_obj = build_case(parsed_case, self)
    # Legacy case ids were '<owner>-<display_name>'; migrate if present.
    old_caseid = '-'.join([case_obj['owner'], case_obj['display_name']])
    old_case = self.case(old_caseid)
    if old_case:
        LOG.info("Update case id for existing case: %s -> %s", old_caseid, case_obj['_id'])
        self.update_caseid(old_case, case_obj['_id'])
        update = True
    existing_case = self.case(case_obj['_id'])
    if existing_case and not update:
        raise IntegrityError("Case %s already exists in database" % case_obj['_id'])
    # Clinical VCF files to load, in order.
    files = [
        {'file_name': 'vcf_snv', 'variant_type': 'clinical', 'category': 'snv'},
        {'file_name': 'vcf_sv', 'variant_type': 'clinical', 'category': 'sv'},
        {'file_name': 'vcf_cancer', 'variant_type': 'clinical', 'category': 'cancer'},
        {'file_name': 'vcf_str', 'variant_type': 'clinical', 'category': 'str'}
    ]
    try:
        for vcf_file in files:
            if not case_obj['vcf_files'].get(vcf_file['file_name']):
                LOG.debug("didn't find {}, skipping".format(vcf_file['file_name']))
                continue
            variant_type = vcf_file['variant_type']
            category = vcf_file['category']
            if update:
                # Remove stale variants before reloading this category.
                self.delete_variants(
                    case_id=case_obj['_id'],
                    variant_type=variant_type,
                    category=category
                )
            self.load_variants(
                case_obj=case_obj,
                variant_type=variant_type,
                category=category,
                rank_threshold=case_obj.get('rank_score_threshold', 0),
            )
    except (IntegrityError, ValueError, ConfigError, KeyError) as error:
        # Variant-loading problems are logged but do not abort the case
        # insert/update itself.
        LOG.warning(error)
    if existing_case and update:
        self.update_case(case_obj)
    else:
        LOG.info('Loading case %s into database', case_obj['display_name'])
        self._add_case(case_obj)
    return case_obj
Load a case into the database Check if the owner and the institute exists. Args: config_data(dict): A dictionary with all the necessary information update(bool): If existing case should be updated Returns: case_obj(dict)
juraj-google-style
def call(self, url, method=None, args=None):
    """Call the first registered function matching the url's pattern and method.

    Args:
        url (str): Url for which to call a matching function.
        method (str, optional): The method used while registering a
            function. Defaults to None.
        args (dict, optional): Additional args to be passed to the
            matching function; only keys the function accepts are
            forwarded.

    Returns:
        The function's return value, or ``None`` if no function matched.
    """
    if (not args):
        args = {}
    # urlparse moved into urllib.parse in Python 3; branch per
    # interpreter major version.
    if (sys.version_info.major == 3):
        data = urllib.parse.urlparse(url)
        path = (data.path.rstrip('/') + '/')
        _args = dict(urllib.parse.parse_qs(data.query, keep_blank_values=True))
    elif (sys.version_info.major == 2):
        data = urlparse.urlparse(url)
        path = (data.path.rstrip('/') + '/')
        _args = dict(urlparse.parse_qs(data.query, keep_blank_values=True))
    for elem in self._data_store:
        pattern = elem['pattern']
        function = elem['function']
        _method = elem['method']
        type_cast = elem['type_cast']
        result = re.match(pattern, path)
        if (result and (_method == method)):
            # Named groups from the pattern override query-string args.
            _args = dict(_args, **result.groupdict())
            # parse_qs wraps every value in a list; unwrap singletons.
            for (key, val) in _args.items():
                if (isinstance(_args[key], list) and (len(_args[key]) == 1)):
                    _args[key] = _args[key][0]
            # Apply registered type casts to non-empty values.
            for (key, val) in type_cast.items():
                if (key not in _args):
                    continue
                if (not _args[key]):
                    continue
                if isinstance(_args[key], list):
                    for (i, _val) in enumerate(_args[key]):
                        _args[key][i] = self._cast(_val, val)
                else:
                    _args[key] = self._cast(_args[key], val)
            # Forward only caller-supplied args the function accepts.
            requiered_args = self._get_function_args(function)
            for (key, val) in args.items():
                if (key in requiered_args):
                    _args[key] = val
            return function(**_args)
    return None
Calls the first function matching the urls pattern and method. Args: url (str): Url for which to call a matching function. method (str, optional): The method used while registering a function. Defaults to None args (dict, optional): Additional args to be passed to the matching function. Returns: The functions return value or `None` if no function was called.
codesearchnet
def get_alpha(self, x: int, y: int) -> int:
    """Get the Image alpha of the pixel at x, y.

    Args:
        x (int): X pixel of the image. Starting from the left at 0.
        y (int): Y pixel of the image. Starting from the top at 0.

    Returns:
        int: The alpha value of the pixel. With 0 being fully
        transparent and 255 being fully opaque.
    """
    return lib.TCOD_image_get_alpha(self.image_c, x, y)
Get the Image alpha of the pixel at x, y. Args: x (int): X pixel of the image. Starting from the left at 0. y (int): Y pixel of the image. Starting from the top at 0. Returns: int: The alpha value of the pixel. With 0 being fully transparent and 255 being fully opaque.
juraj-google-style
def _compute_nfp_real(l, u, counts, sizes):
    """Expected number of false positives from approximating set sizes in
    [l, u] by u, using the real set size distribution.

    Args:
        l: the lower bound on set sizes (index into sizes/counts).
        u: the upper bound on set sizes (index into sizes/counts).
        counts: the complete distribution of set sizes.
        sizes: the complete domain of set sizes.

    Returns:
        float: the expected number of false positives.

    Raises:
        ValueError: if l > u.
    """
    if l > u:
        raise ValueError("l must be less or equal to u")
    upper = float(sizes[u])
    relative_error = (upper - sizes[l:u + 1]) / upper
    return np.sum(relative_error * counts[l:u + 1])
Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], using the real set size distribution. Args: l: the lower bound on set sizes. u: the upper bound on set sizes. counts: the complete distribution of set sizes. sizes: the complete domain of set sizes. Return (float): the expected number of false positives.
juraj-google-style
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
        formatter_mediator (FormatterMediator): mediates the interactions
            between formatters and other components, such as storage and
            Windows EventLog resources.
        event (EventObject): event.

    Returns:
        tuple(str, str): formatted message string and short message
        string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by the
            formatter.
    """
    if self.DATA_TYPE != event.data_type:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
            event.data_type))
    event_values = event.CopyToDict()
    # Translate the numeric page transition into a readable label.
    page_transition_type = event_values.get('page_transition_type', None)
    if page_transition_type is not None:
        page_transition, page_transition_long = self._PAGE_TRANSITIONS.get(
            page_transition_type, self._UNKNOWN_PAGE_TRANSITION)
        if page_transition_long:
            event_values['page_transition'] = '{0:s} - {1:s}'.format(
                page_transition, page_transition_long)
        else:
            event_values['page_transition'] = page_transition
    visit_source = event_values.get('visit_source', None)
    if visit_source is not None:
        event_values['visit_source'] = self._VISIT_SOURCE.get(
            visit_source, 'UNKNOWN')
    # Collect extra annotations about the URL visit.
    extras = []
    url_hidden = event_values.get('url_hidden', False)
    if url_hidden:
        extras.append('(url hidden)')
    typed_count = event_values.get('typed_count', 0)
    if typed_count == 0:
        extras.append('(URL not typed directly - no typed count)')
    elif typed_count == 1:
        extras.append('(type count {0:d} time)'.format(typed_count))
    else:
        extras.append('(type count {0:d} times)'.format(typed_count))
    event_values['extra'] = ' '.join(extras)
    return self._ConditionalFormatMessages(event_values)
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
juraj-google-style
def validate(cls, job_config):
    """Validates relevant parameters.

    This method can validate fields which it deems relevant.

    Args:
        job_config: an instance of map_job.JobConfig.

    Raises:
        errors.BadReaderParamsError: if job_config names a different
            input reader class.
    """
    reader_cls = job_config.input_reader_cls
    if reader_cls != cls:
        raise errors.BadReaderParamsError(
            "Expect input reader class %r, got %r." % (cls, reader_cls))
Validates relevant parameters. This method can validate fields which it deems relevant. Args: job_config: an instance of map_job.JobConfig. Raises: errors.BadReaderParamsError: required parameters are missing or invalid.
juraj-google-style
def register_game(game_name, game_mode="NoFrameskip-v4"):
    """Create and register a problem for the game.

    Args:
        game_name: str, one of the games in ATARI_GAMES, e.g.
            "bank_heist".
        game_mode: the frame skip and sticky keys config.

    Raises:
        ValueError: if game_name or game_mode are wrong.
    """
    if game_name not in ATARI_GAMES:
        raise ValueError("Game %s not in ATARI_GAMES" % game_name)
    if game_mode not in ATARI_GAME_MODES:
        raise ValueError("Unknown ATARI game mode: %s." % game_mode)
    camel_game_name = misc_utils.snakecase_to_camelcase(game_name) + game_mode
    # Build the problem class dynamically and register it.
    problem_cls = type("Gym%sRandom" % camel_game_name,
                       (T2TGymEnv,),
                       {"base_env_name": camel_game_name})
    registry.register_problem(problem_cls)
Create and register problems for the game. Args: game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist". game_mode: the frame skip and sticky keys config. Raises: ValueError: if game_name or game_mode are wrong.
juraj-google-style
def diff(self, container):
    """Inspect changes on a container's filesystem.

    Args:
        container (str): The container to diff

    Returns:
        (str)

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    return self._result(
        self._get(self._url("/containers/{0}/changes", container)), True
    )
Inspect changes on a container's filesystem. Args: container (str): The container to diff Returns: (str) Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def bogoliubov_trans(p, q, theta):
    r"""The 2-mode Bogoliubov transformation is mapped to two-qubit operations.

    We use the identity X S^\dag X S X = Y X S^\dag Y S X = X to transform
    the Hamiltonian XY+YX to XX+YY type. The time evolution of the XX + YY
    Hamiltonian can be expressed as a power of the iSWAP gate.

    Args:
        p: the first qubit
        q: the second qubit
        theta: The rotational angle that specifies the Bogoliubov
            transformation, which is a function of the kinetic energy and
            the superconducting gap.
    """
    # iSWAP exponent implementing the rotation by theta; the -4/pi scaling
    # converts the angle into the gate's fractional power.
    expo = (((- 4) * theta) / np.pi)
    (yield cirq.X(p))
    (yield cirq.S(p))
    (yield (cirq.ISWAP(p, q) ** expo))
    # S ** 1.5 == S^dagger up to global phase -- presumed from the docstring
    # identity; confirm against the reference derivation.
    (yield (cirq.S(p) ** 1.5))
    (yield cirq.X(p))
r"""The 2-mode Bogoliubov transformation is mapped to two-qubit operations. We use the identity X S^\dag X S X = Y X S^\dag Y S X = X to transform the Hamiltonian XY+YX to XX+YY type. The time evolution of the XX + YY Hamiltonian can be expressed as a power of the iSWAP gate. Args: p: the first qubit q: the second qubit theta: The rotational angle that specifies the Bogoliubov transformation, which is a function of the kinetic energy and the superconducting gap.
codesearchnet
def extract(self, text: str, confidence=0.5, filter=['Person', 'Place', 'Organisation']) -> List[Extraction]:
    """Extract with the input text, confidence and fields filter to be used.

    Args:
        text (str): text input to be annotated
        confidence (float): the confidence of the annotation
        filter (List[str]): the fields that to be extracted

    Returns:
        List[Extraction]
    """
    # NOTE(review): `filter` shadows the builtin and uses a mutable default;
    # it is only read (never mutated) here, so this is safe today.
    filter = ','.join(filter)
    search_data = [('confidence', confidence), ('text', text), ('types', filter)]
    search_headers = {'Accept': 'application/json'}
    # Blocking HTTP POST to the annotation service at self._search_url.
    r = requests.post(self._search_url, data=search_data, headers=search_headers)
    results = r.json()
    last_results = self._combiner(results)
    return last_results
Extract with the input text, confidence and fields filter to be used. Args: text (str): text input to be annotated confidence (float): the confidence of the annotation filter (List[str]): the fields that to be extracted Returns: List[Extraction]
juraj-google-style
def _convert_to_ragged_tensor_values(value):
    """Converts value to a supported RaggedTensor value.

    * If `value` is an object of supported value type, then return it as-is.
    * Otherwise convert it to Tensor or RaggedTensor.

    Args:
        value: An object of `Tensor`, `RaggedTensor` or registered
            RaggedTensor value types, or an object whose type has a
            registered `Tensor` conversion function.

    Returns:
        An object of `Tensor`, `RaggedTensor` or registered RaggedTensor
        value types.
    """
    # Already a supported value type: pass through unchanged.
    if _is_supported_ragged_values_type(value):
        return value
    return convert_to_tensor_or_ragged_tensor(value, name='values')
Converts value to supported RaggedTensor value. * If `value` is an object of supported value type, then return it as-is. * Otherwise convert it to Tensor or RaggedTensor. Args: value: An object of `Tensor`, `RaggedTensor` or registered RaggedTensor value types, or an object whose type has a registered `Tensor` conversion function. Returns: An object of `Tensor`, `RaggedTensor` or registered RaggedTensor value types
github-repos
def _determine_api_url(self, platform, service, action):
    """Build the Adyen API endpoint for the given platform/service/action.

    Args:
        platform (str): Adyen platform, ie 'live' or 'test'.
        service (str): API service to place request through.
        action (str): the API action to perform.
    """
    base_uri = settings.BASE_PAL_URL.format(platform)
    # Each service family pins its own API version; anything that is not
    # Recurring or Payout uses the payment API version.
    if service == "Recurring":
        version = settings.API_RECURRING_VERSION
    elif service == "Payout":
        version = settings.API_PAYOUT_VERSION
    else:
        version = settings.API_PAYMENT_VERSION
    return '/'.join([base_uri, service, version, action])
This returns the Adyen API endpoint based on the provided platform, service and action. Args: platform (str): Adyen platform, ie 'live' or 'test'. service (str): API service to place request through. action (str): the API action to perform.
juraj-google-style
def __init_subclass__(cls, user_cls=None):
    """Initializes subclass.

    `pg.Object` allows child classes to explicitly call
    `pg.Object.__init_subclass__` in their `__init_subclass__`, to bypass
    other classes' `__init_subclass__` in multi-inheritance use cases.

    Args:
        user_cls: The source class that calls this class method.
    """
    # Direct `__init__` overrides are disallowed unless explicitly marked;
    # subclasses are expected to use the _on_bound()/_on_init() hooks.
    utils.ensure_explicit_method_override(cls.__init__, '`pg.Object.__init__` is a PyGlove managed method. For setting up the class initialization logic, please override `_on_bound()` or `_on_init()`. If you do have a need to override `__init__` and know the implications, please decorate your overridden method with `@pg.explicit_method_override`.')
    setattr(cls, '__serialization_key__', cls.__type_name__)
    super().__init_subclass__()
    user_cls = user_cls or cls
    if user_cls.auto_schema:
        # Collect schemas from all base classes that define one, then merge
        # them with the fields inferred from this class' type annotations.
        base_schema_list = []
        for base_cls in user_cls.__bases__:
            base_schema = getattr(base_cls, '__schema__', None)
            if isinstance(base_schema, pg_typing.Schema):
                base_schema_list.append(base_schema)
        new_fields = user_cls._infer_fields_from_annotations()
        cls_schema = pg_typing.create_schema(new_fields, base_schema_list=base_schema_list, allow_nonconst_keys=True, metadata={}, for_cls=user_cls)
        user_cls._update_default_values_from_class_attributes(cls_schema)
        if new_fields:
            # New fields invalidate any inherited __init__ argument ordering.
            cls_schema.metadata['init_arg_list'] = None
        user_cls.apply_schema(cls_schema)
Initializes subclass. `pg.Object` allows child classes to explicit call `pg.Object.__init_subclass__` in their `__init_subclass__`, to bypass other classes' `__init__subclass__` in multi-inheritance use cases. Example: class Subclass(pg.Object, UserClass): def __init_subclass__(cls): # This bypasses UserClass.__init_subclass__ pg.Object.__init_subclass__(cls) Args: user_cls: The source class that calls this class method.
github-repos
def depth_january_average_ground_temperature(self, value=None):
    """Corresponds to IDD Field `depth_january_average_ground_temperature`

    Args:
        value (float): value for IDD Field
            `depth_january_average_ground_temperature`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except (TypeError, ValueError):
            # float() raises TypeError (not ValueError) for inputs such as
            # lists or dicts; normalize both failure modes to the ValueError
            # documented above so callers only handle one exception type.
            raise ValueError(
                'value {} need to be of type float '
                'for field `depth_january_average_ground_temperature`'.format(value))
    self._depth_january_average_ground_temperature = value
Corresponds to IDD Field `depth_january_average_ground_temperature` Args: value (float): value for IDD Field `depth_january_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def from_json(cls, raw):
    """Helper to construct an annotation from a dict.

    Args:
        raw (dict): Raw annotation representation.

    Returns:
        Node: An Annotation object or None.
    """
    # Dispatch on whichever discriminating key is present in the raw dict.
    bcls = None
    if ('webLink' in raw):
        bcls = WebLink
    elif ('topicCategory' in raw):
        bcls = Category
    elif ('taskAssist' in raw):
        bcls = TaskAssist
    elif ('context' in raw):
        bcls = Context
    if (bcls is None):
        # Unknown annotation type: log and skip rather than fail.
        logger.warning('Unknown annotation type: %s', raw.keys())
        return None
    annotation = bcls()
    annotation.load(raw)
    return annotation
Helper to construct an annotation from a dict. Args: raw (dict): Raw annotation representation. Returns: Node: An Annotation object or None.
codesearchnet
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[Tuple, BaseModelOutput]:
    """Runs the stack of encoder layers over the input embeddings.

    Args:
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size,
            sequence_length, hidden_size)`): Embeddings which serve as
            input to the Transformer.
        attention_mask (`torch.Tensor` of shape `(batch_size,
            sequence_length)`, *optional*): Mask to avoid performing
            attention on padding token indices. Mask values selected in
            `[0, 1]`: 1 for tokens that are **not masked**, 0 for tokens
            that are **masked**.
        position_embeddings (*optional*): rotary-style position embedding
            pair passed through to each layer -- presumed; confirm against
            the layer implementation.
        output_attentions (`bool`, *optional*): Whether or not to return
            the attentions tensors of all attention layers.
        output_hidden_states (`bool`, *optional*): Whether or not to return
            the hidden states of all layers.
        return_dict (`bool`, *optional*): Whether or not to return a
            `~utils.ModelOutput` instead of a plain tuple.
    """
    # Fall back to the model config for any unset output flags.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
    encoder_states = () if output_hidden_states else None
    all_attentions = () if output_attentions else None
    hidden_states = inputs_embeds
    for encoder_layer in self.layers:
        # Hidden states are recorded *before* each layer (and once more
        # after the loop), yielding num_layers + 1 entries.
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if self.gradient_checkpointing and self.training:
            # Trade compute for memory during training.
            layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, position_embeddings, output_attentions)
        else:
            layer_outputs = encoder_layer(hidden_states, attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, **kwargs)
        hidden_states = layer_outputs[0]
        if output_attentions:
            all_attentions = all_attentions + (layer_outputs[1],)
    if output_hidden_states:
        encoder_states = encoder_states + (hidden_states,)
    if not return_dict:
        return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
    return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Embeddings which serve as input to the Transformer. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
github-repos
def _reset_env(self, env: BaseUnityEnvironment):
    """Resets the environment.

    Returns:
        A Data structure corresponding to the initial reset state of the
        environment.
    """
    # A meta curriculum, when present, supplies the reset configuration.
    if self.meta_curriculum is None:
        return env.reset(train_mode=self.fast_simulation)
    return env.reset(train_mode=self.fast_simulation,
                     config=self.meta_curriculum.get_config())
Resets the environment. Returns: A Data structure corresponding to the initial reset state of the environment.
codesearchnet
def Run(self, conf, args):
    """Run the Repair command.

    See Command.Run() for full documentation on the Run() method.

    Args:
        conf: nss_cache.config.Config object
        args: list of arguments to be parsed by this command

    Returns:
        0 on success, nonzero on error
    """
    try:
        options, args = self.parser.parse_args(args)
    except SystemExit as e:
        # optparse calls sys.exit() (e.g. on --help); surface its exit code
        # instead of terminating the process here.
        return e.code
    if options.maps:
        self.log.info('Setting configured maps to %s', options.maps)
        conf.maps = options.maps
    warnings, errors = (0, 0)
    self.log.info('Verifying program and system configuration.')
    config_warnings, config_errors = config.VerifyConfiguration(conf)
    warnings += config_warnings
    errors += config_errors
    self.log.info('Verifying data sources.')
    errors += Verify().VerifySources(conf)
    self.log.info('verification: %d warnings, %d errors', warnings, errors)
    if errors > 0:
        self.log.error('Too many errors in verification tests failed; repair aborted!')
        return 1
    # Verification passed: force a full, non-incremental cache rebuild.
    self.log.info('Rebuilding and verifying caches: %s.', conf.maps)
    return Update().UpdateMaps(conf=conf, incremental=False)
Run the Repair command. See Command.Run() for full documentation on the Run() method. Args: conf: nss_cache.config.Config object args: list of arguments to be parsed by this command Returns: 0 on success, nonzero on error
github-repos
def stop(self, **kwargs):
    """Stop the environment.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabStopError: If the operation failed
    """
    stop_path = '%s/%s/stop' % (self.manager.path, self.get_id())
    self.manager.gitlab.http_post(stop_path, **kwargs)
Stop the environment. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabStopError: If the operation failed
juraj-google-style
def exponential(x):
    """Exponential activation function.

    For example:

    >>> a = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
    >>> b = tf.keras.activations.exponential(a)
    >>> b.numpy()
    array([0.04978707, 0.36787945, 1., 2.7182817, 20.085537], dtype=float32)

    Args:
        x: Input tensor.

    Returns:
        Tensor with exponential activation: `exp(x)`.
    """
    return math_ops.exp(x)
Exponential activation function. For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.exponential(a) >>> b.numpy() array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32) Args: x: Input tensor. Returns: Tensor with exponential activation: `exp(x)`.
github-repos
def __new__(cls, name, bases, attrs):
    """Create a new instance.

    Args:
        name (str): Name of the class.
        bases (tuple): Base classes.
        attrs (dict): attributes defined for the class.
    """
    created = super(DidlMetaClass, cls).__new__(cls, name, bases, attrs)
    # Register the new class under its DIDL item class, when it declares one,
    # so instances can later be reconstructed from DIDL metadata.
    didl_item_class = attrs.get('item_class')
    if didl_item_class is not None:
        _DIDL_CLASS_TO_CLASS[didl_item_class] = created
    return created
Create a new instance. Args: name (str): Name of the class. bases (tuple): Base classes. attrs (dict): attributes defined for the class.
juraj-google-style
def assets(self, asset_type=None):
    """Retrieves all of the assets of a given asset_type

    Args:
        asset_type: (str) Either None, PHONE, HANDLER, or URL

    Returns:
    """
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])

    # Map each recognized asset type to its request method; a falsy
    # asset_type (None or empty string) selects the generic asset endpoint.
    requests_by_type = {
        None: self.tc_requests.adversary_assets,
        'PHONE': self.tc_requests.adversary_phone_assets,
        'HANDLER': self.tc_requests.adversary_handle_assets,
        'URL': self.tc_requests.adversary_url_assets,
    }
    request_method = requests_by_type.get(asset_type or None)
    if request_method is not None:
        return request_method(self.api_type, self.api_sub_type, self.unique_id)

    self._tcex.handle_error(
        925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
    )
    return None
Retrieves all of the assets of a given asset_type Args: asset_type: (str) Either None, PHONE, HANDLER, or URL Returns:
juraj-google-style
def SerializeExclusiveData(self, writer):
    """Serialize object.

    Args:
        writer (neo.IO.BinaryWriter): writer the fields are serialized into.
    """
    self.Code.Serialize(writer)
    # The metadata fields below were added in version 1; earlier versions
    # serialize only the code. Field order is part of the wire format.
    if self.Version >= 1:
        writer.WriteBool(self.NeedStorage)
        writer.WriteVarString(self.Name)
        writer.WriteVarString(self.CodeVersion)
        writer.WriteVarString(self.Author)
        writer.WriteVarString(self.Email)
        writer.WriteVarString(self.Description)
Serialize object. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def new_list(iterable=None):
    """The list constructor.

    Args:
        iterable: Optional elements to fill the list with.

    Returns:
        A list-like object. The exact return value depends on the initial
        elements.
    """
    elements = tuple(iterable) if iterable else ()
    # Non-empty initializers produce a plain Python list; an empty list is
    # backed by a TensorFlow tensor list.
    if elements:
        return _py_list_new(elements)
    return tf_tensor_list_new(elements)
The list constructor. Args: iterable: Optional elements to fill the list with. Returns: A list-like object. The exact return value depends on the initial elements.
github-repos
def record_factory(app, fields=None):
    """Return a temporary Record instance to be used for field validation and value parsing

    Args:
        app (App): Target App to create a transient Record instance for
        fields (dict): Optional dict of fields and values to set on new
            Record instance before returning

    Returns:
        Record: Unsaved Record instance to be used for validation,
        creation, etc.
    """
    # Minimal raw payload marking the record as new; the $type strings are
    # .NET type hints required by the Swimlane API.
    record = Record(app, {'$type': Record._type, 'isNew': True, 'applicationId': app.id, 'comments': {'$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.Collections.Generic.List`1[[Core.Models.Record.Comments, Core]], mscorlib]], mscorlib'}, 'values': {'$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.Object, mscorlib]], mscorlib'}})
    fields = (fields or {})
    for (name, value) in six.iteritems(fields):
        record[name] = value
    # Drop None-valued entries from the raw values payload so they are not
    # sent to the API.
    copy_raw = copy.copy(record._raw)
    values_dict = {}
    for (key, value) in six.iteritems(copy_raw['values']):
        if (value is not None):
            values_dict[key] = value
    record._raw['values'] = values_dict
    return record
Return a temporary Record instance to be used for field validation and value parsing Args: app (App): Target App to create a transient Record instance for fields (dict): Optional dict of fields and values to set on new Record instance before returning Returns: Record: Unsaved Record instance to be used for validation, creation, etc.
codesearchnet
def _central_crop(image, crop_height, crop_width):
    """Performs a central crop of the given image.

    Args:
        image: a 3-D image tensor
        crop_height: the height of the image following the crop.
        crop_width: the width of the image following the crop.

    Returns:
        3-D tensor with cropped image.
    """
    shape = tf.shape(image)
    height, width = shape[0], shape[1]

    mlperf_log.resnet_print(
        key=mlperf_log.INPUT_CENTRAL_CROP, value=[crop_height, crop_width])

    # Center the crop: split the excess evenly between the two sides,
    # biased towards the top/left when the excess is odd.  The `// 2`
    # divisors were truncated in the source (likely mangled by a tool that
    # treated `//` as a comment) and are restored here.
    amount_to_be_cropped_h = height - crop_height
    crop_top = amount_to_be_cropped_h // 2
    amount_to_be_cropped_w = width - crop_width
    crop_left = amount_to_be_cropped_w // 2
    return tf.slice(
        image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])
Performs central crops of the given image list. Args: image: a 3-D image tensor crop_height: the height of the image following the crop. crop_width: the width of the image following the crop. Returns: 3-D tensor with cropped image.
codesearchnet
def _OverloadOperator(cls, operator):
    """Defer an operator overload to `tensor_lib.Tensor`.

    We pull the operator out of tensor_lib.Tensor dynamically to avoid
    ordering issues.

    Args:
        operator: string. The operator name.
    """
    # __eq__/__ne__ are deliberately not forwarded here.
    if operator == '__eq__' or operator == '__ne__':
        return
    tensor_oper = getattr(tensor_lib.Tensor, operator)

    def _run_op(a, *args, **kwargs):
        # Delegate to the Tensor operator applied to the variable's value.
        return tensor_oper(a.value(), *args, **kwargs)
    # Preserve the Tensor operator's name/docstring on the wrapper.
    functools.update_wrapper(_run_op, tensor_oper)
    setattr(cls, operator, _run_op)
Defer an operator overload to `tensor_lib.Tensor`. We pull the operator out of tensor_lib.Tensor dynamically to avoid ordering issues. Args: operator: string. The operator name.
github-repos
def _load_from_hdx(self, object_type, id_field):
    """Helper method to load the HDX object given by identifier from HDX

    Args:
        object_type (str): Description of HDX object type (for messages)
        id_field (str): HDX object identifier

    Returns:
        bool: True if loaded, False if not
    """
    success, result = self._read_from_hdx(object_type, id_field)
    if not success:
        # On failure `result` carries the error details.
        logger.debug(result)
        return False
    # Keep the previous data around before replacing it.
    self.old_data = self.data
    self.data = result
    return True
Helper method to load the HDX object given by identifier from HDX Args: object_type (str): Description of HDX object type (for messages) id_field (str): HDX object identifier Returns: bool: True if loaded, False if not
juraj-google-style
def ZerosLikeV1WhileLoop(self, op, index):
    """Create zeros_like for the specified output of an op.

    If op is in a while loop that is part of gradients(), this method must
    be called in its grad loop context.

    Args:
        op: A tensorflow operation.
        index: the index for a specific output of the op.

    Returns:
        A zero tensor of the same shape of op.outputs[index].
    """
    if util.IsLoopSwitch(op):
        return None
    if op.graph.building_function:
        return array_ops.zeros_like(op.outputs[index])
    dead_branch = util.IsSwitch(op)
    forward_ctxt = util.GetWhileContext(op)
    grad_state = self._map.get(forward_ctxt)
    if grad_state is None:
        # op is not in a while loop that is part of gradients().
        return ZerosLike(op, index)
    op_ctxt = op._get_control_flow_context()
    val = ops.convert_to_tensor(op.outputs[index], name='tensor')
    shape = val.get_shape()
    if shape.is_fully_defined():
        # Static shape known: construct the zeros tensor directly.
        if val.dtype == dtypes.resource:
            result = array_ops.zeros(resource_variable_ops.variable_shape(val), dtype=default_gradient.get_zeros_dtype(val))
        else:
            result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
        if dead_branch:
            # op is a cond switch: guard the zeros with the branch predicate.
            pred = grad_state.history_map.get(op_ctxt.pred.name)
            branch = op_ctxt.branch
            result = control_flow_ops._SwitchRefOrTensor(result, pred)[1 - branch]
    else:
        # Dynamic shape: capture the runtime shape in the forward loop and
        # replay it via an accumulator in the backprop loop.
        if dead_branch:
            pred = op_ctxt.pred
            branch = op_ctxt.branch
            op_ctxt.outer_context.Enter()
            val = control_flow_ops._SwitchRefOrTensor(op.inputs[0], pred)[1 - branch]
            zeros_shape = array_ops.shape_internal(val, optimize=False)
            op_ctxt.outer_context.Exit()
            val.op._set_control_flow_context(op_ctxt)
            zeros_shape.op._set_control_flow_context(op_ctxt)
        else:
            op_ctxt.Enter()
            zeros_shape = array_ops.shape_internal(val, optimize=False)
            op_ctxt.Exit()
        grad_state.grad_context.Exit()
        history_zeros_shape = grad_state.AddForwardAccumulator(zeros_shape, dead_branch=dead_branch)
        grad_state.grad_context.Enter()
        shape = grad_state.AddBackpropAccumulatedValue(history_zeros_shape, zeros_shape, dead_branch)
        result = array_ops.zeros(shape, val.dtype)
    return result
Create zeros_like for the specified output of an op. If op is in a while loop that is part of gradients(), this method must be called in its grad loop context. Args: op: A tensorflow operation. index: the index for a specific output of the op. Returns: A zero tensor of the same shape of op.outputs[index].
github-repos
def parse_init(init_file) -> Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]:
    """Read an init_file and parse (per backend) the `_import_structure`
    objects defined and the `TYPE_CHECKING` objects defined.

    Args:
        init_file (`str`): Path to the init file to inspect.

    Returns:
        `Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]`: A
        tuple of two dictionaries mapping backends to list of imported
        objects, one for the `_import_structure` part of the init and one
        for the `TYPE_CHECKING` part of the init. Returns `None` if the
        init is not a custom init.
    """
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    line_index = 0
    # Skip to the `_import_structure` definition; if it is absent, this is
    # not a custom init and there is nothing to parse.
    while line_index < len(lines) and (not lines[line_index].startswith('_import_structure = {')):
        line_index += 1
    if line_index >= len(lines):
        return None
    # First collect the objects that have no backend requirement, i.e.
    # everything before the first backend-protected block or TYPE_CHECKING.
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall('\\[([^\\]]+)\\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            # Quoted object name on its own line; strip quotes and ",\n".
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {'none': objects}
    # Then parse each backend-protected section of `_import_structure`:
    # try / except / else blocks guarded by `find_backend`.
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        backend = find_backend(lines[line_index])
        # Only treat it as backend-protected if the previous line was `try:`.
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Skip to the `else:` branch that holds the real imports.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # Now do the same for the TYPE_CHECKING part: backend-free objects...
    objects = []
    while line_index < len(lines) and find_backend(lines[line_index]) is None and (not lines[line_index].startswith('else')):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {'none': objects}
    # ...then each backend-protected TYPE_CHECKING section.
    while line_index < len(lines):
        backend = find_backend(lines[line_index])
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return (import_dict_objects, type_hint_objects)
Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects defined. Args: init_file (`str`): Path to the init file to inspect. Returns: `Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]`: A tuple of two dictionaries mapping backends to list of imported objects, one for the `_import_structure` part of the init and one for the `TYPE_CHECKING` part of the init. Returns `None` if the init is not a custom init.
github-repos
def request(self, path, data=None, headers=None, method=None):
    """Performs a HTTP request to the Go server

    Args:
        path (str): The full path on the Go server to request.
            This includes any query string attributes.
        data (str, dict, bool, optional): If any data is present this
            request will become a POST request.
        headers (dict, optional): Headers to set for this particular
            request

    Raises:
        HTTPError: when the HTTP request fails.

    Returns:
        file like object: The response from a :func:`urllib2.urlopen` call
    """
    # urlopen requires a bytes body; encode str payloads as UTF-8.
    if isinstance(data, str):
        data = data.encode('utf-8')
    response = urlopen(self._request(path, data=data, headers=headers, method=method))
    # Persist any session cookie so subsequent requests stay authenticated.
    self._set_session_cookie(response)
    return response
Performs a HTTP request to the Go server Args: path (str): The full path on the Go server to request. This includes any query string attributes. data (str, dict, bool, optional): If any data is present this request will become a POST request. headers (dict, optional): Headers to set for this particular request Raises: HTTPError: when the HTTP request fails. Returns: file like object: The response from a :func:`urllib2.urlopen` call
codesearchnet
def __new__(cls, orb, values, frame=PARENT_FRAME):
    """Create a covariance matrix

    Args:
        orb (Orbit): Orbit from which this is the covariance
        values: 2D matrix (6x6)
        frame (str): Frame in which the covariance is expressed
    """
    if isinstance(values, cls):
        # Copy construction: inherit the source covariance's frame and data.
        frame = values.frame
        values = values.base
    obj = np.ndarray.__new__(cls, (6, 6), buffer=np.array(values), dtype=float)
    obj._frame = frame
    # Covariances are always attached to a cartesian form of the orbit.
    obj.orb = orb.copy(form="cartesian")
    return obj
Create a covariance matrix Args: orb (Orbit): Covariance from which this is the covariance values: 2D matrix frame (str): Frame in which the covariance is expressed
juraj-google-style
def evaluate_inverse(distribution, u_data, cache=None, parameters=None):
    """Evaluate inverse Rosenblatt transformation.

    Args:
        distribution (Dist): Distribution to evaluate.
        u_data (numpy.ndarray): Locations for where evaluate inverse
            transformation distribution at.
        parameters (:py:data:typing.Any): Collection of parameters to
            override the default ones in the distribution.
        cache (:py:data:typing.Any): A collection of previous calculations
            in case the same distribution turns up on more than one
            occasion.

    Returns:
        The inverse transformation values of ``distribution`` at location
        ``u_data`` using parameters ``parameters``.
    """
    if (cache is None):
        cache = {}
    out = numpy.zeros(u_data.shape)
    if hasattr(distribution, '_ppf'):
        # Analytical inverse (percent point function) is available.
        parameters = load_parameters(distribution, '_ppf', parameters=parameters, cache=cache)
        out[:] = distribution._ppf(u_data.copy(), **parameters)
    else:
        # Fall back to numerical inversion of the CDF.
        from .. import approximation
        parameters = load_parameters(distribution, '_cdf', parameters=parameters, cache=cache)
        out[:] = approximation.approximate_inverse(distribution, u_data.copy(), cache=cache.copy(), parameters=parameters)
    # Memoize so repeated occurrences of this distribution reuse the result.
    cache[distribution] = out
    return out
Evaluate inverse Rosenblatt transformation. Args: distribution (Dist): Distribution to evaluate. u_data (numpy.ndarray): Locations for where evaluate inverse transformation distribution at. parameters (:py:data:typing.Any): Collection of parameters to override the default ones in the distribution. cache (:py:data:typing.Any): A collection of previous calculations in case the same distribution turns up on more than one occasion. Returns: The cumulative distribution values of ``distribution`` at location ``u_data`` using parameters ``parameters``.
codesearchnet
def get_pretty_app_names(self):
    """Return the set of pretty app names that are available in the database.

    Returns:
        set of str.
    """
    # Map every stored app name through its pretty form; the set removes
    # duplicates automatically.
    return {self.get_name(app_name) for app_name in self.get_app_names()}
Return the list of pretty app names that are available in the database. Returns: set of str.
codesearchnet
def tuplize(nested):
    """Recursively converts iterables into tuples.

    Args:
        nested: A nested structure of items and iterables.

    Returns:
        A nested structure of items and tuples.
    """
    # Strings are iterable but are treated as atomic leaf values.
    if isinstance(nested, str):
        return nested
    try:
        return tuple(tuplize(item) for item in nested)
    except TypeError:
        # Non-iterable leaves pass through unchanged.
        return nested
Recursively converts iterables into tuples. Args: nested: A nested structure of items and iterables. Returns: A nested structure of items and tuples.
juraj-google-style
def barrier(mesh: layout.Mesh, barrier_name: Optional[str]=None, timeout_in_ms: Optional[int]=None):
    """Runs a barrier on the mesh.

    Upon returning from the barrier, all operations run before the barrier
    would have completed across all clients. Currently we allocate a fully
    sharded tensor with mesh shape and run an all_reduce on it.

    Args:
        mesh: The mesh to run the barrier on.
        barrier_name: The name of the barrier. Mainly used for logging
            purpose.
        timeout_in_ms: The timeout of the barrier in ms. If omitted, blocks
            indefinitely till the barrier is reached from all clients.
    """
    if barrier_name is None:
        barrier_name = '(barrier)'
    logging.info('entering barrier before op: %s', barrier_name)
    # Drain all pending async ops before synchronizing.
    context.async_wait()
    # Build a fully sharded all-ones tensor and sum it; the result equals
    # the mesh size only when every device participated.
    component = array_ops.reshape(1.0, [1] * len(mesh.shape()))
    ones = api.pack([component] * mesh.num_local_devices(), layout.Layout(mesh.dim_names, mesh))
    mesh_size = math_ops.reduce_sum(ones)
    if mesh_size != mesh.size:
        raise ValueError('Global barrier produced wrong mesh size : {0} while mesh has actualsize : {1}'.format(mesh_size, mesh.size))
    context.async_wait()
    if context.context().coordination_service:
        # Also synchronize via the coordination service when available.
        if timeout_in_ms is None:
            # Effectively "indefinite": 24 hours.
            timeout_in_ms = 24 * 60 * 60 * 1000
        # Give each use of the same barrier name a unique id.
        num_calls = _BARRIER_DICT.setdefault(barrier_name, 0)
        _BARRIER_DICT[barrier_name] = num_calls + 1
        barrier_id = f'{barrier_name}:{num_calls}'
        context.context().wait_at_barrier(barrier_id, timeout_in_ms)
    logging.info('finished running barrier across all clients after op: %s', barrier_name)
Runs a barrier on the mesh. Upon returning from the barrier, all operations run before the barrier would have completed across all clients. Currently we allocate a fully sharded tensor with mesh shape and run an all_reduce on it. Example: A barrier can be used before application exit to ensure completion of pending ops. ```python x = [1, 2, 3] x = dtensor.relayout(x, dtensor.Layout.batch_sharded(mesh, 'batch', 1)) dtensor.barrier(mesh) # At this point all devices on all clients in the mesh have completed # operations before the barrier. Therefore it is OK to tear down the clients. sys.exit() ``` Args: mesh: The mesh to run the barrier on. barrier_name: The name of the barrier. Mainly used for logging purpose. timeout_in_ms: The timeout of the barrier in ms. If omitted, blocks indefinitely till the barrier is reached from all clients.
github-repos
def __init__(self, iterable, order, func):
    """Create an OrderedQueryable.

    Args:
        iterable: The iterable sequence to be ordered.
        order: +1 for ascending, -1 for descending.
        func: The function to select the sorting key.
    """
    # NOTE(review): assert is stripped under `python -O`; consider raising
    # ValueError for an invalid `order` instead -- confirm caller contract.
    assert abs(order) == 1, 'order argument must be +1 or -1'
    super(OrderedQueryable, self).__init__(iterable)
    # List of (order, key-func) pairs -- presumably extended by subsequent
    # then_by()-style calls; confirm against sibling methods.
    self._funcs = [(order, func)]
Create an OrderedQueryable.

Args:
    iterable: The iterable sequence to be ordered.
    order: +1 for ascending, -1 for descending.
    func: The function to select the sorting key.
juraj-google-style
def infer_transportation_mode(self, clf, min_time):
    """In-place transportation mode inferring.

    See the infer_transportation_mode function for details.

    Args:
        clf: classifier passed through to speed_clustering -- presumed;
            confirm against speed_clustering's signature.
        min_time: minimum time threshold passed to speed_clustering.

    Returns:
        :obj:`Segment`: self
    """
    self.transportation_modes = speed_clustering(clf, self.points, min_time)
    return self
In-place transportation mode inferring See infer_transportation_mode function Args: Returns: :obj:`Segment`: self
codesearchnet
def Update(self, request, global_params=None):
    """Updates information in an existing routine.

    The update method replaces the entire Routine resource.

    Args:
        request: (BigqueryRoutinesUpdateRequest) input message
        global_params: (StandardQueryParameters, default: None) global
            arguments

    Returns:
        (Routine) The response message.
    """
    config = self.GetMethodConfig('Update')
    return self._RunMethod(config, request, global_params=global_params)
Updates information in an existing routine. The update method replaces the entire Routine resource. Args: request: (BigqueryRoutinesUpdateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Routine) The response message.
github-repos
def ms_to_mph(value):
    """Converts speed from meters per second to miles per hour

    Args:
        value: floating point representing the speed in meters per second

    Returns:
        speed in miles per hour, or None when the input is None
    """
    # 1 m/s ~= 2.237 mph (factor kept identical to the original).
    return None if value is None else value * 2.237
Converts speed from meters per second to miles per hour Args: value: floating point representing the speed in meters per second Returns: speed in miles per hour
github-repos
def on_train_begin(self, logs=None):
    """Called at the beginning of training.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
Called at the beginning of training. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.
github-repos
def tuplesorted(items, *keys):
    """Sort by tuples with a different key for each item.

    Args:
        items: An iterable series of sequences (typically tuples)
        *keys: Key objects which transform individual elements of each
            tuple into sort keys. The zeroth object transforms the zeroth
            element of each tuple, the first key object transforms the
            first element of each tuple, and so on.

    Returns:
        A list of items sorted according to keys.
    """
    # Adapt each element-level Key into a tuple-level Key: key i is applied
    # to element i of every tuple. Default arguments (i=index, k=key) bind
    # the current loop values at definition time, avoiding the late-binding
    # closure pitfall.
    tuple_keys = [
        Key(func=lambda t, i=index, k=key: k.func(t[i]), reverse=key.reverse)
        for index, key in enumerate(keys)
    ]
    return multisorted(items, *tuple_keys)
Sort by tuples with a different key for each item. Args: items: An iterable series of sequences (typically tuples) *keys: Key objects which transform individual elements of each tuple into sort keys. The zeroth object transforms the zeroth element of each tuple, the first key object transforms the first element of each tuple, and so on. Returns: A list of items sorted according to keys.
juraj-google-style
def controlled_by(self, *control_qubits: Qid) -> 'Operation':
    """Returns a controlled version of this operation.

    Args:
        control_qubits: Qubits to control the operation by. Required.

    Raises:
        ValueError: if no control qubits are given.
    """
    from cirq.ops import ControlledOperation
    # `*control_qubits` always binds a tuple (never None), so only
    # emptiness needs checking.  The original used `len(...) is 0`, an
    # identity comparison with an int literal -- a CPython implementation
    # detail and a SyntaxWarning on Python 3.8+.
    if not control_qubits:
        raise ValueError("Can't get controlled operation without control qubit. Op: {}".format(repr(self)))
    return ControlledOperation(control_qubits, self)
Returns a controlled version of this operation. Args: control_qubits: Qubits to control the operation by. Required.
codesearchnet
def combiplot(self, fontsize=8, **kwargs):
    """Compare multiple cycles on a grid: one subplot per quantity, all
    cycles on the same subplot.

    Args:
        fontsize: Legend fontsize.
    """
    # NOTE(review): **kwargs is accepted but currently unused -- confirm
    # whether it should be forwarded to cycle.plot.
    ax_list = None
    # Reuse the axes returned by the first plot so every subsequent cycle
    # is drawn on the same grid of subplots.  (The original enumerated the
    # items but never used the index.)
    for label, cycle in self.items():
        fig = cycle.plot(ax_list=ax_list, label=label, fontsize=fontsize,
                         lw=2.0, marker="o", linestyle="-", show=False)
        ax_list = fig.axes
    return fig
Compare multiple cycles on a grid: one subplot per quantity, all cycles on the same subplot.

Args:
    fontsize: Legend fontsize.
juraj-google-style
def log_batch(self, log_data):
    """Logs batch of messages with attachment.

    Args:
        log_data: list of log records. A log record is a dict of:
            time, message, level, attachment.
            attachment is a dict of:
                name: name of attachment
                data: fileobj or content
                mime: content type for attachment
    """
    url = uri_join(self.base_url, "log")
    attachments = []
    for log_item in log_data:
        # Attach each record to the currently active test item.
        log_item["item_id"] = self.stack[-1]
        attachment = log_item.get("attachment", None)
        if "attachment" in log_item:
            del log_item["attachment"]
        if attachment:
            # NOTE(review): `collections.Mapping` was removed in Python
            # 3.10 (use collections.abc.Mapping) -- confirm the supported
            # interpreter range before changing.
            if not isinstance(attachment, collections.Mapping):
                attachment = {"data": attachment}
            name = attachment.get("name", str(uuid.uuid4()))
            log_item["file"] = {"name": name}
            attachments.append(("file", (
                name,
                attachment["data"],
                attachment.get("mime", "application/octet-stream")
            )))
    # Multipart body: the JSON payload first, then all attachment parts.
    files = [(
        "json_request_part", (
            None,
            json.dumps(log_data),
            "application/json"
        )
    )]
    files.extend(attachments)
    from reportportal_client import POST_LOGBATCH_RETRY_COUNT
    for i in range(POST_LOGBATCH_RETRY_COUNT):
        try:
            r = self.session.post(
                url=url,
                files=files,
                verify=self.verify_ssl
            )
        except KeyError:
            # NOTE(review): retrying on KeyError is unusual for an HTTP
            # post; presumably a workaround for a requests multipart
            # encoding issue -- confirm before changing.
            if i < POST_LOGBATCH_RETRY_COUNT - 1:
                continue
            else:
                raise
        break
    logger.debug("log_batch - Stack: %s", self.stack)
    logger.debug("log_batch response: %s", r.text)
    return _get_data(r)
Logs batch of messages with attachment.

Args:
    log_data: list of log records.
        A log record is a dict of: time, message, level, attachment.
        attachment is a dict of:
            name: name of attachment
            data: fileobj or content
            mime: content type for attachment
juraj-google-style
def get_structure_seqs(self, model):
    """Gather chain sequences and store in their corresponding ``ChainProp``
    objects in the ``chains`` attribute.

    Args:
        model (Model): Biopython Model object of the structure you would
            like to parse
    """
    # Collect chains whose sequence is already stored; if all chains are
    # covered there is nothing to do.
    dont_overwrite = []
    chains = list(model.get_chains())
    for x in chains:
        if self.chains.has_id(x.id):
            if self.chains.get_by_id(x.id).seq_record:
                dont_overwrite.append(x.id)
    if (len(dont_overwrite) == len(chains)):
        log.debug('Not writing structure sequences, already stored')
        return
    # Parse all chain sequences from the structure model and store each on
    # its matching ChainProp.
    structure_seqs = ssbio.protein.structure.properties.residues.get_structure_seqrecords(model)
    log.debug('{}: gathered chain sequences'.format(self.id))
    for seq_record in structure_seqs:
        log.debug('{}: adding chain sequence to ChainProp'.format(seq_record.id))
        my_chain = self.chains.get_by_id(seq_record.id)
        my_chain.seq_record = seq_record
Gather chain sequences and store in their corresponding ``ChainProp`` objects in the ``chains`` attribute. Args: model (Model): Biopython Model object of the structure you would like to parse
codesearchnet
def get_ast(module_path, python_version):
    """Get the AST for the code in a file.

    Args:
        module_path: pathlib.Path to the file containing the code.
        python_version: Python version as a "MAJ.MIN" string.

    Returns:
        The parso parse tree for the code in `module_path`.
    """
    with module_path.open(mode='rt', encoding='utf-8') as source_file:
        code = source_file.read()
    return parso.parse(code, version=python_version)
Get the AST for the code in a file. Args: module_path: pathlib.Path to the file containing the code. python_version: Python version as a "MAJ.MIN" string. Returns: The parso parse tree for the code in `module_path`.
juraj-google-style
def get_filename_by_suffixes(dir_src, suffixes):
    """get file names with the given suffixes in the given directory

    Args:
        dir_src: directory path
        suffixes: wanted suffixes list, the suffix in suffixes can with or
            without '.'

    Returns:
        file names with the given suffixes as list
    """
    filenames = os.listdir(dir_src)
    # Accept a single suffix string as a one-element list.
    if is_string(suffixes):
        suffixes = [suffixes]
    if not isinstance(suffixes, list):
        return None
    # Normalize in place so every suffix carries a leading dot (callers may
    # observe the normalized list, matching the original behavior).
    for idx, suf in enumerate(suffixes):
        if suf and not suf.startswith('.'):
            suffixes[idx] = '.' + suf
    return [fname for fname in filenames
            if StringClass.string_in_list(os.path.splitext(fname)[1], suffixes)]
get file names with the given suffixes in the given directory Args: dir_src: directory path suffixes: wanted suffixes list, the suffix in suffixes can with or without '.' Returns: file names with the given suffixes as list
juraj-google-style
def update_refresh_state(self, id_or_uri, refresh_state_data):
    """Refreshes a given intelligent power delivery device.

    Args:
        id_or_uri: Can be either the power device id or the uri.
        refresh_state_data: Power device refresh request.

    Returns:
        str: The power state.
    """
    # Resolve the id/uri to a full URI, then target its refreshState endpoint.
    refresh_uri = '{}/refreshState'.format(self._client.build_uri(id_or_uri))
    return self._client.update(refresh_state_data, uri=refresh_uri)
Refreshes a given intelligent power delivery device. Args: id_or_uri: Can be either the power device id or the uri refresh_state_data: Power device refresh request Returns: str: The power state
codesearchnet
def WriteSignedBinaryBlobs(binary_urn, blobs, token=None):
    """Saves signed blobs to the datastore.

    If a signed binary with the given URN already exists, its contents will
    get overwritten.

    Args:
        binary_urn: RDFURN that should serve as a unique identifier for the
            binary.
        blobs: An Iterable of signed blobs to write to the datastore.
            NOTE(review): when both datastores are enabled, `blobs` is iterated
            twice — this assumes a re-iterable sequence, not a one-shot
            generator; confirm with callers.
        token: ACL token to use with the legacy (non-relational) datastore.
    """
    if _ShouldUseLegacyDatastore():
        # Legacy AFF4 path: delete any existing binary, then re-add all blobs
        # inside a single mutation pool.
        aff4.FACTORY.Delete(binary_urn, token=token)
        with data_store.DB.GetMutationPool() as mutation_pool:
            with aff4.FACTORY.Create(
                    binary_urn,
                    collects.GRRSignedBlob,
                    mode="w",
                    mutation_pool=mutation_pool,
                    token=token) as fd:
                for blob in blobs:
                    fd.Add(blob, mutation_pool=mutation_pool)
    if data_store.RelationalDBEnabled():
        # Relational path: store each blob in the blob store and record
        # (offset, size, blob_id) references so the binary can be reassembled.
        blob_references = rdf_objects.BlobReferences()
        current_offset = 0
        for blob in blobs:
            blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
                blob.SerializeToString())
            blob_references.items.Append(
                rdf_objects.BlobReference(
                    offset=current_offset, size=len(blob.data), blob_id=blob_id))
            # Offsets are in terms of the signed payload bytes, not the
            # serialized proto.
            current_offset += len(blob.data)
        data_store.REL_DB.WriteSignedBinaryReferences(
            _SignedBinaryIDFromURN(binary_urn), blob_references)
Saves signed blobs to the datastore. If a signed binary with the given URN already exists, its contents will get overwritten. Args: binary_urn: RDFURN that should serve as a unique identifier for the binary. blobs: An Iterable of signed blobs to write to the datastore. token: ACL token to use with the legacy (non-relational) datastore.
juraj-google-style
def GetModifyTimestamp(self):
    """Return the last modification timestamp of this map.

    Returns:
        Either an int containing seconds since epoch, or None.
    """
    timestamp = self._last_modification_timestamp
    return timestamp
Return last modification timestamp of this map. Returns: Either an int containing seconds since epoch, or None.
github-repos
def find_stages(document):
    """Find **stages** in document.

    Args:
        document (dict): validated spline document loaded from a yaml file.

    Returns:
        list: stage names found in the spline document, or an empty list
        if none are given.

    >>> find_stages({'pipeline': [{'stage(Prepare)': 1}, {'stage(Build)': 1}, {'stage(Deploy)': 2}]})
    ['Prepare', 'Build', 'Deploy']
    """
    stage_names = []
    for entry in document.get('pipeline', []):
        # Each pipeline entry is a single-key dict; inspect its first key.
        first_key = next(iter(entry.items()))[0]
        if first_key.startswith("stage("):
            stage_names.append(first_key.replace('stage(', '').replace(')', ''))
    return stage_names
Find **stages** in document. Args: document (dict): validated spline document loaded from a yaml file. Returns: list: stages as a part of the spline document or an empty list if not given. >>> find_stages({'pipeline': [{'stage(Prepare)':1}, {'stage(Build)':1}, {'stage(Deploy)':2}]}) ['Prepare', 'Build', 'Deploy']
juraj-google-style
def _attempt_slice_retry(self, shard_state, tstate):
    """Attempt to retry this slice.

    This method may modify shard_state and tstate to prepare for retry or
    fail.

    Args:
        shard_state: model.ShardState for current shard.
        tstate: model.TransientShardState for current shard.

    Returns:
        A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
        RETRY_SHARD if shard retry should be attempted.
    """
    if ((shard_state.slice_retries + 1) <
            parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):
        logging.warning(
            'Slice %s %s failed for the %s of up to %s attempts (%s of %s taskqueue execution attempts). Will retry now.',
            tstate.shard_id, tstate.slice_id, (shard_state.slice_retries + 1),
            parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS,
            (self.task_retry_count() + 1), parameters.config.TASK_MAX_ATTEMPTS)
        # Python 2 only: clear the current exception info so it does not leak
        # into the retry (sys.exc_clear was removed in Python 3).
        sys.exc_clear()
        # Release the shard lease so the retried slice can re-acquire it.
        self._try_free_lease(shard_state, slice_retry=True)
        return self._TASK_DIRECTIVE.RETRY_SLICE
    # Slice retries exhausted; only log when slice retries are enabled at all.
    if (parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0):
        logging.warning('Slice attempt %s exceeded %s max attempts.',
                        (self.task_retry_count() + 1),
                        parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)
    return self._TASK_DIRECTIVE.RETRY_SHARD
Attempt to retry this slice. This method may modify shard_state and tstate to prepare for retry or fail. Args: shard_state: model.ShardState for current shard. tstate: model.TransientShardState for current shard. Returns: A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried. RETRY_SHARD if shard retry should be attempted.
codesearchnet
async def article(self, title, description=None, *, url=None, thumb=None,
                  content=None, id=None, text=None, parse_mode=(),
                  link_preview=True, geo=None, period=60, contact=None,
                  game=False, buttons=None):
    """Creates a new inline result of article type.

    Args:
        title (`str`): The title to be shown for this result.
        description (`str`, optional): Further explanation of what this
            result means.
        url (`str`, optional): The URL to be shown for this result.
        thumb (:tl:`InputWebDocument`, optional): The thumbnail to be shown
            for this result.
        content (:tl:`InputWebDocument`, optional): The content to be shown
            for this result.
        id (`str`, optional): Unique ID for the result; when omitted, a
            deterministic ID is derived from the serialized result.
        text, parse_mode, link_preview, geo, period, contact, game, buttons:
            Forwarded to ``self._message`` to build the message that is sent
            when this result is chosen.

    Returns:
        :tl:`InputBotInlineResult` describing this article.
    """
    # NOTE: `id` intentionally shadows the builtin here to keep the public
    # keyword name stable for callers.
    result = types.InputBotInlineResult(
        id=(id or ''),
        type='article',
        # Build the message payload first; _message handles text/geo/contact/
        # game variants and button markup.
        send_message=(await self._message(
            text=text, parse_mode=parse_mode, link_preview=link_preview,
            geo=geo, period=period, contact=contact, game=game,
            buttons=buttons)),
        title=title,
        description=description,
        url=url,
        thumb=thumb,
        content=content)
    if (id is None):
        # No explicit ID given: derive a stable one by hashing the
        # serialized result, so identical articles share an ID.
        result.id = hashlib.sha256(bytes(result)).hexdigest()
    return result
Creates new inline result of article type. Args: title (`str`): The title to be shown for this result. description (`str`, optional): Further explanation of what this result means. url (`str`, optional): The URL to be shown for this result. thumb (:tl:`InputWebDocument`, optional): The thumbnail to be shown for this result. For now it has to be a :tl:`InputWebDocument` if present. content (:tl:`InputWebDocument`, optional): The content to be shown for this result. For now it has to be a :tl:`InputWebDocument` if present.
codesearchnet
def CleanseRawStrings(raw_lines): delimiter = None lines_without_raw_strings = [] for line in raw_lines: if delimiter: end = line.find(delimiter) if (end >= 0): leading_space = Match('^(\\s*)\\S', line) line = ((leading_space.group(1) + '""') + line[(end + len(delimiter)):]) delimiter = None else: line = '""' while (delimiter is None): matched = Match('^(.*?)\\b(?:R|u8R|uR|UR|LR)"([^\\s\\\\()]*)\\((.*)$', line) if (matched and (not Match('^([^\\\'"]|\\\'(\\\\.|[^\\\'])*\\\'|"(\\\\.|[^"])*")* delimiter = ((')' + matched.group(2)) + '"') end = matched.group(3).find(delimiter) if (end >= 0): line = ((matched.group(1) + '""') + matched.group(3)[(end + len(delimiter)):]) delimiter = None else: line = (matched.group(1) + '""') else: break lines_without_raw_strings.append(line) return lines_without_raw_strings
Removes C++11 raw strings from lines. Before: static const char kData[] = R"( multi-line string )"; After: static const char kData[] = "" (replaced by blank line) ""; Args: raw_lines: list of raw lines. Returns: list of lines with C++11 raw strings replaced by empty strings.
codesearchnet
def _await_flow(self, client, flow_id):
    """Awaits flow completion.

    Polls the flow status until it terminates, sleeping
    ``self._CHECK_FLOW_INTERVAL_SEC`` seconds between checks.

    Args:
        client: GRR Client object in which to await the flow.
        flow_id: string containing ID of flow to await.

    Raises:
        DFTimewolfError: if flow error encountered.
    """
    print('{0:s}: Waiting to finish'.format(flow_id))
    while True:
        try:
            status = client.Flow(flow_id).Get().data
        except grr_errors.UnknownError:
            # NOTE(review): the same message is built twice — once for
            # state.add_error and once for the exception.
            msg = 'Unable to stat flow {0:s} for host {1:s}'.format(
                flow_id, client.data.os_info.fqdn.lower())
            self.state.add_error(msg)
            raise DFTimewolfError(
                'Unable to stat flow {0:s} for host {1:s}'.format(
                    flow_id, client.data.os_info.fqdn.lower()))
        if status.state == flows_pb2.FlowContext.ERROR:
            message = status.context.backtrace
            # For unregistered artifacts, surface only the meaningful last
            # line of the backtrace instead of the whole trace.
            if 'ArtifactNotRegisteredError' in status.context.backtrace:
                message = status.context.backtrace.split('\n')[-2]
            raise DFTimewolfError(
                '{0:s}: FAILED! Message from GRR:\n{1:s}'.format(
                    flow_id, message))
        if status.state == flows_pb2.FlowContext.TERMINATED:
            print('{0:s}: Complete'.format(flow_id))
            break
        # Still running: wait before polling again.
        time.sleep(self._CHECK_FLOW_INTERVAL_SEC)
Awaits flow completion. Args: client: GRR Client object in which to await the flow. flow_id: string containing ID of flow to await. Raises: DFTimewolfError: if flow error encountered.
juraj-google-style
def graph_execution_trace_to_tensor_value(self, trace):
    """Read the full tensor value behind a graph-execution trace.

    Args:
        trace: A `GraphExecutionTraceDigest` or `GraphExecutionTrace` object.

    Returns:
        A numpy array representing the output tensor value of the
        intra-graph tensor execution event.
    """
    debug_event = self._reader.read_graph_execution_traces_event(trace.locator)
    tensor_proto = debug_event.graph_execution_trace.tensor_proto
    return _parse_tensor_value(tensor_proto)
Read full tensor values from a GraphExecutionTrace or GraphExecutionTraceDigest. Args: trace: A `GraphExecutionTraceDigest` or `GraphExecutionTrace` object. Returns: A numpy array representing the output tensor value of the intra-graph tensor execution event.
github-repos
def compute_mu(L_aug, Y, k, p):
    """Given label matrix L_aug and labels Y, compute the true mu params.

    The stored copy of this function contained the invalid subscript
    ``mu[(:, (y - 1))]``; restored to the intended ``mu[:, y - 1]``.

    Args:
        L_aug: (np.array {0,1}) [n, d] The augmented (indicator) label matrix
        Y: (np.array int) [n] The true labels in {1,...,k}
        k: (int) Cardinality
        p: (np.array float) [k] The class balance (accepted for interface
            compatibility; not used in this computation)

    Returns:
        (np.array float) [d, k] where column y-1 is the mean of the rows of
        L_aug whose true label is y.
        NOTE: assumes every class 1..k occurs at least once in Y; an absent
        class produces a divide-by-zero.
    """
    n, d = L_aug.shape
    assert Y.shape[0] == n
    mu = np.zeros((d, k))
    for y in range(1, k + 1):
        L_y = L_aug[Y == y]
        # Empirical mean of the indicator rows belonging to class y.
        mu[:, y - 1] = L_y.sum(axis=0) / L_y.shape[0]
    return mu
Given label matrix L_aug and labels Y, compute the true mu params. Args: L: (np.array {0,1}) [n, d] The augmented (indicator) label matrix Y: (np.array int) [n] The true labels in {1,...,k} k: (int) Cardinality p: (np.array float) [k] The class balance
codesearchnet
def decode(self, ids, strip_extraneous=False):
    """Transform a sequence of int ids into an image file.

    Args:
        ids: list of integers to be converted.
        strip_extraneous: unused.

    Returns:
        Path to the temporary file where the image was saved.

    Raises:
        ValueError: if the ids are not of the appropriate size.
    """
    del strip_extraneous
    # NOTE(review): the OS-level file descriptor returned by mkstemp is
    # discarded and never closed.
    (_, tmp_file_path) = tempfile.mkstemp('_decode.png')
    if ((self._height is None) or (self._width is None)):
        # No fixed size configured: assume a square image.
        size = int(math.sqrt((len(ids) / self._channels)))
        length = ((size * size) * self._channels)
    else:
        size = None
        length = ((self._height * self._width) * self._channels)
    if (len(ids) != length):
        # NOTE(review): in the square-image branch self._height/self._width
        # are None, so this message prints "None" for height/width.
        raise ValueError(('Length of ids (%d) must be height (%d) x width (%d) x channels (%d); %d != %d.\n Ids: %s' % (len(ids), self._height, self._width, self._channels, len(ids), length, ' '.join([str(i) for i in ids]))))
    with tf.Graph().as_default():
        raw = tf.constant(ids, dtype=tf.uint8)
        if (size is None):
            img = tf.reshape(raw, [self._height, self._width, self._channels])
        else:
            img = tf.reshape(raw, [size, size, self._channels])
        png = tf.image.encode_png(img)
        op = tf.write_file(tmp_file_path, png)
        # TF1 graph mode: the write only happens when the op is run.
        with tf.Session() as sess:
            sess.run(op)
    return tmp_file_path
Transform a sequence of int ids into an image file. Args: ids: list of integers to be converted. strip_extraneous: unused Returns: Path to the temporary file where the image was saved. Raises: ValueError: if the ids are not of the appropriate size.
codesearchnet
def fetch_next_page(self):
    """Fetch the next Page of results.

    Returns:
        Page: The next page of results, or an empty Page when the
        iterator is exhausted.
    """
    for next_page in self:
        return next_page
    # Iterator yielded nothing: hand back an empty page over the same cursor.
    return Page(self._resultset.cursor, iter(()))
Fetch the next Page of results. Returns: Page: The next page of results.
codesearchnet
def strip_hidden(key_tuples, visibilities):
    """Filter each tuple according to visibility.

    Args:
        key_tuples: A sequence of tuples of equal length (i.e. rectangular).
        visibilities: A sequence of booleans equal in length to the tuples
            contained in key_tuples.

    Returns:
        A sequence equal in length to key_tuples where the items are tuples
        with a length corresponding to the number of items in visibility
        which are True.

    Raises:
        ValueError: if a tuple's length differs from len(visibilities).
    """
    stripped = []
    for keys in key_tuples:
        if len(keys) != len(visibilities):
            raise ValueError('length of key tuple {} is not equal to length of visibilities {}'.format(keys, visibilities))
        visible_items = [item for item, shown in zip(keys, visibilities) if shown]
        stripped.append(tuple(visible_items))
    return stripped
Filter each tuple according to visibility. Args: key_tuples: A sequence of tuples of equal length (i.e. rectangular) visibilities: A sequence of booleans equal in length to the tuples contained in key_tuples. Returns: A sequence equal in length to key_tuples where the items are tuples with a length corresponding to the number of items in visibility which are True.
codesearchnet
def GetUsername(self, event, default_username='-'):
    """Retrieves the username related to the event.

    Args:
        event (EventObject): event.
        default_username (Optional[str]): default username.

    Returns:
        str: username.
    """
    # Prefer a username recorded directly on the event ('-' is a placeholder).
    explicit_name = getattr(event, 'username', None)
    if explicit_name and explicit_name != '-':
        return explicit_name
    session_identifier = event.GetSessionIdentifier()
    if session_identifier is None:
        return default_username
    # Fall back to resolving the SID through the knowledge base.
    security_identifier = getattr(event, 'user_sid', None)
    resolved_name = self._knowledge_base.GetUsernameByIdentifier(
        security_identifier, session_identifier=session_identifier)
    return resolved_name or default_username
Retrieves the username related to the event. Args: event (EventObject): event. default_username (Optional[str]): default username. Returns: str: username.
juraj-google-style
def next_location(self, raw=False):
    """Returns the next location.

    Args:
        raw (bool): If True, the original string contained in the Location
            field will be returned. Otherwise, the URL will be normalized to
            a complete URL.

    Returns:
        str, None: If str, the location. Otherwise, no next location.
    """
    if not self._response:
        return None
    location = self._response.fields.get('location')
    if raw or not location:
        return location
    # Resolve a possibly-relative Location against the request URL.
    base_url = self._response.request.url_info.url
    return wpull.url.urljoin(base_url, location)
Returns the next location. Args: raw (bool): If True, the original string contained in the Location field will be returned. Otherwise, the URL will be normalized to a complete URL. Returns: str, None: If str, the location. Otherwise, no next location.
codesearchnet
def GetGroupMap(self, since=None):
    """Return the group map from this source.

    Args:
        since: Get data only changed since this timestamp (inclusive) or
            None for all data.

    Returns:
        instance of group.GroupMap
    """
    getter = GroupUpdateGetter()
    return getter.GetUpdates(self, self.conf['group_url'], since)
Return the group map from this source. Args: since: Get data only changed since this timestamp (inclusive) or None for all data. Returns: instance of group.GroupMap
github-repos
def _parse_vrf(self, config):
    """Parses config file for the OSPF vrf name.

    Args:
        config (str): Running configuration.

    Returns:
        dict: key: vrf (str) — the VRF name from the ``router ospf`` line,
        or 'default' when no vrf argument is present.
    """
    match = re.search(r'^router ospf \d+ vrf (\w+)', config)
    vrf_name = match.group(1) if match else 'default'
    return dict(vrf=vrf_name)
Parses config file for the OSPF vrf name Args: config(str): Running configuration Returns: dict: key: vrf (str)
juraj-google-style
def memory_usage(self, string=False):
    """Get the memory usage estimate of the container.

    Args:
        string (bool): Human readable string (default false).

    See Also:
        :func:`~exa.core.container.Container.info`
    """
    if not string:
        return self.info()['size']
    total_bytes = getsizeof(self)
    return ' '.join(str(part) for part in convert_bytes(total_bytes))
Get the memory usage estimate of the container. Args: string (bool): Human readable string (default false) See Also: :func:`~exa.core.container.Container.info`
codesearchnet
def get_cell_length(flow_model):
    """Get flow direction induced cell length dict.

    Args:
        flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are
            supported (case-insensitive).
    """
    model_key = flow_model.lower()
    assert model_key in FlowModelConst.d8_lens
    return FlowModelConst.d8_lens.get(model_key)
Get flow direction induced cell length dict. Args: flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
juraj-google-style
def format_delta(__timedelta: datetime.timedelta) -> str:
    """Format ISO-8601 duration string.

    Args:
        __timedelta: Duration to process.

    Returns:
        ISO-8601 representation of duration; an empty string for a zero
        duration.
    """
    if __timedelta == datetime.timedelta(0):
        return ''
    hours, remainder = divmod(__timedelta.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    date_part = '{}D'.format(__timedelta.days) if __timedelta.days else ''
    time_part = ''
    if hours:
        time_part += '{:02d}H'.format(hours)
    if minutes:
        time_part += '{:02d}M'.format(minutes)
    if seconds:
        time_part += '{:02d}S'.format(seconds)
    # The 'T' separator only appears when a time component is present.
    separator = 'T' if time_part else ''
    return 'P{}{}{}'.format(date_part, separator, time_part)
Format ISO-8601 duration string. Args: __timedelta: Duration to process Returns: ISO-8601 representation of duration
juraj-google-style
def load_json(raw_json: str) -> Dict[str, Any]:
    """Load JSON using Decimal objects for numerics.

    Args:
        raw_json: The JSON string to parse.

    Returns:
        A dictionary representation of the deserialized JSON.
    """
    # Route both float and int parsing through Decimal to avoid binary
    # floating-point representation of numeric values.
    decimal_hooks = {'parse_float': decimal.Decimal, 'parse_int': decimal.Decimal}
    return json.loads(raw_json, **decimal_hooks)
Load JSON using Decimal objects for numerics. Args: raw_json: The JSON string to parse. Returns: A dictionary representation of the deserialized JSON.
github-repos