code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def resize_file(fobj, diff, BUFFER_SIZE=(2 ** 16)):
    """Resize a file by `diff` bytes; new space is filled with zeros.

    Args:
        fobj (fileobj): seekable file object opened for writing.
        diff (int): amount of size change; negative values truncate.

    Raises:
        ValueError: if shrinking would take the file below zero size.
        IOError: re-raised on write failure; on ENOSPC the file is first
            rolled back to its original size.
    """
    # Seek to end to learn the current file size.
    fobj.seek(0, 2)
    filesize = fobj.tell()
    if (diff < 0):
        if ((filesize + diff) < 0):
            raise ValueError
        fobj.truncate((filesize + diff))
    elif (diff > 0):
        try:
            # Grow in bounded chunks so memory use stays at most BUFFER_SIZE.
            while diff:
                addsize = min(BUFFER_SIZE, diff)
                fobj.write((b'\x00' * addsize))
                diff -= addsize
            fobj.flush()
        except IOError as e:
            # Out of disk space: undo the partial growth before re-raising.
            # NOTE(review): relies on a module-level `import errno` -- confirm.
            if (e.errno == errno.ENOSPC):
                fobj.truncate(filesize)
            raise
Resize a file by `diff`. New space will be filled with zeros. Args: fobj (fileobj) diff (int): amount of size to change Raises: IOError
codesearchnet
def func_str(func, args=None, kwargs=None, type_aliases=None, packed=False,
             packkw=None, truncate=False):
    """Build a string representation of a function call.

    Args:
        func (function): function whose name is rendered.
        args (list): positional argument values (default: None, treated as []).
        kwargs (dict): keyword argument values (default: None, treated as {}).
        type_aliases (list): unused here; kept for interface compatibility.
        packed (bool): if True, wrap the result with ``packstr``.
        packkw (dict): extra keyword overrides for ``packstr``.
        truncate (bool): if True, truncate long item representations.

    Returns:
        str: ``funcname(arg1, arg2, kw1=..., ...)``
    """
    import utool as ut
    truncatekw = {}
    # None means "no values"; mutable defaults were replaced with None
    # sentinels, which is behavior-compatible with the old []/{} defaults.
    argrepr_list = ([] if (args is None) else
                    ut.get_itemstr_list(args, nl=False, truncate=truncate,
                                        truncatekw=truncatekw))
    kwrepr_list = ([] if (kwargs is None) else
                   ut.dict_itemstr_list(kwargs, explicit=True, nl=False,
                                        truncate=truncate,
                                        truncatekw=truncatekw))
    argskwargs_str = ', '.join(argrepr_list + kwrepr_list)
    _str = ('%s(%s)' % (meta_util_six.get_funcname(func), argskwargs_str))
    if packed:
        packkw_ = dict(textwidth=80, nlprefix=' ', break_words=False)
        if (packkw is not None):
            # BUG FIX: previously `packkw_.update(packkw_)` updated the dict
            # with itself, silently ignoring the caller-supplied packkw.
            packkw_.update(packkw)
        _str = packstr(_str, **packkw_)
    return _str
string representation of function definition Returns: str: a representation of func with args, kwargs, and type_aliases Args: func (function): args (list): argument values (default = []) kwargs (dict): kwargs values (default = {}) type_aliases (list): (default = []) packed (bool): (default = False) packkw (None): (default = None) Returns: str: func_str CommandLine: python -m utool.util_str --exec-func_str Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> func = byte_str >>> args = [1024, 'MB'] >>> kwargs = dict(precision=2) >>> type_aliases = [] >>> packed = False >>> packkw = None >>> _str = func_str(func, args, kwargs, type_aliases, packed, packkw) >>> result = _str >>> print(result) byte_str(1024, 'MB', precision=2)
codesearchnet
def business_days_in_period(self, date_tensor, period_tensor):
    """Calculates the number of business days in a period.

    Includes the dates in `date_tensor`, but excludes the final dates
    resulting from the addition of `period_tensor`.

    Args:
        date_tensor: DateTensor of starting dates.
        period_tensor: PeriodTensor, should be broadcastable to `date_tensor`.

    Returns:
        An int32 Tensor with the number of business days in the given
        periods that start at the given dates.
    """
    # NOTE(review): stub -- no implementation is visible in this source.
    pass
Calculates number of business days in a period. Includes the dates in `date_tensor`, but excludes final dates resulting from addition of `period_tensor`. Args: date_tensor: DateTensor of starting dates. period_tensor: PeriodTensor, should be broadcastable to `date_tensor`. Returns: An int32 Tensor with the number of business days in given periods that start at given dates.
github-repos
def consume(generator):
    """Consume a synchronous or asynchronous generator into a list.

    Arguments:
        generator (generator|asyncgenerator): generator to consume.

    Returns:
        list: every item yielded by the generator.

    Raises:
        RuntimeError: if given an async iterator on a pre-3.5 interpreter.
    """
    # Synchronous generators expose __next__ and can be drained directly.
    if hasattr(generator, '__next__'):
        return list(generator)
    # NOTE(review): PY_35 is presumably a module-level version flag -- confirm.
    if (not PY_35):
        raise RuntimeError(
            'paco: asynchronous iterator protocol not supported')
    buf = []
    while True:
        try:
            # Delegate to the async iterator one item at a time
            # (generator-based coroutine style).
            buf.append((yield from generator.__anext__()))
        except StopAsyncIteration:
            break
    return buf
Helper function to consume a synchronous or asynchronous generator. Arguments: generator (generator|asyncgenerator): generator to consume. Returns: list
codesearchnet
def _detect_encoding(data=None): import locale enc_list = ['utf-8', 'latin-1', 'iso8859-1', 'iso8859-2', 'utf-16', 'cp720'] code = locale.getpreferredencoding(False) if (data is None): return code if (code.lower() not in enc_list): enc_list.insert(0, code.lower()) for c in enc_list: try: for line in data: line.decode(c) except (UnicodeDecodeError, UnicodeError, AttributeError): continue return c print('Encoding not detected. Please pass encoding value manually')
Return the default system encoding. If data is passed, try to decode the data with the default system encoding or from a short list of encoding types to test. Args: data - list of lists Returns: enc - system encoding
codesearchnet
def process_runway_configs(runway_dir=''):
    """Read the _application.json_ files from a local runway directory.

    Args:
        runway_dir (str): Name of runway directory with app.json files.

    Returns:
        collections.defaultdict: Configurations stored for each environment
            found.
    """
    LOG.info('Processing application.json files from local directory "%s".',
             runway_dir)
    file_lookup = FileLookup(runway_dir=runway_dir)
    app_configs = process_configs(file_lookup,
                                  'application-master-{env}.json',
                                  'pipeline.json')
    return app_configs
Read the _application.json_ files. Args: runway_dir (str): Name of runway directory with app.json files. Returns: collections.defaultdict: Configurations stored for each environment found.
codesearchnet
def has_service_by_name(self, name):
    """Check whether a service is registered under a specific name.

    Args:
        name: string, the name to look for.

    Returns:
        bool: True if a service is registered with the specified name,
        False otherwise.
    """
    registered_services = self._service_objects
    return name in registered_services
Checks if the manager has a service registered with a specific name. Args: name: string, the name to look for. Returns: True if a service is registered with the specified name, False otherwise.
github-repos
def GetExecutionDetails(self, request, global_params=None):
    """Request detailed information about the execution status of a job stage.

    EXPERIMENTAL. This API is subject to change or removal without notice.

    Args:
        request: (DataflowProjectsLocationsJobsStagesGetExecutionDetailsRequest)
            input message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (StageExecutionDetails) The response message.
    """
    config = self.GetMethodConfig('GetExecutionDetails')
    return self._RunMethod(config, request, global_params=global_params)
Request detailed information about the execution status of a stage of the job. EXPERIMENTAL. This API is subject to change or removal without notice. Args: request: (DataflowProjectsLocationsJobsStagesGetExecutionDetailsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (StageExecutionDetails) The response message.
github-repos
def __init__(self, count, string_length):
    """Initialize the input reader.

    Args:
        count: number of entries this shard should generate.
        string_length: the length of generated random strings.
    """
    self._count = count
    self._string_length = string_length
Initialize input reader. Args: count: number of entries this shard should generate. string_length: the length of generated random strings.
juraj-google-style
def dummyctrl(self, r, ctrl):
    """Create a DummyVertex at rank r and register it everywhere needed.

    The vertex is inserted in the ctrl dict of the associated edge and into
    the layer for rank r.

    Arguments:
        r (int): rank value.
        ctrl (dict): the edge's control vertices, keyed by rank.

    Returns:
        DummyVertex: the created DummyVertex.
    """
    dv = DummyVertex(r)
    # Dummy vertices get the layout's default dummy width/height.
    (dv.view.w, dv.view.h) = (self.dw, self.dh)
    self.grx[dv] = dv
    # Cross-link: the vertex knows its ctrl dict, and the ctrl dict
    # records the vertex at this rank.
    dv.ctrl = ctrl
    ctrl[r] = dv
    self.layers[r].append(dv)
    return dv
creates a DummyVertex at rank r inserted in the ctrl dict of the associated edge and layer. Arguments: r (int): rank value ctrl (dict): the edge's control vertices Returns: DummyVertex : the created DummyVertex.
codesearchnet
def UpdateNumberOfEventReports(
    self, number_of_consumed_reports, number_of_produced_reports):
  """Updates the number of event reports.

  Args:
    number_of_consumed_reports (int): total number of event reports consumed
        by the process, or None to leave unchanged.
    number_of_produced_reports (int): total number of event reports produced
        by the process, or None to leave unchanged.

  Returns:
    bool: True if either number of event reports has increased.

  Raises:
    ValueError: if the consumed or produced number of event reports is
        smaller than the value of the previous update.
  """
  consumed_delta = 0
  produced_delta = 0

  if number_of_consumed_reports is not None:
    previous_consumed = self.number_of_consumed_reports
    if number_of_consumed_reports < previous_consumed:
      raise ValueError(
          'Number of consumed reports smaller than previous update.')
    consumed_delta = number_of_consumed_reports - previous_consumed
    self.number_of_consumed_reports = number_of_consumed_reports
    self.number_of_consumed_reports_delta = consumed_delta

  if number_of_produced_reports is not None:
    previous_produced = self.number_of_produced_reports
    if number_of_produced_reports < previous_produced:
      raise ValueError(
          'Number of produced reports smaller than previous update.')
    produced_delta = number_of_produced_reports - previous_produced
    self.number_of_produced_reports = number_of_produced_reports
    self.number_of_produced_reports_delta = produced_delta

  return consumed_delta > 0 or produced_delta > 0
Updates the number of event reports. Args: number_of_consumed_reports (int): total number of event reports consumed by the process. number_of_produced_reports (int): total number of event reports produced by the process. Returns: bool: True if either number of event reports has increased. Raises: ValueError: if the consumed or produced number of event reports is smaller than the value of the previous update.
juraj-google-style
def add_string_pairs_from_button_element(xib_file, results, button,
                                         special_ui_components_prefix):
    """Add string pairs extracted from a button xib element.

    Args:
        xib_file (str): Path to the xib file.
        results (list): The list to append (key, comment) tuples to.
        button (element): The button element from the xib to extract the
            string pairs from.
        special_ui_components_prefix (str): Custom prefix for
            internationalized components passed to the class-name check.
    """
    button_entry_comment = extract_element_internationalized_comment(button)
    # No internationalization comment means this button is skipped entirely.
    if (button_entry_comment is None):
        return
    for state in button.getElementsByTagName('state'):
        state_name = state.attributes['key'].value
        state_entry_comment = (((button_entry_comment + ' - ') + state_name)
                               + ' state of button')
        # Prefer attributed-string extraction; fall back to plain title.
        if (not add_string_pairs_from_attributed_ui_element(
                results, state, state_entry_comment)):
            try:
                button_entry_key = state.attributes['title'].value
            except KeyError:
                # Title may also be stored as a nested <string> element.
                try:
                    button_entry_key = state.getElementsByTagName(
                        'string')[0].firstChild.nodeValue
                except Exception:
                    # No usable title for this state; skip it.
                    continue
            results.append((button_entry_key, state_entry_comment))
    warn_if_element_not_of_class(button, 'Button',
                                 special_ui_components_prefix)
Adds strings pairs from a button xib element. Args: xib_file (str): Path to the xib file. results (list): The list to add the results to. button(element): The button element from the xib, to extract the string pairs from. special_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)
codesearchnet
def _build(self, inputs):
    """Connects the SliceByDim module into the graph.

    Args:
        inputs: `Tensor` to slice. Its rank must be greater than the maximum
            dimension specified in `dims` (plus one as python is 0 indexed).

    Returns:
        The sliced tensor.

    Raises:
        ValueError: If `inputs` tensor has insufficient rank.
    """
    shape_inputs = inputs.get_shape().as_list()
    rank = len(shape_inputs)
    # Checks that the rank of the tensor is at least one more than the
    # largest sliced dimension (dims are 0-indexed).
    max_dim = (np.max(self._dims) + 1)
    if (rank < max_dim):
        raise ValueError('Rank of inputs must be at least {}.'.format(max_dim))
    # Default: take everything (begin 0, size -1) in untouched dimensions.
    full_begin = ([0] * rank)
    full_size = ([(- 1)] * rank)
    for (dim, begin, size) in zip(self._dims, self._begin, self._size):
        full_begin[dim] = begin
        full_size[dim] = size
    return tf.slice(inputs, begin=full_begin, size=full_size)
Connects the SliceByDim module into the graph. Args: inputs: `Tensor` to slice. Its rank must be greater than the maximum dimension specified in `dims` (plus one as python is 0 indexed). Returns: The sliced tensor. Raises: ValueError: If `inputs` tensor has insufficient rank.
codesearchnet
def _add_future(cls, future):
    """Adds a future to the list of in-order futures thus far.

    No-op unless in-order tracking has been activated on the thread-local
    state.

    Args:
        future: The future to add to the list.
    """
    if cls._local._activated:
        cls._local._in_order_futures.add(future)
Adds a future to the list of in-order futures thus far. Args: future: The future to add to the list.
juraj-google-style
def _PrintStorageInformationAsJSON(self, storage_reader):
    """Writes a summary of sessions as machine-readable JSON.

    Args:
        storage_reader (StorageReader): storage reader.
    """
    serializer = json_serializer.JSONAttributeContainerSerializer
    storage_counters = self._CalculateStorageCounters(storage_reader)
    storage_counters_json = json.dumps(storage_counters)
    # The JSON document is emitted piecewise rather than built in memory.
    self._output_writer.Write('{')
    self._output_writer.Write('"storage_counters": {0:s}'.format(
        storage_counters_json))
    self._output_writer.Write(',\n')
    self._output_writer.Write(' "sessions": {')
    for index, session in enumerate(storage_reader.GetSessions()):
        json_string = serializer.WriteSerialized(session)
        if index != 0:
            # Comma-separate consecutive session entries.
            self._output_writer.Write(',\n')
        self._output_writer.Write('"session_{0:s}": {1:s} '.format(
            session.identifier, json_string))
    # Closes both the "sessions" object and the outer JSON object.
    self._output_writer.Write('}}')
Writes a summary of sessions as machine-readable JSON. Args: storage_reader (StorageReader): storage reader.
juraj-google-style
def _request(self, domain, type_name, search_command, db_method, body=None):
    """Make the API request for a Data Store CRUD operation.

    Args:
        domain (string): One of 'local', 'organization', or 'system'.
        type_name (string): A free form index type name; passed to the
            ThreatConnect API verbatim.
        search_command (string): Search command to pass to ES.
        db_method (string): The DB method 'DELETE', 'GET', 'POST', or 'PUT'.
        body (dict): JSON body.

    Returns:
        dict: {'data': parsed JSON, 'response': requests response,
            'status': 'Success' or 'Failed'}.
    """
    # The actual CRUD verb is tunneled through a header; the HTTP method
    # is always POST.
    headers = {'Content-Type': 'application/json', 'DB-Method': db_method}
    search_command = self._clean_datastore_path(search_command)
    url = '/v2/exchange/db/{}/{}/{}'.format(domain, type_name, search_command)
    r = self.tcex.session.post(url, data=body, headers=headers,
                               params=self._params)
    data = []
    status = 'Failed'
    if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
        self.tcex.handle_error(350, [r.status_code, r.text])
    data = r.json()
    status = 'Success'
    return {'data': data, 'response': r, 'status': status}
Make the API request for a Data Store CRUD operation Args: domain (string): One of 'local', 'organization', or 'system'. type_name (string): This is a free form index type name. The ThreatConnect API will use this resource verbatim. search_command (string): Search command to pass to ES. db_method (string): The DB method 'DELETE', 'GET', 'POST', or 'PUT' body (dict): JSON body
juraj-google-style
def apply_operation(self, symmop, fractional=False):
    """Apply a symmetry operation to the structure in place.

    The lattice is operated by the rotation matrix only. Coords are operated
    in full and then transformed to the new lattice.

    Args:
        symmop (SymmOp): Symmetry operation to apply.
        fractional (bool): Whether the symmetry operation is applied in
            fractional space. Defaults to False, i.e., symmetry operation
            is applied in cartesian coordinates.
    """
    if (not fractional):
        # Rotate each lattice row in cartesian space.
        self._lattice = Lattice([symmop.apply_rotation_only(row)
                                 for row in self._lattice.matrix])

        def operate_site(site):
            # Operate on cartesian coords, then re-express in the new lattice.
            new_cart = symmop.operate(site.coords)
            new_frac = self._lattice.get_fractional_coords(new_cart)
            return PeriodicSite(site.species, new_frac, self._lattice,
                                properties=site.properties)
    else:
        # Fractional mode: rotate the lattice matrix directly.
        new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)
        self._lattice = Lattice(new_latt)

        def operate_site(site):
            return PeriodicSite(site.species, symmop.operate(site.frac_coords),
                                self._lattice, properties=site.properties)
    self._sites = [operate_site(s) for s in self._sites]
Apply a symmetry operation to the structure and return the new structure. The lattice is operated by the rotation matrix only. Coords are operated in full and then transformed to the new lattice. Args: symmop (SymmOp): Symmetry operation to apply. fractional (bool): Whether the symmetry operation is applied in fractional space. Defaults to False, i.e., symmetry operation is applied in cartesian coordinates.
codesearchnet
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
        formatter_mediator (FormatterMediator): mediates the interactions
            between formatters and other components, such as storage and
            Windows EventLog resources.
        event (EventObject): event.

    Returns:
        tuple(str, str): formatted message string and short message string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by the
            formatter.
    """
    if self.DATA_TYPE != event.data_type:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
            event.data_type))
    event_values = event.CopyToDict()
    primary_url = event_values['primary_url']
    secondary_url = event_values['secondary_url']
    # Derive a human-readable subject from the URL pair.
    if primary_url == '':
        subject = 'local file'
    elif secondary_url in (primary_url, '*'):
        subject = primary_url
    elif secondary_url == '':
        subject = '{0:s} embedded in local file'.format(primary_url)
    else:
        subject = '{0:s} embedded in {1:s}'.format(primary_url, secondary_url)
    event_values['subject'] = subject
    return self._ConditionalFormatMessages(event_values)
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
juraj-google-style
def remove(self, key):
    """Remove a key from the data store.

    Args:
        key (string): The key to remove.

    Raises:
        KeyError: if the key was not found.
    """
    contents = self._load_file()
    # del raises KeyError on a missing key, which is the documented contract.
    del contents[key]
    self._save_file(contents)
Remove a key from the data store Args: key (string): The key to remove Raises: KeyError: if the key was not found
codesearchnet
def _from_tensor_list_helper(decode_fn, element_spec, tensor_list):
    """Returns an element constructed from the given spec and tensor list.

    Args:
        decode_fn: Method that constructs an element component from the
            element spec component and a tensor list.
        element_spec: A nested structure of `tf.TypeSpec` objects representing
            the element type specification.
        tensor_list: A list of tensors to use for constructing the value.

    Returns:
        An element constructed from the given spec and tensor list.

    Raises:
        ValueError: If the number of tensors needed to construct an element
            for the given spec does not match the given number of tensors.
    """
    flat_specs = nest.flatten(element_spec)
    # Each spec may consume several flat tensors; record how many for each.
    flat_spec_lengths = [len(spec._flat_tensor_specs) for spec in flat_specs]
    if sum(flat_spec_lengths) != len(tensor_list):
        raise ValueError('Expected {} tensors but got {}.'.format(
            sum(flat_spec_lengths), len(tensor_list)))
    i = 0
    flat_ret = []
    # Slice the flat tensor list into per-component chunks and decode each.
    for component_spec, num_flat_values in zip(flat_specs, flat_spec_lengths):
        value = tensor_list[i:i + num_flat_values]
        flat_ret.append(decode_fn(component_spec, value))
        i += num_flat_values
    return nest.pack_sequence_as(element_spec, flat_ret)
Returns an element constructed from the given spec and tensor list. Args: decode_fn: Method that constructs an element component from the element spec component and a tensor list. element_spec: A nested structure of `tf.TypeSpec` objects representing to element type specification. tensor_list: A list of tensors to use for constructing the value. Returns: An element constructed from the given spec and tensor list. Raises: ValueError: If the number of tensors needed to construct an element for the given spec does not match the given number of tensors.
github-repos
def decode_csv(csv_string, column_names):
    """Parse a single csv line into a dict.

    Args:
        csv_string: a csv string. May contain missing values ("a,,c").
        column_names: list of column names.

    Returns:
        Dict of {column_name: value_from_csv}. Missing values come through
        as ''.

    Raises:
        ValueError: if the line's field count does not match column_names.
    """
    import csv
    fields = next(csv.reader([csv_string]))
    if len(fields) != len(column_names):
        raise ValueError('csv line %s does not have %d columns' %
                         (csv_string, len(column_names)))
    return dict(zip(column_names, fields))
Parse a csv line into a dict. Args: csv_string: a csv string. May contain missing values "a,,c" column_names: list of column names Returns: Dict of {column_name, value_from_csv}. If there are missing values, value_from_csv will be ''.
juraj-google-style
def assertAllInRange(self, target, lower_bound, upper_bound,
                     open_lower_bound=False, open_upper_bound=False):
    """Assert that elements in a Tensor are all in a given range.

    Args:
        target: The numpy `ndarray`, or anything that can be converted into
            a numpy `ndarray` (including Tensor).
        lower_bound: lower bound of the range.
        upper_bound: upper bound of the range.
        open_lower_bound: (`bool`) whether the lower bound is open (i.e., >
            rather than the default >=).
        open_upper_bound: (`bool`) whether the upper bound is open (i.e., <
            rather than the default <=).

    Raises:
        AssertionError: if the value tensor does not have an ordered numeric
            type (float* or int*), or if there are nan values, or if any of
            the elements do not fall in the specified range.
    """
    target = self._GetNdArray(target)
    if not (np.issubdtype(target.dtype, np.floating) or
            np.issubdtype(target.dtype, np.integer)):
        raise AssertionError(
            'The value of %s does not have an ordered numeric type, instead '
            'it has type: %s' % (target, target.dtype))
    # NaN compares false against any bound, so reject NaNs explicitly first.
    nan_subscripts = np_where(np.isnan(target))
    if np.size(nan_subscripts):
        raise AssertionError(
            '%d of the %d element(s) are NaN. Subscripts(s) and value(s) of '
            'the NaN element(s):\n' % (len(nan_subscripts[0]),
                                       np.size(target)) +
            '\n'.join(self._format_subscripts(nan_subscripts, target)))
    # Human-readable interval notation: ( / [ for open / closed bounds.
    range_str = (('(' if open_lower_bound else '[') + str(lower_bound) +
                 ', ' + str(upper_bound) +
                 (')' if open_upper_bound else ']'))
    violations = (np.less_equal(target, lower_bound) if open_lower_bound
                  else np.less(target, lower_bound))
    violations = np.logical_or(
        violations,
        np.greater_equal(target, upper_bound) if open_upper_bound
        else np.greater(target, upper_bound))
    violation_subscripts = np_where(violations)
    if np.size(violation_subscripts):
        raise AssertionError(
            '%d of the %d element(s) are outside the range %s. ' %
            (len(violation_subscripts[0]), np.size(target), range_str) +
            'Subscript(s) and value(s) of the offending elements:\n' +
            '\n'.join(self._format_subscripts(violation_subscripts, target)))
Assert that elements in a Tensor are all in a given range. Args: target: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). lower_bound: lower bound of the range upper_bound: upper bound of the range open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather than the default >=) open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather than the default <=) Raises: AssertionError: if the value tensor does not have an ordered numeric type (float* or int*), or if there are nan values, or if any of the elements do not fall in the specified range.
github-repos
def get_structure_seqs(pdb_file, file_type):
    """Get a dictionary of a PDB file's sequences, one per chain.

    Special cases include:
        - Insertion codes. In the case of residue numbers like "15A", "15B",
          both residues are written out. Example: 9LPR
        - Gaps in residue numbering are written as "X" (unknown amino acid).

    Args:
        pdb_file: Path to PDB file.
        file_type: file format hint.  # NOTE(review): unused in this body.

    Returns:
        dict: Dictionary of {chain_id: sequence}.
    """
    my_structure = StructureIO(pdb_file)
    model = my_structure.first_model
    structure_seqs = {}
    for chain in model:
        chain_seq = ''
        # tracker holds the residue number of the last residue appended.
        tracker = 0
        for res in chain.get_residues():
            if Polypeptide.is_aa(res, standard=True):
                full_id = res.get_full_id()
                end_tracker = full_id[3][1]
                i_code = full_id[3][2]
                aa = Polypeptide.three_to_one(res.get_resname())
                if end_tracker != (tracker + 1):
                    if i_code != ' ':
                        # Insertion code: same residue number, extra residue.
                        chain_seq += aa
                        tracker = end_tracker + 1
                        continue
                    else:
                        # Numbering gap: pad with unknown amino acids.
                        chain_seq += 'X' * (end_tracker - tracker - 1)
                chain_seq += aa
                tracker = end_tracker
            else:
                # Non-standard residues (HETATMs etc.) are skipped here.
                continue
        structure_seqs[chain.get_id()] = chain_seq
    return structure_seqs
Get a dictionary of a PDB file's sequences. Special cases include: - Insertion codes. In the case of residue numbers like "15A", "15B", both residues are written out. Example: 9LPR - HETATMs. Currently written as an "X", or unknown amino acid. Args: pdb_file: Path to PDB file Returns: dict: Dictionary of: {chain_id: sequence}
juraj-google-style
def _GetDateValuesWithEpoch(self, number_of_days, date_time_epoch):
    """Determines date values relative to a specific epoch.

    Args:
        number_of_days (int): number of days since epoch.
        date_time_epoch (DateTimeEpoch): date and time of the epoch.

    Returns:
        tuple[int, int, int]: year, month, day of month.
    """
    return self._GetDateValues(
        number_of_days, date_time_epoch.year, date_time_epoch.month,
        date_time_epoch.day_of_month)
Determines date values. Args: number_of_days (int): number of days since epoch. date_time_epoch (DateTimeEpoch): date and time of the epoch. Returns: tuple[int, int, int]: year, month, day of month.
juraj-google-style
def __init__(self, endpoint_name, sagemaker_session=None):
    """Initialize a ``PyTorchPredictor``.

    Uses the numpy (.npy) serializer/deserializer pair for request and
    response payloads.

    Args:
        endpoint_name (str): The name of the endpoint to perform inference
            on.
        sagemaker_session (sagemaker.session.Session): Session object which
            manages interactions with Amazon SageMaker APIs and any other
            AWS services needed. If not specified, the estimator creates one
            using the default AWS configuration chain.
    """
    super(PyTorchPredictor, self).__init__(
        endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)
Initialize an ``PyTorchPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
juraj-google-style
def astimezone(self, tzinfo):
    """Returns a version of this timestamp converted to the given timezone.

    Args:
        tzinfo: Either a datetime.tzinfo object or a string (which will be
            looked up in pytz).

    Returns:
        A datetime_tz object in the given timezone.
    """
    # Only aware timestamps can be converted.
    assert self.tzinfo is not None
    tzinfo = _tzinfome(tzinfo)
    d = self.asdatetime(naive=False).astimezone(tzinfo)
    return type(self)(d)
Returns a version of this timestamp converted to the given timezone. Args: tzinfo: Either a datetime.tzinfo object or a string (which will be looked up in pytz. Returns: A datetime_tz object in the given timezone.
juraj-google-style
def noise_new(dim: int, h: float=NOISE_DEFAULT_HURST,
              l: float=NOISE_DEFAULT_LACUNARITY,
              random: Optional[tcod.random.Random]=None) -> tcod.noise.Noise:
    """Return a new Noise instance.

    Args:
        dim (int): Number of dimensions. From 1 to 4.
        h (float): The hurst exponent. Should be in the 0.0-1.0 range.
        l (float): The noise lacunarity.
        random (Optional[Random]): A Random instance, or None.

    Returns:
        Noise: The new Noise instance.
    """
    return tcod.noise.Noise(dim, hurst=h, lacunarity=l, seed=random)
Return a new Noise instance. Args: dim (int): Number of dimensions. From 1 to 4. h (float): The hurst exponent. Should be in the 0.0-1.0 range. l (float): The noise lacunarity. random (Optional[Random]): A Random instance, or None. Returns: Noise: The new Noise instance.
codesearchnet
def RepackTemplate(self, template_path, output_dir, upload=False, token=None,
                   sign=False, context=None, signed_template=False):
    """Repack a client template into a deployable binary.

    Each template contains a build.yaml that specifies how it was built and
    how it should be repacked.

    Args:
        template_path: template path string.
        output_dir: Output files will be put in this directory.
        upload: If True, also upload the repacked binary to the datastore.
        token: Token to use when uploading to the datastore.
        sign: If True, digitally sign the installer.
        context: Array of extra context strings.
        signed_template: If True, the libraries in the template are already
            signed (Windows multi-pass repacking).

    Returns:
        Path of the generated installer, or None on failure.
    """
    # The global config is swapped for the template's config for the
    # duration of the repack and restored in the finally block.
    orig_config = config.CONFIG
    repack_config = RepackConfig()
    print(('Repacking template: %s' % template_path))
    config.CONFIG = repack_config.GetConfigFromTemplate(template_path)
    result_path = None
    try:
        repack_context = config.CONFIG['Template.build_context']
        if context:
            repack_context.extend(context)
        output_path = os.path.join(
            output_dir,
            config.CONFIG.Get('ClientRepacker.output_filename',
                              context=repack_context))
        print(('Using context: %s and labels: %s' % (
            repack_context,
            config.CONFIG.Get('Client.labels', context=repack_context))))
        try:
            signer = None
            if sign:
                signer = self.GetSigner(repack_context)
            builder_obj = self.GetRepacker(context=repack_context,
                                           signer=signer)
            builder_obj.signed_template = signed_template
            result_path = builder_obj.MakeDeployableBinary(template_path,
                                                           output_path)
        except Exception:
            # Best-effort: a failed repack is reported, not fatal.
            logging.exception('Repacking template %s failed:', template_path)
        if result_path:
            print(('Repacked into %s' % result_path))
            if upload:
                from grr_response_server import maintenance_utils
                client_platform = config.CONFIG.Get('Client.platform',
                                                    context=repack_context)
                repack_basename = config.CONFIG.Get(
                    'ClientRepacker.output_basename', context=repack_context)
                repack_extension = config.CONFIG.Get(
                    'ClientBuilder.output_extension', context=repack_context)
                repack_filename = (repack_basename + repack_extension)
                binary_urn = rdfvalue.RDFURN('aff4:/config/executables').Add(
                    client_platform).Add('installers').Add(repack_filename)
                # Read at most 100 MiB of the produced binary for upload.
                maintenance_utils.UploadSignedConfigBlob(
                    open(result_path, 'rb').read(((100 * 1024) * 1024)),
                    binary_urn, client_context=repack_context, token=token)
        else:
            print(('Failed to repack %s.' % template_path))
    finally:
        config.CONFIG = orig_config
    return result_path
Repack binaries based on the configuration. We repack all templates in the templates directory. We expect to find only functioning templates, all other files should be removed. Each template contains a build.yaml that specifies how it was built and how it should be repacked. Args: template_path: template path string output_dir: Output files will be put in this directory. upload: If specified we also upload the repacked binary into the token: Token to use when uploading to the datastore. sign: If true, we want to digitally sign the installer. context: Array of context strings signed_template: If true, the libraries in the template are already signed. This is only used for windows when repacking the template multiple times. Returns: A list of output installers generated.
codesearchnet
def upload(self, title, description='', keywords='', developer_tags=None,
           access_control=AccessControl.Public):
    """Browser based upload: create the video entry and metadata.

    Authentication is required.

    Args:
        title (str): video title.
        description (str): plain-text description.
        keywords (str): comma separated keywords.
        developer_tags (tuple): optional developer tags.
        access_control: one of the AccessControl values.

    Returns:
        dict: {'post_url': post_url, 'youtube_token': youtube_token}.

    Raises:
        ApiError: when not authenticated.
    """
    if (not self.authenticated):
        raise ApiError(_('Authentication is required'))
    my_media_group = gdata.media.Group(
        title=gdata.media.Title(text=title),
        description=gdata.media.Description(description_type='plain',
                                            text=description),
        keywords=gdata.media.Keywords(text=keywords),
        # NOTE(review): this scheme URL was truncated in the source at
        # "http:"; reconstructed as the YouTube GData category scheme --
        # confirm against the original codebase.
        category=[gdata.media.Category(
            text='Autos',
            scheme='http://gdata.youtube.com/schemas/2007/categories.cat')])
    extension = self._access_control(access_control, my_media_group)
    video_entry = gdata.youtube.YouTubeVideoEntry(
        media=my_media_group, extension_elements=extension)
    if developer_tags:
        video_entry.AddDeveloperTags(developer_tags)
    response = Api.yt_service.GetFormUploadToken(video_entry)
    post_url = response[0]
    youtube_token = response[1]
    return {'post_url': post_url, 'youtube_token': youtube_token}
Browser based upload. Creates the video entry and metadata to initiate a browser upload. Authentication is needed. Params: title: string description: string keywords: comma separated string developer_tags: tuple Return: dict containing post_url and youtube_token, i.e. { 'post_url': post_url, 'youtube_token': youtube_token } Raises: ApiError: on no authentication
codesearchnet
def get_modname_from_modpath(module_fpath):
    """Returns the importable module name from a file path.

    Args:
        module_fpath (str): module filepath.

    Returns:
        str: modname, e.g. 'utool.util_path' for '.../utool/util_path.py'.
    """
    modsubdir_list = get_module_subdir_list(module_fpath)
    modname = '.'.join(modsubdir_list)
    # Package __init__ / __main__ files resolve to the package name itself.
    modname = modname.replace('.__init__', '').strip()
    modname = modname.replace('.__main__', '').strip()
    return modname
returns importable name from file path get_modname_from_modpath Args: module_fpath (str): module filepath Returns: str: modname Example: >>> # ENABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> module_fpath = ut.util_path.__file__ >>> modname = ut.get_modname_from_modpath(module_fpath) >>> result = modname >>> print(result) utool.util_path
juraj-google-style
def get_and_setattr(cls, id, **kwargs):
    """Returns an updated instance of the service's model class.

    Attributes are set on the fetched instance; the instance is NOT saved
    here.

    Args:
        id: primary key of the model to fetch.
        **kwargs: update parameters, preprocessed by _preprocess_params.
    """
    model = cls.get(id)
    for k, v in cls._preprocess_params(kwargs).items():
        setattr(model, k, v)
    return model
Returns an updated instance of the service's model class. Args: model: the model to update **kwargs: update parameters
juraj-google-style
def __add_action(self, relative_directory, action):
    """Add an action into the per-directory dictionary of actions.

    Creates a new GeneratorActionContainer for the directory on first use.

    Args:
        relative_directory: directory key the action applies to.
        action: the generator action to register.
    """
    generator_action_container = self.__actions.retrieve_element_or_default(
        relative_directory, None)
    if (generator_action_container is None):
        # First action for this directory: create and register a container.
        generator_action_container = GeneratorActionContainer()
        generator_action_container.add_generator_action(action)
        self.__actions.add_element(location=relative_directory,
                                   element=generator_action_container)
    else:
        generator_action_container.add_generator_action(action)
Add action into the dictionary of actions. Args: relative_directory: action:
codesearchnet
class OneFormerTransformerDecoderOutput(BaseModelOutput):
    """Base class for outputs of the OneFormer Transformer decoder.

    Adds class predictions, mask predictions and contrastive logits on top
    of ``BaseModelOutput``.
    """
    # Queries representation for the region proposals,
    # shape (batch_size, num_queries, hidden_dim).
    object_queries: Optional[torch.FloatTensor] = None
    # Queries representation for the contrastive loss.
    contrastive_logits: Optional[torch.FloatTensor] = None
    # Mask predictions from the last decoder layer,
    # shape (batch_size, num_queries, height, width).
    prediction_masks: Optional[torch.FloatTensor] = None
    # Class predictions from the last decoder layer,
    # shape (batch_size, num_queries, num_classes + 1).
    prediction_class: Optional[torch.FloatTensor] = None
    # Per-layer auxiliary class/mask predictions, when requested.
    auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None
Base class for outputs of the Transformer decoder. This class adds attributes for class predictions, mask predictions and contrastive logits to BaseModelOutputWithCrossAttentions. Args: object_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`): Queries representation for the region proposals. contrastive_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`): Queries representation for the contrastive loss. prediction_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`): Mask predictions from last layer of the transformer decoder. prediction_class (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`): Class predictions from last layer of the transformer decoder. auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*): Tuple of class and mask predictions from each layer of the transformer decoder.
github-repos
def _get_clang_major_version(path_to_clang: str) -> int: logging.info('Running echo __clang_major__ | %s -E -P -', path_to_clang) clang_version_proc = subprocess.run([path_to_clang, '-E', '-P', '-'], input='__clang_major__', check=True, capture_output=True, text=True) major_version = int(clang_version_proc.stdout) logging.info('%s reports major version %s.', path_to_clang, major_version) return major_version
Gets the major version of the clang at `path_to_clang`. Args: path_to_clang: Path to a clang executable Returns: The major version.
github-repos
def create_transaction(self, to_account):
    """Create a transaction for this statement amount into to_account.

    This will also set this StatementLine's ``transaction`` attribute to the
    newly created transaction.

    Args:
        to_account (Account): The account the transaction is into / out of.

    Returns:
        Transaction: The newly created (and committed) transaction.
    """
    from_account = self.statement_import.bank_account
    transaction = Transaction.objects.create()
    # Double-entry: two legs with opposite signs that sum to zero.
    # Note +(amount * -1) is -amount; the sign flip is intentional.
    Leg.objects.create(
        transaction=transaction, account=from_account,
        amount=+(self.amount * -1)
    )
    Leg.objects.create(transaction=transaction, account=to_account,
                       amount=-(self.amount * -1))
    transaction.date = self.date
    transaction.save()
    self.transaction = transaction
    self.save()
    return transaction
Create a transaction for this statement amount and account, into to_account This will also set this StatementLine's ``transaction`` attribute to the newly created transaction. Args: to_account (Account): The account the transaction is into / out of. Returns: Transaction: The newly created (and committed) transaction.
juraj-google-style
def push_image(registry, image):
    """Push the given image to the selected registry.

    Args:
        registry (str): Address of the registry we're pushing to, without
            the protocol specification (no http(s)://).
        image (dict[str, Any]): The dict containing the information about
            the image; only its 'name' entry is used here.
    """
    values = {
        'registry': registry,
        'image': image['name'],
    }
    # <33>/<35> are shell color markers understood by the project's logger.
    log.info("Pushing <33>{registry}<35>/{image}".format(**values))
    shell.run('docker push {registry}/{image}'.format(**values))
Push the given image to selected repository. Args: registry (str): The name of the registry we're pushing to. This is the address of the repository without the protocol specification (no http(s)://) image (dict[str, Any]): The dict containing the information about the image. This is the same dictionary as defined in DOCKER_IMAGES variable.
juraj-google-style
def get_available_versions(self, project_name):
    """Query PyPI to see if the package has any available versions.

    Args:
        project_name (str): The name of the project on PyPI.

    Returns:
        dict: Keys are tuples of parsed versions; values are the version
        strings returned by PyPI.
    """
    releases = self.pypi_client.package_releases(project_name)
    if not releases:
        # Some projects are registered capitalized; retry before giving up.
        releases = self.pypi_client.package_releases(project_name.capitalize())
    return {self._parse_version(version): version for version in releases}
Query PyPI to see if package has any available versions. Args: project_name (str): The name the project on PyPI. Returns: dict: Where keys are tuples of parsed versions and values are the versions returned by PyPI.
codesearchnet
def CreateUnit(self, parent=None, value=None, bid_amount=None):
  """Creates a unit node in the product partition tree.

  Registers an add operation for the corresponding ad group criterion as a
  side effect.

  Args:
    parent: The node that should be this node's parent.
    value: The value being partitioned on.
    bid_amount: The amount to bid for matching products, in micros.

  Returns:
    A new unit node.
  """
  unit = {'xsi_type': 'ProductPartition', 'partitionType': 'UNIT'}
  if parent is not None:
    unit['parentCriterionId'] = parent['id']
  unit['caseValue'] = value

  if bid_amount is not None and bid_amount > 0:
    # A positive bid makes the criterion biddable with a CPC bid.
    cpc_bid = {
        'xsi_type': 'CpcBid',
        'bid': {'xsi_type': 'Money', 'microAmount': str(bid_amount)},
    }
    adgroup_criterion = {
        'xsi_type': 'BiddableAdGroupCriterion',
        'biddingStrategyConfiguration': {'bids': [cpc_bid]},
    }
  else:
    # No (or non-positive) bid: exclude matching products.
    adgroup_criterion = {'xsi_type': 'NegativeAdGroupCriterion'}

  adgroup_criterion['adGroupId'] = self.adgroup_id
  adgroup_criterion['criterion'] = unit
  self.CreateAddOperation(adgroup_criterion)
  return unit
Creates a unit node. Args: parent: The node that should be this node's parent. value: The value being partitioned on. bid_amount: The amount to bid for matching products, in micros. Returns: A new unit node.
juraj-google-style
def postings(self, quarter, stats_counter=None):
    """Yield job postings in common schema format.

    Args:
        quarter (str): The quarter, in format '2015Q1'.
        stats_counter (object, optional): A counter that can track both
            input and output documents using a 'track' method.

    Yields:
        dict: transformed posting with a partner-scoped 'id' field.
    """
    logging.info('Finding postings for %s', quarter)
    for posting in self._iter_postings(quarter):
        transformed = self._transform(posting)
        # Namespace the id with the partner to keep ids globally unique.
        transformed['id'] = '{}_{}'.format(self.partner_id, self._id(posting))
        if stats_counter:
            stats_counter.track(input_document=posting,
                                output_document=transformed)
        (yield transformed)
Yield job postings in common schema format Args: quarter (str) The quarter, in format '2015Q1' stats_counter (object, optional) A counter that can track both input and output documents using a 'track' method.
codesearchnet
def getOrderedLinks(self, session):
    """Retrieve the stream links in order of link number.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
            object bound to a PostGIS enabled database.

    Returns:
        list: A list of :class:`.StreamLink` objects ordered by linkNumber.
    """
    streamLinks = session.query(StreamLink).filter(
        (StreamLink.channelInputFile == self)).order_by(
        StreamLink.linkNumber).all()
    return streamLinks
Retrieve the links in the order of the link number. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. Returns: list: A list of :class:`.StreamLink` objects.
codesearchnet
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a MSIE Cache File (MSIECF) file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.
    """
    msiecf_file = pymsiecf.file()
    msiecf_file.set_ascii_codepage(parser_mediator.codepage)
    try:
        msiecf_file.open_file_object(file_object)
    except IOError as exception:
        # Unreadable input is reported as a warning, not a parser failure.
        parser_mediator.ProduceExtractionWarning(
            'unable to open file with error: {0!s}'.format(exception))
        return
    try:
        self._ParseItems(parser_mediator, msiecf_file)
    finally:
        # Always release the libmsiecf handle, even if parsing raised.
        msiecf_file.close()
Parses a MSIE Cache File (MSIECF) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
juraj-google-style
def convert_constant(params, w_name, scope_name, inputs, layers, weights, names):
    """Convert a PyTorch constant node into a Keras Lambda layer.

    The constant value is bound into the lambda as a default argument and
    re-emitted as a ``tf.constant`` at call time. The raw numpy value is
    also stored under ``<scope_name>_np`` for converters that need it.

    Args:
        params: dictionary with layer parameters (expects a tensor under 'value').
        w_name: name prefix in state_dict.
        scope_name: pytorch scope name.
        inputs: pytorch node inputs.
        layers: dictionary with keras tensors.
        weights: pytorch state_dict.
        names: use short names for keras layers.
    """
    print('Converting constant ...')
    params_list = params['value'].numpy()
    def target_layer(x, value=params_list):
        # ``value`` is bound at definition time; ``x`` is ignored — the
        # layer exists only to inject the constant into the Keras graph.
        return tf.constant(value.tolist(), shape=value.shape)
    lambda_layer = keras.layers.Lambda(target_layer)
    layers[scope_name + '_np'] = params_list
    # NOTE(review): the lambda is applied to an arbitrary existing tensor
    # (the first entry of ``layers``) only to obtain a Keras tensor; the
    # chosen input is not used by target_layer.
    layers[scope_name] = lambda_layer(layers[list(layers.keys())[0]])
Convert constant layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def verify_certs_chain(certs_chain: List[crypto.X509], amazon_cert: crypto.X509) -> bool:
    """Verify that Amazon and additional certificates chain to a root CA.

    Builds an X509 store from the intermediate chain plus every CA
    certificate found via the platform's default verify locations and the
    default SSL context, then verifies the Amazon certificate against it.

    Args:
        certs_chain: List of pyOpenSSL X509 intermediate certificates from
            the signature chain URL.
        amazon_cert: pyOpenSSL X509 Amazon certificate.

    Returns:
        True if verification was successful, False if not.
    """
    store = crypto.X509Store()
    # Intermediates from the signature chain.
    for cert in certs_chain:
        store.add_cert(cert)
    # Root CAs from the platform's default file/directory locations.
    default_verify_paths = ssl.get_default_verify_paths()
    default_verify_file = default_verify_paths.cafile
    default_verify_file = (Path(default_verify_file).resolve() if default_verify_file else None)
    default_verify_path = default_verify_paths.capath
    default_verify_path = (Path(default_verify_path).resolve() if default_verify_path else None)
    ca_files = ([ca_file for ca_file in default_verify_path.iterdir()] if default_verify_path else [])
    if default_verify_file:
        ca_files.append(default_verify_file)
    for ca_file in ca_files:
        ca_file: Path
        if ca_file.is_file():
            with ca_file.open('r', encoding='ascii') as crt_f:
                ca_certs_txt = crt_f.read()
                ca_certs = extract_certs(ca_certs_txt)
                for cert in ca_certs:
                    store.add_cert(cert)
    # Root CAs trusted by the default SSL context (DER -> PEM -> X509).
    ssl_context = ssl.create_default_context()
    der_certs = ssl_context.get_ca_certs(binary_form=True)
    pem_certs = '\n'.join([ssl.DER_cert_to_PEM_cert(der_cert) for der_cert in der_certs])
    ca_certs = extract_certs(pem_certs)
    for ca_cert in ca_certs:
        store.add_cert(ca_cert)
    store_context = crypto.X509StoreContext(store, amazon_cert)
    try:
        store_context.verify_certificate()
        result = True
    except crypto.X509StoreContextError:
        result = False
    return result
Verifies if Amazon and additional certificates creates chain of trust to a root CA. Args: certs_chain: List of pycrypto X509 intermediate certificates from signature chain URL. amazon_cert: Pycrypto X509 Amazon certificate. Returns: result: True if verification was successful, False if not.
codesearchnet
def ffn_expert_fn(input_size, hidden_sizes, output_size, hidden_activation=tf.nn.relu):
    """Returns a function that creates a feed-forward network.

    Use this function to create the expert_fn argument to distributed_moe.

    Args:
        input_size: an integer
        hidden_sizes: a list of integers
        output_size: an integer
        hidden_activation: a unary function.

    Returns:
        a unary function mapping a [batch, input_size] tensor to a
        [batch, output_size] tensor.
    """
    def my_fn(x):
        layer_sizes = (([input_size] + hidden_sizes) + [output_size])
        for i in range((1 + len(hidden_sizes))):
            w = tf.get_variable(('w_%d' % i), layer_sizes[i:(i + 2)], tf.float32)
            x = tf.matmul(x, w)
            # The final (output) layer gets no activation.
            if (i < len(hidden_sizes)):
                x = hidden_activation(x)
            # Rescale so activations keep roughly unit variance when the
            # layer width differs from the input width.
            if (layer_sizes[i] != input_size):
                x *= ((layer_sizes[i] / float(input_size)) ** (- 0.5))
        return x
    return my_fn
Returns a function that creates a feed-forward network. Use this function to create the expert_fn argument to distributed_moe. Args: input_size: an integer hidden_sizes: a list of integers output_size: an integer hidden_activation: a unary function. Returns: a unary function
codesearchnet
def Sample(self, profile_name, used_memory):
    """Takes a memory usage sample for profiling.

    The sample is written as a tab-separated line:
    ``<timestamp>\\t<profile name>\\t<bytes>\\n``.

    Args:
        profile_name (str): name of the profile to sample.
        used_memory (int): amount of used memory in bytes.
    """
    line = '{0:f}\t{1:s}\t{2:d}\n'.format(
        time.time(), profile_name, used_memory)
    self._WritesString(line)
Takes a sample for profiling. Args: profile_name (str): name of the profile to sample. used_memory (int): amount of used memory in bytes.
juraj-google-style
def DeserializeFromBufer(buffer, offset=0):
    """Deserialize a Transaction from the specified buffer.

    Note: the (misspelled) name is preserved as it is the public API.

    Args:
        buffer (bytes, bytearray, BytesIO): data to create the stream from.
        offset: UNUSED.

    Returns:
        Transaction: the deserialized transaction.
    """
    mstream = StreamManager.GetStream(buffer)
    reader = BinaryReader(mstream)
    tx = Transaction.DeserializeFrom(reader)
    # Return the pooled stream so other callers can reuse it.
    StreamManager.ReleaseStream(mstream)
    return tx
Deserialize object instance from the specified buffer. Args: buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from. offset: UNUSED Returns: Transaction:
juraj-google-style
class FastSpeech2ConformerEncoder(nn.Module):
    """FastSpeech2Conformer encoder module.

    Args:
        config (`FastSpeech2ConformerConfig`): FastSpeech2ConformerConfig instance.
        module_config (`dict`): dictionary containing the encoder or decoder
            module configuration from the `FastSpeech2ConformerConfig`.
        use_encoder_input_layer (`bool`, *optional*, defaults to `False`):
            when True, input ids are first mapped through an embedding layer
            (padding index 0).
    """

    def __init__(self, config: FastSpeech2ConformerConfig, module_config, use_encoder_input_layer=False):
        super().__init__()
        self.embed = None
        if use_encoder_input_layer:
            self.embed = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, padding_idx=0)
        self.pos_enc = FastSpeech2ConformerRelPositionalEncoding(config, module_config)
        self.conformer_layers = nn.ModuleList([FastSpeech2ConformerEncoderLayer(config, module_config) for _ in range(module_config['layers'])])

    def forward(self, input_tensor: torch.LongTensor, attention_mask: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=False, return_dict: Optional[bool]=None):
        """Run the conformer stack, optionally collecting hidden states and attentions."""
        feature_representation = input_tensor
        if self.embed is not None:
            feature_representation = self.embed(feature_representation)
        # The relative positional encoding returns both the features and the
        # positional embeddings consumed by each conformer layer.
        hidden_states, pos_emb = self.pos_enc(feature_representation)
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for conformer_layer in self.conformer_layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = conformer_layer(hidden_states, pos_emb, attention_mask, output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
FastSpeech2ConformerEncoder encoder module. Args: config (`FastSpeech2ConformerConfig`): FastSpeech2ConformerConfig instance. module_config (`dict`): Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`. use_encoder_input_layer (`bool`, *optional*, defaults to `False`): Input layer type.
github-repos
async def leave(self):
    """Leave this conversation.

    Group conversations are left by removing our own user; one-to-one
    conversations are instead deleted up to the current timestamp.

    Raises:
        .NetworkError: If conversation cannot be left.
    """
    is_group_conversation = (self._conversation.type == hangouts_pb2.CONVERSATION_TYPE_GROUP)
    try:
        if is_group_conversation:
            (await self._client.remove_user(hangouts_pb2.RemoveUserRequest(request_header=self._client.get_request_header(), event_request_header=self._get_event_request_header())))
        else:
            (await self._client.delete_conversation(hangouts_pb2.DeleteConversationRequest(request_header=self._client.get_request_header(), conversation_id=hangouts_pb2.ConversationId(id=self.id_), delete_upper_bound_timestamp=parsers.to_timestamp(datetime.datetime.now(tz=datetime.timezone.utc)))))
    except exceptions.NetworkError as e:
        logger.warning('Failed to leave conversation: {}'.format(e))
        raise
Leave this conversation. Raises: .NetworkError: If conversation cannot be left.
codesearchnet
def _DrawTrips(self, triplist, colpar=""):
    """Generates svg polylines for each transit trip.

    Args:
        triplist: list of transitfeed Trip objects to draw.
        colpar: optional fixed stroke color; when empty, each service_id is
            assigned its own shade of green.

    Returns:
        str: concatenated <polyline> tags, one per trip, or None when
        station distances could not be calculated.
    """
    if not self._stations and triplist:
        self._stations = self._CalculateYLines(self._TravelTimes(triplist))
        if not self._stations:
            self._AddWarning("Failed to use traveltimes for graph")
            self._stations = self._CalculateYLines(self._Uniform(triplist))
            if not self._stations:
                self._AddWarning("Failed to calculate station distances")
                return

    stations = self._stations
    tmpstrs = []
    servlist = []
    for t in triplist:
        if not colpar:
            if t.service_id not in servlist:
                servlist.append(t.service_id)
            shade = int(servlist.index(t.service_id) * (200/len(servlist))+55)
            # NOTE(review): the color expression was corrupted in this copy
            # ('color = "'); reconstructed as a per-service shade of green.
            color = "#00%02x00" % shade
        else:
            color = colpar

        start_offsets = [0]
        first_stop = t.GetTimeStops()[0]
        for j, freq_offset in enumerate(start_offsets):
            # Repeated (frequency-based) runs after the first are drawn purple.
            if j > 0 and not colpar:
                color = "purple"
            scriptcall = 'onmouseover="LineClick(\'%s\',\'Trip %s starting %s\')"' % (
                t.trip_id, t.trip_id,
                transitfeed.FormatSecondsSinceMidnight(t.GetStartTime()))
            tmpstrhead = '<polyline class="T" id="%s" stroke="%s" %s points="' % \
                (str(t.trip_id), color, scriptcall)
            tmpstrs.append(tmpstrhead)

            for i, s in enumerate(t.GetTimeStops()):
                arr_t = s[0]
                dep_t = s[1]
                # Skip stops without both an arrival and a departure time.
                if arr_t is None or dep_t is None:
                    continue
                # Convert seconds-since-midnight to grid x; +20 is the margin offset.
                arr_x = int(arr_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset
                dep_x = int(dep_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset
                tmpstrs.append("%s,%s " % (int(arr_x+20), int(stations[i]+20)))
                tmpstrs.append("%s,%s " % (int(dep_x+20), int(stations[i]+20)))
            tmpstrs.append('" />')
    return "".join(tmpstrs)
Generates svg polylines for each transit trip. Args: # Class Trip is defined in transitfeed.py [Trip, Trip, ...] Returns: # A string containing a polyline tag for each trip ' <polyline class="T" stroke="#336633" points="433,0 ...'
juraj-google-style
def _parse_frange_part(frange):
    """Parse a discrete frame range part.

    Args:
        frange (str): single part of a frame range as a string (ie "1-100x5")

    Returns:
        tuple: (start, end, modifier, chunk)

    Raises:
        :class:`.ParseException`: if the frame range can not be parsed
    """
    match = FRANGE_RE.match(frange)
    if not match:
        msg = 'Could not parse "{0}": did not match {1}'
        raise ParseException(msg.format(frange, FRANGE_RE.pattern))
    start, end, modifier, chunk = match.groups()
    start = int(start)
    end = int(end) if end is not None else start
    if end > start and chunk is not None and int(chunk) < 0:
        # Fixed: the message was missing the closing quote after {0}.
        msg = 'Could not parse "{0}": chunk can not be negative'
        raise ParseException(msg.format(frange))
    # Negative chunks are only meaningful for descending ranges; normalize
    # to a positive step. A missing chunk defaults to 1.
    chunk = abs(int(chunk)) if chunk is not None else 1
    if chunk == 0:
        msg = 'Could not parse "{0}": chunk cannot be 0'
        raise ParseException(msg.format(frange))
    return start, end, modifier, chunk
Internal method: parse a discrete frame range part. Args: frange (str): single part of a frame range as a string (ie "1-100x5") Returns: tuple: (start, end, modifier, chunk) Raises: :class:`.ParseException`: if the frame range can not be parsed
juraj-google-style
def __init__(self):
    """Initializes the J-Link SWO Speed Information instance.

    SizeofStruct must carry the ctypes size of the structure before it is
    passed to the J-Link DLL; the SWO interface defaults to UART.

    Args:
        self (JLinkSWOSpeedInfo): the ``JLinkSWOSpeedInfo`` instance

    Returns:
        ``None``
    """
    super(JLinkSWOSpeedInfo, self).__init__()
    self.SizeofStruct = ctypes.sizeof(self)
    self.Interface = enums.JLinkSWOInterfaces.UART
Initializes the J-Link SWO Speed Information instance. Args: self (JLinkSWOSpeedInfo): the ``JLinkSWOSpeedInfo`` instance Returns: ``None``
juraj-google-style
def __init__(self, save_path):
    """Configure the checkpoint view.

    Args:
        save_path: The path to the checkpoint.

    Raises:
        ValueError: If the save_path does not lead to a TF2 (object-based)
            checkpoint.
    """
    reader = py_checkpoint_reader.NewCheckpointReader(save_path)
    try:
        # The object graph key only exists in TF2 object-based checkpoints.
        object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)
    except errors_impl.NotFoundError as not_found_error:
        raise ValueError(f'The specified checkpoint "{save_path}" does not appear to be object-based (saved with TF2) since it is missing the key "{base.OBJECT_GRAPH_PROTO_KEY}". Likely it was created with the TF1 name-based saver and does not contain an object dependency graph.') from not_found_error
    object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()
    object_graph_proto.ParseFromString(object_graph_string)
    self._object_graph_proto = object_graph_proto
Configure the checkpoint view. Args: save_path: The path to the checkpoint. Raises: ValueError: If the save_path does not lead to a TF2 checkpoint.
github-repos
def Patch(self, request, global_params=None):
    """Update an association between a GCP project and a GitHub Enterprise server.

    Args:
        request: (CloudbuildProjectsGithubEnterpriseConfigsPatchRequest)
            input message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (Operation) The response message.
    """
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(config, request, global_params=global_params)
Update an association between a GCP project and a GitHub Enterprise server. Args: request: (CloudbuildProjectsGithubEnterpriseConfigsPatchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Operation) The response message.
github-repos
def explore_package(module_name):
    """Return the dotted names of all modules found under *module_name*.

    Args:
        module_name: importable name of the package to explore.

    Returns:
        list: fully qualified names of the package's sub-modules and
        sub-packages.
    """
    loader = pkgutil.get_loader(module_name)
    search_path = [os.path.dirname(loader.get_filename())]
    return [
        info[1]
        for info in pkgutil.walk_packages(search_path, prefix=module_name + '.')
    ]
Return all the sub-modules and sub-packages found under the given module. Args: module_name: name of the module to explore. Returns: list: fully qualified (dotted) names of the module's sub-modules.
juraj-google-style
def glyph_has_ink(font: TTFont, name: Text) -> bool:
    """Checks if the specified glyph has any ink.

    That is, that it has at least one defined contour associated.
    Composites are considered to have ink if any of their components
    have ink.

    Args:
        font: the font.
        name: the name of the glyph to check for ink.

    Returns:
        True if the glyph has at least one contour associated with it.

    Raises:
        Exception: if the font has no 'glyf', 'CFF ' or 'CFF2' table.
    """
    if 'glyf' in font:
        return ttf_glyph_has_ink(font, name)
    if ('CFF ' in font) or ('CFF2' in font):
        return cff_glyph_has_ink(font, name)
    raise Exception("Could not find 'glyf', 'CFF ', or 'CFF2' table.")
Checks if specified glyph has any ink. That is, that it has at least one defined contour associated. Composites are considered to have ink if any of their components have ink. Args: font: the font glyph_name: The name of the glyph to check for ink. Returns: True if the font has at least one contour associated with it.
codesearchnet
def code_challenge(verifier): digest = hashlib.sha256(verifier).digest() return base64.urlsafe_b64encode(digest).rstrip(b'=')
Creates a 'code_challenge' as described in section 4.2 of RFC 7636 by taking the sha256 hash of the verifier and then urlsafe base64-encoding it. Args: verifier: bytestring, representing a code_verifier as generated by code_verifier(). Returns: Bytestring, representing a urlsafe base64-encoded sha256 hash digest, without '=' padding.
juraj-google-style
def slice_list(in_list, lens):
    """Slice a list into consecutive sub-lists of the given lengths.

    Args:
        in_list (list): the list to be sliced.
        lens (int or list): the expected length of each output sub-list.

    Returns:
        list: a list of sliced sub-lists.

    Raises:
        TypeError: if ``lens`` is not a list.
        ValueError: if the lengths do not sum to ``len(in_list)``.
    """
    if not isinstance(lens, list):
        raise TypeError('"indices" must be a list of integers')
    if sum(lens) != len(in_list):
        raise ValueError(
            'sum of lens and list length does not match: {} != {}'.format(
                sum(lens), len(in_list)))
    slices = []
    cursor = 0
    for length in lens:
        slices.append(in_list[cursor:cursor + length])
        cursor += length
    return slices
Slice a list into several sub lists by a list of given length. Args: in_list (list): The list to be sliced. lens(int or list): The expected length of each out list. Returns: list: A list of sliced list.
juraj-google-style
def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0):
    """Filter out examples that exceed max_edit_ratio between source and target.

    Args:
        source_target_input: a list of [source, target] pairs.
        max_equal_to_diff_ratio: cutoff for the ratio of differing chars to
            equal chars between source and target; a falsy value disables
            filtering.

    Returns:
        tuple: (kept [source, target] pairs, number of examples filtered out).
    """
    if not max_equal_to_diff_ratio:
        return source_target_input, 0

    kept = []
    discarded = 0
    for pair in source_target_input:
        diff_chars = 0
        equal_chars = 0
        for tag, i1, i2, j1, j2 in fast_match_sequences(*pair):
            if tag == 'diff':
                # A diff region can differ in length on each side; count the
                # longer side.
                diff_chars += max(i2 - i1, j2 - j1)
            else:
                equal_chars += i2 - i1
        if diff_chars <= max_equal_to_diff_ratio * equal_chars:
            kept.append(pair)
        else:
            discarded += 1
    return kept, discarded
Filter out examples that exceed max_edit_ratio between source and target. Args: source_target_input: a list of [source, target] pairs max_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars between source and target Returns: source_target_output: filtered subset of [source, target] input pairs thrown_out_count: number of examples filtered out
codesearchnet
def get_config_multiline_option(parser: ConfigParser,
                                section: str,
                                option: str,
                                default: List[str] = None) -> List[str]:
    """Read a multi-line option as a list of stripped, non-blank lines.

    Args:
        parser: instance of :class:`ConfigParser`.
        section: section name within the config file.
        option: option (variable) name within that section.
        default: value to return if the option is absent (``None`` is
            mapped to ``[]``).

    Returns:
        list of strings, one per non-blank line of the option value.

    Raises:
        ValueError: if the section is absent.
    """
    default = default or []
    if not parser.has_section(section):
        raise ValueError("config missing section: " + section)
    try:
        raw = parser.get(section, option)
    except NoOptionError:
        return default
    return [line.strip() for line in raw.splitlines() if line.strip()]
Retrieves a multi-line string value from a parser as a list of strings (one per line, ignoring blank lines). Args: parser: instance of :class:`ConfigParser` section: section name within config file option: option (variable) name within that section default: value to return if option is absent (``None`` is mapped to ``[]``) Returns: list of strings Raises: ValueError: if the section is absent
juraj-google-style
def CreateProductPartition(client, adgroup_id):
    """Creates a ProductPartition tree for the given AdGroup ID.

    The tree subdivides on product condition: NEW, USED, and an "other"
    catch-all unit (a ProductCanonicalCondition with no condition set).

    Args:
        client: an AdWordsClient instance.
        adgroup_id: a str AdGroup ID.

    Returns:
        The ProductPartition tree as a sudsobject.
    """
    ad_group_criterion_service = client.GetService('AdGroupCriterionService', 'v201809')
    helper = ProductPartitionHelper(adgroup_id)
    root = helper.CreateSubdivision()
    new_product_canonical_condition = {
        'xsi_type': 'ProductCanonicalCondition',
        'condition': 'NEW'
    }
    used_product_canonical_condition = {
        'xsi_type': 'ProductCanonicalCondition',
        'condition': 'USED'
    }
    # No 'condition' key: matches every product not covered above.
    other_product_canonical_condition = {
        'xsi_type': 'ProductCanonicalCondition',
    }
    helper.CreateUnit(root, new_product_canonical_condition)
    helper.CreateUnit(root, used_product_canonical_condition)
    helper.CreateUnit(root, other_product_canonical_condition)
    result = ad_group_criterion_service.mutate(helper.operations)
    return result['value']
Creates a ProductPartition tree for the given AdGroup ID. Args: client: an AdWordsClient instance. adgroup_id: a str AdGroup ID. Returns: The ProductPartition tree as a sudsobject.
juraj-google-style
def insert_flux_bias(cur, chain, system, flux_bias, chain_strength, encoded_data=None):
    """Insert a flux bias offset into the cache.

    Args:
        cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is
            meant to be run within a :obj:`with` statement.
        chain (iterable): A collection of nodes. Chains in embedding act as
            one node.
        system (str): The unique name of a system.
        flux_bias (float): The flux bias offset associated with the given
            chain.
        chain_strength (float): The magnitude of the negative quadratic
            bias that induces the given chain in an Ising problem.
        encoded_data (dict, optional): If a dictionary is provided, it will
            be populated with the serialized data. This is useful for
            preventing encoding the same information many times.
    """
    if (encoded_data is None):
        encoded_data = {}
    # These also populate encoded_data with the chain_length/nodes and
    # system_name parameters used by the INSERT below.
    insert_chain(cur, chain, encoded_data)
    insert_system(cur, system, encoded_data)
    if ('flux_bias' not in encoded_data):
        encoded_data['flux_bias'] = _encode_real(flux_bias)
    if ('chain_strength' not in encoded_data):
        encoded_data['chain_strength'] = _encode_real(chain_strength)
    if ('insert_time' not in encoded_data):
        encoded_data['insert_time'] = datetime.datetime.now()
    insert = '\n    INSERT OR REPLACE INTO flux_bias(chain_id, system_id, insert_time, flux_bias, chain_strength)\n    SELECT\n        chain.id,\n        system.id,\n        :insert_time,\n        :flux_bias,\n        :chain_strength\n    FROM chain, system\n    WHERE\n        chain.chain_length = :chain_length AND\n        chain.nodes = :nodes AND\n        system.system_name = :system_name;\n    '
    cur.execute(insert, encoded_data)
Insert a flux bias offset into the cache. Args: cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement. chain (iterable): A collection of nodes. Chains in embedding act as one node. system (str): The unique name of a system. flux_bias (float): The flux bias offset associated with the given chain. chain_strength (float): The magnitude of the negative quadratic bias that induces the given chain in an Ising problem. encoded_data (dict, optional): If a dictionary is provided, it will be populated with the serialized data. This is useful for preventing encoding the same information many times.
codesearchnet
def resolve_artifacts_by_builder_compat(
        self, package_names, builder_name, dependencies=False):
    """Yield paths of declared artifacts for a compat builder.

    Args:
        package_names: list of package name strings used to probe the
            dependency graph.
        builder_name: name of the compat builder whose artifact registry
            is consulted.
        dependencies: when True, also trace each package's requirements.
            Default is off.

    Yields:
        Artifact paths in dependency resolution order; packages with no
        declared path are skipped.
    """
    paths = self.compat_builders.get(builder_name)
    # No registry for this builder: nothing to yield.
    if not paths:
        return
    resolver = (
        find_packages_requirements_dists
        if dependencies
        else pkg_names_to_dists
    )
    for distribution in resolver(package_names):
        path = paths.get(distribution.project_name)
        if path:
            yield path
Yield the paths to the declared artifacts, in dependency resolution order. Arguments: package_names The names of the packages used to probe the dependency graph, provided as a list of strings. builder_name The name of the compat builder whose artifact registry is consulted. dependencies Trace dependencies. Default is off. Yields the path of each artifact that has been declared; packages without a declared artifact path are skipped.
juraj-google-style
def __init__(self, **options):
    """Initialize application with command line options.

    Args:
        options (ApplicationOptions): given command line options.
    """
    self.options = options
    self.logging_level = logging.DEBUG
    # Logging must be configured before the logger is created.
    self.setup_logging()
    self.logger = Logger.get_logger(__name__)
    # Accumulates results; starts empty.
    self.results = []
Initialize application with command line options. Args: options (ApplicationOptions): given command line options.
juraj-google-style
def _IsWindowsDrivePathSegment(cls, path_segment): if (len(path_segment) == 2 and path_segment[1] == ':' and path_segment[0].isalpha()): return True path_segment = path_segment.upper() return path_segment in ('%%ENVIRON_SYSTEMDRIVE%%', '%SYSTEMDRIVE%')
Determines if the path segment contains a Windows Drive indicator. A drive indicator can be a drive letter or %SystemDrive%. Args: path_segment (str): path segment. Returns: bool: True if the path segment contains a Windows Drive indicator.
juraj-google-style
def bulk_lookup(self, api_name, keys):
    """Perform a cache lookup on an enumerable of keys.

    Args:
        api_name: a string name of the API. Keys and values are segmented
            by api_name.
        keys: an enumerable of string keys.

    Returns:
        dict: mapping of each key with a cached value to that value; keys
        whose lookup returns None are omitted.
    """
    found = {}
    for key in keys:
        cached = self.lookup_value(api_name, key)
        if cached is not None:
            found[key] = cached
    return found
Perform lookup on an enumerable of keys. Args: api_name: a string name of the API. Keys and values are segmented by api_name. keys: an enumerable of string keys.
codesearchnet
def __init__(self, states, internals, actions, include_next_states, capacity, scope='replay', summary_labels=None):
    """Replay memory.

    Args:
        states (dict): States specification.
        internals (dict): Internal states specification.
        actions (dict): Actions specification.
        include_next_states (bool): Include subsequent state if true.
        capacity (int): Memory capacity (number of
            state/internals/action/(next-state)? records).
        scope (str): TensorFlow scope name.
        summary_labels: Optional summary labels forwarded to the base memory.
    """
    super(Replay, self).__init__(
        states=states,
        internals=internals,
        actions=actions,
        include_next_states=include_next_states,
        capacity=capacity,
        scope=scope,
        summary_labels=summary_labels
    )
Replay memory. Args: states (dict): States specification. internals (dict): Internal states specification. actions (dict): Actions specification. include_next_states (bool): Include subsequent state if true. capacity (int): Memory capacity (number of state/internals/action/(next-state)? records).
juraj-google-style
def index_by_id(cls, target_id, resources):
    """Helper method to fetch the index of a resource by its id or address.

    Args:
        resources (list of objects): The resources to be paginated.
        target_id (string): The address or header_signature of the resource.

    Returns:
        integer: The index of the target resource.

    Raises:
        AssertionError: Raised if the target is not found.
    """
    for index, _ in enumerate(resources):
        if cls.id_by_index(index, resources) == target_id:
            return index
    raise AssertionError
Helper method to fetch the index of a resource by its id or address Args: resources (list of objects): The resources to be paginated target_id (string): The address or header_signature of the resource Returns: integer: The index of the target resource Raises: AssertionError: Raised if the target is not found
juraj-google-style
def import_entities(self, entities):
    """Upload entity objects to this workspace.

    Args:
        entities: iterable of firecloud.Entity objects.
    """
    edata = Entity.create_payload(entities)
    r = fapi.upload_entities(self.namespace, self.name, edata, self.api_url)
    # 201 Created is the only acceptable response.
    fapi._check_response_code(r, 201)
Upload entity objects. Args: entities: iterable of firecloud.Entity objects.
juraj-google-style
def document(self, name, file_name, owner=None, **kwargs):
    """Create the Document TI object.

    Args:
        name: name of the document.
        file_name: name of the attached file.
        owner: optional owner name.
        **kwargs: additional Document keyword arguments.

    Returns:
        Document: the new Document TI object.
    """
    return Document(self.tcex, name, file_name, owner=owner, **kwargs)
Create the Document TI object. Args: owner: name: file_name: **kwargs: Return:
codesearchnet
def wait_for_fresh_games(self, poll_interval=15.0):
    """Block caller until required new games have been played.

    If the cell `table_state=metadata:wait_for_game_number` exists, block
    the caller, checking every `poll_interval` seconds, until
    `table_state=metadata:game_counter` is at least the value in that cell.

    Args:
        poll_interval: number of seconds to wait between checks.
    """
    wait_until_game = self.read_wait_cell()
    # No wait cell: nothing to wait for.
    if (not wait_until_game):
        return
    latest_game = self.latest_game_number
    last_latest = latest_game
    while (latest_game < wait_until_game):
        utils.dbg('Latest game {} not yet at required game {} (+{}, {:0.3f} games/sec)'.format(latest_game, wait_until_game, (latest_game - last_latest), ((latest_game - last_latest) / poll_interval)))
        time.sleep(poll_interval)
        last_latest = latest_game
        latest_game = self.latest_game_number
Block caller until required new games have been played. Args: poll_interval: number of seconds to wait between checks If the cell `table_state=metadata:wait_for_game_number` exists, then block the caller, checking every `poll_interval` seconds, until `table_state=metadata:game_counter is at least the value in that cell.
codesearchnet
def __init__(
        self, symbol: str = '', exchange: str = '', currency: str = '',
        **kwargs):
    """Stock contract.

    Args:
        symbol: Symbol name.
        exchange: Destination exchange.
        currency: Underlying currency.
    """
    # Delegate to the base Contract with the security type fixed to 'STK'.
    Contract.__init__(
        self, secType='STK', symbol=symbol, exchange=exchange,
        currency=currency, **kwargs)
Stock contract. Args: symbol: Symbol name. exchange: Destination exchange. currency: Underlying currency.
juraj-google-style
def verify_file_exists(file_name, file_location):
    """Check whether a file exists at the given location.

    Args:
        file_name: The name of the file to check.
        file_location: The directory the file should live in.

    Returns:
        bool: True if the path names an existing regular file, else False.
    """
    full_path = __os.path.join(file_location, file_name)
    return __os.path.isfile(full_path)
Function to verify if a file exists Args: file_name: The name of file to check file_location: The location of the file, derive from the os module Returns: returns boolean True or False
juraj-google-style
def main(argv: Optional[Sequence[str]] = None) -> None:
    """Start the pass-git-helper script.

    Args:
        argv: If not ``None``, use the provided command line arguments for
            parsing. Otherwise, extract them automatically.
    """
    args = parse_arguments(argv=argv)

    if args.logging:
        logging.basicConfig(level=logging.DEBUG)

    handle_skip()

    action = args.action
    # The credential request is read from stdin per git's credential protocol.
    request = parse_request()
    LOGGER.debug('Received action %s with request:\n%s', action, request)

    try:
        mapping = parse_mapping(args.mapping)
    except Exception as error:
        # Report the failure both to the log and to stderr before exiting.
        LOGGER.critical('Unable to parse mapping file', exc_info=True)
        print(
            'Unable to parse mapping file: {error}'.format(
                error=error), file=sys.stderr)
        sys.exit(1)

    if action == 'get':
        get_password(request, mapping)
    else:
        # Only the 'get' credential action is supported.
        LOGGER.info('Action %s is currently not supported', action)
        sys.exit(1)
Start the pass-git-helper script. Args: argv: If not ``None``, use the provided command line arguments for parsing. Otherwise, extract them automatically.
juraj-google-style
def find_model_patch_tracks(self):
    """Identify storms in gridded model output and extract uniform sized
    patches around the storm centers of mass.

    Returns:
        list: tracked model storm objects for this ensemble member; empty
        if no model output is available.
    """
    self.model_grid.load_data()
    tracked_model_objects = []
    model_objects = []
    if self.model_grid.data is None:
        print('No model output found')
        return tracked_model_objects
    # Temporarily rescale the enhanced-watershed thresholds to a fixed
    # 0-100 range; the original settings are restored before returning.
    min_orig = self.model_ew.min_thresh
    max_orig = self.model_ew.max_thresh
    data_increment_orig = self.model_ew.data_increment
    self.model_ew.min_thresh = 0
    self.model_ew.data_increment = 1
    self.model_ew.max_thresh = 100
    for h, hour in enumerate(self.hours):
        print('Finding {0} objects for run {1} Hour: {2:02d}'.format(
            self.ensemble_member, self.run_date.strftime('%Y%m%d%H'), hour))
        if self.mask is not None:
            model_data = self.model_grid.data[h] * self.mask
        else:
            model_data = self.model_grid.data[h]
        # Zero a border of patch_radius cells so extracted patches always
        # fit inside the grid. Fixed: the column slices were corrupted to
        # invalid syntax in this copy ('model_data[(:, ...)]').
        model_data[:self.patch_radius] = 0
        model_data[-self.patch_radius:] = 0
        model_data[:, :self.patch_radius] = 0
        model_data[:, -self.patch_radius:] = 0
        scaled_data = np.array(rescale_data(model_data, min_orig, max_orig))
        hour_labels = label_storm_objects(scaled_data, 'ew',
                                          self.model_ew.min_thresh, self.model_ew.max_thresh,
                                          min_area=self.size_filter, max_area=self.model_ew.max_size,
                                          max_range=self.model_ew.delta,
                                          increment=self.model_ew.data_increment,
                                          gaussian_sd=self.gaussian_window)
        model_objects.extend(extract_storm_patches(hour_labels, model_data,
                                                   self.model_grid.x, self.model_grid.y,
                                                   [hour], dx=self.model_grid.dx,
                                                   patch_radius=self.patch_radius))
        # NOTE(review): model_objects[-1] iterates the most recently added
        # entry, so extract_storm_patches presumably returns a list of
        # per-hour collections — confirm against its definition.
        for model_obj in model_objects[-1]:
            dims = model_obj.timesteps[-1].shape
            if h > 0:
                model_obj.estimate_motion(hour, self.model_grid.data[h - 1], dims[1], dims[0])
        # Release the per-hour grids before processing the next hour.
        del scaled_data
        del model_data
        del hour_labels
    tracked_model_objects.extend(track_storms(model_objects, self.hours,
                                              self.object_matcher.cost_function_components,
                                              self.object_matcher.max_values,
                                              self.object_matcher.weights))
    # Restore the enhanced-watershed settings modified above.
    self.model_ew.min_thresh = min_orig
    self.model_ew.max_thresh = max_orig
    self.model_ew.data_increment = data_increment_orig
    return tracked_model_objects
Identify storms in gridded model output and extract uniform sized patches around the storm centers of mass. Returns:
codesearchnet
def infer_channel_dimension_format(image: np.ndarray, num_channels: Optional[Union[int, tuple[int, ...]]]=None) -> ChannelDimension:
    """Infers the channel dimension format of `image`.

    Args:
        image (`np.ndarray`): The image to infer the channel dimension of.
        num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):
            The number of channels of the image.

    Returns:
        The channel dimension of the image.

    Raises:
        ValueError: if the number of dimensions is unsupported or neither
            candidate dimension matches `num_channels`.
    """
    num_channels = num_channels if num_channels is not None else (1, 3)
    num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels
    # Candidate channel axes depend on rank: (C,H,W)/(H,W,C) for 3-D, with
    # one or two leading batch-like dimensions for 4-D and 5-D inputs.
    if image.ndim == 3:
        first_dim, last_dim = (0, 2)
    elif image.ndim == 4:
        first_dim, last_dim = (1, 3)
    elif image.ndim == 5:
        first_dim, last_dim = (2, 4)
    else:
        raise ValueError(f'Unsupported number of image dimensions: {image.ndim}')
    if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:
        # Fixed: the original warning string was truncated mid-URL in this
        # copy, leaving an unterminated literal; reconstructed without the link.
        logger.warning(f'The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension. Use the input_data_format argument to disambiguate.')
        return ChannelDimension.FIRST
    elif image.shape[first_dim] in num_channels:
        return ChannelDimension.FIRST
    elif image.shape[last_dim] in num_channels:
        return ChannelDimension.LAST
    raise ValueError('Unable to infer channel dimension format')
Infers the channel dimension format of `image`. Args: image (`np.ndarray`): The image to infer the channel dimension of. num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`): The number of channels of the image. Returns: The channel dimension of the image.
github-repos
def delete_storage_account(access_token, subscription_id, rgname, account_name):
    """Delete a storage account in the specified resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        account_name (str): Name of the storage account to delete.

    Returns:
        HTTP response.
    """
    endpoint = (
        '{}/subscriptions/{}/resourcegroups/{}'
        '/providers/Microsoft.Storage/storageAccounts/{}'
        '?api-version={}'
    ).format(get_rm_endpoint(), subscription_id, rgname, account_name,
             STORAGE_API)
    return do_delete(endpoint, access_token)
Delete a storage account in the specified resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the new storage account. Returns: HTTP response.
codesearchnet
def set_reprompt_ssml(self, ssml):
    """Set response reprompt output speech as SSML type.

    Args:
        ssml: str. Response speech used when type is 'SSML'; should be
            formatted with Speech Synthesis Markup Language and cannot
            exceed 8,000 characters.
    """
    speech = self.response.reprompt.outputSpeech
    speech.type = 'SSML'
    speech.ssml = ssml
Set response reprompt output speech as SSML type. Args: ssml: str. Response speech used when type is 'SSML', should be formatted with Speech Synthesis Markup Language. Cannot exceed 8,000 characters.
juraj-google-style
def __init__(self, params, train):
    """Initialize layers to build Transformer model.

    Args:
        params: hyperparameter object defining layer sizes, dropout values,
            etc.
        train: boolean indicating whether the model is in training mode.
            Used to determine if dropout layers should be added.
    """
    self.train = train
    self.params = params
    # The embedding weights are shared with the pre-softmax projection.
    self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights(
        params.vocab_size, params.hidden_size)
    self.encoder_stack = EncoderStack(params, train)
    self.decoder_stack = DecoderStack(params, train)
Initialize layers to build Transformer model. Args: params: hyperparameter object defining layer sizes, dropout values, etc. train: boolean indicating whether the model is in training mode. Used to determine if dropout layers should be added.
juraj-google-style
def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None):
    """Returns the present value of the stream on the valuation date.

    Computed as the price of the receive leg minus the price of the pay leg.

    Args:
        market: An instance of `ProcessedMarketData`.
        name: Python str. The name to give to the ops created by this
            function. Default value: `None` which maps to 'price'.

    Returns:
        A `Tensor` of shape `batch_shape` containing the modeled price of
        each IRS contract based on the input market data.
    """
    name = name or self._name + '_price'
    with tf.name_scope(name):
        pay_cf = self._pay_leg.price(market)
        receive_cf = self._receive_leg.price(market)
        return receive_cf - pay_cf
Returns the present value of the stream on the valuation date. Args: market: An instance of `ProcessedMarketData`. name: Python str. The name to give to the ops created by this function. Default value: `None` which maps to 'price'. Returns: A `Tensor` of shape `batch_shape` containing the modeled price of each IRS contract based on the input market data.
github-repos
def get_variable_value_for_variation(self, variable, variation):
    """Get the variable value for the given variation.

    Args:
        variable: The Variable for which we are getting the value.
        variation: The Variation for which we are getting the variable
            value.

    Returns:
        The variable value, or None if any of the inputs are invalid.
    """
    if not variable or not variation:
        return None

    if variation.id not in self.variation_variable_usage_map:
        self.logger.error('Variation with ID "%s" is not in the datafile.' % variation.id)
        return None

    usages = self.variation_variable_usage_map[variation.id]
    usage = usages.get(variable.id) if usages else None

    if usage:
        value = usage.value
        self.logger.info('Value for variable "%s" for variation "%s" is "%s".' % (
            variable.key, variation.key, value
        ))
    else:
        # The variation does not override this variable: fall back to the
        # variable's default value.
        value = variable.defaultValue
        self.logger.info('Variable "%s" is not used in variation "%s". Assigning default value "%s".' % (
            variable.key, variation.key, value
        ))

    return value
Get the variable value for the given variation. Args: variable: The Variable for which we are getting the value. variation: The Variation for which we are getting the variable value. Returns: The variable value or None if any of the inputs are invalid.
juraj-google-style
def _GetPlistRootKey(self, file_entry):
    """Retrieves the root key of a plist file.

    Args:
        file_entry (dfvfs.FileEntry): file entry of the plist.

    Returns:
        dict[str, object]: plist root key.

    Raises:
        errors.PreProcessFail: if the preprocessing fails.
    """
    file_object = file_entry.GetFileObject()
    try:
        plist_file = plist.PlistFile()
        plist_file.Read(file_object)
    except IOError as exception:
        location = getattr(file_entry.path_spec, 'location', '')
        raise errors.PreProcessFail(
            'Unable to read plist file: {0:s} with error: {1!s}'.format(
                location, exception))
    finally:
        # Always release the dfVFS file object, even on failure.
        file_object.close()
    return plist_file.root_key
Retrieves the root key of a plist file. Args: file_entry (dfvfs.FileEntry): file entry of the plist. Returns: dict[str, object]: plist root key. Raises: errors.PreProcessFail: if the preprocessing fails.
juraj-google-style
def load_variant(self, variant_obj):
    """Load a variant object into the database.

    Args:
        variant_obj(dict): variant document; must carry a unique '_id'.

    Returns:
        The result of the insert, whose ``inserted_id`` is the id of the
        new document.

    Raises:
        IntegrityError: if a variant with the same '_id' already exists.
    """
    try:
        result = self.variant_collection.insert_one(variant_obj)
    except DuplicateKeyError as err:
        # Fixed: the original passed printf-style arguments straight to the
        # exception constructor; format the message explicitly and keep the
        # underlying DuplicateKeyError as the cause.
        raise IntegrityError(
            'Variant {} already exists in database'.format(variant_obj['_id'])
        ) from err
    return result
Load a variant object Args: variant_obj(dict) Returns: inserted_id
codesearchnet
def decompose(P):
    """Decompose a polynomial to component form.

    In the output, missing values are padded with 0 to make the
    decomposition compatible with ``chaospy.sum(Q, 0)``.

    Args:
        P (Poly): Input data.

    Returns:
        (Poly): Decomposed polynomial with ``P.shape==(M,)+Q.shape`` where
        ``M`` is the number of components in ``P``.
    """
    P = P.copy()
    # Empty polynomial: nothing to decompose.
    if (not P):
        return P
    # One Poly per coefficient key; summing the components reconstructs P.
    out = [Poly({key: P.A[key]}) for key in P.keys]
    return Poly(out, None, None, None)
Decompose a polynomial to component form. In array missing values are padded with 0 to make decomposition compatible with ``chaospy.sum(Q, 0)``. Args: P (Poly) : Input data. Returns: (Poly) : Decomposed polynomial with `P.shape==(M,)+Q.shape` where `M` is the number of components in `P`. Examples: >>> q = cp.variable() >>> P = cp.Poly([q**2-1, 2]) >>> print(P) [q0^2-1, 2] >>> print(cp.decompose(P)) [[-1, 2], [q0^2, 0]] >>> print(cp.sum(cp.decompose(P), 0)) [q0^2-1, 2]
codesearchnet
def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False):
    """Single encoder layer: self-attention then feed-forward, each with a
    residual connection followed by layer norm.

    Args:
        x (`torch.Tensor`): input to the layer of shape
            *(seq_len, batch, embed_dim)*.
        encoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of
            shape *(batch, src_len)* where padding elements are indicated
            by `1`.
        layer_head_mask (`torch.FloatTensor`): mask for attention heads in
            a given layer of size *(config.encoder_attention_heads,)*.
        output_attentions: whether attention weights should be returned.

    Returns:
        Tuple of the encoded output of shape *(seq_len, batch, embed_dim)*
        and the attention weights.
    """
    residual = x
    x, attn_weights = self.self_attn(query=x, key=x, key_padding_mask=encoder_padding_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
    x = nn.functional.dropout(x, p=self.dropout, training=self.training)
    x = residual + x
    x = self.self_attn_layer_norm(x)
    residual = x
    x = self.activation_fn(self.fc1(x))
    x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
    x = self.fc2(x)
    x = nn.functional.dropout(x, p=self.dropout, training=self.training)
    x = residual + x
    x = self.final_layer_norm(x)
    return (x, attn_weights)
Args: x (`torch.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)* encoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of shape *(batch, src_len)* where padding elements are indicated by `1`. for t_tgt, t_src is excluded (or masked out), =0 means it is included in attention layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size *(config.encoder_attention_heads,)*. Returns: encoded output of shape *(seq_len, batch, embed_dim)*
github-repos
def create_model() -> tf.keras.Model:
    """Build the model used for training.

    Constructs a small feed-forward classifier for 28x28 inputs
    (e.g. MNIST images): Flatten -> Dense(128, relu) -> Dense(10, softmax).

    Returns:
        The tf.keras.Sequential model to use for training.
    """
    classifier_layers = [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax'),
    ]
    return tf.keras.Sequential(classifier_layers)
Create model for training. Create a simple tf.keras model for training. Returns: The model to use for training.
github-repos
def addUrlScheme(self, url):
    """Register a url scheme with this endpoint.

    The url string is wrapped in an OEmbedUrlScheme object internally;
    schemes already registered are left untouched.

    Args:
        url: The url string that represents a url scheme to add.

    Raises:
        TypeError: if ``url`` is not a string.
    """
    if not isinstance(url, str):
        raise TypeError('url must be a string value')
    if url not in self._urlSchemes:
        self._urlSchemes[url] = OEmbedUrlScheme(url)
Add a url scheme to this endpoint. It takes a url string and create the OEmbedUrlScheme object internally. Args: url: The url string that represents a url scheme to add.
codesearchnet
def _get_ngrams(n, text): ngram_set = set() text_length = len(text) max_index_ngram_start = text_length - n for i in range(max_index_ngram_start + 1): ngram_set.add(tuple(text[i:i + n])) return ngram_set
Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams
juraj-google-style
def post_headline(self, name, level, message):
    """Asynchronously update the sticky headline for a service.

    Args:
        name (string): The name of the service.
        level (int): A message level in states.*_LEVEL.
        message (string): The user facing error message that will be stored
            for the service and can be queried later.
    """
    headline = {
        'name': name,
        'level': level,
        'message': message,
    }
    self.post_command(OPERATIONS.CMD_SET_HEADLINE, headline)
Asynchronously update the sticky headline for a service. Args: name (string): The name of the service level (int): A message level in states.*_LEVEL message (string): The user facing error message that will be stored for the service and can be queried later.
juraj-google-style
def get_layer_opt(self, lrs, wds):
    """Build a LayerOptimizer for this model's layer groups.

    A LayerOptimizer allows differential learning rates and weight decays
    for different parts of the model (see the 'model_meta' dict in
    conv_learner.py for how a network is split into groups).

    Args:
        lrs (float or list(float)): learning rate(s) for the model.
        wds (float or list(float)): weight decay parameter(s).

    Returns:
        An instance of LayerOptimizer.
    """
    layer_groups = self.get_layer_groups()
    return LayerOptimizer(self.opt_fn, layer_groups, lrs, wds)
Method returns an instance of the LayerOptimizer class, which allows for setting differential learning rates for different parts of the model. An example of how a model maybe differentiated into different parts for application of differential learning rates and weight decays is seen in ../.../courses/dl1/fastai/conv_learner.py, using the dict 'model_meta'. Currently, this seems supported only for convolutional networks such as VGG-19, ResNet-XX etc. Args: lrs (float or list(float)): learning rate(s) for the model wds (float or list(float)): weight decay parameter(s). Returns: An instance of a LayerOptimizer
codesearchnet
def ExtractEvents(self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
    """Extracts events from a Windows Registry key.

    Delegates all work to the MRUListEx key parser.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
        codepage (Optional[str]): extended ASCII string codepage.
    """
    self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. codepage (Optional[str]): extended ASCII string codepage.
juraj-google-style
def list_types_poi(self, **kwargs):
    """Obtain a list of families, types and categories of POI.

    Args:
        lang (str): Language code (*es* or *en*).

    Returns:
        Status boolean and parsed response (list[ParkingPoiType]),
        or message string in case of error.
    """
    lang = util.language_code(kwargs.get('lang'))
    result = self.make_request('list_poi_types', {'language': lang})
    if not util.check_result(result):
        return False, result.get('message', 'UNKNOWN ERROR')
    rows = util.response_list(result, 'Data')
    return True, [emtype.ParkingPoiType(**row) for row in rows]
Obtain a list of families, types and categories of POI. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[ParkingPoiType]), or message string in case of error.
juraj-google-style
def parse_ensembl_line(line, header):
    """Parse one tab-separated line of an Ensembl biomart export.

    Column names are lowercased and matched by substring, so the parser
    tolerates varying biomart header phrasings.

    Args:
        line(list): A tab separated line with ensembl gene info.
        header(list): The column names for the line.

    Returns:
        dict: Parsed info under normalized key names.
    """
    values = line.rstrip().split('\t')
    columns = [column.lower() for column in header]
    row = dict(zip(columns, values))

    parsed = {}
    for column, value in row.items():
        # Empty cells carry no information.
        if not value:
            continue
        if 'chromosome' in column:
            parsed['chrom'] = value
        if 'gene' in column:
            if 'id' in column:
                parsed['ensembl_gene_id'] = value
            elif 'start' in column:
                parsed['gene_start'] = int(value)
            elif 'end' in column:
                parsed['gene_end'] = int(value)
        if 'hgnc symbol' in column:
            parsed['hgnc_symbol'] = value
        if 'gene name' in column:
            parsed['hgnc_symbol'] = value
        if 'hgnc id' in column:
            # HGNC ids arrive as e.g. "HGNC:1100"; keep the numeric part.
            parsed['hgnc_id'] = int(value.split(':')[-1])
        if 'transcript' in column:
            if 'id' in column:
                parsed['ensembl_transcript_id'] = value
            elif 'start' in column:
                parsed['transcript_start'] = int(value)
            elif 'end' in column:
                parsed['transcript_end'] = int(value)
        if 'exon' in column:
            if 'start' in column:
                parsed['exon_start'] = int(value)
            elif 'end' in column:
                parsed['exon_end'] = int(value)
            elif 'rank' in column:
                parsed['exon_rank'] = int(value)
        if 'utr' in column:
            if 'start' in column:
                if '5' in column:
                    parsed['utr_5_start'] = int(value)
                elif '3' in column:
                    parsed['utr_3_start'] = int(value)
            elif 'end' in column:
                if '5' in column:
                    parsed['utr_5_end'] = int(value)
                elif '3' in column:
                    parsed['utr_3_end'] = int(value)
        if 'strand' in column:
            parsed['strand'] = int(value)
        if 'refseq' in column:
            if 'mrna' in column:
                if 'predicted' in column:
                    parsed['refseq_mrna_predicted'] = value
                else:
                    parsed['refseq_mrna'] = value
            if 'ncrna' in column:
                parsed['refseq_ncrna'] = value
    return parsed
Parse an Ensembl-formatted line Args: line(list): A list with ensembl gene info header(list): A list with the header info Returns: ensembl_info(dict): A dictionary with the relevant info
juraj-google-style
def changed(self, path_info, checksum_info):
    """Return True if the data at ``path_info`` differs from the recorded state.

    A file is considered changed when any of the following holds:
      - it does not exist in the working directory (was unlinked),
      - no checksum was recorded for it (saving a new file),
      - the cache entry for the recorded checksum has changed,
      - the recorded checksum differs from the freshly computed one.

    Args:
        path_info: dict with path information.
        checksum_info: dict expected to hold the checksum under
            ``self.PARAM_CHECKSUM``.

    Returns:
        bool: True if the data has changed, False otherwise.
    """
    logger.debug("checking if '{}'('{}') has changed.".format(path_info, checksum_info))
    # Unlinked from the working directory -> definitely changed.
    if (not self.exists(path_info)):
        logger.debug("'{}' doesn't exist.".format(path_info))
        return True
    checksum = checksum_info.get(self.PARAM_CHECKSUM)
    # No recorded checksum means a new file is being saved.
    if (checksum is None):
        logger.debug("checksum for '{}' is missing.".format(path_info))
        return True
    # The cache entry backing this checksum is missing or stale.
    if self.changed_cache(checksum):
        logger.debug("cache for '{}'('{}') has changed.".format(path_info, checksum))
        return True
    # Compare the recorded checksum against a freshly computed one.
    actual = self.save_info(path_info)[self.PARAM_CHECKSUM]
    if (checksum != actual):
        logger.debug("checksum '{}'(actual '{}') for '{}' has changed.".format(checksum, actual, path_info))
        return True
    logger.debug("'{}' hasn't changed.".format(path_info))
    return False
Checks if data has changed. A file is considered changed if: - It doesn't exist on the working directory (was unlinked) - Checksum is not computed (saving a new file) - The checksum stored in the State is different from the given one - There's no file in the cache Args: path_info: dict with path information. checksum_info: dict holding the expected checksum for this data. Returns: bool: True if data has changed, False otherwise.
codesearchnet
def update_pipeline_field(self, pipeline_key, field):
    """Update a field on the specified pipeline.

    Args:
        pipeline_key: key for the pipeline where the field lives.
        field: StreakField object with fresh data.

    Returns:
        (status code, updated field dict) from the underlying request.
    """
    uri_parts = [
        self.api_uri,
        self.pipelines_suffix,
        pipeline_key,
        self.fields_suffix,
    ]
    uri = '/'.join(uri_parts)
    return self._update_field(uri, field)
Updates the pipeline field as specified Args: pipeline_key key for pipeline where the field lives field StreakField object with fresh data returns (status code, updated field dict)
juraj-google-style
def validate_addr(self, address, id=None, endpoint=None):
    """Ask the RPC node whether an address string is valid.

    Args:
        address: (str) address to lookup (in format
            'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK').
        id: (int, optional) id to use for response tracking.
        endpoint: (RPCEndpoint, optional) endpoint to specify to use.

    Returns:
        json object of the result or the error encountered in the RPC call.
    """
    # The RPC method takes the address as its only positional parameter.
    rpc_params = [address]
    return self._call_endpoint(VALIDATE_ADDR, params=rpc_params, id=id, endpoint=endpoint)
returns whether or not addr string is valid Args: address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
codesearchnet
def _process_name_or_alias_filter_directive(filter_operation_info, location, context, parameters):
    """Return a Filter block matching a value against an entity's "name" or "alias".

    Validates that the filtered type has a scalar "name" field and a list-typed
    "alias" field whose element type matches "name", then builds the predicate
    (name == arg) || (alias contains arg).

    Args:
        filter_operation_info: FilterOperationInfo object, containing the directive
            and field info of the field where the filter is to be applied.
        location: Location where this filter is used.
        context: dict, various per-compilation data (e.g. declared tags, whether
            the current block is optional, etc.). May be mutated in-place here!
        parameters: list of 1 element, containing the value to check the name or
            alias against; if the parameter is optional and missing, the check
            will return True.

    Returns:
        a Filter basic block that performs the check against the name or alias.

    Raises:
        GraphQLCompilationError: if the filtered type cannot support this directive.
    """
    filtered_field_type = filter_operation_info.field_type
    # Unions have no common field set, so "name"/"alias" cannot be resolved on them.
    if isinstance(filtered_field_type, GraphQLUnionType):
        raise GraphQLCompilationError(u'Cannot apply "name_or_alias" to union type {}'.format(filtered_field_type))
    current_type_fields = filtered_field_type.fields
    name_field = current_type_fields.get('name', None)
    alias_field = current_type_fields.get('alias', None)
    # Both fields must exist on the filtered type.
    if ((not name_field) or (not alias_field)):
        raise GraphQLCompilationError(u'Cannot apply "name_or_alias" to type {} because it lacks a "name" or "alias" field.'.format(filtered_field_type))
    # Strip non-null wrappers before inspecting the underlying types.
    name_field_type = strip_non_null_from_type(name_field.type)
    alias_field_type = strip_non_null_from_type(alias_field.type)
    if (not isinstance(name_field_type, GraphQLScalarType)):
        raise GraphQLCompilationError(u'Cannot apply "name_or_alias" to type {} because its "name" field is not a scalar.'.format(filtered_field_type))
    if (not isinstance(alias_field_type, GraphQLList)):
        raise GraphQLCompilationError(u'Cannot apply "name_or_alias" to type {} because its "alias" field is not a list.'.format(filtered_field_type))
    # The alias list's element type must be the same scalar as "name",
    # otherwise a single argument cannot be compared against both.
    alias_field_inner_type = strip_non_null_from_type(alias_field_type.of_type)
    if (alias_field_inner_type != name_field_type):
        raise GraphQLCompilationError(u'Cannot apply "name_or_alias" to type {} because the "name" field and the inner type of the "alias" field do not match: {} vs {}'.format(filtered_field_type, name_field_type, alias_field_inner_type))
    argument_inferred_type = name_field_type
    # non_existence_expression is non-None only for optional arguments; it
    # evaluates true when the argument was not supplied.
    (argument_expression, non_existence_expression) = _represent_argument(location, context, parameters[0], argument_inferred_type)
    check_against_name = expressions.BinaryComposition(u'=', expressions.LocalField('name'), argument_expression)
    check_against_alias = expressions.BinaryComposition(u'contains', expressions.LocalField('alias'), argument_expression)
    filter_predicate = expressions.BinaryComposition(u'||',
        check_against_name, check_against_alias)
    if (non_existence_expression is not None):
        # A missing optional argument makes the filter pass unconditionally.
        filter_predicate = expressions.BinaryComposition(u'||', non_existence_expression, filter_predicate)
    return blocks.Filter(filter_predicate)
Return a Filter basic block that checks for a match against an Entity's name or alias. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! parameters: list of 1 element, containing the value to check the name or alias against; if the parameter is optional and missing, the check will return True Returns: a Filter basic block that performs the check against the name or alias
codesearchnet
async def tag(self, name: str, repo: str, *, tag: str = None) -> bool:
    """Tag the given image so that it becomes part of a repository.

    Args:
        name: name (or id) of the image to tag.
        repo: the repository to tag in.
        tag: the name for the new tag.

    Returns:
        True on success (the API call raises on failure).
    """
    query_params = {"repo": repo}
    # Only send a tag when one was provided.
    if tag:
        query_params["tag"] = tag
    endpoint = "images/{name}/tag".format(name=name)
    await self.docker._query(
        endpoint,
        "POST",
        params=query_params,
        headers={"content-type": "application/json"},
    )
    return True
Tag the given image so that it becomes part of a repository. Args: repo: the repository to tag in tag: the name for the new tag
juraj-google-style
def get_meta_graph_def(saved_model_dir, tag_set):
    """Gets MetaGraphDef from SavedModel.

    Returns the MetaGraphDef for the given tag-set and SavedModel directory.

    Args:
        saved_model_dir: Directory containing the SavedModel to inspect.
        tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
            separated by ','. The empty string tag is ignored so that passing ''
            means the empty tag set. For a tag-set containing multiple tags, all
            tags must be passed in.

    Raises:
        RuntimeError: An error when the given tag-set does not exist in the
            SavedModel.

    Returns:
        A MetaGraphDef corresponding to the tag-set.
    """
    saved_model = read_saved_model(saved_model_dir)
    # Empty-string tags are dropped, so '' denotes the empty tag set.
    wanted_tags = {tag for tag in tag_set.split(',') if tag}
    valid_tags = []
    for meta_graph_def in saved_model.meta_graphs:
        graph_tags = set(meta_graph_def.meta_info_def.tags)
        if graph_tags == wanted_tags:
            return meta_graph_def
        # Remember every tag-set we saw for the error message below.
        valid_tags.append(','.join(graph_tags))
    raise RuntimeError(f'MetaGraphDef associated with tag-set {tag_set} could not be found in the SavedModel. Please use one of the following tag-sets: {valid_tags}')
Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. The empty string tag is ignored so that passing '' means the empty tag set. For tag-set contains multiple tags, all tags must be passed in. Raises: RuntimeError: An error when the given tag-set does not exist in the SavedModel. Returns: A MetaGraphDef corresponding to the tag-set.
github-repos
def check_models_are_tested(module: types.ModuleType, test_file: str) -> List[str]:
    """Check models defined in a module are all tested in a given file.

    Args:
        module (`types.ModuleType`): The module in which we get the models.
        test_file (`str`): The path to the file where the module is tested.

    Returns:
        `List[str]`: The list of error messages corresponding to models not
        tested, or None when the test file is whitelisted as having no common
        tests.
    """
    defined_models = get_models(module)
    tested_models = find_tested_models(test_file)
    if tested_models is None:
        if test_file.replace(os.path.sep, '/') in TEST_FILES_WITH_NO_COMMON_TESTS:
            # Whitelisted file: nothing to report (callers treat None as "skip").
            return
        # Fixed user-facing message: added the missing "is" and joined the
        # fragments with proper spacing.
        return [
            f'{test_file} should define `all_model_classes` to apply common tests to the models it tests. '
            'If this is intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file '
            '`utils/check_repo.py`.'
        ]
    failures = []
    for model_name, _ in defined_models:
        if model_name not in tested_models and should_be_tested(model_name):
            # Fixed user-facing message: restored the missing spaces between
            # concatenated fragments and the missing "be" in "should not be applied".
            failures.append(
                f'{model_name} is defined in {module.__name__} but is not tested in '
                f'{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file. '
                'If common tests should not be applied to that model, add its name to `IGNORE_NON_TESTED` '
                'in the file `utils/check_repo.py`.'
            )
    return failures
Check models defined in a module are all tested in a given file. Args: module (`types.ModuleType`): The module in which we get the models. test_file (`str`): The path to the file where the module is tested. Returns: `List[str]`: The list of error messages corresponding to models not tested.
github-repos
def predict(self, version_name, data):
    """Get prediction results from feature instances.

    Args:
        version_name: the name of the model version used for prediction.
        data: typically a list of instances to be submitted for prediction.
            The format of each instance depends on the model (e.g. a csv line
            for structured data models). Note that online prediction only
            works on models that take one placeholder value, such as a string
            encoding a csv line.

    Returns:
        A list of prediction results for the given instances, one dict per
        instance representing the output mapping from the graph. An example:
        [{"predictions": 1, "score": [0.00078, 0.71406, 0.28515]},
         {"predictions": 1, "score": [0.00244, 0.99634, 0.00121]}]

    Raises:
        Exception: if the service response contains no "predictions" key.
    """
    version_path = '%s/versions/%s' % (self._full_model_name, version_name)
    body = {'instances': data}
    request = self._api.projects().predict(body=body, name=version_path)
    request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
    response = request.execute()
    if 'predictions' not in response:
        raise Exception('Invalid response from service. Cannot find "predictions" in response.')
    return response['predictions']
Get prediction results from features instances. Args: version_name: the name of the version used for prediction. data: typically a list of instance to be submitted for prediction. The format of the instance depends on the model. For example, structured data model may require a csv line for each instance. Note that online prediction only works on models that take one placeholder value, such as a string encoding a csv line. Returns: A list of prediction results for given instances. Each element is a dictionary representing output mapping from the graph. An example: [{"predictions": 1, "score": [0.00078, 0.71406, 0.28515]}, {"predictions": 1, "score": [0.00244, 0.99634, 0.00121]}]
codesearchnet