code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def add_layout(self, obj, place='center'):
    """Add an object to the plot in a specified place.

    Args:
        obj (Renderer): the object to add to the Plot.
        place (str, optional): where to add the object (default: 'center').
            Valid places are: 'left', 'right', 'above', 'below', 'center'.

    Returns:
        None

    Raises:
        ValueError: if ``place`` is not one of the valid places.
    """
    valid_places = ['left', 'right', 'above', 'below', 'center']
    if place not in valid_places:
        msg = "Invalid place '%s' specified. Valid place values are: %s" % (
            place, nice_join(valid_places))
        raise ValueError(msg)
    getattr(self, place).append(obj)
def read(*components, **kwargs):
    """Read a file and return a list of lines.

    If ``comment_char`` is set, lines that begin with it (after optional
    whitespace) are dropped, and on remaining lines any content following
    the comment character is ignored.

    Args:
        *components: path components, joined via ``path()``.
        rstrip (bool): strip trailing whitespace from each line
            (default True).
        comment_char (str): comment marker character, or None to disable
            comment handling.

    Returns:
        list: the (possibly filtered and stripped) lines of the file.

    Raises:
        IOError: if reading the path fails.
    """
    rstrip = kwargs.get('rstrip', True)
    comment_char = kwargs.get('comment_char', None)
    # Context manager ensures the file is closed even if readlines raises;
    # the original leaked the handle on error.
    with open(path(*components)) as file:
        lines = file.readlines()
    if comment_char is not None:
        comment_line_re = re.compile(r'^\s*{char}'.format(char=comment_char))
        not_comment_re = re.compile('[^{char}]+'.format(char=comment_char))
        kept = [re.match(not_comment_re, line).group(0)
                for line in lines if not re.match(comment_line_re, line)]
        if rstrip:
            return [line.rstrip() for line in kept]
        return kept
    if rstrip:
        return [line.rstrip() for line in lines]
    return lines
def check_updates(transformers, datastore=None, stimuli=None):
    """Run transformers through a battery of stimuli and detect output changes.

    Results are stored in a CSV file for comparison against later runs.

    Args:
        transformers (list): tuples of (transformer name, parameter dict).
        datastore (str): filepath of the CSV results file. Stored in the
            home dir by default.
        stimuli (list): stimulus file paths to extract from. If None, the
            bundled test data is used.

    Returns:
        dict: 'transformers' maps to the (name, params) tuples whose output
        changed; 'mismatches' maps to the changed result-column labels.
    """
    datastore = datastore or expanduser('~/.pliers_updates')
    prior_data = pd.read_csv(datastore) if exists(datastore) else None
    stimuli = stimuli or glob.glob(
        join(dirname(realpath(__file__)), '../tests/data/image/CC0/*'))
    stimuli = load_stims(stimuli)
    loaded_transformers = {get_transformer(name, **params): (name, params)
                           for name, params in transformers}
    results = pd.DataFrame({'time_extracted': [datetime.datetime.now()]})
    for trans in loaded_transformers.keys():
        for stim in stimuli:
            if not trans._stim_matches_input_types(stim):
                continue
            res = trans.transform(stim)
            try:
                # Iterable result: hash each element's data.
                # Fixed: iterate over `r`, not the container `res`.
                res = [getattr(r, '_data', r.data) for r in res]
            except TypeError:
                res = getattr(res, '_data', res.data)
            res = hash_data(res)
            results["{}.{}".format(trans.__hash__(), stim.name)] = [res]
    mismatches = []
    if prior_data is not None:
        last = prior_data[
            prior_data.time_extracted == prior_data.time_extracted.max()]. \
            iloc[0].drop('time_extracted')
        # Series/DataFrame.iteritems() was removed in pandas 2.0; items()
        # is the drop-in replacement.
        for label, value in results.items():
            old = last.get(label)
            new = value.values[0]
            if old is not None:
                if isinstance(new, str):
                    if new != old:
                        mismatches.append(label)
                elif not np.isclose(old, new):
                    mismatches.append(label)
        # DataFrame.append() was removed in pandas 2.0; use concat().
        results = pd.concat([prior_data, results], ignore_index=True)
    results.to_csv(datastore, index=False)

    def get_trans(hash_tr):
        # Map a transformer hash (as string) back to its (name, params).
        for obj, attr in loaded_transformers.items():
            if str(obj.__hash__()) == hash_tr:
                return attr

    delta_t = set([m.split('.')[0] for m in mismatches])
    delta_t = [get_trans(dt) for dt in delta_t]
    return {'transformers': delta_t, 'mismatches': mismatches}
def user_entry(entry_int, num_inst, command):
    """Validate a user's numeric entry and return an index plus validity flag.

    Aborts the program when a falsy entry (0) is given; otherwise converts
    a valid 1-based entry into a 0-based index.

    Args:
        entry_int (int): number entered, or 999 if a non-int was entered.
        num_inst (int): the largest valid number that can be entered.
        command (str): program command to display in the prompt.

    Returns:
        tuple: (entry_idx (int), valid_entry (bool)).

    Raises:
        SystemExit: if the user enters 0, triggering the "abort" option.
    """
    if not entry_int:
        # Zero (or otherwise falsy) means the user chose to abort.
        print('{}aborting{} - {} instance\n'.format(C_ERR, C_NORM, command))
        sys.exit()
    if 1 <= entry_int <= num_inst:
        return entry_int - 1, True
    print('{}Invalid entry:{} enter a number between 1 and {}.'.format(C_ERR, C_NORM, num_inst))
    return entry_int, False
def setup(__pkg: str) -> jinja2.Environment:
    """Configure a new Jinja environment with our filters.

    Args:
        __pkg: Package name to use as base for template searches.

    Returns:
        Configured Jinja environment.
    """
    template_dirs = [path.join(base, 'templates')
                     for base in xdg_basedir.get_data_dirs(__pkg)]
    loader = jinja2.ChoiceLoader(
        [jinja2.FileSystemLoader(directory) for directory in template_dirs])
    env = jinja2.Environment(
        autoescape=jinja2.select_autoescape(['html', 'xml']),
        loader=loader)
    env.loader.loaders.append(jinja2.PackageLoader(__pkg, 'templates'))
    env.filters.update(FILTERS)
    return env
def find_existing_record(env, zone_id, dns_name, check_key=None, check_value=None):
    """Check if a specific DNS record exists.

    Args:
        env (str): Deployment environment (boto3 profile name).
        zone_id (str): Route53 zone id.
        dns_name (str): FQDN of application's dns entry to add/update.
        check_key (str): Optional key to look for in record. Example: "Type"
        check_value (str): Value to look for with check_key. Example: "CNAME"

    Returns:
        dict: Found record. Returns None if no record found.
    """
    client = boto3.Session(profile_name=env).client('route53')
    pager = client.get_paginator('list_resource_record_sets')
    for rset in pager.paginate(HostedZoneId=zone_id):
        for record in rset['ResourceRecordSets']:
            if record['Name'].rstrip('.') != dns_name:
                continue
            # Only apply the key/value filter when a key was supplied;
            # previously a missing check_key meant nothing could ever match.
            if check_key and record.get(check_key) != check_value:
                continue
            LOG.info("Found existing record: %s", record)
            # Return immediately: the original `break` only exited the
            # inner loop and kept paginating needlessly.
            return record
    return None
def _check_mr_state(cls, state, mr_id):
if state is None:
logging.warning(
"Mapreduce State for job %s is missing. Dropping Task.",
mr_id)
return False
if not state.active:
logging.warning(
"Mapreduce %s is not active. Looks like spurious task "
"execution. Dropping Task.", mr_id)
return False
return True | Check MapreduceState.
Args:
state: an MapreduceState instance.
mr_id: mapreduce id.
Returns:
True if state is valid. False if not and this task should be dropped. | juraj-google-style |
def load_plugins(self):
    """Refresh the list of available collectors and auditors.

    Returns:
        `None`
    """
    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.collectors']['plugins']:
        cls = entry_point.load()
        if not cls.enabled():
            self.log.debug('Collector disabled: {} in module {}'.format(cls.__name__, cls.__module__))
            continue
        self.log.debug('Collector loaded: {} in module {}'.format(cls.__name__, cls.__module__))
        worker_args = {'name': entry_point.name,
                       'module_name': entry_point.module_name,
                       'attrs': entry_point.attrs}
        self.collectors.setdefault(cls.type, []).append(
            Worker(cls.name, cls.interval, worker_args))
    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auditors']['plugins']:
        cls = entry_point.load()
        if not cls.enabled():
            self.log.debug('Auditor disabled: {} in module {}'.format(cls.__name__, cls.__module__))
            continue
        self.log.debug('Auditor loaded: {} in module {}'.format(cls.__name__, cls.__module__))
        worker_args = {'name': entry_point.name,
                       'module_name': entry_point.module_name,
                       'attrs': entry_point.attrs}
        self.auditors.append(Worker(cls.name, cls.interval, worker_args))
    collector_count = sum(len(workers) for workers in self.collectors.values())
    auditor_count = len(self.auditors)
    if collector_count + auditor_count == 0:
        raise Exception('No auditors or collectors loaded, aborting scheduler')
    self.log.info('Scheduler loaded {} collectors and {} auditors'.format(collector_count, auditor_count))
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
    """Multiply this variable by `tf.IndexedSlices`.

    Args:
        sparse_delta: `tf.IndexedSlices` to multiply this variable by.
        use_locking: If `True`, use locking during the operation.
        name: the name of the operation.

    Returns:
        A `Tensor` that will hold the new value of this variable after
        the scattered multiplication has completed.

    Raises:
        TypeError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if isinstance(sparse_delta, indexed_slices.IndexedSlices):
        return gen_state_ops.scatter_mul(
            self._variable,
            sparse_delta.indices,
            sparse_delta.values,
            use_locking=use_locking,
            name=name)
    raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)
def _get_localized_fn(path, root_dir):
local_fn = path
if path.startswith(root_dir):
local_fn = path.replace(root_dir, "", 1)
if not local_fn.startswith("/"):
return "/" + local_fn
return local_fn | Return absolute `path` relative to `root_dir`.
When `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``,
returned path will be ``/xex/somefile.txt``.
Args:
path (str): Absolute path beginning in `root_dir`.
root_dir (str): Absolute path containing `path` argument.
Returns:
str: Local `path` when `root_dir` is considered as root of FS. | juraj-google-style |
def AddArguments(cls, argument_group):
    """Adds command line arguments the helper supports to an argument group.

    This function takes an argument parser or an argument group object and
    adds to it all the command line arguments this helper supports.

    Args:
        argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
            argparse group.
    """
    argument_group.add_argument(
        '--fields', dest='fields', type=str, action='store',
        default=','.join(cls._DEFAULT_FIELDS),
        help='Defines which fields should be included in the output.')
    additional_help = (
        'Defines extra fields to be included in the output, in addition to'
        ' the default fields, which are {0:s}.'.format(
            ', '.join(cls._DEFAULT_FIELDS)))
    argument_group.add_argument(
        '--additional_fields', dest='additional_fields', type=str,
        action='store', default='', help=additional_help)
def ttr(self, kloc, acc=(10 ** 3), verbose=1):
    """Three terms relation's coefficient generator.

    Args:
        kloc (numpy.ndarray, int): the order of the coefficients.
        acc (int): accuracy of discretized Stieltjes if analytical
            methods are unavailable.
        verbose (int): verbosity level.

    Returns:
        (Recurrence coefficients): out[0] is the first (A) and out[1]
        the second coefficient, with ``out.shape == (2,) + k.shape``.
    """
    orders = numpy.asarray(kloc, dtype=int)
    shape = orders.shape
    orders = orders.reshape(len(self), -1)
    coeffs = numpy.array([
        evaluation.evaluate_recurrence_coefficients(self, order)
        for order in orders.T
    ]).T
    return coeffs.reshape((2,) + shape)
def read(self, offset, length):
    """Read a string of bytes from the specified `offset` in bytes,
    relative to the base physical address of the MMIO region.

    Args:
        offset (int): offset from base physical address, in bytes.
        length (int): number of bytes to read.

    Returns:
        bytes: bytes read.

    Raises:
        TypeError: if `offset` type is invalid.
        ValueError: if `offset` is out of bounds.
    """
    # `long` only exists on Python 2 and raised NameError on Python 3;
    # plain int covers arbitrary precision on Python 3.
    if not isinstance(offset, int):
        raise TypeError('Invalid offset type, should be integer.')
    offset = self._adjust_offset(offset)
    self._validate_offset(offset, length)
    return bytes(self.mapping[offset:offset + length])
def __init__(self, port, log=False):
    """Initialize the controller.

    Args:
        port (str): serial port's path, or name on Windows.
        log (bool): whether to enable logging.
    """
    super(OpenThreadController, self).__init__()
    self.port = port
    self._log = log
    self.handle = None
    self._is_net = False
    self.lines = []
    self._init()
def collect_doc(module, base_class=None, prefix="", flag_exclude_prefix=False):
    """Collect class names, signatures and docstrings from a module.

    Only attributes listed in ``module.__all__`` are considered, optionally
    filtered by a name prefix and/or a required base class.

    Arguments:
        module -- Python module.
        prefix -- argument for str.startswith(); if empty, does not filter.
        base_class -- filters only descendants of this class.
        flag_exclude_prefix -- whether or not to exclude prefix from the
            class name in the result.

    Returns: [(classname0, signature0, docstring0), ...]
    """
    collected = []
    for attr_name in module.__all__:
        if prefix and not attr_name.startswith(prefix):
            continue
        attr = module.__getattribute__(attr_name)
        if base_class is not None and not issubclass(attr, base_class):
            continue
        reported = attr_name[len(prefix):] if flag_exclude_prefix else attr_name
        collected.append((reported, inspect.signature(attr), attr.__doc__))
    return collected
def __init__(self, descriptor_db=None):
    """Initializes a Pool of proto buffs.

    The descriptor_db argument allows specialized file descriptor proto
    lookup code to be triggered on demand — for example, an implementation
    that reads and compiles a file the first time FindFileByName() asks for
    it, without requiring a prior call to Add(). Results from that database
    are cached internally here as well.

    Args:
        descriptor_db: A secondary source of file descriptors.
    """
    self._internal_db = descriptor_database.DescriptorDatabase()
    self._descriptor_db = descriptor_db
    self._file_descriptors = {}
    self._enum_descriptors = {}
    self._descriptors = {}
def repr(self, changed_widgets=None):
    """Represent the widget as an HTML string.

    Packs the tag type, attributes, children and so on into HTML.

    Args:
        changed_widgets (dict): collection of tags that have to be
            updated; the tag is the key and its textual repr the value.
    """
    if changed_widgets is None:
        changed_widgets = {}
    local_changed_widgets = {}
    self._set_updated()
    inner = self.innerHTML(local_changed_widgets)
    return '<%s>\n%s\n</%s>' % (self.type, inner, self.type)
def find_local_maxima(self, input_grid):
    """Find local maxima in input_grid and grow regions to label objects.

    Args:
        input_grid: Raw input data.

    Returns:
        array with labeled objects.
    """
    # Quantize the grid into discrete bins; pixels maps bin -> pixel coords.
    pixels, q_data = self.quantize(input_grid)
    centers = OrderedDict()
    for p in pixels.keys():
        centers[p] = []
    marked = np.ones(q_data.shape, dtype=int) * self.UNMARKED
    # Influence radius scales with bin value between MIN_INFL and MAX_INFL.
    MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(self.max_size)))
    MAX_INFL = 2 * MIN_INFL
    marked_so_far = []
    # Pass 1: scan bins from highest to lowest; keep candidate centers
    # whose whole neighborhood of radius infl_dist is still unmarked.
    for b in sorted(pixels.keys(),reverse=True):
        infl_dist = MIN_INFL + int(np.round(float(b) / self.max_bin * (MAX_INFL - MIN_INFL)))
        for p in pixels[b]:
            if marked[p] == self.UNMARKED:
                ok = False
                del marked_so_far[:]
                for (i, j), v in np.ndenumerate(marked[p[0] - infl_dist:p[0] + infl_dist + 1,
                                                p[1] - infl_dist:p[1]+ infl_dist + 1]):
                    if v == self.UNMARKED:
                        ok = True
                        marked[i - infl_dist + p[0],j - infl_dist + p[1]] = b
                        marked_so_far.append((i - infl_dist + p[0],j - infl_dist + p[1]))
                    else:
                        # Neighborhood already claimed: reject this candidate.
                        ok = False
                        break
                if ok:
                    centers[b].append(p)
                else:
                    # Roll back the partial markings of a rejected candidate.
                    for m in marked_so_far:
                        marked[m] = self.UNMARKED
    # Reset markings before the region-growing pass.
    marked[:, :] = self.UNMARKED
    deferred_from_last = []
    deferred_to_next = []
    # Pass 2: grow regions around each center, widening the accepted bin
    # range by delta each sweep; centers not yet capturable are deferred.
    for delta in range(0, self.delta + 1):
        for b in sorted(centers.keys(), reverse=True):
            bin_lower = b - delta
            deferred_from_last[:] = deferred_to_next[:]
            del deferred_to_next[:]
            foothills = []
            n_centers = len(centers[b])
            tot_centers = n_centers + len(deferred_from_last)
            for i in range(tot_centers):
                if i < n_centers:
                    center = centers[b][i]
                else:
                    center = deferred_from_last[i - n_centers]
                if bin_lower < 0:
                    bin_lower = 0
                if marked[center] == self.UNMARKED:
                    captured = self.set_maximum(q_data, marked, center, bin_lower, foothills)
                    if not captured:
                        # Could not grow a region yet; retry on a later sweep.
                        deferred_to_next.append(center)
                else:
                    pass
            self.remove_foothills(q_data, marked, b, bin_lower, centers, foothills)
        del deferred_from_last[:]
        del deferred_to_next[:]
    return marked | Finds the local maxima in the inputGrid and perform region growing to identify objects.
Args:
input_grid: Raw input data.
Returns:
array with labeled objects. | juraj-google-style |
def write_int8(self, value, little_endian=True):
    """Pack the value as a signed byte and write 1 byte to the stream.

    Args:
        value: the signed byte value to write.
        little_endian (bool): specify the endianness. (Default) Little
            endian.

    Returns:
        int: the number of bytes written.
    """
    endian = '<' if little_endian else '>'
    return self.pack('%sb' % endian, value)
def defer_remainder(self, deferred_time=None):
    """Self-checkpoint the current restriction with an expected resume time.

    May be called per element while processing: stops processing of the
    current element and schedules the unprocessed remainder for later.

    Args:
        deferred_time: a relative ``Duration`` or an absolute ``Timestamp``
            for resuming execution, or None to resume as soon as possible.

    Raises:
        ValueError: if deferred_time is of an unsupported type.
    """
    with self._lock:
        self._timestamp = Timestamp.now()
        if deferred_time and (not isinstance(deferred_time, (Duration, Timestamp))):
            raise ValueError('The timestamp of deter_remainder() should be a Duration or a Timestamp, or None.')
        self._deferred_timestamp = deferred_time
        # Split at fraction 0: the entire unprocessed part becomes the
        # residual restriction to be resumed later.
        checkpoint = self.try_split(0)
        if checkpoint:
            _, self._deferred_residual = checkpoint | Performs self-checkpoint on current processing restriction with an
expected resuming time.
Self-checkpoint could happen during processing elements. When executing an
DoFn.process(), you may want to stop processing an element and resuming
later if current element has been processed quit a long time or you also
want to have some outputs from other elements. ``defer_remainder()`` can be
called on per element if needed.
Args:
deferred_time: A relative ``Duration`` that indicates the ideal time gap
between now and resuming, or an absolute ``Timestamp`` for resuming
execution time. If the time_delay is None, the deferred work will be
executed as soon as possible. | github-repos |
def add_gene_links(gene_obj, build=37):
    """Update a gene object with external resource links (mutated in place).

    Args:
        gene_obj (dict): gene object to decorate with ``*_link`` keys.
        build (int): genome build, 37 or 38; falls back to 37 on bad input.
    """
    try:
        build = int(build)
    except ValueError:
        # Unparseable build: default to GRCh37.
        build = 37
    hgnc_id = gene_obj['hgnc_id']
    gene_obj['hgnc_link'] = genenames(hgnc_id)
    # NOTE(review): this omim_link is overwritten below with the
    # omim_id-based link -- confirm which of the two is intended.
    gene_obj['omim_link'] = omim(hgnc_id)
    # The ensembl id may live at top level or nested under 'common'.
    if not 'ensembl_id' in gene_obj:
        ensembl_id = gene_obj.get('common',{}).get('ensembl_id')
    else:
        ensembl_id = gene_obj['ensembl_id']
    ensembl_37_link = ensembl(ensembl_id, build=37)
    ensembl_38_link = ensembl(ensembl_id, build=38)
    gene_obj['ensembl_37_link'] = ensembl_37_link
    gene_obj['ensembl_38_link'] = ensembl_38_link
    # Default ensembl link follows the requested build (37 unless 38).
    gene_obj['ensembl_link'] = ensembl_37_link
    if build == 38:
        gene_obj['ensembl_link'] = ensembl_38_link
    gene_obj['hpa_link'] = hpa(ensembl_id)
    gene_obj['string_link'] = string(ensembl_id)
    gene_obj['reactome_link'] = reactome(ensembl_id)
    gene_obj['clingen_link'] = clingen(hgnc_id)
    gene_obj['expression_atlas_link'] = expression_atlas(ensembl_id)
    gene_obj['exac_link'] = exac(ensembl_id)
    gene_obj['entrez_link'] = entrez(gene_obj.get('entrez_id'))
    gene_obj['omim_link'] = omim(gene_obj.get('omim_id'))
    gene_obj['ppaint_link'] = ppaint(gene_obj['hgnc_symbol'])
    gene_obj['vega_link'] = vega(gene_obj.get('vega_id'))
    gene_obj['ucsc_link'] = ucsc(gene_obj.get('ucsc_id')) | Update a gene object with links
Args:
gene_obj(dict)
build(int)
Returns:
gene_obj(dict): gene_obj updated with many links | juraj-google-style |
def __init__(self,
             validate_args=False,
             name="exp"):
    """Instantiates the `Exp` bijector.

    Args:
        validate_args: Python `bool` indicating whether arguments should
            be checked for correctness.
        name: Python `str` name given to ops managed by this object.
    """
    super(Exp, self).__init__(validate_args=validate_args, name=name)
def contains_vasp_input(dir_name):
    """Checks if a directory contains valid VASP input.

    Args:
        dir_name: Directory name to check.

    Returns:
        True if the directory contains all four VASP input files (INCAR,
        POSCAR, KPOINTS and POTCAR), either as-is or as ``*.orig`` copies.
    """
    required = ('INCAR', 'POSCAR', 'POTCAR', 'KPOINTS')
    return all(
        os.path.exists(os.path.join(dir_name, name))
        or os.path.exists(os.path.join(dir_name, name + '.orig'))
        for name in required)
def coerce_to_pendulum_date(x: PotentialDatetimeType,
                            assume_local: bool = False) -> Optional[Date]:
    """Converts something to a :class:`pendulum.Date`.

    Args:
        x: something that may be coercible to a date.
        assume_local: if ``True``, assume local timezone; if ``False``,
            assume UTC.

    Returns:
        a :class:`pendulum.Date`, or ``None``.

    Raises:
        pendulum.parsing.exceptions.ParserError: if a string fails to parse.
        ValueError: if no conversion is possible.
    """
    converted = coerce_to_pendulum(x, assume_local=assume_local)
    if converted is None:
        return None
    return converted.date()
def validate(source, scheme=None, format=None):
    """Check if tabulator is able to load the source.

    Args:
        source (Union[str, IO]): The source path or IO object.
        scheme (str, optional): The source scheme. Auto-detect by default.
        format (str, optional): The source file format. Auto-detect by
            default.

    Returns:
        bool: Whether tabulator is able to load the source file.

    Raises:
        `tabulator.exceptions.SchemeError`: The file scheme is not
            supported.
        `tabulator.exceptions.FormatError`: The file format is not
            supported.
    """
    detected_scheme, detected_format = helpers.detect_scheme_and_format(source)
    scheme = scheme or detected_scheme
    format = format or detected_format
    if scheme is not None and scheme not in config.LOADERS:
        raise exceptions.SchemeError('Scheme "%s" is not supported' % scheme)
    if format not in config.PARSERS:
        raise exceptions.FormatError('Format "%s" is not supported' % format)
    return True
def course_blocks(self, course_id, username):
    """Fetches course blocks.

    Args:
        course_id (str): An edx course id.
        username (str): username of the user to query for (can reveal
            hidden modules).

    Returns:
        Structure
    """
    params = {
        "depth": "all",
        "username": username,
        "course_id": course_id,
        "requested_fields": "children,display_name,id,type,visible_to_staff_only",
    }
    resp = self.requester.get(
        urljoin(self.base_url, '/api/courses/v1/blocks/'), params=params)
    resp.raise_for_status()
    return Structure(resp.json())
def is_number_match(num1, num2):
    """Takes two phone numbers and compares them for equality.

    For example, +1 345 657 1234 and 657 1234 are a SHORT_NSN_MATCH;
    +1 345 657 1234 and 345 657 are a NO_MATCH.

    Arguments:
        num1 -- First number object or string to compare. Can contain
            formatting, and a country calling code prefixed with +.
        num2 -- Second number object or string to compare. Can contain
            formatting, and a country calling code prefixed with +.

    Returns:
        - EXACT_MATCH if country_code, NSN, leading-zero presence (for
          Italian numbers) and any extension are the same.
        - NSN_MATCH if either/both has no region and NSNs and extensions
          match.
        - SHORT_NSN_MATCH if either/both has no region (or the same
          region) and one NSN could be a shorter version of the other,
          including differing extensions.
        - NO_MATCH otherwise.
    """
    num1_is_obj = isinstance(num1, PhoneNumber)
    num2_is_obj = isinstance(num2, PhoneNumber)
    if num1_is_obj and num2_is_obj:
        return _is_number_match_OO(num1, num2)
    if num1_is_obj:
        return _is_number_match_OS(num1, num2)
    if num2_is_obj:
        return _is_number_match_OS(num2, num1)
    return _is_number_match_SS(num1, num2)
def debug_string(self, with_typing: bool = False, indent: int = 0) -> str:
    """Builds a string describing the expression tree from this node down.

    Args:
        with_typing: If true, includes the type each node evaluates to.
        indent: The initial number of spaces to use as indentation for the
            debug string.

    Returns:
        A string which recursively describes this node and its operands.
    """
    operand_name = f'{self} '
    operand_prints = ''.join(
        '\n' + op.debug_string(with_typing, indent + 1) for op in self.operands)
    type_print = f' type={self.return_type}' if with_typing else ''
    # Hoisted out of the f-string: nested same-quote f-strings require
    # Python 3.12+, this form works on 3.6+.
    prefix = '| ' * indent
    return (f'{prefix}+ {operand_name}'
            f'<{self.__class__.__name__}{type_print}> ({operand_prints})')
def _get_bucketing_id(self, user_id, attributes):
    """Helper method to determine bucketing ID for the user.

    Args:
        user_id: ID for user.
        attributes: Dict representing user attributes. May consist of
            bucketing ID to be used.

    Returns:
        String representing bucketing ID if it is a String type in
        attributes, else return user ID.
    """
    attributes = attributes or {}
    bucketing_id = attributes.get(enums.ControlAttributes.BUCKETING_ID)
    if bucketing_id is None:
        return user_id
    if isinstance(bucketing_id, string_types):
        return bucketing_id
    self.logger.warning('Bucketing ID attribute is not a string. Defaulted to user_id.')
    return user_id
def construct_concept_to_indicator_mapping(n: int = 1) -> Dict[str, List[str]]:
    """Create a dictionary mapping high-level concepts to low-level indicators.

    Args:
        n: Number of indicators to return per concept.

    Returns:
        Dictionary that maps concept names to lists of indicator names.
    """
    mapping_df = pd.read_sql_table("concept_to_indicator_mapping", con=engine)
    mapping = {}
    for concept, group in mapping_df.groupby("Concept"):
        indicators = take(n, group["Indicator"].values)
        mapping[concept] = [get_variable_and_source(ind) for ind in indicators]
    return mapping
def add_identifier(self, name, obj):
    """Add a known identifier resolution.

    Args:
        name (str): The name of the identifier.
        obj (object): The object that it should resolve to.
    """
    self._known_identifiers[str(name)] = obj
def FromString(cls, desc):
    """Create a new stimulus from a description string.

    The string must have the format::

        [time: ][system ]input X = Y

    where X and Y are integers. The time, if given, must be a
    time_interval: an integer followed by a unit such as second(s),
    minute(s), etc.

    Args:
        desc (str): A string description of the stimulus.

    Returns:
        SimulationStimulus: The parsed stimulus object.

    Raises:
        ArgumentError: if the descriptor cannot be parsed.
    """
    # Lazily initialize the shared grammar on first use.
    if (language.stream is None):
        language.get_language()
    parse_exp = (((Optional((time_interval('time') - Literal(':').suppress())) - language.stream('stream')) - Literal('=').suppress()) - number('value'))
    try:
        data = parse_exp.parseString(desc)
        # Time defaults to 0 when no interval prefix was given.
        time = 0
        if ('time' in data):
            time = data['time'][0]
        return SimulationStimulus(time, data['stream'][0], data['value'])
    except (ParseException, ParseSyntaxException):
        raise ArgumentError('Could not parse stimulus descriptor', descriptor=desc) | Create a new stimulus from a description string.
The string must have the format:
[time: ][system ]input X = Y
where X and Y are integers. The time, if given must
be a time_interval, which is an integer followed by a
time unit such as second(s), minute(s), etc.
Args:
desc (str): A string description of the stimulus.
Returns:
SimulationStimulus: The parsed stimulus object. | codesearchnet |
def download(timestamp, dataset, path=None, products=None,
             levels=None, offset=0):
    """Save a GFS grib file under the data path.

    Args:
        timestamp (datetime): model run time; its hour selects the run.
        dataset (function): naming convention function, e.g. pgrb2.
        path (str): destination root; if None defaults to DATA_PATH.
        products (list): TMP, etc.; if None downloads all.
        levels (list): surface, etc.; if None downloads all.
        offset (int): forecast offset, should be a multiple of 3.
    """
    if path is None:
        path = DATA_PATH
    closest = timestamp.hour
    filename = dataset(closest, offset)
    gfs_timestamp = '%s%02d' % (timestamp.strftime('%Y%m%d'), closest)
    url = baseurl(gfs_timestamp, filename)
    index = url + '.idx'
    messages = message_index(index)
    # Narrow the byte-range segments to the requested products/levels.
    segments = _filter_messages(messages, products, levels)
    dl_path = path + '/%s/' % gfs_timestamp
    _verify_path(dl_path)
    # NOTE(review): the file is written to path + filename, not dl_path --
    # confirm whether the per-timestamp directory is intentionally unused.
    _download_segments(path + filename, url, segments) | save GFS grib file to DATA_PATH.
Args:
dataset(function): naming convention function. eg. pgrb2
timestamp(datetime): ???
path(str): if None defaults to DATA_PATH
products(list): TMP, etc. if None downloads all.
layers(list): surface, etc. if None downloads all.
offset(int): should be multiple of 3 | juraj-google-style |
def update_fetch_positions(self, partitions):
    """Update the fetch positions for the provided partitions.

    Arguments:
        partitions (list of TopicPartitions): partitions to update.

    Raises:
        NoOffsetForPartitionError: if no offset is stored for a given
            partition and no reset policy is available.
    """
    for tp in partitions:
        # Skip partitions we no longer own, or that already have a valid
        # fetch position.
        if (not self._subscriptions.is_assigned(tp)):
            log.warning('partition %s is not assigned - skipping offset update', tp)
            continue
        elif self._subscriptions.is_fetchable(tp):
            log.warning('partition %s is still fetchable -- skipping offset update', tp)
            continue
        if self._subscriptions.is_offset_reset_needed(tp):
            self._reset_offset(tp)
        elif (self._subscriptions.assignment[tp].committed is None):
            # No committed offset: fall back to the configured reset policy.
            self._subscriptions.need_offset_reset(tp)
            self._reset_offset(tp)
        else:
            committed = self._subscriptions.assignment[tp].committed
            log.debug('Resetting offset for partition %s to the committed offset %s', tp, committed)
            self._subscriptions.seek(tp, committed) | Update the fetch positions for the provided partitions.
Arguments:
partitions (list of TopicPartitions): partitions to update
Raises:
NoOffsetForPartitionError: if no offset is stored for a given
partition and no reset policy is available | codesearchnet |
def get_periodic_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
    """Get the RBF basis SOAP output for positions in a periodic system.

    Builds a supercell large enough for the radial cutoff and delegates
    to ``get_soap_locals``.

    Args:
        obj(ase.Atoms): Atomic structure for which the SOAP output is
            calculated.
        Hpos: Positions at which to evaluate the output.
        alp: Alphas.
        bet: Betas.
        rCut: Radial cutoff.
        nMax: Maximum number of radial basis functions.
        Lmax: Maximum spherical harmonics degree.
        crossOver: Whether to include cross-species terms.
        all_atomtypes: If given, the output is calculated only for these
            atomic elements.
        eta: The gaussian smearing width.

    Returns:
        np.ndarray: SOAP output for the given position.
    """
    supercell = _get_supercell(obj, rCut)
    return get_soap_locals(supercell, Hpos, alp, bet, rCut, nMax=nMax,
                           Lmax=Lmax, crossOver=crossOver,
                           all_atomtypes=all_atomtypes, eta=eta)
def get_environment_details(zone, environment):
    """Issues a request to Composer to get the environment details.

    Args:
        zone: GCP zone of the composer environment.
        environment: name of the Composer environment.

    Returns:
        A parsed result object.

    Raises:
        Exception if there is an error performing the operation.
    """
    context = google.datalab.Context.default()
    env_path = Api._ENVIRONMENTS_PATH_FORMAT % (
        context.project_id, zone, environment)
    url = Api._ENDPOINT + env_path
    return google.datalab.utils.Http.request(url, credentials=context.credentials)
def _FormatSource(self, event):
_, source = self._output_mediator.GetFormattedSources(event)
if source is None:
data_type = getattr(event, 'data_type', 'UNKNOWN')
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
return source | Formats the source.
Args:
event (EventObject): event.
Returns:
str: source field.
Raises:
NoFormatterFound: if no event formatter can be found to match the data
type in the event. | juraj-google-style |
def setup(self, reason, grr_server_url, grr_username, grr_password,
          approvers=None, verify=True):
    """Initializes a GRR hunt result collector.

    Args:
        reason: justification for GRR access.
        grr_server_url: GRR server URL.
        grr_username: GRR username.
        grr_password: GRR password.
        approvers: comma-separated list of GRR approval recipients.
        verify: boolean, whether to verify the GRR server's x509
            certificate.
    """
    self.approvers = []
    if approvers:
        self.approvers = [name.strip() for name in approvers.strip().split(',')]
    self.grr_api = grr_api.InitHttp(api_endpoint=grr_server_url,
                                    auth=(grr_username, grr_password),
                                    verify=verify)
    self.output_path = tempfile.mkdtemp()
    self.reason = reason
def __init__(self, enterprise_configuration):
    """Instantiate a new client.

    Args:
        enterprise_configuration (SAPSuccessFactorsEnterpriseCustomerConfiguration):
            An enterprise customer's configuration model for connecting
            with SAP SuccessFactors.
    """
    super(SAPSuccessFactorsAPIClient, self).__init__(enterprise_configuration)
    global_config_model = apps.get_model(
        'sap_success_factors', 'SAPSuccessFactorsGlobalConfiguration')
    self.global_sap_config = global_config_model.current()
    self._create_session()
def transpose(self, name=None):
    """Returns matching `Conv2D` module.

    Args:
        name: Optional string assigning name of transpose module. The
            default name is constructed by appending "_transpose" to
            `self.name`.

    Returns:
        `Conv2D` module.
    """
    if name is None:
        name = self.module_name + '_transpose'
    # Drop the batch and channel entries from the stored 4D stride.
    if self._data_format == DATA_FORMAT_NHWC:
        stride = self._stride[1:-1]
    else:
        stride = self._stride[2:]
    return Conv2D(output_channels=lambda: self.input_channels,
                  kernel_shape=self._kernel_shape,
                  stride=stride,
                  padding=self._padding,
                  use_bias=self._use_bias,
                  initializers=self._initializers,
                  partitioners=self._partitioners,
                  regularizers=self._regularizers,
                  data_format=self._data_format,
                  custom_getter=self._custom_getter,
                  name=name)
def compare_version(a, b):
    """Compare two version number strings of the form W.X.Y.Z.

    The numbers are compared most-significant to least-significant.
    For example, 12.345.67.89 > 2.987.88.99.

    Args:
        a: First version number string to compare.
        b: Second version number string to compare.

    Returns:
        0 if the numbers are identical, a positive number if 'a' is
        larger, and a negative number if 'b' is larger.
    """
    # `string.split` and `cmp` are Python-2-only; use str.split and an
    # explicit sign computation instead.
    aa = a.split('.')
    bb = b.split('.')
    for i in range(4):
        left, right = int(aa[i]), int(bb[i])
        if left != right:
            return (left > right) - (left < right)
    return 0
def parse_coach_ec_df(infile):
    """Parse the EC.dat output file of COACH and return a DataFrame.

    EC.dat contains the predicted EC number and active residues. The input
    columns are: PDB template, TM-score, RMSD, sequence identity, coverage,
    confidence score, EC number, and active-site residues.

    Args:
        infile (str): Path to EC.dat.

    Returns:
        DataFrame: Pandas DataFrame summarizing EC number predictions.
    """
    columns = ['pdb_template', 'tm_score', 'rmsd', 'seq_ident',
               'seq_coverage', 'c_score', 'ec_number', 'binding_residues']
    # sep=r'\s+' replaces delim_whitespace=True, which is deprecated in
    # pandas 2.x and removed in pandas 3.0; behavior is identical.
    ec_df = pd.read_csv(infile, sep=r'\s+', names=columns)
    # The first four characters of the template are the PDB ID; the fifth
    # is the chain identifier.
    ec_df['pdb_template_id'] = ec_df['pdb_template'].apply(lambda x: x[:4])
    ec_df['pdb_template_chain'] = ec_df['pdb_template'].apply(lambda x: x[4])
    ec_df = ec_df[['pdb_template_id', 'pdb_template_chain', 'tm_score',
                   'rmsd', 'seq_ident', 'seq_coverage', 'c_score',
                   'ec_number', 'binding_residues']]
    # Non-numeric confidence scores become NaN instead of raising.
    ec_df['c_score'] = pd.to_numeric(ec_df.c_score, errors='coerce')
    return ec_df
EC.dat contains the predicted EC number and active residues.
The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
Coverage, Confidence score, EC number, and Active site residues
Args:
infile (str): Path to EC.dat
Returns:
DataFrame: Pandas DataFrame summarizing EC number predictions | codesearchnet |
def __init__(self, name, num_qubits, params, label=None):
    """Create a new gate.

    Args:
        name (str): the Qobj name of the gate.
        num_qubits (int): the number of qubits the gate acts on.
        params (list): a list of parameters.
        label (str or None): an optional label for the gate.
    """
    self._label = label
    # Third positional argument (0) is presumably the classical-bit
    # count of the instruction -- TODO confirm against the base class.
    super().__init__(name, num_qubits, 0, params)
Args:
name (str): the Qobj name of the gate
num_qubits (int): the number of qubits the gate acts on.
params (list): a list of parameters.
label (str or None): An optional label for the gate [Default: None] | juraj-google-style |
def HashFile(self, fd, byte_count):
    """Feed up to `byte_count` bytes from `fd` into the underlying hashers.

    Reading stops early if the file is exhausted before `byte_count`
    bytes have been consumed.

    Args:
        fd: A file object whose contents are fed to the hashers.
        byte_count: Maximum number of bytes to process.
    """
    remaining = byte_count
    while remaining > 0:
        chunk_size = min(remaining, constants.CLIENT_MAX_BUFFER_SIZE)
        chunk = fd.read(chunk_size)
        if not chunk:
            break
        self.HashBuffer(chunk)
        # Decrement by the requested size (matches the original
        # accounting even on a short read).
        remaining -= chunk_size
Args:
fd: A file object that is going to be fed to the hashers.
byte_count: A maximum number of bytes that are going to be processed. | codesearchnet |
def get_client_kwargs(self, path):
    """Build base client keyword arguments for a specific path.

    Args:
        path (str): Absolute path or URL; an optional query string
            (after '?') is ignored.

    Returns:
        dict: client args (share_name, plus directory_name/file_name
        when a relative path is present).
    """
    path = path.split('?', 1)[0]
    share_name, relpath = self.split_locator(path)
    kwargs = dict(share_name=share_name)
    if relpath and relpath[-1] == '/':
        # A trailing slash marks a directory path.
        kwargs['directory_name'] = relpath.rstrip('/')
    elif relpath:
        # rpartition yields ('', '', relpath) when there is no slash,
        # which puts the whole relpath in file_name with an empty
        # directory -- same result as the old rsplit/except dance.
        directory_name, _, file_name = relpath.rpartition('/')
        kwargs['directory_name'] = directory_name
        kwargs['file_name'] = file_name
    return kwargs
specific path.
Args:
path (str): Absolute path or URL.
Returns:
dict: client args | juraj-google-style |
def affine_transform(boxes, angle, translate_x, translate_y, scale, shear_x, shear_y, height, width, center_x=None, center_y=None, bounding_box_format='xyxy'):
    """Apply an affine transformation to bounding boxes.

    Args:
        boxes: The bounding boxes to transform.
        angle: Rotation angle in degrees.
        translate_x: Horizontal translation fraction.
        translate_y: Vertical translation fraction.
        scale: Scaling factor.
        shear_x: Shear angle in x-direction (degrees).
        shear_y: Shear angle in y-direction (degrees).
        height: Height of the image/data, used to normalize translation
            and scaling.
        width: Width of the image/data.
        center_x: x-coordinate of the transformation center (fraction).
        center_y: y-coordinate of the transformation center (fraction).
        bounding_box_format: Format of the input boxes; only "xyxy" is
            supported.

    Returns:
        The transformed bounding boxes, same shape as the input.
    """
    # Only the "xyxy" layout is supported by the helper below.
    if bounding_box_format != 'xyxy':
        raise NotImplementedError
    box_utils = BoundingBox()
    # When tracing inside a TF graph, pin the helper to the TensorFlow
    # backend so the emitted ops stay graph-compatible.
    if backend_utils.in_tf_graph():
        box_utils.backend.set_backend('tensorflow')
    boxes = box_utils.affine(boxes, angle, translate_x, translate_y, scale, shear_x, shear_y, height, width, center_x=center_x, center_y=center_y)
    # Restore the helper's default backend before returning.
    box_utils.backend.reset()
    return boxes
The `height` and `width` parameters are used to normalize the
translation and scaling factors.
Args:
boxes: The bounding boxes to transform, a tensor/array of shape
`(N, 4)` or `(batch_size, N, 4)`.
angle: Rotation angle in degrees.
translate_x: Horizontal translation fraction.
translate_y: Vertical translation fraction.
scale: Scaling factor.
shear_x: Shear angle in x-direction (degrees).
shear_y: Shear angle in y-direction (degrees).
height: Height of the image/data.
width: Width of the image/data.
center_x: x-coordinate of the transformation center (fraction).
center_y: y-coordinate of the transformation center (fraction).
bounding_box_format: The format of the input bounding boxes. Defaults to
`"xyxy"`.
Returns:
The transformed bounding boxes, a tensor/array with the same shape
as the input `boxes`. | github-repos |
def copy_graph(subject, existing_graph):
    """Copy a graph's predicate/object pairs under a new subject.

    Every (predicate, object) pair of `existing_graph` is added to a
    freshly created graph with `subject` as the subject.

    Args:
        subject (rdflib.URIRef): Subject for every copied triple.
        existing_graph (rdflib.Graph): Graph whose predicate/object
            pairs are copied.

    Returns:
        rdflib.Graph: The newly built graph.
    """
    result = rdflib.Graph()
    for predicate, obj in existing_graph.predicate_objects():
        result.add((subject, predicate, obj))
    return result
all predicates and objects of the existing graph copied to the new graph, with
subject as the new subject
Args:
subject(rdflib.URIRef): A URIRef subject
existing_graph(rdflib.Graph): A rdflib.Graph
Returns:
rdflib.Graph | codesearchnet |
def to_wider_model(self, pre_layer_id, n_add):
    """Widen the last dimension of the output of layer `pre_layer_id`.

    Args:
        pre_layer_id: The ID of a convolutional layer or dense layer.
        n_add: The number of dimensions to add.
    """
    # Record the mutation so the operation history can be replayed.
    self.operation_history.append(("to_wider_model", pre_layer_id, n_add))
    pre_layer = self.layer_list[pre_layer_id]
    output_id = self.layer_id_to_output_node_ids[pre_layer_id][0]
    dim = layer_width(pre_layer)
    # Reset the visited map used by _search before traversing.
    self.vis = {}
    self._search(output_id, dim, dim, n_add)
    # Propagate the updated output shapes through the graph in
    # topological order.
    for u in self.topological_order:
        for v, layer_id in self.adj_list[u]:
            self.node_list[v].shape = self.layer_list[layer_id].output_shape
Args:
pre_layer_id: The ID of a convolutional layer or dense layer.
n_add: The number of dimensions to add. | juraj-google-style |
def __init__(self, mesh_impl, laid_out_input, mesh_axes, add_counter_fn=None):
    """Create a LazyAllreduceSum.

    Args:
        mesh_impl: a mesh_impl.
        laid_out_input: a LaidOutTensor.
        mesh_axes: a list of mesh axes.
        add_counter_fn: optional zero-argument callable invoked if and
            when the allreduce actually executes.
    """
    # Cache slot for the reduced value; filled lazily on first use.
    self._reduced = None
    self.mesh_impl = mesh_impl
    self.laid_out_input = laid_out_input
    self.mesh_axes = mesh_axes
    self.add_counter_fn = add_counter_fn
Args:
mesh_impl: a mesh_impl
laid_out_input: a LaidOutTensor
mesh_axes: a list of mesh axes
add_counter_fn: a function taking no arguments which calls
lowering.add_counter if and when the allreduce executes.
Returns:
a LazyAllreduceSum | juraj-google-style |
class ProgbarLogger(Callback):
    """Callback that prints metrics to stdout as a progress bar.

    During `fit`, one bar is drawn per epoch; during a standalone
    `evaluate`/`predict`, a single bar covers the run. When `evaluate`
    or `predict` is invoked from inside `fit`, no extra bar is drawn.
    """

    def __init__(self):
        super().__init__()
        # Number of steps seen so far in the current bar.
        self.seen = 0
        self.progbar = None
        # Total number of steps for the bar; None until known.
        self.target = None
        self.verbose = 1
        self.epochs = 1
        # Set once `fit` starts, so eval/predict hooks fired from within
        # `fit` do not draw their own bars.
        self._called_in_fit = False

    def set_params(self, params):
        """Pick up verbosity, epoch count, and step count from `params`."""
        verbose = params['verbose']
        # 'auto' verbosity resolves to the default interactive bar.
        if verbose == 'auto':
            verbose = 1
        self.verbose = verbose
        self.epochs = params['epochs']
        self.target = params['steps']

    def on_train_begin(self, logs=None):
        # Mark that we are inside `fit` (see _called_in_fit above).
        self._called_in_fit = True

    def on_test_begin(self, logs=None):
        # Only draw a bar for standalone `evaluate`.
        if not self._called_in_fit:
            self._reset_progbar()
            self._maybe_init_progbar()

    def on_predict_begin(self, logs=None):
        self._reset_progbar()
        self._maybe_init_progbar()

    def on_epoch_begin(self, epoch, logs=None):
        self._reset_progbar()
        self._maybe_init_progbar()
        # Print the "Epoch i/n" banner only for multi-epoch training.
        if self.verbose and self.epochs > 1:
            io_utils.print_msg(f'Epoch {epoch + 1}/{self.epochs}')

    def on_train_batch_end(self, batch, logs=None):
        self._update_progbar(batch, logs)

    def on_test_batch_end(self, batch, logs=None):
        if not self._called_in_fit:
            self._update_progbar(batch, logs)

    def on_predict_batch_end(self, batch, logs=None):
        # Predict has no metrics to display.
        self._update_progbar(batch, None)

    def on_epoch_end(self, epoch, logs=None):
        self._finalize_progbar(logs)

    def on_test_end(self, logs=None):
        if not self._called_in_fit:
            self._finalize_progbar(logs)

    def on_predict_end(self, logs=None):
        self._finalize_progbar(logs)

    def _reset_progbar(self):
        """Drop the current bar and step count."""
        self.seen = 0
        self.progbar = None

    def _maybe_init_progbar(self):
        """Create the Progbar on first use."""
        if self.progbar is None:
            self.progbar = Progbar(target=self.target, verbose=self.verbose, unit_name='step')

    def _update_progbar(self, batch, logs=None):
        """Advance the bar to `batch` + 1 steps, showing `logs` metrics."""
        logs = logs or {}
        self._maybe_init_progbar()
        self.seen = batch + 1
        if self.verbose == 1:
            self.progbar.update(self.seen, list(logs.items()), finalize=False)

    def _finalize_progbar(self, logs):
        """Draw the final state of the bar."""
        logs = logs or {}
        # If the total was unknown, pin it to the number of steps seen.
        if self.target is None:
            self.target = self.seen
            self.progbar.target = self.target
        self.progbar.update(self.target, list(logs.items()), finalize=True)
Args:
count_mode: One of `"steps"` or `"samples"`.
Whether the progress bar should
count samples seen or steps (batches) seen.
Raises:
ValueError: In case of invalid `count_mode`. | github-repos |
def from_json(cls, json):
    """Create a new DatastoreInputReader from json encoded by to_json.

    Args:
        json: json map representation of DatastoreInputReader. (The
            parameter name shadows the stdlib module; kept for
            interface compatibility.)

    Returns:
        An instance of DatastoreInputReader with all data deserialized
        from json.
    """
    return cls(
        namespace_range.NamespaceRange.from_json_object(
            json[cls.NAMESPACE_RANGE_PARAM]),
        json[cls.BATCH_SIZE_PARAM])
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json. | juraj-google-style |
def next_event(self, event_id, prev=False):
    """Get the event adjacent to another event in this conversation.

    Args:
        event_id (str): ID of the reference event.
        prev (bool): If True, return the previous event rather than the
            next event. Defaults to False.

    Raises:
        KeyError: If no event with `event_id` is known.

    Returns:
        The adjacent ConversationEvent, or None if there is no such
        event.
    """
    idx = self.events.index(self._events_dict[event_id])
    neighbor = idx - 1 if prev else idx + 1
    # Out-of-range neighbors (before the first or past the last event)
    # yield None.
    if 0 <= neighbor < len(self.events):
        return self.events[neighbor]
    return None
Args:
event_id (str): ID of the event.
prev (bool): If ``True``, return the previous event rather than the
next event. Defaults to ``False``.
Raises:
KeyError: If no such :class:`.ConversationEvent` is known.
Returns:
:class:`.ConversationEvent` or ``None`` if there is no following
event. | juraj-google-style |
def generate_nodes(tpm, cm, network_state, indices, node_labels=None):
    """Generate Node objects for a subsystem.

    Args:
        tpm (np.ndarray): The system's TPM.
        cm (np.ndarray): The corresponding connectivity matrix.
        network_state (tuple): The state of the network.
        indices (tuple[int]): Indices to generate nodes for.

    Keyword Args:
        node_labels (NodeLabels): Textual labels for each node; when
            omitted, default labels are derived from `indices`.

    Returns:
        tuple[Node]: The nodes of the system.
    """
    labels = NodeLabels(None, indices) if node_labels is None else node_labels
    node_states = utils.state_of(indices, network_state)
    return tuple(Node(tpm, cm, index, state, labels)
                 for index, state in zip(indices, node_states))
for index, state in zip(indices, node_state)) | Generate |Node| objects for a subsystem.
Args:
tpm (np.ndarray): The system's TPM
cm (np.ndarray): The corresponding CM.
network_state (tuple): The state of the network.
indices (tuple[int]): Indices to generate nodes for.
Keyword Args:
node_labels (|NodeLabels|): Textual labels for each node.
Returns:
tuple[Node]: The nodes of the system. | juraj-google-style |
def extract_storm_objects(label_grid, data, x_grid, y_grid, times, dx=1, dt=1, obj_buffer=0):
    """Extract labeled storm objects from a grid into STObjects.

    The STObjects contain intensity, location, and shape information
    about each storm at each timestep.

    Args:
        label_grid: 2D or 3D array output by label_storm_objects.
        data: 2D or 3D array used as input to label_storm_objects.
        x_grid: 2D array of x-coordinate data.
        y_grid: 2D array of y-coordinate data.
        times: List or array of time values.
        dx: grid spacing in the same units as x_grid and y_grid.
        dt: period elapsed between times.
        obj_buffer: number of extra pixels beyond each object's bounding
            box to store in its STObject.

    Returns:
        list of lists containing the STObjects identified at each time.
    """
    storm_objects = []
    # 3D labels: one labeled 2D field per timestep.
    if (len(label_grid.shape) == 3):
        ij_grid = np.indices(label_grid.shape[1:])
        for (t, time) in enumerate(times):
            storm_objects.append([])
            object_slices = list(find_objects(label_grid[t], label_grid[t].max()))
            if (len(object_slices) > 0):
                for (o, obj_slice) in enumerate(object_slices):
                    # Optionally grow the bounding box by obj_buffer,
                    # clamped to the grid edges.
                    if (obj_buffer > 0):
                        obj_slice_buff = [slice(np.maximum(0, (osl.start - obj_buffer)), np.minimum((osl.stop + obj_buffer), label_grid.shape[(l + 1)])) for (l, osl) in enumerate(obj_slice)]
                    else:
                        obj_slice_buff = obj_slice
                    storm_objects[(- 1)].append(STObject(data[t][obj_slice_buff], np.where((label_grid[t][obj_slice_buff] == (o + 1)), 1, 0), x_grid[obj_slice_buff], y_grid[obj_slice_buff], ij_grid[0][obj_slice_buff], ij_grid[1][obj_slice_buff], time, time, dx=dx, step=dt))
                    # Estimate motion against the previous timestep's data.
                    if (t > 0):
                        dims = storm_objects[(- 1)][(- 1)].timesteps[0].shape
                        storm_objects[(- 1)][(- 1)].estimate_motion(time, data[(t - 1)], dims[1], dims[0])
    else:
        # 2D labels: a single labeled field covering all of `times`.
        ij_grid = np.indices(label_grid.shape)
        storm_objects.append([])
        object_slices = list(find_objects(label_grid, label_grid.max()))
        if (len(object_slices) > 0):
            for (o, obj_slice) in enumerate(object_slices):
                if (obj_buffer > 0):
                    obj_slice_buff = [slice(np.maximum(0, (osl.start - obj_buffer)), np.minimum((osl.stop + obj_buffer), label_grid.shape[(l + 1)])) for (l, osl) in enumerate(obj_slice)]
                else:
                    obj_slice_buff = obj_slice
                storm_objects[(- 1)].append(STObject(data[obj_slice_buff], np.where((label_grid[obj_slice_buff] == (o + 1)), 1, 0), x_grid[obj_slice_buff], y_grid[obj_slice_buff], ij_grid[0][obj_slice_buff], ij_grid[1][obj_slice_buff], times, times, dx=dx, step=dt))
    return storm_objects
The STObjects contain intensity, location, and shape information about each storm at each timestep.
Args:
label_grid: 2D or 3D array output by label_storm_objects.
data: 2D or 3D array used as input to label_storm_objects.
x_grid: 2D array of x-coordinate data, preferably on a uniform spatial grid with units of length.
y_grid: 2D array of y-coordinate data.
times: List or array of time values, preferably as integers
dx: grid spacing in same units as x_grid and y_grid.
dt: period elapsed between times
obj_buffer: number of extra pixels beyond bounding box of object to store in each STObject
Returns:
storm_objects: list of lists containing STObjects identified at each time. | codesearchnet |
def _from_safe_path_param_name(safe_parameter):
    """Convert a safe regex group name back to its original value.

    The safe parameter is an underscore followed by the unpadded base32
    representation of the actual value (only alphanumerics and
    underscore are allowed in variable-name tokens).

    Args:
        safe_parameter: A string generated by _to_safe_path_param_name.

    Returns:
        The parameter value matched from the URL template.
    """
    assert safe_parameter.startswith('_')
    encoded = safe_parameter[1:]
    # Restore the '=' padding that was stripped when the name was made
    # identifier-safe; base32 works in 8-character groups.
    pad = '=' * (-len(encoded) % 8)
    return base64.b32decode(encoded + pad)
Only alphanumeric characters and underscore are allowed in variable name
tokens, and numeric are not allowed as the first character.
The safe_parameter is a base32 representation of the actual value.
Args:
safe_parameter: A string that was generated by _to_safe_path_param_name.
Returns:
A string, the parameter matched from the URL template. | juraj-google-style |
def _get_time(header, keys, name):
    """Pop a time value from an object header.

    The first key found in `header` is removed and converted to a POSIX
    timestamp: date strings are parsed, numeric values are cast
    directly.

    Args:
        header (dict): Object header; the matched key is popped.
        keys (tuple of str): Candidate header keys, tried in order.
        name (str): Method name reported when no key is present.

    Returns:
        float: The number of seconds since the epoch.

    Raises:
        UnsupportedOperation: if none of `keys` is present in `header`.
    """
    for key in keys:
        if key not in header:
            continue
        date_value = header.pop(key)
        try:
            return to_timestamp(parse(date_value))
        except TypeError:
            # Value was already numeric rather than a date string.
            return float(date_value)
    raise UnsupportedOperation(name)
Args:
header (dict): Object header.
keys (tuple of str): Header keys.
name (str): Method name.
Returns:
float: The number of seconds since the epoch | juraj-google-style |
def get_controller_info_records(self):
    """Get the info records for all controller objects in the manager.

    New info records are created on every call so the latest info is
    always included.

    Returns:
        List of records.ControllerInfoRecord objects. Each object
        contains the info of one type of controller.
    """
    info_records = []
    # Iterate the mapping directly rather than .keys(): same behavior,
    # idiomatic, and avoids the redundant view object.
    for controller_module_name in self._controller_objects:
        # expect_no_raises logs collection failures instead of aborting
        # the whole sweep.
        with expects.expect_no_raises('Failed to collect controller info from %s' % controller_module_name):
            record = self._create_controller_info_record(controller_module_name)
            if record:
                info_records.append(record)
    return info_records
New info records for each controller object are created for every call
so the latest info is included.
Returns:
List of records.ControllerInfoRecord objects. Each object contains
the info of a type of controller | github-repos |
def generate_identifier(sender, instance, **kwargs):
    """Generate and set the identifier of a concept before saving to DB.

    Args:
        sender (class): should be Concept.
        instance (Concept): the concept being saved.

    Raises:
        ValueError: if another concept with the same identifier and
            language already exists.
    """
    identifier = Concept.create_identifier(instance.query)
    qs = Concept.objects.filter(identifier=identifier, lang=instance.lang)
    if instance.pk:
        # On update, ignore the instance itself when checking conflicts.
        qs = qs.exclude(pk=instance.pk)
    # exists() issues a cheap EXISTS query instead of a full COUNT(*),
    # which is the recommended way to test for any matching rows.
    if qs.exists():
        raise ValueError('Concept identifier conflict')
    instance.identifier = identifier
Args:
sender (class): should be Concept
instance (Concept): saving concept | codesearchnet |
def get_ss_class(pdb_file, dssp_file, chain):
    """Classify the secondary structure of a chain in a PDB file.

    Args:
        pdb_file: Path to the PDB file.
        dssp_file: Path to the corresponding DSSP output file.
        chain: Chain identifier to classify.

    Returns:
        str: one of 'all-alpha', 'all-beta' or 'mixed'.
    """
    structure = pr.parsePDB(pdb_file)
    pr.parseDSSP(dssp_file, structure)
    alpha, three_ten, beta = get_dssp_ss_content_multiplechains(structure, chain)
    if alpha == 0 and beta > 0:
        return 'all-beta'
    if beta == 0 and alpha > 0:
        return 'all-alpha'
    if beta == 0 and alpha == 0:
        return 'mixed'
    # Heavily alpha-dominated chains (>= 20:1 alpha to beta) still count
    # as all-alpha.
    if float(alpha) / beta >= 20:
        return 'all-alpha'
    return 'mixed'
return classification | Define the secondary structure class of a PDB file at the specific chain
Args:
pdb_file:
dssp_file:
chain:
Returns: | juraj-google-style |
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Create the token type IDs corresponding to the sequences passed.

    The first sequence (plus its CLS/SEP markers, when the tokenizer
    defines them) maps to segment 0; the second sequence (plus its
    trailing SEP) maps to segment 1. Should be overridden in a subclass
    if the model has a special way of building those.

    Args:
        token_ids_0 (`List[int]`): The first tokenized sequence.
        token_ids_1 (`List[int]`, *optional*): The second tokenized
            sequence.

    Returns:
        `List[int]`: The token type ids.
    """
    # CLS/SEP contribute to the length only when the tokenizer defines
    # their IDs.
    has_cls = getattr(self, 'cls_token_id', None) is not None
    has_sep = getattr(self, 'sep_token_id', None) is not None
    token_type_ids = [0] * (int(has_cls) + len(token_ids_0) + int(has_sep))
    if token_ids_1 is not None:
        token_type_ids += [1] * (len(token_ids_1) + int(has_sep))
    return token_type_ids
IDs?](../glossary#token-type-ids)
Should be overridden in a subclass if the model has a special way of building those.
Args:
token_ids_0 (`List[int]`): The first tokenized sequence.
token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
Returns:
`List[int]`: The token type ids. | github-repos |
def set_intra_op_parallelism_threads(num_threads):
    """Set the number of threads used within an individual op.

    Certain operations like matrix multiplication and reductions can
    utilize parallel threads for speedups. A value of 0 means the
    system picks an appropriate number.

    Args:
        num_threads: Number of parallel threads.
    """
    context.context().intra_op_parallelism_threads = num_threads
Certain operations like matrix multiplication and reductions can utilize
parallel threads for speed ups. A value of 0 means the system picks an
appropriate number.
Args:
num_threads: Number of parallel threads | github-repos |
def _GetCheckpointFilename(save_dir, latest_filename):
    """Return a filename for storing the CheckpointState.

    Args:
        save_dir: The directory for saving and restoring checkpoints.
        latest_filename: Name of the file in `save_dir` that stores the
            CheckpointState proto, or None for the default name.

    Returns:
        The path of the file that contains the CheckpointState proto.
    """
    # Note: only None triggers the default; an empty string is passed
    # through unchanged (matching the original behavior).
    name = 'checkpoint' if latest_filename is None else latest_filename
    return os.path.join(save_dir, name)
Args:
save_dir: The directory for saving and restoring checkpoints.
latest_filename: Name of the file in 'save_dir' that is used
to store the CheckpointState.
Returns:
The path of the file that contains the CheckpointState proto. | github-repos |
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the data encoding the Get request payload to a stream.

    Args:
        output_stream (stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.
    """
    # Encode the optional fields into a scratch buffer first so the
    # total payload length is known before the header is written.
    local_stream = utils.BytearrayStream()
    if (self._unique_identifier is not None):
        self._unique_identifier.write(local_stream, kmip_version=kmip_version)
    if (self._key_format_type is not None):
        self._key_format_type.write(local_stream, kmip_version=kmip_version)
    if (self._key_compression_type is not None):
        self._key_compression_type.write(local_stream, kmip_version=kmip_version)
    if (self._key_wrapping_specification is not None):
        self._key_wrapping_specification.write(local_stream, kmip_version=kmip_version)
    self.length = local_stream.length()
    # The superclass write presumably emits the payload header using
    # self.length -- TODO confirm; then the buffered fields follow.
    super(GetRequestPayload, self).write(output_stream, kmip_version=kmip_version)
    output_stream.write(local_stream.buffer)
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0. | codesearchnet |
def BreachDepressions(
    dem,
    in_place = False,
    topology = 'D8'
):
    """Breach all depressions in a DEM.

    Args:
        dem (rdarray): An elevation model.
        in_place (bool): If True, the DEM is modified in place and there
            is no return; otherwise, a new, altered DEM is returned.
        topology (string): A topology indicator, 'D8' or 'D4'.

    Returns:
        DEM without depressions, or None when in_place is True.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # rdarray subclasses.
    if not isinstance(dem, rdarray):
        raise Exception("A richdem.rdarray or numpy.ndarray is required!")
    if topology not in ('D8', 'D4'):
        raise Exception("Unknown topology!")
    if not in_place:
        dem = dem.copy()
    _AddAnalysis(dem, "BreachDepressions(dem)")
    demw = dem.wrap()
    # topology was validated above, so the else branch is D4.
    if topology == 'D8':
        _richdem.rdBreachDepressionsD8(demw)
    else:
        _richdem.rdBreachDepressionsD4(demw)
    dem.copyFromWrapped(demw)
    if not in_place:
        return dem
Args:
dem (rdarray): An elevation model
in_place (bool): If True, the DEM is modified in place and there is
no return; otherwise, a new, altered DEM is returned.
topology (string): A topology indicator
Returns:
DEM without depressions. | juraj-google-style |
def to_bqm(self, model):
    """Convert a pysmt model into a binary quadratic model.

    The bias values determined by the SMT solver are read out of
    `model` and used to build a spin-valued BQM.

    Args:
        model: A pysmt model.

    Returns:
        :obj:`dimod.BinaryQuadraticModel`
    """
    linear_biases = ((v, float(model.get_py_value(bias)))
                     for v, bias in self.linear.items())
    quadratic_biases = ((u, v, float(model.get_py_value(bias)))
                        for (u, v), bias in self.quadratic.items())
    energy_offset = float(model.get_py_value(self.offset))
    return dimod.BinaryQuadraticModel(
        linear_biases, quadratic_biases, energy_offset, dimod.SPIN)
Adds the values of the biases as determined by the SMT solver to a bqm.
Args:
model: A pysmt model.
Returns:
:obj:`dimod.BinaryQuadraticModel` | codesearchnet |
def _convert_variables_to_tensors(self):
    """Return a copy of this operator with internal variables read into tensors.

    Decomposes the operator into its type-spec components, converts any
    variables among them to tensors, and rebuilds the operator from the
    converted components.

    Returns:
        `self` with all internal Variables converted to Tensors.
    """
    components = self._type_spec._to_components(self)
    tensor_components = variable_utils.convert_variables_to_tensors(components)
    # Rebuilding from tensor components is valid here because
    # _from_components just forwards the contents to __init__, which
    # accepts Tensors wherever it accepts Variables.
    return self._type_spec._from_components(tensor_components)
The usage of `self._type_spec._from_components` violates the contract of
`CompositeTensor`, since it is called on a different nested structure
(one containing only `Tensor`s) than `self.type_spec` specifies (one that
may contain `ResourceVariable`s). Since `LinearOperator`'s
`_from_components` method just passes the contents of the nested structure
to `__init__` to rebuild the operator, and any `LinearOperator` that may be
instantiated with `ResourceVariables` may also be instantiated with
`Tensor`s, this usage is valid.
Returns:
tensor_operator: `self` with all internal Variables converted to Tensors. | github-repos |
def _FormatDescription(self, event):
    """Format the description field.

    Args:
        event (EventObject): event.

    Returns:
        str: formatted description field.

    Raises:
        NoFormatterFound: if no event formatter exists for the event's
            data type.
    """
    date_time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp, timezone=self._output_mediator.timezone)
    timestamp_description = (event.timestamp_desc or 'UNKNOWN')
    (message, _) = self._output_mediator.GetFormattedMessages(event)
    if (message is None):
        data_type = getattr(event, 'data_type', 'UNKNOWN')
        raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type))
    # Strip the field delimiter from the message so the description does
    # not break the delimited output format.
    description = '{0:s}; {1:s}; {2:s}'.format(date_time_string, timestamp_description, message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' '))
    return self._SanitizeField(description)
Args:
event (EventObject): event.
Returns:
str: formatted description field. | codesearchnet |
def volumes(self):
    """Return the EBS volumes attached to this instance.

    Returns:
        `list` of `EBSVolume`
    """
    volume_type_id = ResourceType.get('aws_ebs_volume').resource_type_id
    # Match volume resources whose JSON 'attachments' property contains
    # this instance's id.
    query = db.Resource.join(
        ResourceProperty,
        (Resource.resource_id == ResourceProperty.resource_id)
    ).filter(
        (Resource.resource_type_id == volume_type_id),
        (ResourceProperty.name == 'attachments'),
        func.JSON_CONTAINS(ResourceProperty.value, func.JSON_QUOTE(self.id))
    )
    return [EBSVolume(res) for res in query.all()]
def connect(portname, baudrate):
    """Connect to the Herkulex bus.

    Opens the serial port to which Herkulex servos are attached and
    stores the handle in the module-level SERPORT.

    Args:
        portname (str): The serial port name.
        baudrate (int): The serial port baudrate.

    Raises:
        HerkulexError: if the serial port could not be opened.
    """
    global SERPORT
    try:
        SERPORT = serial.Serial(portname, baudrate, timeout = 0.1)
    except Exception as error:
        # The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; chain the cause so the underlying failure
        # is preserved in the traceback.
        raise HerkulexError("could not open the serial port") from error
Connect to the serial port to which Herkulex servos are attached
Args:
portname (str): The serial port name
baudrate (int): The serial port baudrate
Raises:
SerialException: Error occured while opening serial port | juraj-google-style |
def download(s3_conn, out_filename, s3_path):
    """Download the given s3 path to a local file.

    Args:
        s3_conn (boto.s3.connection): a boto s3 connection.
        out_filename (str): local filename to save the file.
        s3_path (str): the source path on s3.
    """
    bucket_name, prefix = split_s3_path(s3_path)
    source_bucket = s3_conn.get_bucket(bucket_name)
    source_key = boto.s3.key.Key(bucket=source_bucket, name=prefix)
    logging.info('loading from %s into %s', source_key, out_filename)
    source_key.get_contents_to_filename(out_filename, cb=log_download_progress)
Args:
s3_conn (boto.s3.connection) a boto s3 connection
out_filename (str) local filename to save the file
s3_path (str) the source path on s3 | codesearchnet |
def GetTSKVsPartByPathSpec(tsk_volume, path_spec):
    """Retrieves the TSK volume system part from the TSK volume object.

    Args:
        tsk_volume (pytsk3.Volume_Info): TSK volume information.
        path_spec (PathSpec): path specification.

    Returns:
        tuple: contains:
            pytsk3.TSK_VS_PART_INFO: TSK volume system part information
                or None on error.
            int: partition index or None if not available.
    """
    location = getattr(path_spec, 'location', None)
    part_index = getattr(path_spec, 'part_index', None)
    start_offset = getattr(path_spec, 'start_offset', None)
    partition_index = None
    if (part_index is None):
        if (location is not None):
            # Locations of the form "/pN" encode a 1-based partition
            # number.
            if location.startswith('/p'):
                try:
                    partition_index = (int(location[2:], 10) - 1)
                except ValueError:
                    pass
            if ((partition_index is None) or (partition_index < 0)):
                location = None
        # Without any usable selector there is nothing to look up.
        if ((location is None) and (start_offset is None)):
            return (None, None)
    bytes_per_sector = TSKVolumeGetBytesPerSector(tsk_volume)
    current_part_index = 0
    current_partition_index = 0
    tsk_vs_part = None
    tsk_vs_part_list = list(tsk_volume)
    number_of_tsk_vs_parts = len(tsk_vs_part_list)
    if (number_of_tsk_vs_parts > 0):
        if ((part_index is not None) and ((part_index < 0) or (part_index >= number_of_tsk_vs_parts))):
            return (None, None)
        # Scan parts until one matches the partition index, part index
        # or start offset selector.
        for tsk_vs_part in tsk_vs_part_list:
            if TSKVsPartIsAllocated(tsk_vs_part):
                if (partition_index is not None):
                    if (partition_index == current_partition_index):
                        break
                current_partition_index += 1
            if ((part_index is not None) and (part_index == current_part_index)):
                break
            if (start_offset is not None):
                start_sector = TSKVsPartGetStartSector(tsk_vs_part)
                if (start_sector is not None):
                    # Convert the sector number into a byte offset for
                    # comparison.
                    start_sector *= bytes_per_sector
                    if (start_sector == start_offset):
                        break
            current_part_index += 1
    if ((tsk_vs_part is None) or (current_part_index >= number_of_tsk_vs_parts)):
        return (None, None)
    # Unallocated parts carry no partition index.
    if (not TSKVsPartIsAllocated(tsk_vs_part)):
        current_partition_index = None
    return (tsk_vs_part, current_partition_index)
Args:
tsk_volume (pytsk3.Volume_Info): TSK volume information.
path_spec (PathSpec): path specification.
Returns:
tuple: contains:
pytsk3.TSK_VS_PART_INFO: TSK volume system part information or
None on error.
int: partition index or None if not available. | codesearchnet |
def mash_dist_trusted(fasta_path):
    """Compute Mash distances of a genome FASTA against the RefSeq sketch DB.

    Args:
        fasta_path (str): Path to the genome FASTA file.

    Raises:
        Exception: if the `mash dist` subprocess exits non-zero.

    Returns:
        Mash STDOUT output.
    """
    command = [MASH_BIN, 'dist', MASH_SKETCH_FILE, fasta_path]
    process = Popen(command, stderr=PIPE, stdout=PIPE)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        raise Exception('Could not run Mash dist {}'.format(stderr))
    return stdout
Args:
mash_bin (str): Mash binary path
Returns:
(str): Mash STDOUT string | codesearchnet |
def _init_header(self, string):
    """Extract the header part from a TAF/METAR string into a dict.

    Args:
        string: TAF/METAR report string.

    Raises:
        MalformedTAF: if no valid TAF/METAR header is found.

    Returns:
        dict: the matched header groups plus a 'form' key
        ('taf' or 'metar').
    """
    # NOTE(review): the two regex literals below have empty right-hand
    # sides -- the verbose-mode pattern strings appear to have been
    # stripped, and as written these lines are not valid Python. The
    # original patterns must be restored before this method can run.
    taf_header_pattern =
    metar_header_pattern =
    header_taf = re.match(taf_header_pattern, string, re.VERBOSE)
    header_metar = re.match(metar_header_pattern, string, re.VERBOSE)
    if header_taf:
        header_dict = header_taf.groupdict()
        header_dict['form'] = 'taf'
    elif header_metar:
        header_dict = header_metar.groupdict()
        header_dict['form'] = 'metar'
    else:
        raise MalformedTAF("No valid TAF/METAR header found")
    return header_dict
Args:
TAF/METAR report string
Raises:
MalformedTAF: An error parsing the report
Returns:
Header dictionary | juraj-google-style |
def get_percentage_bond_dist_changes(self, max_radius=3.0):
    """Return the fractional bond-distance change for each close site pair.

    Pairs are only considered when their distance in the initial
    structure is below `max_radius`.

    Args:
        max_radius (float): Maximum radius to search for nearest
            neighbors, applied to the initial structure.

    Returns:
        Bond distance changes as a dict of dicts, e.g.
        {index1: {index2: 0.011, ...}}. Since bonding between site1 and
        siteN is symmetric, only index1 < index2 is stored.
    """
    changes = collections.defaultdict(dict)
    n_sites = len(self.initial)
    # combinations(range(n), 2) already yields i < j.
    for i, j in itertools.combinations(range(n_sites), 2):
        d_initial = self.initial[i].distance(self.initial[j])
        if d_initial >= max_radius:
            continue
        d_final = self.final[i].distance(self.final[j])
        changes[i][j] = d_final / d_initial - 1
    return changes
return data | Returns the percentage bond distance changes for each site up to a
maximum radius for nearest neighbors.
Args:
max_radius (float): Maximum radius to search for nearest
neighbors. This radius is applied to the initial structure,
not the final structure.
Returns:
Bond distance changes as a dict of dicts. E.g.,
{index1: {index2: 0.011, ...}}. For economy of representation, the
index1 is always less than index2, i.e., since bonding between
site1 and siten is the same as bonding between siten and site1,
there is no reason to duplicate the information or computation. | codesearchnet |
async def update_server_data(server):
    """Update the stored server info for the given server.

    Ensures serverdata has an entry for `server`, seeds each module's
    default data structure, and greets newly added servers.

    Args:
        server: The Discord server to update info for.
    """
    data = datatools.get_data()
    send_welcome_message = False
    if (server.id not in data['discord']['servers']):
        logger.debug('Adding new server to serverdata')
        data['discord']['servers'][server.id] = {'prefix': '!'}
        # Only greet if intros are not muted in the global data.
        if (('mute_intro' not in data) or (not data['mute_intro'])):
            send_welcome_message = True
    _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    _dir_modules = '{}/../'.format(_dir)
    for module_name in os.listdir(_dir_modules):
        # Skip private/disabled module directories.
        if (module_name.startswith('_') or module_name.startswith('!')):
            continue
        if (not os.path.isfile('{}/{}/_data.py'.format(_dir_modules, module_name))):
            logger.warning('No _data.py file found for module {}'.format(module_name))
            continue
        try:
            import_name = '.discord_modis.modules.{}.{}'.format(module_name, '_data')
            _data = importlib.import_module(import_name, 'modis')
            # Seed the module's default data structure on first sight.
            if (_data.modulename not in data['discord']['servers'][server.id]):
                data['discord']['servers'][server.id][_data.modulename] = _data.sd_structure
                datatools.write_data(data)
        except Exception as e:
            logger.error('Could not initialise module {}'.format(module_name))
            logger.exception(e)
    datatools.write_data(data)
    if send_welcome_message:
        # Pick a channel to greet in: the default channel, a channel
        # named "general", a channel containing "general", or the first
        # text channel -- in that order of preference.
        default_channel = server.default_channel
        if (not default_channel):
            for channel in server.channels:
                if (channel.name == 'general'):
                    default_channel = channel
                    break
        if (not default_channel):
            for channel in server.channels:
                if ('general' in channel.name):
                    default_channel = channel
                    break
        if (not default_channel):
            for channel in server.channels:
                if (channel.type == discord.ChannelType.text):
                    default_channel = channel
                    break
        if default_channel:
            hello_message = ((("Hello! I'm Modis.\n\n" + 'The prefix is currently `!`, and can be changed at any time using `!prefix`\n\n') + 'You can use `!help` to get help commands for all modules, ') + 'or {} me to get the server prefix and help commands.'.format(server.me.mention))
            (await client.send_message(default_channel, hello_message))
Args:
server: The Discord server to update info for | codesearchnet |
def verify_signature(message, signature, certs):
    """Verify an RSA cryptographic signature.

    Checks that `signature` was generated from `message` using the
    private key associated with at least one of `certs`.

    Args:
        message (Union[str, bytes]): The plaintext message.
        signature (Union[str, bytes]): The cryptographic signature.
        certs (Union[Sequence, str, bytes]): The certificate or
            certificates to check the signature against.

    Returns:
        bool: True if the signature is valid, otherwise False.
    """
    # Normalize a single certificate into a one-element sequence.
    if isinstance(certs, (six.text_type, six.binary_type)):
        certs = [certs]
    return any(
        rsa.RSAVerifier.from_string(cert).verify(message, signature)
        for cert in certs)
return False | Verify an RSA cryptographic signature.
Checks that the provided ``signature`` was generated from ``bytes`` using
the private key associated with the ``cert``.
Args:
message (Union[str, bytes]): The plaintext message.
signature (Union[str, bytes]): The cryptographic signature to check.
certs (Union[Sequence, str, bytes]): The certificate or certificates
to use to check the signature.
Returns:
bool: True if the signature is valid, otherwise False. | codesearchnet |
def PrepareMergeTaskStorage(self, task):
    """Prepare a task storage for merging.

    Moves the task storage file from the processed directory to the
    merge directory.

    Args:
        task (Task): task.

    Raises:
        IOError: if the storage type is not supported or if the storage
            file cannot be renamed.
        OSError: if the storage type is not supported or if the storage
            file cannot be renamed.
    """
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
        raise IOError('Unsupported storage type.')
    merge_storage_file_path = self._GetMergeTaskStorageFilePath(task)
    processed_storage_file_path = self._GetProcessedStorageFilePath(task)
    # Record the file size before the rename for later bookkeeping.
    task.storage_file_size = os.path.getsize(processed_storage_file_path)
    try:
        os.rename(processed_storage_file_path, merge_storage_file_path)
    except OSError as exception:
        raise IOError((
            'Unable to rename task storage file: {0:s} with error: '
            '{1!s}').format(processed_storage_file_path, exception))
Moves the task storage file from the processed directory to the merge
directory.
Args:
task (Task): task.
Raises:
IOError: if the storage type is not supported or
if the storage file cannot be renamed.
OSError: if the storage type is not supported or
if the storage file cannot be renamed. | juraj-google-style |
def delete(self, filename):
    """Delete a file from all child repositories that support deletion.

    Individual repositories determine the correct location to delete
    from (Scripts vs. Packages). This does not remove the corresponding
    Package or Script object from the JSS's database.

    Args:
        filename: The filename to delete (no path).
    """
    deletable_repos = (repo for repo in self._children if hasattr(repo, "delete"))
    for repo in deletable_repos:
        repo.delete(filename)
Individual repositories will determine correct location to
delete from (Scripts vs. Packages).
This will not remove the corresponding Package or Script object
from the JSS's database!
Args:
filename: The filename you wish to delete (do not include a
path). | juraj-google-style |
def __get_unused_context(self, parse_result, context):
    """Return the context entries whose keys were not used in parsing.

    Args:
        parse_result (dict): parsed results; tags flagged
            `from_context` mark which context keys were consumed.
        context (list): context entries to filter.

    Returns:
        list: the context entries whose keys do not appear among the
        consumed tags.
    """
    used_keys = {tag['key'] for tag in parse_result['tags'] if tag['from_context']}
    return [entry for entry in context if entry['key'] not in used_keys]
parse_result
Args:
parse_results(list): parsed results used to identify what keys
in the context are used.
context(list): this is the context used to match with parsed results
keys missing in the parsed results are the unused context
Returns:
list: A list of the unused context results. | juraj-google-style |
def decode(cls, command_str):
    """Decode a string-encoded command back into a Command object.

    Args:
        command_str (str): The encoded command string output from a
            previous call to encode.

    Raises:
        DataError: if the argument section is not wrapped in { and }.

    Returns:
        Command: The decoded Command object.
    """
    name, _, arg_blob = command_str.partition(' ')
    args = []
    if len(arg_blob) > 0:
        if not (arg_blob.startswith('{') and arg_blob.endswith('}')):
            raise DataError('Invalid command, argument is not contained in { and }', arg=arg_blob, cmd=name)
        for raw_arg in arg_blob[1:-1].split(','):
            # hex:-prefixed arguments carry hex-encoded UTF-8 text.
            if raw_arg.startswith('hex:'):
                raw_arg = unhexlify(raw_arg[4:]).decode('utf-8')
            args.append(raw_arg)
    return Command(name, args)
Args:
command_str (str): The encoded command string output from a
previous call to encode.
Returns:
Command: The decoded Command object. | codesearchnet |
def _add_message_field(self, field_name, value, params):
    """Store `value` under a '.'-delimited `field_name` as nested dicts.

    For example {'a.b.c': ['foo']} becomes {'a': {'b': {'c': ['foo']}}},
    creating intermediate dicts as needed so message parameters appear
    as sub-dicts within the outer param.

    Args:
        field_name: A string containing the '.'-delimited name to be
            converted into a dictionary.
        value: The value to be set.
        params: The dictionary holding all the parameters, mutated in
            place.
    """
    if '.' not in field_name:
        params[field_name] = value
        return
    root, remaining = field_name.split('.', 1)
    self._add_message_field(remaining, value, params.setdefault(root, {}))
This adds the field to the params dict, broken out so that message
parameters appear as sub-dicts within the outer param.
For example:
{'a.b.c': ['foo']}
becomes:
{'a': {'b': {'c': ['foo']}}}
Args:
field_name: A string containing the '.' delimitied name to be converted
into a dictionary.
value: The value to be set.
params: The dictionary holding all the parameters, where the value is
eventually set. | codesearchnet |
def gen_conversion_log_html(conversion_log_dir, quantization_enabled, tflite_graph_path):
    """Generate an HTML report about the conversion process.

    Args:
        conversion_log_dir: Directory containing the conversion log
            artifacts (protobufs and dot graphs); the HTML summary is
            written there as well.
        quantization_enabled: Whether quantization was enabled.
        tflite_graph_path: Path to the converted TFLite graph.

    Raises:
        IOError: if any required input file does not exist.
    """
    template_filename = _resource_loader.get_path_to_datafile('template.html')
    toco_log_before_path = os.path.join(conversion_log_dir, 'toco_log_before.pb')
    toco_log_after_path = os.path.join(conversion_log_dir, 'toco_log_after.pb')
    dot_before_path = os.path.join(conversion_log_dir, 'toco_tf_graph.dot')
    dot_after_path = os.path.join(conversion_log_dir, 'toco_tflite_graph.dot')
    # Validate every required input up front, in the same order the
    # original per-file checks ran.
    for required_path in (template_filename, toco_log_before_path,
                          toco_log_after_path, dot_before_path,
                          dot_after_path):
        if not os.path.exists(required_path):
            raise IOError("Failed to generate HTML: file '{0}' doesn't exist.".format(required_path))
    html_generator = HTMLGenerator(
        template_filename,
        os.path.join(conversion_log_dir, 'toco_conversion_summary.html'))
    log_before = _toco_conversion_log_pb2.TocoConversionLog()
    log_after = _toco_conversion_log_pb2.TocoConversionLog()
    with open(toco_log_before_path, 'rb') as f:
        log_before.ParseFromString(f.read())
    with open(toco_log_after_path, 'rb') as f:
        log_after.ParseFromString(f.read())
    with io.open(dot_before_path, 'r', encoding='utf-8') as f:
        dot_before = f.read().rstrip()
    with io.open(dot_after_path, 'r', encoding='utf-8') as f:
        dot_after = f.read().rstrip()
    html_generator.generate(log_before, log_after, quantization_enabled,
                            dot_before, dot_after, log_after.toco_err_logs,
                            tflite_graph_path)
Args:
conversion_log_dir: A string specifying the file directory of the conversion
logs. It's required that before calling this function, the
`conversion_log_dir`
already contains the following files: `toco_log_before.pb`,
`toco_log_after.pb`, `toco_tf_graph.dot`,
`toco_tflite_graph.dot`.
quantization_enabled: A boolean, passed from the tflite converter to
indicate whether post-training quantization is enabled during conversion.
tflite_graph_path: A string, the filepath to the converted TFLite model.
Raises:
IOError: When any of the required files doesn't exist. | github-repos |
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
  """Extracts events from a Windows Registry key.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
  """
  values_dict = {}
  for registry_value in registry_key.GetValues():
    # Skip values whose name does not match the expected MRU value name
    # pattern, or that have no string data.
    if not registry_value.name or not self._RE_VALUE_NAME.search(
        registry_value.name):
      continue
    if not registry_value.data or not registry_value.DataIsString():
      continue
    value_string = registry_value.GetDataAsObject()
    # The value data is expected to contain exactly one match with two
    # captured groups; anything else is silently skipped.
    values = self._RE_VALUE_DATA.findall(value_string)
    if len(values) != 1 or len(values[0]) != 2:
      continue
    try:
      # The first captured group is a FILETIME timestamp encoded as a
      # hexadecimal string.
      timestamp = int(values[0][0], 16)
    except ValueError:
      parser_mediator.ProduceExtractionWarning((
          'unable to convert filetime string to an integer for '
          'value: {0:s}.').format(registry_value.name))
      continue
    event_data = OfficeMRUWindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_value.offset
    event_data.value_string = value_string
    # Collect every value for the aggregate key event produced below.
    values_dict[registry_value.name] = value_string
    if not timestamp:
      # A FILETIME of 0 indicates the timestamp was never set.
      date_time = dfdatetime_semantic_time.SemanticTime('Not set')
    else:
      date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
  # Also produce a single event for the key itself, carrying all collected
  # values, timestamped with the key's last written time.
  event_data = windows_events.WindowsRegistryEventData()
  event_data.key_path = registry_key.path
  event_data.offset = registry_key.offset
  event_data.regvalue = values_dict
  event_data.source_append = self._SOURCE_APPEND
  event = time_events.DateTimeValuesEvent(
      registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key. | juraj-google-style |
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
    """Runs the decoder over ``decoder_input_ids`` given precomputed encoder outputs.

    Supports incremental autoregressive decoding through ``past_key_values``:
    when a cache is passed in, the updated cache is returned alongside the
    decoder outputs.

    Args:
        decoder_input_ids: Decoder input token ids.
        encoder_outputs: Output of ``encode``; element 0 holds the encoder's
            last hidden states.
        encoder_attention_mask: Mask over encoder tokens; defaults to all ones.
        decoder_attention_mask: Mask over decoder tokens; defaults to all ones.
        past_key_values: Cached attention key/value states for fast decoding.
        output_attentions: Whether to return attention weights; falls back to
            the model config when ``None``.
        output_hidden_states: Whether to return all hidden states; falls back
            to the model config when ``None``.
        return_dict: Whether to return a ModelOutput instead of a tuple.
        train: Whether dropout is active (``deterministic=not train``).
        params: Optional parameter dict overriding ``self.params``.
        dropout_rng: PRNG key used for dropout.
    """
    # Fall back to the model config for any unspecified output options.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    return_dict = return_dict if return_dict is not None else self.config.return_dict
    encoder_hidden_states = encoder_outputs[0]
    if encoder_attention_mask is None:
        batch_size, sequence_length = encoder_hidden_states.shape[:2]
        encoder_attention_mask = jnp.ones((batch_size, sequence_length))
    batch_size, sequence_length = decoder_input_ids.shape
    if decoder_attention_mask is None:
        decoder_attention_mask = jnp.ones((batch_size, sequence_length))
    # Handle any PRNG if needed.
    rngs = {}
    if dropout_rng is not None:
        rngs['dropout'] = dropout_rng
    inputs = {'params': params or self.params}
    # When a cache is passed in, it must be declared mutable so flax's
    # module.apply returns the updated cache state.
    if past_key_values:
        inputs['cache'] = past_key_values
        mutable = ['cache']
    else:
        mutable = False

    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
        # Forward only through the decoder sub-module.
        decoder_module = module._get_decoder_module()
        return decoder_module(decoder_input_ids, decoder_attention_mask, **kwargs)
    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)
    # Append the updated cache to the model output.
    if past_key_values is not None and return_dict:
        outputs, past = outputs
        outputs['past_key_values'] = unfreeze(past['cache'])
        return outputs
    elif past_key_values is not None and (not return_dict):
        outputs, past = outputs
        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]
    return outputs
Example:
```python
>>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
>>> import jax.numpy as jnp
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
>>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, return_tensors="np")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
``` | github-repos |
def _GenOpenApiSpec(service_class_names, output_path, hostname=None, application_path=None, x_google_api_name=False):
    """Write OpenAPI documents generated from the service classes to file.

    Args:
      service_class_names: A list of fully qualified ProtoRPC service names.
      output_path: The directory to which to output the OpenAPI specs.
      hostname: A string hostname which will be used as the default version
        hostname. If no hostname is specified in the @endpoints.api decorator,
        this value is the fallback. Defaults to None.
      application_path: A string containing the path to the AppEngine app.
      x_google_api_name: Boolean forwarded to GenApiConfig; presumably
        controls emission of the 'x-google-api-name' field — confirm.

    Returns:
      A list of OpenAPI spec filenames.
    """
    output_files = []
    service_configs = GenApiConfig(
        service_class_names, hostname=hostname,
        config_string_generator=openapi_generator.OpenApiGenerator(),
        application_path=application_path, x_google_api_name=x_google_api_name)
    # items() instead of the Python-2-only iteritems() so this also runs on
    # Python 3; on Python 2 iterating the items() list behaves identically.
    for api_name_version, config in service_configs.items():
        # e.g. 'myapi-v1' -> 'myapiv1openapi.json'
        openapi_name = api_name_version.replace('-', '') + 'openapi.json'
        output_files.append(_WriteFile(output_path, openapi_name, config))
    return output_files
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
output_path: The directory to which to output the OpenAPI specs.
hostname: A string hostname which will be used as the default version
hostname. If no hostname is specified in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
application_path: A string containing the path to the AppEngine app.
Returns:
A list of OpenAPI spec filenames. | codesearchnet |
def Getattr(self, path, fh=None):
    """Performs a stat on a file or directory.

    Args:
      path: The path to stat.
      fh: A file handler. Not used.

    Returns:
      A dictionary mapping st_ names to their values.

    Raises:
      FuseOSError: When a path is supplied that grr doesn't know about (an
        invalid file path), or when an empty path is passed.
    """
    del fh  # Unused.
    if not path:
        # An empty string passed to self.root.Add would resolve to aff4:/,
        # which is not the behaviour we want; treat it as "no such entry".
        raise fuse.FuseOSError(errno.ENOENT)
    if path != self.root:
        full_path = self.root.Add(path)
    else:
        full_path = path
    # Open the AFF4 object exactly once (the original redundantly opened it
    # a second time after the root-directory check).
    fd = aff4.FACTORY.Open(full_path, token=self.token)
    if full_path == '/':
        return self.MakePartialStat(fd)
    aff4_stat = fd.Get(fd.Schema.STAT)
    if aff4_stat:
        return aff4_stat.AsDict()
    if fd.Get(fd.Schema.LAST) is None:
        # Nothing was ever written for this object: an unknown path.
        raise fuse.FuseOSError(errno.ENOENT)
    # The object exists but carries no STAT attribute; synthesize one.
    return self.MakePartialStat(fd)
Args:
path: The path to stat.
fh: A file handler. Not used.
Returns:
A dictionary mapping st_ names to their values.
Raises:
FuseOSError: When a path is supplied that grr doesn't know about, ie an
invalid file path.
ValueError: If an empty path is passed. (The empty string, when passed to
self.root.Add, returns a path for aff4:/, the root directory, which is not
the behaviour we want.) | codesearchnet |
def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Union[int, List[int]], vision_feature_select_strategy: str):
    """Obtain video hidden states from the vision tower, then apply the
    multimodal projection and pooling.

    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, channels, height, width)`):
            The tensors corresponding to the input video.
        vision_feature_layer (`Union[int, List[int]]`):
            Index (or indices) of the hidden-state layer(s) to select from
            the vision backbone; multiple indices are concatenated on the
            last dimension.
        vision_feature_select_strategy (`str`):
            Either `"default"` (drop the first token) or `"full"` (keep all).

    Returns:
        video_features (`torch.Tensor`): Video features of shape
        `(batch_size, frames * tokens_per_frame, embed_dim)`.
    """
    batch_size, frames, channels, height, width = pixel_values.shape
    # Fold the frame dimension into the batch so the image tower processes
    # every frame independently.
    frame_batch = pixel_values.view(batch_size * frames, channels, height, width)
    tower_outputs = self.vision_tower(frame_batch, output_hidden_states=True)

    if isinstance(vision_feature_layer, int):
        selected = tower_outputs.hidden_states[vision_feature_layer]
    else:
        # Concatenate the requested layers along the feature dimension.
        layer_states = [tower_outputs.hidden_states[idx] for idx in vision_feature_layer]
        selected = torch.cat(layer_states, dim=-1)

    if vision_feature_select_strategy == 'default':
        # Drop the leading (class) token; "full" keeps everything.
        selected = selected[:, 1:]

    projected = self.multi_modal_projector(selected)
    pooled = self.apply_pooling(projected)
    # Restore the batch dimension, flattening frames into the token axis.
    return pooled.reshape(batch_size, frames * pooled.shape[1], -1)
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, num_frames, channels, height, width)`)
The tensors corresponding to the input video.
vision_feature_layer (`Union[int, List[int]], *optional*, defaults to -2`):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
vision_feature_select_strategy (`str`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
video_features (List[`torch.Tensor`]): List of video feature tensor, each contains all the visual feature of all patches
and are of shape `(num_videos, video_length, embed_dim)`). | github-repos |
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
    """Computes the number of placeholder tokens needed for multimodal inputs
    with the given sizes.

    Args:
        image_sizes (`List[List[int]]`, *optional*):
            The input sizes formatted as (height, width) per each image.
        video_sizes (`List[List[int]]`, *optional*):
            The input sizes formatted as (num_frames, height, width) per each
            video.

    Returns:
        `MultiModalData`: A `MultiModalData` object holding number of tokens
        per each of the provided input modalities, along with other useful
        data.
    """
    # NOTE(review): the original lines computing num_image_tokens /
    # num_video_tokens were truncated mid-comprehension in this source;
    # reconstructed per the upstream Qwen2-VL processor — confirm.
    vision_data = {}
    if image_sizes is not None:
        images_kwargs = Qwen2VLProcessorKwargs._defaults.get('images_kwargs', {})
        images_kwargs.update(kwargs)
        merge_size = images_kwargs.get('merge_size', None) or self.image_processor.merge_size
        num_image_patches = [
            self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
            for image_size in image_sizes
        ]
        # Each merge_size x merge_size grid of patches collapses into one token.
        num_image_tokens = [num_patches // merge_size**2 for num_patches in num_image_patches]
        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
    if video_sizes is not None:
        videos_kwargs = Qwen2VLProcessorKwargs._defaults.get('videos_kwargs', {})
        videos_kwargs.update(kwargs)
        merge_size = videos_kwargs.get('merge_size', None) or self.video_processor.merge_size
        num_video_patches = [
            self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
            for video_size in video_sizes
        ]
        num_video_tokens = [num_patches // merge_size**2 for num_patches in num_video_patches]
        vision_data['num_video_tokens'] = num_video_tokens
    return MultiModalData(**vision_data)
Args:
image_sizes (`List[List[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
video_sizes (`List[List[int]]`, *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data. | github-repos |
def __init__(self, weekend_mask=None, holidays=None):
    """Initializer.

    Args:
      weekend_mask: Boolean `Tensor` of 7 elements, one for each day of the
        week starting with Monday at index 0. A `True` value marks the day
        as a weekend day.
        Default value: None, which means no weekends are applied.
      holidays: Holidays added on top of the weekends defined by
        `weekend_mask`. An instance of `dates.DateTensor` or an object
        convertible to `DateTensor`.
        Default value: None, which means no holidays other than those
        implied by the weekends (if any).
    """
    if weekend_mask is not None:
        weekend_mask = tf.cast(weekend_mask, dtype=tf.bool)
    holiday_ordinals = None
    if holidays is not None:
        # Holidays are passed to the mapper builder as date ordinals.
        holiday_ordinals = dt.convert_to_date_tensor(holidays).ordinal()
    mappers = hol.business_day_mappers(
        weekend_mask=weekend_mask, holidays=holiday_ordinals)
    self._to_biz_space, self._from_biz_space = mappers
Args:
weekend_mask: Boolean `Tensor` of 7 elements one for each day of the week
starting with Monday at index 0. A `True` value indicates the day is
considered a weekend day and a `False` value implies a week day.
Default value: None which means no weekends are applied.
holidays: Defines the holidays that are added to the weekends defined by
`weekend_mask`. An instance of `dates.DateTensor` or an object
convertible to `DateTensor`.
Default value: None which means no holidays other than those implied by
the weekends (if any). | github-repos |
def filter_aliases(alias_table):
    """Filter out aliases that have no command field in the configuration file.

    Args:
        alias_table: The alias table.

    Yields:
        tuple: [0] the first word of the alias and [1] the command that the
        alias points to, with positional-argument placeholders removed.
    """
    for section in alias_table.sections():
        if alias_table.has_option(section, 'command'):
            command = alias_table.get(section, 'command')
            yield section.split()[0], remove_pos_arg_placeholders(command)
Args:
alias_table: The alias table.
Yield:
A tuple with [0] being the first word of the alias and
[1] being the command that the alias points to. | codesearchnet |
def update(self, forecasts, observations):
    """Update the contingency tables with a set of forecasts and observations.

    Args:
        forecasts: 1D array of forecast values.
        observations: 1D array of observation values.
    """
    # Observation outcomes are independent of the forecast threshold, so
    # compute them once outside the loop.
    obs_pos = observations >= self.obs_threshold
    obs_neg = observations < self.obs_threshold
    for index, threshold in enumerate(self.thresholds):
        fcst_pos = forecasts >= threshold
        fcst_neg = forecasts < threshold
        table_row = [
            np.count_nonzero(fcst_pos & obs_pos),  # true positives
            np.count_nonzero(fcst_pos & obs_neg),  # false positives
            np.count_nonzero(fcst_neg & obs_pos),  # false negatives
            np.count_nonzero(fcst_neg & obs_neg),  # true negatives
        ]
        self.contingency_tables.iloc[index] += table_row
Args:
forecasts: 1D array of forecast values
observations: 1D array of observation values. | juraj-google-style |
def __validate(self, value, validate_element):
    """Internal validation function.

    Validate an internal value using a function to validate individual
    elements; repeated fields validate each element in turn.

    Args:
        value: Value to validate.
        validate_element: Function to use to validate individual elements.

    Returns:
        The validated value (a list of validated elements when repeated).

    Raises:
        ValidationError: If value is not of the expected type.
    """
    if not self.repeated:
        return validate_element(value)

    if isinstance(value, (list, tuple)):
        validated = []
        for element in value:
            if element is None:
                # Prefer the field's name in the error; fall back to the
                # class name if the field is not yet attached to a message.
                try:
                    name = self.name
                except AttributeError:
                    raise ValidationError(
                        'Repeated values for %s may not be None'
                        % self.__class__.__name__)
                else:
                    raise ValidationError(
                        'Repeated values for field %s may not be None' % name)
            validated.append(validate_element(element))
        return validated

    if value is not None:
        # A repeated field received a non-sequence, non-None value.
        try:
            name = self.name
        except AttributeError:
            raise ValidationError(
                '%s is repeated. Found: %s' % (self.__class__.__name__, value))
        else:
            raise ValidationError(
                'Field %s is repeated. Found: %s' % (name, value))
    return value
Validate an internal value using a function to validate
individual elements.
Args:
value: Value to validate.
validate_element: Function to use to validate individual elements.
Raises:
ValidationError if value is not expected type. | codesearchnet |
def extract_xml(input_):
    """Extract XML from a zip or gzip file at the given path, file-like
    object, or bytes.

    Args:
        input_: A path to a file, a file-like object, or bytes.

    Returns:
        str: The extracted XML.

    Raises:
        InvalidAggregateReport: If the payload is not a valid zip, gzip or
            xml file, or if a file object was opened in text mode.
    """
    # isinstance instead of type(...) == so str/bytes subclasses are
    # dispatched correctly.
    if isinstance(input_, str):
        file_object = open(input_, 'rb')
    elif isinstance(input_, bytes):
        file_object = BytesIO(input_)
    else:
        file_object = input_

    try:
        # Sniff the file type from the first bytes, then rewind.
        header = file_object.read(6)
        file_object.seek(0)
        if header.startswith(MAGIC_ZIP):
            _zip = zipfile.ZipFile(file_object)
            xml = _zip.open(_zip.namelist()[0]).read().decode()
        elif header.startswith(MAGIC_GZIP):
            xml = GzipFile(fileobj=file_object).read().decode()
        elif header.startswith(MAGIC_XML):
            xml = file_object.read().decode()
        else:
            file_object.close()
            # NOTE(review): this raise is caught by the generic handler
            # below and rewrapped as 'Invalid archive file: ...'; behavior
            # preserved as-is — confirm whether that rewrap is intended.
            raise InvalidAggregateReport('Not a valid zip, gzip, or xml file')
        file_object.close()
    except UnicodeDecodeError:
        raise InvalidAggregateReport(
            'File objects must be opened in binary (rb) mode')
    except Exception as error:
        raise InvalidAggregateReport(
            'Invalid archive file: {0}'.format(error.__str__()))
    return xml
or bytes.
Args:
input_: A path to a file, a file like object, or bytes
Returns:
str: The extracted XML | codesearchnet |
def get_realtime_urls(admin_view_func=(lambda x: x)):
    """Get the URLs for real-time widgets.

    Args:
        admin_view_func (callable): an admin_view method from an AdminSite
            instance. By default: identity.

    Returns:
        list: the list of the real-time URLs as django's ``url()``.
    """
    from .widgets import REALTIME_WIDGETS

    urls = []
    for widget in REALTIME_WIDGETS:
        view = admin_view_func(widget.as_view())
        urls.append(url(widget.url_regex, view, name=widget.url_name))
    return urls
Args:
admin_view_func (callable): an admin_view method from an AdminSite
instance. By default: identity.
Returns:
list: the list of the real-time URLs as django's ``url()``. | codesearchnet |
def parse_exac_line(line, header):
    """Parse an ExAC formatted line.

    Args:
        line (str): A tab separated line with ExAC gene info.
        header (list): A list with the header column names; must include
            'gene' and 'pLI'.

    Returns:
        dict: The header columns mapped to the line's fields, plus
        'hgnc_symbol' (copy of 'gene'), 'pli_score' (float of 'pLI') and
        'raw' (the unmodified input line).
    """
    # zip the header with the tab separated fields (the redundant empty-dict
    # initialization the original had is removed).
    exac_gene = dict(zip(header, line.rstrip().split('\t')))
    exac_gene['hgnc_symbol'] = exac_gene['gene']
    exac_gene['pli_score'] = float(exac_gene['pLI'])
    exac_gene['raw'] = line
    return exac_gene
Args:
line(list): A list with exac gene info
header(list): A list with the header info
Returns:
exac_info(dict): A dictionary with the relevant info | juraj-google-style |
def sample_id(self, lon):
    """Return the sample corresponding to a longitude.

    Args:
        lon (int): longitude in degrees.

    Returns:
        The corresponding sample, after clamping by ``_control_sample``.
    """
    center_lon = float(self.CENTER_LONGITUDE)
    offset = float(self.SAMPLE_PROJECTION_OFFSET)
    if self.grid == 'WAC':
        # WAC mosaic: project through the map scale at the center latitude.
        lon_rad = lon * np.pi / 180.0
        center_lat_rad = self.CENTER_LATITUDE * np.pi / 180.0
        sample = np.rint(
            offset + 1.0
            + (lon_rad - center_lon) * self.A_AXIS_RADIUS * np.cos(center_lat_rad)
            / (self.MAP_SCALE * 0.001))
    else:
        # Other grids: simple linear mapping by map resolution.
        sample = np.rint(offset + float(self.MAP_RESOLUTION) * (lon - center_lon)) + 1
    return self._control_sample(sample)
Args:
lon (int): longidute in degree
Returns:
Correponding sample | codesearchnet |
def ContainsNone(self, *values):
    """Sets the type of the WHERE clause as "contains none".

    Args:
        *values: The values to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateMultipleValuesCondition(values, 'CONTAINS_NONE')
    self._awql = condition
    return self._query_builder
Args:
*values: The values to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to. | codesearchnet |
def add_log_file(path):
    """Attach a rotating log file to the module logger.

    Args:
        path (:obj:`str`): Path to the log file.
    """
    handler = RotatingFileHandler(path, maxBytes=50000, backupCount=2)
    handler.setFormatter(logging.Formatter(
        fmt='%(asctime)s %(levelname)s %(module)s - %(message)s',
        datefmt='%d-%b-%Y %H:%M:%S'))
    geoparse_logger.addHandler(handler)
Args:
path (:obj:`str`): Path to the log file. | codesearchnet |
def run(self, args):
    """Runs the emulator command.

    Args:
        self (EmulatorCommand): the ``EmulatorCommand`` instance
        args (Namespace): arguments to parse

    Returns:
        ``None``
    """
    jlink = pylink.JLink()
    if args.test:
        # Run the J-Link self-test and report the outcome.
        if jlink.test():
            print('Self-test succeeded.')
        else:
            print('Self-test failed.')
    elif args.list is None or args.list in ['usb', 'ip']:
        # List connected emulators, optionally restricted to USB or IP.
        host = pylink.JLinkHost.USB_OR_IP
        if args.list == 'usb':
            host = pylink.JLinkHost.USB
        elif args.list == 'ip':
            host = pylink.JLinkHost.IP
        emulators = jlink.connected_emulators(host)
        for (index, emulator) in enumerate(emulators):
            if index > 0:
                # Blank line between emulator entries.
                print('')
            print('Product Name: %s' % emulator.acProduct.decode())
            print('Serial Number: %s' % emulator.SerialNumber)
            # A non-zero Connection field indicates a USB connection.
            usb = bool(emulator.Connection)
            if not usb:
                print('Nickname: %s' % emulator.acNickname.decode())
            print('Firmware: %s' % emulator.acFWString.decode())
            print('Connection: %s' % ('USB' if usb else 'IP'))
            if not usb:
                print('IP Address: %s' % emulator.aIPAddr)
    elif args.supported is not None:
        # Look up and print the details of one supported device by name
        # (case-insensitive match).
        device = args.supported[0]
        num_supported_devices = jlink.num_supported_devices()
        for i in range(num_supported_devices):
            found_device = jlink.supported_device(i)
            if device.lower() == found_device.name.lower():
                print('Device Name: %s' % device)
                print('Core ID: %s' % found_device.CoreId)
                print('Flash Address: %s' % found_device.FlashAddr)
                print('Flash Size: %s bytes' % found_device.FlashSize)
                print('RAM Address: %s' % found_device.RAMAddr)
                print('RAM Size: %s bytes' % found_device.RAMSize)
                print('Manufacturer: %s' % found_device.manufacturer)
                break
        else:
            # for/else: the loop completed without finding the device.
            print('%s is not supported :(' % device)
    return None
Args:
self (EmulatorCommand): the ``EmulatorCommand`` instance
args (Namespace): arguments to parse
Returns:
``None`` | juraj-google-style |
def replace_gradient_components(self, value, component_grads):
    """Replaces the gradient components in `value` with `component_grads`.

    Args:
      value: A value with its gradient components compatible with
        `component_grads`.
      component_grads: A nested structure of `Tensor` or `IndexedSlices` or
        `None` (for unconnected gradients).

    Returns:
      A copy of `value`, where the components that should be included in
      gradients have been replaced by `component_grads`; or `None` (if
      `component_grads` includes `None`).
    """
    # Abstract method: concrete subclasses must override.
    raise NotImplementedError(f'{type(self).__name__}.replace_gradient_components()')
Args:
value: A value with its gradient components compatible with
`component_grads`.
component_grads: A nested structure of `Tensor` or `IndexedSlices` or
`None` (for unconnected gradients).
Returns:
A copy of `value`, where the components that should be included in
gradients have been replaced by `component_grads`; or `None` (if
`component_grads` includes `None`). | github-repos |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.