code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def parse_problem_name(name):
    """Determine whether a problem name specifies a copy and/or reversal.

    Args:
        name: str, problem name, possibly carrying "_rev" and/or "_copy"
            suffixes.

    Returns:
        ProblemSpec: namedtuple with ["base_name", "was_reversed", "was_copy"].

    Raises:
        ValueError: if name contains multiple suffixes of the same type
            ('_rev' or '_copy'). One of each is ok.
    """
    if name.endswith("_rev"):
        # Strip the suffix and recurse; the recursion handles "_copy" too.
        base, was_reversed, was_copy = parse_problem_name(name[: -len("_rev")])
        if was_reversed:
            raise ValueError(
                "Invalid problem name %s: multiple '_rev' instances" % name)
        return ProblemSpec(base, True, was_copy)
    if name.endswith("_copy"):
        base, was_reversed, was_copy = parse_problem_name(name[: -len("_copy")])
        if was_copy:
            raise ValueError(
                "Invalid problem_name %s: multiple '_copy' instances" % name)
        return ProblemSpec(base, was_reversed, True)
    return ProblemSpec(name, False, False)
Args:
name: str, problem name, possibly with suffixes.
Returns:
ProblemSpec: namedtuple with ["base_name", "was_reversed", "was_copy"]
Raises:
ValueError if name contains multiple suffixes of the same type
('_rev' or '_copy'). One of each is ok. | juraj-google-style |
def get_metadata(self, resource, keys):
    """Gets the values for given keys associated with the given resource.

    Args:
        resource (intern.resource.boss.BossResource): Resource whose
            metadata is queried.
        keys (list): Metadata keys to look up.

    Returns:
        (dictionary): Key/value pairs returned by the metadata service.

    Raises:
        HTTPErrorList on failure.
    """
    # Refresh the service's auth token before issuing the request.
    self.metadata_service.set_auth(self._token_metadata)
    return self.metadata_service.get(resource, keys) | Gets the values for given keys associated with the given resource.
Args:
resource (intern.resource.boss.BossResource)
keys (list)
Returns:
(dictionary)
Raises:
HTTPErrorList on failure. | codesearchnet |
def forward_feed(self, amount):
    """Finish input of the current line and advance the vertical position.

    Sends the ESC-J control sequence, which moves the vertical print
    position forward by amount/300 inch.

    Args:
        amount: how far forward to move the position, in 1/300-inch units.
            Must fit in a single byte, i.e. 0 <= amount <= 255.

    Returns:
        None

    Raises:
        RuntimeError: if amount is outside the inclusive range 0..255.
    """
    if 0 <= amount <= 255:
        self.send(chr(27) + 'J' + chr(amount))
    else:
        # Fixed message: previously misspelled "forward" and claimed the
        # bound was "less than 255" even though 255 is accepted.
        raise RuntimeError('Invalid forward feed, must be between 0 and 255')
print position forward by x/300 inch.
Args:
amount: how far forward you want the position moved. Actual movement is calculated as
amount/300 inches.
Returns:
None
Raises:
RuntimeError: Invalid forward feed. | juraj-google-style |
async def rename(self, name):
    """Rename this conversation.

    Hangouts only officially supports renaming group conversations, so
    custom names for one-to-one conversations may or may not appear in
    all first party clients.

    Args:
        name (str): New name.

    Raises:
        .NetworkError: If conversation cannot be renamed.
    """
    await self._client.rename_conversation(
        hangouts_pb2.RenameConversationRequest(
            request_header=self._client.get_request_header(),
            new_name=name,
            event_request_header=self._get_event_request_header(),
        )
    ) | Rename this conversation.
Hangouts only officially supports renaming group conversations, so
custom names for one-to-one conversations may or may not appear in all
first party clients.
Args:
name (str): New name.
Raises:
.NetworkError: If conversation cannot be renamed. | juraj-google-style |
def isHostCert(self, name):
    """Check whether a certificate exists for the given host keypair.

    Args:
        name (str): The name of the host keypair.

    Returns:
        bool: True if the certificate file is present, False otherwise.
    """
    filename = '%s.crt' % name
    return os.path.isfile(self._getPathJoin('hosts', filename))
Args:
name (str): The name of the host keypair.
Examples:
Check if the host cert "myhost" exists:
exists = cdir.isUserCert('myhost')
Returns:
bool: True if the certificate is present, False otherwise. | juraj-google-style |
def __init__(self, channel):
    """Constructor.

    Creates one unary-unary stub per IAMCredentials RPC, each bound to the
    protobuf request serializer / response deserializer for that method.

    Args:
        channel: A grpc.Channel.
    """
    self.GenerateAccessToken = channel.unary_unary(
        "/google.iam.credentials.v1.IAMCredentials/GenerateAccessToken",
        request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateAccessTokenRequest.SerializeToString,
        response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateAccessTokenResponse.FromString,
    )
    self.GenerateIdToken = channel.unary_unary(
        "/google.iam.credentials.v1.IAMCredentials/GenerateIdToken",
        request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateIdTokenRequest.SerializeToString,
        response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateIdTokenResponse.FromString,
    )
    self.SignBlob = channel.unary_unary(
        "/google.iam.credentials.v1.IAMCredentials/SignBlob",
        request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.SignBlobRequest.SerializeToString,
        response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.SignBlobResponse.FromString,
    )
    self.SignJwt = channel.unary_unary(
        "/google.iam.credentials.v1.IAMCredentials/SignJwt",
        request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.SignJwtRequest.SerializeToString,
        response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.SignJwtResponse.FromString,
    )
    self.GenerateIdentityBindingAccessToken = channel.unary_unary(
        "/google.iam.credentials.v1.IAMCredentials/GenerateIdentityBindingAccessToken",
        request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateIdentityBindingAccessTokenRequest.SerializeToString,
        response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateIdentityBindingAccessTokenResponse.FromString,
    ) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def get_example_from_prop_spec(self, prop_spec, from_allof=False):
    """Return an example value from a property specification.

    Args:
        prop_spec: the specification of the property.
        from_allof: whether these properties are part of an allOf section.

    Returns:
        An example value.
    """
    # Explicit example values take precedence over everything else.
    easy_keys = ['example', 'x-example', 'default']
    for key in easy_keys:
        if ((key in prop_spec.keys()) and self.use_example):
            return prop_spec[key]
    if ('enum' in prop_spec.keys()):
        return prop_spec['enum'][0]
    if ('$ref' in prop_spec.keys()):
        return self._example_from_definition(prop_spec)
    if ('allOf' in prop_spec.keys()):
        return self._example_from_allof(prop_spec)
    if ('type' not in prop_spec):
        return self._example_from_complex_def(prop_spec)
    if (prop_spec['type'] == 'object'):
        (example, additional_properties) = self._get_example_from_properties(prop_spec)
        # Plain objects are wrapped in a list unless they came from an
        # allOf block or allow additional properties.
        if (additional_properties or from_allof):
            return example
        return [example]
    if ((prop_spec['type'] == 'array') or (isinstance(prop_spec['type'], list) and (prop_spec['type'][0] == 'array'))):
        return self._example_from_array_spec(prop_spec)
    if (prop_spec['type'] == 'file'):
        return (StringIO('my file contents'), 'hello world.txt')
    if (('format' in prop_spec.keys()) and (prop_spec['format'] == 'date-time')):
        return self._get_example_from_basic_type('datetime')[0]
    # 'type' may be a list of candidate types; use the first one.
    if isinstance(prop_spec['type'], list):
        return self._get_example_from_basic_type(prop_spec['type'][0])[0]
    logging.info('falling back to basic type, no other match found')
    return self._get_example_from_basic_type(prop_spec['type'])[0] | Return an example value from a property specification.
Args:
prop_spec: the specification of the property.
from_allof: whether these properties are part of an
allOf section
Returns:
An example value | codesearchnet |
def plot_term_kdes(self, words, **kwargs):
    """Plot kernel density estimates for multiple words on one figure.

    Args:
        words (list): A list of unstemmed terms.
        **kwargs: Forwarded to ``self.kde``.
    """
    stem = PorterStemmer().stem
    for word in words:
        # Terms are stemmed before the KDE lookup.
        kde = self.kde(stem(word), **kwargs)
        plt.plot(kde)
    plt.show() | Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms. | juraj-google-style |
def complies_with_scope(queue_item, new_request, scope):
    """Check if the new request complies with the crawling scope.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.
        new_request (:class:`nyawc.http.Request`): The request to check.
        scope (:class:`nyawc.Options.OptionsScope`): The scope to check.

    Returns:
        bool: True if it complies, False otherwise.
    """
    # Unparsable URLs never comply.
    if (not URLHelper.is_parsable(queue_item.request.url)):
        return False
    if (not URLHelper.is_parsable(new_request.url)):
        return False
    if scope.request_methods:
        if (not (queue_item.request.method in scope.request_methods)):
            return False
    if scope.protocol_must_match:
        if (URLHelper.get_protocol(queue_item.request.url) != URLHelper.get_protocol(new_request.url)):
            return False
    if scope.subdomain_must_match:
        current_subdomain = URLHelper.get_subdomain(queue_item.request.url)
        new_subdomain = URLHelper.get_subdomain(new_request.url)
        # Treat "www" and the bare domain as the same subdomain.
        www_matches = False
        if ((current_subdomain == 'www') and (new_subdomain == '')):
            www_matches = True
        if ((new_subdomain == 'www') and (current_subdomain == '')):
            www_matches = True
        if ((not www_matches) and (current_subdomain != new_subdomain)):
            return False
    if scope.hostname_must_match:
        if (URLHelper.get_hostname(queue_item.request.url) != URLHelper.get_hostname(new_request.url)):
            return False
    if scope.tld_must_match:
        if (URLHelper.get_tld(queue_item.request.url) != URLHelper.get_tld(new_request.url)):
            return False
    return True | Check if the new request complies with the crawling scope.
Args:
queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.
new_request (:class:`nyawc.http.Request`): The request to check.
scope (:class:`nyawc.Options.OptionsScope`): The scope to check.
Returns:
bool: True if it complies, False otherwise. | codesearchnet |
def coefficients(self):
    """All of the coefficient arrays.

    Concatenation of the real and complex coefficient blocks; always a
    tuple of length 6.

    Returns:
        tuple: ``alpha_real``, ``beta_real``, ``alpha_complex_real``,
        ``alpha_complex_imag``, ``beta_complex_real``,
        ``beta_complex_imag``.

    Raises:
        ValueError: For invalid dimensions for the coefficients.
    """
    vector = self.get_parameter_vector(include_frozen=True)
    pars = self.get_all_coefficients(vector)
    if (len(pars) != 6):
        raise ValueError('there must be 6 coefficient blocks')
    # All blocks must be 1D arrays.
    if any(((len(p.shape) != 1) for p in pars)):
        raise ValueError('coefficient blocks must be 1D')
    # Real blocks (0,1) must match each other; complex blocks (2..5) too.
    if (len(pars[0]) != len(pars[1])):
        raise ValueError('coefficient blocks must have the same shape')
    if any(((len(pars[2]) != len(p)) for p in pars[3:])):
        raise ValueError('coefficient blocks must have the same shape')
    return pars | All of the coefficient arrays
This property is the concatenation of the results from
:func:`terms.Term.get_real_coefficients` and
:func:`terms.Term.get_complex_coefficients` but it will always return
a tuple of length 6, even if ``alpha_complex_imag`` was omitted from
``get_complex_coefficients``.
Returns:
(array[j_real], array[j_real], array[j_complex], array[j_complex],
array[j_complex], array[j_complex]): ``alpha_real``, ``beta_real``,
``alpha_complex_real``, ``alpha_complex_imag``,
``beta_complex_real``, and ``beta_complex_imag`` as described
above.
Raises:
ValueError: For invalid dimensions for the coefficients. | codesearchnet |
def codemirror_script(self, inputid):
    """Build the HTML script tag that initializes CodeMirror for a field.

    Arguments:
        inputid (string): Input id.

    Returns:
        string: HTML for the field's CodeMirror instance.
    """
    template = self.get_codemirror_field_js()
    config = json.dumps(self.codemirror_config(), sort_keys=True)
    return template.format(
        varname='{}_codemirror'.format(inputid),
        inputid=inputid,
        settings=config,
    )
Arguments:
inputid (string): Input id.
Returns:
string: HTML for field CodeMirror instance. | codesearchnet |
def propagate(self, date):
    """Propagate the orbit to a new date.

    Args:
        date (Date)

    Return:
        Orbit
    """
    propagator = self.propagator
    # Make sure the propagator is bound to this orbit before propagating.
    if propagator.orbit is not self:
        propagator.orbit = self
    return propagator.propagate(date)
Args:
date (Date)
Return:
Orbit | codesearchnet |
def counts(self, *args, **kwargs):
    """Return a class that can be used to access count densities.

    Args:
        measured_regions (pandas.DataFrame): Regions being measured
            (defaults to all the regions).
        measured_phenotypes (list): Phenotypes present (defaults to all
            the phenotypes).
        minimum_region_size_pixels (int): Minimum region size in pixels to
            calculate counts on (Default: 1).

    Returns:
        Counts: class that holds the counts.
    """
    n = Counts.read_cellframe(self, prune_neighbors=False)
    # Each optional kwarg falls back to this frame's own values.
    if ('measured_regions' in kwargs):
        n.measured_regions = kwargs['measured_regions']
    else:
        n.measured_regions = self.get_measured_regions()
    if ('measured_phenotypes' in kwargs):
        n.measured_phenotypes = kwargs['measured_phenotypes']
    else:
        n.measured_phenotypes = self.phenotypes
    n.microns_per_pixel = self.microns_per_pixel
    if ('minimum_region_size_pixels' in kwargs):
        n.minimum_region_size_pixels = kwargs['minimum_region_size_pixels']
    else:
        n.minimum_region_size_pixels = 1
    return n | Return a class that can be used to access count densities
Args:
measured_regions (pandas.DataFrame): Dataframe of regions that are being measured (defaults to all the regions)
measured_phenotypes (list): List of phenotypes present (defaults to all the phenotypes)
minimum_region_size_pixels (int): Minimum region size to calculate counts on in pixels (Default: 1)
Returns:
Counts: returns a class that holds the counts. | codesearchnet |
def _get_reaction(self, x):
    """Generate the balanced reaction at mixing ratio x : (1-x) for
    self.comp1 : self.comp2.

    Args:
        x (float): Mixing ratio x of reactants, a float between 0 and 1.

    Returns:
        Reaction object.
    """
    mix_comp = self.comp1 * x + self.comp2 * (1-x)
    decomp = self.pd.get_decomposition(mix_comp)
    # At the endpoints only one reactant is involved.
    if np.isclose(x, 0):
        reactant = [self.c2_original]
    elif np.isclose(x, 1):
        reactant = [self.c1_original]
    else:
        reactant = list(set([self.c1_original, self.c2_original]))
    if self.grand:
        # Grand-canonical case: open elements join as reactants.
        reactant += [Composition(e.symbol)
                     for e, v in self.pd.chempots.items()]
    product = [Composition(k.name) for k, v in decomp.items()]
    reaction = Reaction(reactant, product)
    x_original = self._get_original_composition_ratio(reaction)
    # Normalize to whichever original composition dominates.
    if np.isclose(x_original, 1):
        reaction.normalize_to(self.c1_original, x_original)
    else:
        reaction.normalize_to(self.c2_original, 1-x_original)
    return reaction | Generates balanced reaction at mixing ratio x : (1-x) for
self.comp1 : self.comp2.
Args:
x (float): Mixing ratio x of reactants, a float between 0 and 1.
Returns:
Reaction object. | juraj-google-style |
def load_vasp_summary(filename):
    """Read a multi-document `vasp_summary.yaml` file into a dict of dicts.

    Each YAML document in the file becomes one sub-dictionary, keyed at the
    top level by that document's `title` value.

    Args:
        filename (str): File path for the `vasp_summary.yaml` file.

    Returns:
        dict(dict,dict,...): A dictionary of separate YAML documents,
        each as a dictionary.
    """
    with open(filename, 'r') as stream:
        documents = yaml.load_all(stream, Loader=yaml.SafeLoader)
        return {doc['title']: doc for doc in documents}
a dictionary of dictionaries. Each YAML document in the file
corresponds to one sub-dictionary, with the corresponding
top-level key given by the `title` value.
Example:
The file:
---
title: foo
data: foo_data
---
title: bar
data: bar_data
is converted to the dictionary
{ 'foo': { 'title': 'foo', 'data': 'foo_data' },
'bar': { 'title': 'bar', 'data': 'bar_data' } }
Args:
filename (str): File path for the `vasp_summary.yaml` file.
Returns:
dict(dict,dict,...): A dictionary of separate YAML documents,
each as dictionaries. | codesearchnet |
def start(logdir):
    """Start a TensorBoard instance serving ``logdir``.

    Args:
        logdir: the logdir to run TensorBoard on.

    Returns:
        The pid of the started TensorBoard process.

    Raises:
        Exception: if the instance cannot be started within ~10 seconds.
    """
    # Restored truncated string literal ('gs:' -> 'gs://'): only Google
    # Cloud Storage paths need the read-permission check.
    if logdir.startswith('gs://'):
        datalab.storage._api.Api.verify_permitted_to_read(logdir)
    port = datalab.utils.pick_unused_port()
    args = ['tensorboard', '--logdir=' + logdir, '--port=' + str(port)]
    p = subprocess.Popen(args)
    # Poll for up to 10 seconds for the server to come up.
    retry = 10
    while (retry > 0):
        if datalab.utils.is_http_running_on(port):
            basepath = os.environ.get('DATALAB_ENDPOINT_URL', '')
            url = '%s/_proxy/%d/' % (basepath.rstrip('/'), port)
            html = '<p>TensorBoard was started successfully with pid %d. ' % p.pid
            html += 'Click <a href="%s" target="_blank">here</a> to access it.</p>' % url
            IPython.display.display_html(html, raw=True)
            return p.pid
        time.sleep(1)
        retry -= 1
    raise Exception('Cannot start TensorBoard.')
Args:
logdir: the logdir to run TensorBoard on.
Raises:
Exception if the instance cannot be started. | juraj-google-style |
def _parse_metadata(self, message):
    """Build Legobot metadata from a raw Discord websocket message.

    Args:
        message (dict): Full message from the Discord websocket connection.

    Returns:
        dict: Legobot.Metadata fields as a dictionary.
    """
    metadata = Metadata(source=self.actor_urn).__dict__
    payload = message['d']
    metadata['source_user'] = (
        payload['author']['username'] if 'author' in payload else None)
    metadata['source_channel'] = (
        payload['channel_id'] if 'channel_id' in payload else None)
    # Discord does not distinguish these, so reuse the username.
    metadata['user_id'] = metadata['source_user']
    metadata['display_name'] = metadata['source_user']
    metadata['source_connector'] = 'discord'
    return metadata
Args:
message (dict): Full message from Discord websocket connection"
Returns:
Legobot.Metadata | codesearchnet |
def to_dict(self):
    """Convert this exception to a dictionary.

    Returns:
        dict: A dictionary of information about this exception, with a
        'reason' key, a 'type' key and a dictionary of params.
    """
    return {
        'reason': self.msg,
        'type': type(self).__name__,
        'params': self.params,
    }
Returns:
dist: A dictionary of information about this exception,
Has a 'reason' key, a 'type' key and a dictionary of params | codesearchnet |
def inspect_virtual(self, stream_id):
    """Inspect the last value written into a virtual stream.

    Args:
        stream_id (int): The virtual stream we want to inspect.

    Returns:
        (int, int): An error code and the stream value.
    """
    stream = DataStream.FromEncoded(stream_id)
    # Buffered streams are not virtual, so they cannot be inspected here.
    if stream.buffered:
        return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0]
    try:
        reading = self.storage.inspect_last(stream, only_allocated=True)
        return [Error.NO_ERROR, reading.value]
    except StreamEmptyError:
        # An empty stream is not an error; report a zero value.
        return [Error.NO_ERROR, 0]
    except UnresolvedIdentifierError:
        return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0] | Inspect the last value written into a virtual stream.
Args:
stream_id (int): The virtual stream was want to inspect.
Returns:
(int, int): An error code and the stream value. | codesearchnet |
def find_node_by_value(self, value):
    """Find and return a node in ``self.node_list`` with value ``value``.

    If multiple nodes exist with the value ``value``, the first one found
    is returned. If no such node exists, this returns ``None``.

    Args:
        value (Any): The value of the node to find.

    Returns:
        Node: A node with value ``value`` if it was found.
        None: If no node exists with value ``value``.
    """
    for node in self.node_list:
        if node.value == value:
            return node
    return None
If multiple nodes exist with the value ``value``,
return the first one found.
If no such node exists, this returns ``None``.
Args:
value (Any): The value of the node to find
Returns:
Node: A node with value ``value`` if it was found
None: If no node exists with value ``value``
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> graph = Graph([node_1])
>>> found_node = graph.find_node_by_value('One')
>>> found_node == node_1
True | codesearchnet |
def update_torch_dtype(self, torch_dtype: 'torch.dtype') -> 'torch.dtype':
    """Return the dtype the model should be cast to for this quantizer.

    Some quantization methods require explicitly setting the model dtype to
    a target dtype; override this method to preserve that behavior. The
    base implementation is a no-op and returns the input unchanged.

    Args:
        torch_dtype (`torch.dtype`):
            The input dtype that is passed in `from_pretrained`.
    """
    return torch_dtype | Some quantization methods require to explicitly set the dtype of the model to a
target dtype. You need to override this method in case you want to make sure that behavior is
preserved
Args:
torch_dtype (`torch.dtype`):
The input dtype that is passed in `from_pretrained` | github-repos |
class BaseModelOutputWithNoAttention(ModelOutput):
    """Base class for model outputs with potential hidden states but no attentions."""

    # Sequence of hidden-states at the output of the last layer of the
    # model, shape (batch_size, num_channels, height, width).
    last_hidden_state: Optional[torch.FloatTensor] = None
    # One tensor per layer (plus optional embedding output), returned when
    # output_hidden_states=True is passed or set in the config.
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None | Base class for model's outputs, with potential hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. | github-repos |
def get_pipeline_path(pipeline_name, working_directory):
    """Look for the pipeline in the various places it could be.

    First checks the cwd's pipelines folder, then the pypyr install
    directory's pipelines folder.

    Args:
        pipeline_name: string. Name of pipeline to find.
        working_directory: string. Path in which to look for
            pipeline_name.yaml.

    Returns:
        Absolute path to the pipeline_name.yaml file.

    Raises:
        PipelineNotFoundError: if pipeline_name.yaml not found in
            working_dir or in {pypyr install dir}/pipelines.
    """
    logger.debug('starting')
    logger.debug(f'current directory is {working_directory}')
    pipeline_path = os.path.abspath(os.path.join(working_directory, 'pipelines', (pipeline_name + '.yaml')))
    if os.path.isfile(pipeline_path):
        logger.debug(f'Found {pipeline_path}')
    else:
        logger.debug(f'{pipeline_name} not found in current directory/pipelines folder. Looking in pypyr install directory instead.')
        # Two levels up from this module is the pypyr install root.
        pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        logger.debug(f'pypyr installation directory is: {pypyr_dir}')
        pipeline_path = os.path.abspath(os.path.join(pypyr_dir, 'pipelines', (pipeline_name + '.yaml')))
        if os.path.isfile(pipeline_path):
            logger.debug(f'Found {pipeline_path}')
        else:
            raise PipelineNotFoundError(f'{pipeline_name}.yaml not found in either {working_directory}/pipelines or {pypyr_dir}/pipelines')
    logger.debug('done')
    return pipeline_path | Look for the pipeline in the various places it could be.
First checks the cwd. Then checks pypyr/pipelines dir.
Args:
pipeline_name: string. Name of pipeline to find
working_directory: string. Path in which to look for pipeline_name.yaml
Returns:
Absolute path to the pipeline_name.yaml file
Raises:
PipelineNotFoundError: if pipeline_name.yaml not found in working_dir
or in {pypyr install dir}/pipelines. | codesearchnet |
def save(self, recipe):
    """Save an AnswerFactory Recipe, creating or updating as needed.

    Args:
        recipe (dict): Dictionary specifying a recipe.

    Returns:
        AnswerFactory Recipe id.
    """
    # An existing id means update (PUT); otherwise create (POST).
    if (('id' in recipe) and (recipe['id'] is not None)):
        self.logger.debug(('Updating existing recipe: ' + json.dumps(recipe)))
        url = ('%(base_url)s/recipe/json/%(recipe_id)s' % {'base_url': self.base_url, 'recipe_id': recipe['id']})
        r = self.gbdx_connection.put(url, json=recipe)
        try:
            r.raise_for_status()
        except:
            print(r.text)
            raise
        return recipe['id']
    else:
        self.logger.debug(('Creating new recipe: ' + json.dumps(recipe)))
        url = ('%(base_url)s/recipe/json' % {'base_url': self.base_url})
        r = self.gbdx_connection.post(url, json=recipe)
        try:
            r.raise_for_status()
        except:
            print(r.text)
            raise
        # The server assigns the id for newly created recipes.
        recipe_json = r.json()
        return recipe_json['id'] | Saves an AnswerFactory Recipe
Args:
recipe (dict): Dictionary specifying a recipe
Returns:
AnswerFactory Recipe id | codesearchnet |
def market_open(self, session, mins) -> Session:
    """Time interval starting at market open.

    Args:
        session: [allday, day, am, pm, night]
        mins: minutes after open

    Returns:
        Session of start_time and end_time; SessNA for unknown sessions.
    """
    if session not in self.exch:
        return SessNA
    start_time = self.exch[session][0]
    end_time = shift_time(start_time, int(mins))
    return Session(start_time, end_time)
Args:
session: [allday, day, am, pm, night]
mins: minutes after open
Returns:
Session of start_time and end_time | codesearchnet |
def dummyctrl(self,r,ctrl):
    """Create a DummyVertex at rank r inserted in the ctrl dict
    of the associated edge and layer.

    Arguments:
        r (int): rank value.
        ctrl (dict): the edge's control vertices.

    Returns:
        DummyVertex: the created DummyVertex.
    """
    dv = DummyVertex(r)
    # Dummy vertices take the layout's default width/height.
    dv.view.w,dv.view.h=self.dw,self.dh
    self.grx[dv] = dv
    dv.ctrl = ctrl
    ctrl[r] = dv
    self.layers[r].append(dv)
    return dv | creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex. | juraj-google-style |
def split(self, desired_bundle_size: int, start_position: Union[int, str, bytes, ObjectId]=None, stop_position: Union[int, str, bytes, ObjectId]=None):
    """Split the source into a set of bundles.

    Bundles should be approximately of size ``desired_bundle_size`` bytes.

    Args:
        desired_bundle_size: the desired size (in bytes) of the bundles
            returned.
        start_position: if specified, used as the starting position of the
            first bundle.
        stop_position: if specified, used as the ending position of the
            last bundle.

    Returns:
        an iterator of 'SourceBundle' objects describing the generated
        bundles.
    """
    desired_bundle_size_in_mb = desired_bundle_size
    # Never allow a bundle smaller than 1 MB.
    desired_bundle_size_in_mb = max(desired_bundle_size_in_mb, 1)
    is_initial_split = start_position is None and stop_position is None
    start_position, stop_position = self._replace_none_positions(start_position, stop_position)
    if self.bucket_auto:
        # $bucketAuto path: weights follow the per-bucket document counts.
        split_keys = []
        weights = []
        for bucket in self._get_auto_buckets(desired_bundle_size_in_mb, start_position, stop_position, is_initial_split):
            split_keys.append({'_id': bucket['_id']['max']})
            weights.append(bucket['count'])
    else:
        # splitVector path: every bundle gets the same nominal weight.
        split_keys = self._get_split_keys(desired_bundle_size_in_mb, start_position, stop_position)
        weights = itertools.cycle((desired_bundle_size_in_mb,))
    bundle_start = start_position
    for split_key_id, weight in zip(split_keys, weights):
        if bundle_start >= stop_position:
            break
        bundle_end = min(stop_position, split_key_id['_id'])
        yield iobase.SourceBundle(weight=weight, source=self, start_position=bundle_start, stop_position=bundle_end)
        bundle_start = bundle_end
    # Emit a trailing bundle for any remaining key range.
    if bundle_start < stop_position:
        weight = 1 if self.bucket_auto else desired_bundle_size_in_mb
        yield iobase.SourceBundle(weight=weight, source=self, start_position=bundle_start, stop_position=stop_position) | Splits the source into a set of bundles.
Bundles should be approximately of size ``desired_bundle_size`` bytes.
Args:
desired_bundle_size: the desired size (in bytes) of the bundles returned.
start_position: if specified the given position must be used as the
starting position of the first bundle.
stop_position: if specified the given position must be used as the ending
position of the last bundle.
Returns:
an iterator of objects of type 'SourceBundle' that gives information about
the generated bundles. | github-repos |
def get(self, url):
    """Navigate to a specific url.

    This implementation can inject a javascript script to intercept
    javascript errors, controlled by the
    "proxy_driver:intercept_javascript_error" config.

    Args:
        url (str): the url to navigate to.

    Returns:
        bool: always True.
    """
    self._driver.get(url)
    if self.bot_diary:
        self.bot_diary.add_auto_entry('I went on', target=url, take_screenshot=True)
    if BROME_CONFIG['proxy_driver']['intercept_javascript_error']:
        self.init_javascript_error_interception()
    return True | Navigate to a specific url
This specific implementation inject a javascript
script to intercept the javascript error
Configurable with the "proxy_driver:intercept_javascript_error" config
Args:
url (str): the url to navigate to
Returns:
bool | codesearchnet |
def describe_field(field_definition):
    """Build descriptor for Field instance.

    Args:
        field_definition: Field instance to provide descriptor for.

    Returns:
        Initialized FieldDescriptor instance describing the Field instance.
    """
    field_descriptor = FieldDescriptor()
    field_descriptor.name = field_definition.name
    field_descriptor.number = field_definition.number
    field_descriptor.variant = field_definition.variant
    # Enum and message fields carry the referenced type's definition name.
    if isinstance(field_definition, messages.EnumField):
        field_descriptor.type_name = field_definition.type.definition_name()
    if isinstance(field_definition, messages.MessageField):
        field_descriptor.type_name = (
            field_definition.message_type.definition_name())
    if field_definition.default is not None:
        # Defaults are serialized via a per-field-type string converter.
        field_descriptor.default_value = _DEFAULT_TO_STRING_MAP[
            type(field_definition)](field_definition.default)
    if field_definition.repeated:
        field_descriptor.label = FieldDescriptor.Label.REPEATED
    elif field_definition.required:
        field_descriptor.label = FieldDescriptor.Label.REQUIRED
    else:
        field_descriptor.label = FieldDescriptor.Label.OPTIONAL
    return field_descriptor | Build descriptor for Field instance.
Args:
field_definition: Field instance to provide descriptor for.
Returns:
Initialized FieldDescriptor instance describing the Field instance. | juraj-google-style |
def _IsWindowsDrivePathSegment(cls, path_segment):
if ((len(path_segment) == 2) and (path_segment[1] == ':') and path_segment[0].isalpha()):
return True
path_segment = path_segment.upper()
return (path_segment in ('%%ENVIRON_SYSTEMDRIVE%%', '%SYSTEMDRIVE%')) | Determines if the path segment contains a Windows Drive indicator.
A drive indicator can be a drive letter or %SystemDrive%.
Args:
path_segment (str): path segment.
Returns:
bool: True if the path segment contains a Windows Drive indicator. | codesearchnet |
def _CreateOutputModule(self, options):
    """Creates the output module.

    Args:
        options (argparse.Namespace): command line arguments.

    Returns:
        OutputModule: output module.

    Raises:
        RuntimeError: if the output module cannot be created.
    """
    formatter_mediator = formatters_mediator.FormatterMediator(data_location=self._data_location)
    try:
        formatter_mediator.SetPreferredLanguageIdentifier(self._preferred_language)
    except (KeyError, TypeError) as exception:
        raise RuntimeError(exception)
    mediator = output_mediator.OutputMediator(self._knowledge_base, formatter_mediator, preferred_encoding=self.preferred_encoding)
    mediator.SetTimezone(self._preferred_time_zone)
    try:
        output_module = output_manager.OutputManager.NewOutputModule(self._output_format, mediator)
    except (KeyError, ValueError) as exception:
        raise RuntimeError('Unable to create output module with error: {0!s}'.format(exception))
    # Linear output modules write to a file instead of a store.
    if output_manager.OutputManager.IsLinearOutputModule(self._output_format):
        output_file_object = open(self._output_filename, 'wb')
        output_writer = tools.FileObjectOutputWriter(output_file_object)
        output_module.SetOutputWriter(output_writer)
    helpers_manager.ArgumentHelperManager.ParseOptions(options, output_module)
    # Keep prompting until every required module parameter is supplied.
    missing_parameters = output_module.GetMissingArguments()
    while missing_parameters:
        for parameter in missing_parameters:
            value = self._PromptUserForInput('Missing parameter {0:s} for output module'.format(parameter))
            if (value is None):
                logger.warning('Unable to set the missing parameter for: {0:s}'.format(parameter))
                continue
            setattr(options, parameter, value)
        helpers_manager.ArgumentHelperManager.ParseOptions(options, output_module)
        missing_parameters = output_module.GetMissingArguments()
    return output_module | Creates the output module.
Args:
options (argparse.Namespace): command line arguments.
Returns:
OutputModule: output module.
Raises:
RuntimeError: if the output module cannot be created. | codesearchnet |
def apply(self, read_tuple_name, read_tuple_id=None, synchronize_widths=True):
    """Apply profile on a read tuple name and update read tuple ID.

    Args:
        read_tuple_name (str): Read tuple name to be updated.
        read_tuple_id (id): New read tuple ID.
        synchronize_widths (bool): Update widths (in accordance to this
            profile).
    """
    # Name format: prefix__tuple_id__segments(__suffix...).
    parts = read_tuple_name.split("__")
    parts[0] = self._fill_right(parts[0], "-", self.prefix_width)
    if read_tuple_id is not None:
        # IDs are hex-encoded and left-padded with zeros.
        parts[1] = "{:x}".format(read_tuple_id)
    parts[1] = self._fill_left(parts[1], "0", self.read_tuple_id_width)
    if synchronize_widths:
        new_segments = []
        segments = parts[2][1:-1].split("),(")
        for segment in segments:
            # Segment fields: genome_id, chr_id, ?, start coor, end coor.
            values = segment.split(",")
            values[0] = values[0].zfill(self.genome_id_width)
            values[1] = values[1].zfill(self.chr_id_width)
            values[3] = values[3].zfill(self.coor_width)
            values[4] = values[4].zfill(self.coor_width)
            new_segments.append("(" + ",".join(values) + ")")
        parts[2] = ",".join(new_segments)
    return "__".join(parts) | Apply profile on a read tuple name and update read tuple ID.
Args:
read_tuple_name (str): Read tuple name to be updated.
read_tuple_id (id): New read tuple ID.
synchronize_widths (bool): Update widths (in accordance to this profile). | juraj-google-style |
def _make_sent_vector(self, sent: List, bucket_length: int =None) -> np.ndarray:
    """Transform a sentence to a Numpy array to be used as network input.

    Args:
        sent: input sentence.
        bucket_length: the width of the bucket.

    Returns:
        A 2d array where answer[i][j] contains the index of the j-th letter
        in the i-th word of the input sentence (with BEGIN/END/PAD markers).
    """
    bucket_length = bucket_length or len(sent)
    answer = np.zeros(shape=(bucket_length, MAX_WORD_LENGTH+2), dtype=np.int32)
    for i, word in enumerate(sent):
        answer[i, 0] = self.tags.tok2idx("BEGIN")
        # Words longer than MAX_WORD_LENGTH keep only their last characters.
        m = min(len(word), MAX_WORD_LENGTH)
        for j, x in enumerate(word[-m:]):
            answer[i, j+1] = self.symbols.tok2idx(x)
        answer[i, m+1] = self.tags.tok2idx("END")
        answer[i, m+2:] = self.tags.tok2idx("PAD")
    return answer | Transforms a sentence to Numpy array, which will be the network input.
Args:
sent: input sentence
bucket_length: the width of the bucket
Returns:
A 3d array, answer[i][j][k] contains the index of k-th letter
in j-th word of i-th input sentence. | juraj-google-style |
def nCr(n, r):
    """Calculate the binomial coefficient nCr.

    Args:
        n (int): total number of items.
        r (int): items to choose.

    Returns:
        int: nCr.
    """
    f = math.factorial
    # Integer floor division keeps the result exact; the previous float
    # division lost precision for large n (e.g. n > 60).
    return f(n) // (f(r) * f(n - r))
Args:
n (int): total number of items.
r (int): items to choose
Returns:
nCr. | codesearchnet |
def get_site_energy(self, site_index):
    """Compute the energy for a single site in the structure.

    Args:
        site_index (int): Index of site.

    Returns:
        (float): Energy of that site.
    """
    # Per-site decomposition is only meaningful for neutral cells.
    if self._charged:
        warn('Per atom energies for charged structures not supported in EwaldSummation')
    return np.sum(self._recip[:,site_index]) + np.sum(self._real[:,site_index]) \
        + self._point[site_index] | Compute the energy for a single site in the structure
Args:
site_index (int): Index of site
Returns:
(float) - Energy of that site | juraj-google-style |
def get_airports(self, country):
    """Return a list of all the airports for a given country.

    Returns a list of dicts, one per airport, with information such as the
    airport's iata code.

    Args:
        country (str): The country for which the airports will be fetched.
    """
    # The site uses dash-separated lowercase country names in its URLs.
    url = AIRPORT_BASE.format(country.replace(' ', '-'))
    return self._fr24.get_airports_data(url) | Returns a list of all the airports
For a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc
Args:
country (str): The country for which the airports will be fetched
Example::
from pyflightdata import FlightData
f=FlightData()
f.get_airports('India') | codesearchnet |
def alias_inplace_add(x, i, v):
    """Apply an inplace add on input x at index i with value v; aliases x.

    If i is None, x and v must be the same shape (x += v). If i is a
    scalar, x has a rank 1 higher than v's (x[i, :] += v). Otherwise x and
    v must have the same rank (x[i, :] += v).

    Args:
        x: A Tensor.
        i: None, a scalar or a vector.
        v: A Tensor.

    Returns:
        Returns x.
    """
    return _inplace_helper(x, i, v, gen_array_ops.inplace_add) | Applies an inplace add on input x at index i with value v. Aliases x.
If i is None, x and v must be the same shape. Computes
x += v;
If i is a scalar, x has a rank 1 higher than v's. Computes
x[i, :] += v;
Otherwise, x and v must have the same rank. Computes
x[i, :] += v;
Args:
x: A Tensor.
i: None, a scalar or a vector.
v: A Tensor.
Returns:
Returns x. | github-repos |
def log_variables(variables=None):
    """Log name, shape, type, collections and device for variables.

    Args:
        variables: iterable of variables; if not provided, then all global
            and local variables (in the default graph) are logged.
    """
    if variables is None:
        variables = tf.global_variables() + tf.local_variables()
    # One formatted log line per variable row.
    for row in format_variables(variables, join_lines=False):
        tf.logging.info(row) | Logs variable information.
This function logs the name, shape, type, collections, and device for either
all variables or a given iterable of variables. In the "Device" columns,
the nature of the variable (legacy or resource (for ResourceVariables)) is
also specified in parenthesis.
Args:
variables: iterable of variables; if not provided, then all variables
(in the default graph) are logged. | juraj-google-style |
def to_tensorflow_dataset(evset: EventSet, timestamps: str='timestamp') -> 'tensorflow.data.Dataset':
    """Convert an EventSet to a tensorflow Dataset.

    Args:
        evset: Input event set.
        timestamps: Output key containing the timestamps.

    Returns:
        TensorFlow dataset created from the EventSet.
    """
    tf = import_tf()
    # Indexes are flattened away; the dataset is a flat dict of features.
    if len(evset.schema.indexes) != 0:
        evset = drop_index(evset)
    data = evset.get_arbitrary_index_data()
    dict_data = {timestamps: data.timestamps}
    for feature_idx, feature in enumerate(evset.schema.features):
        dict_data[feature.name] = data.features[feature_idx]
    return tf.data.Dataset.from_tensor_slices(dict_data) | Converts an [`EventSet`][temporian.EventSet] to a tensorflow Dataset.
Usage example:
```python
evset = event_set(
timestamps=[1, 2, 3, 4],
features={
"f1": [10, 11, 12, 13],
"f2": [b"a", b"b", b"c", b"d"],
"label": [0, 1, 0, 1],
},
)
tf_dataset = tp.to_tensorflow_dataset(evset)
def extract_label(example):
label = example.pop("label")
return example, label
tf_dataset = tf_dataset.map(extract_label).batch(100)
model = ... # A Keras model
model.fit(tf_dataset)
```
Args:
evset: Input event set.
timestamps: Output key containing the timestamps.
Returns:
TensorFlow dataset created from EventSet. | github-repos |
def min_sequence_length(self, dataset_split):
    """Determine the minimum sequence length given a dataset_split.

    Args:
        dataset_split: A problem.DatasetSplit.

    Returns:
        The minimum length that a sequence can be for this dataset_split.
    """
    min_lengths = {
        problem.DatasetSplit.TRAIN: 8,
        problem.DatasetSplit.EVAL: 65,
        problem.DatasetSplit.TEST: 65,
    }
    return min_lengths[dataset_split]
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The minimum length that a sequence can be for this dataset_split. | juraj-google-style |
def __init__(self, data_location=None):
    """Initializes a formatter mediator object.

    Args:
        data_location (str): path of the formatter data files.
    """
    super(FormatterMediator, self).__init__()
    self._data_location = data_location
    # Language/LCID start at the class defaults until explicitly set.
    self._language_identifier = self.DEFAULT_LANGUAGE_IDENTIFIER
    self._lcid = self.DEFAULT_LCID
    # Lazily created Windows Event Log database reader.
    self._winevt_database_reader = None | Initializes a formatter mediator object.
Args:
data_location (str): path of the formatter data files. | juraj-google-style |
def detail_poi(self, **kwargs):
    """Obtain detailed info of a given POI.

    Args:
        family (str): Family code of the POI (3 chars).
        lang (str): Language code (*es* or *en*).
        id (int): Optional, ID of the POI to query. Passing value -1 will
            result in information from all POIs.

    Returns:
        Status boolean and parsed response (list[PoiDetails]), or
        message string in case of error.
    """
    params = {
        'language': util.language_code(kwargs.get('lang')),
        'family': kwargs.get('family')
    }
    if kwargs.get('id'):
        params['id'] = kwargs['id']
    result = self.make_request('detail_poi', {}, **params)
    if not util.check_result(result):
        return False, result.get('message', 'UNKNOWN ERROR')
    values = util.response_list(result, 'Data')
    return True, [emtype.PoiDetails(**a) for a in values] | Obtain detailed info of a given POI.
Args:
family (str): Family code of the POI (3 chars).
lang (str): Language code (*es* or *en*).
id (int): Optional, ID of the POI to query. Passing value -1 will
result in information from all POIs.
Returns:
Status boolean and parsed response (list[PoiDetails]), or
message string in case of error. | juraj-google-style |
def serialize(self):
    """Serialize this segment to a ``Segment`` message.

    Returns:
        ``Segment`` message.
    """
    formatting = hangouts_pb2.Formatting(
        bold=self.is_bold,
        italic=self.is_italic,
        strikethrough=self.is_strikethrough,
        underline=self.is_underline,
    )
    segment = hangouts_pb2.Segment(
        type=self.type_,
        text=self.text,
        formatting=formatting,
    )
    # link_data is only populated when this segment is a link.
    if self.link_target is not None:
        segment.link_data.link_target = self.link_target
    return segment
Returns:
``Segment`` message. | codesearchnet |
def unpack_grad_tuple(gv, gpt):
    """Unpack a previously packed collection of gradient tensors.

    Args:
      gv: A (grad, var) pair to be unpacked.
      gpt: A GradPackTuple describing the packing operation that produced gv.

    Returns:
      A list of (grad, var) pairs corresponding to the values that were
      originally packed into gv, maybe following subsequent operations like
      reduction.
    """
    widths = [shape.num_elements() for shape in gpt.shapes]
    with tf.device(gv[0][0].device):
        with tf.name_scope('unpack'):
            splits = tf.split(gv[0], widths)
            return [
                (tf.reshape(split, gpt.shapes[index]), gpt.vars[index])
                for index, split in enumerate(splits)
            ]
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction. | codesearchnet |
def days(value: Union[int, float]) -> Duration:
    """Converts input value from number of days to a `Duration` in seconds.

    Args:
        value: number of days.

    Returns:
        Equivalent number of seconds, as a float.
    """
    seconds_per_day = 24 * 60 * 60
    return float(value) * seconds_per_day
Example:
```python
>>> a = tp.event_set(
... # Dates are converted to unix timestamps
... timestamps=["2020-01-01", "2020-01-02", "2020-01-31"],
... features={"f1": [1, 5, -5]}
... )
>>> a.moving_sum(window_length=tp.duration.days(2))
indexes: ...
timestamps: ['2020-01-01T00:00:00' '2020-01-02T00:00:00'
'2020-01-31T00:00:00']
'f1': [ 1 6 -5]
...
```
Args:
value: number of days.
Returns:
Equivalent number of seconds. | github-repos |
def addSearchers(self, *searchers):
    """Add more transformed MIBs repositories.

    MibCompiler.compile will invoke each of configured searcher objects
    in order of their addition asking each if already transformed MIB
    module already exists and is more recent than specified.

    Args:
        searchers: searcher object(s)

    Returns:
        reference to itself (can be used for call chaining)
    """
    self._searchers.extend(searchers)
    if debug.logger & debug.flagCompiler:
        locations = ', '.join(str(searcher) for searcher in self._searchers)
        debug.logger('current compiled MIBs location(s): %s' % locations)
    return self
MibCompiler.compile will invoke each of configured searcher objects
in order of their addition asking each if already transformed MIB
module already exists and is more recent than specified.
Args:
searchers: searcher object(s)
Returns:
reference to itself (can be used for call chaining) | codesearchnet |
def create_module_graph(module_spec):
  """Creates a graph and loads Hub Module into it.

  Args:
    module_spec: the hub.ModuleSpec for the image module being used.

  Returns:
    graph: the tf.Graph that was created.
    bottleneck_tensor: the bottleneck values output by the module.
    resized_input_tensor: the input images, resized as expected by the module.
    wants_quantization: a boolean, whether the module has been instrumented
      with fake quantization ops.
  """
  height, width = hub.get_expected_image_size(module_spec)
  with tf.Graph().as_default() as graph:
    resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])
    m = hub.Module(module_spec)
    bottleneck_tensor = m(resized_input_tensor)
    # The module counts as quantization-instrumented when any fake-quant op
    # is present in its graph definition.
    wants_quantization = any(node.op in FAKE_QUANT_OPS
                             for node in graph.as_graph_def().node)
  return graph, bottleneck_tensor, resized_input_tensor, wants_quantization
Args:
module_spec: the hub.ModuleSpec for the image module being used.
Returns:
graph: the tf.Graph that was created.
bottleneck_tensor: the bottleneck values output by the module.
resized_input_tensor: the input images, resized as expected by the module.
wants_quantization: a boolean, whether the module has been instrumented
with fake quantization ops. | juraj-google-style |
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    """Extracts events from a Windows Registry key.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    installation_value = None
    string_values = {}
    for registry_value in registry_key.GetValues():
      # Ignore the default (nameless) value.
      if not registry_value.name:
        continue
      # InstallDate is handled separately below as an installation event.
      if (registry_value.name == 'InstallDate' and
          registry_value.DataIsInteger()):
        installation_value = registry_value
        continue
      # Ignore empty values and values that are not strings.
      if not registry_value.data or not registry_value.DataIsString():
        continue
      string_value_name = self._STRING_VALUE_NAME_STRINGS.get(
          registry_value.name, None)
      if not string_value_name:
        continue
      string_values[string_value_name] = registry_value.GetDataAsObject()
    values_dict = {}
    values_dict['Owner'] = string_values.get('owner', '')
    values_dict['Product name'] = string_values.get('product_name', '')
    values_dict['Service pack'] = string_values.get('service_pack', '')
    values_dict['Windows Version Information'] = string_values.get(
        'version', '')
    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_key.offset
    event_data.regvalue = values_dict
    event = time_events.DateTimeValuesEvent(
        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
    # An InstallDate value additionally yields a dedicated installation event.
    if installation_value:
      event_data = windows_events.WindowsRegistryInstallationEventData()
      event_data.key_path = registry_key.path
      event_data.offset = registry_key.offset
      event_data.owner = string_values.get('owner', None)
      event_data.product_name = string_values.get('product_name', None)
      event_data.service_pack = string_values.get('service_pack', None)
      event_data.version = string_values.get('version', None)
      installation_time = installation_value.GetDataAsObject()
      date_time = dfdatetime_posix_time.PosixTime(timestamp=installation_time)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_INSTALLATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key. | juraj-google-style |
def packtext(text, width=80):
    r"""Wrap ``text`` so that no line exceeds ``width`` characters.

    BUGFIX: the body previously began with a bare ``r`` expression (a
    dangling raw-string prefix left over from a stripped docstring), which
    raised NameError at call time; it is folded into this raw docstring.

    Args:
        text (str): text to wrap.
        width (int): maximum line width. Defaults to 80.

    Returns:
        str: the wrapped text, with double spaces removed and surrounding
        whitespace stripped.
    """
    import utool as ut
    import textwrap
    new_text = '\n'.join(textwrap.wrap(text, width))
    new_text = ut.remove_doublspaces(new_text).strip()
    return new_text
return new_text | r"""
Args:
text (str):
CommandLine:
python -m utool.util_str --exec-pack_paragraph --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> import utool as ut
>>> width = 80
>>> text = lorium_ipsum()
>>> result = packtext(text)
>>> print(result) | juraj-google-style |
def merge_and_fit(self, track, pairings):
    """Merges another track with this one, ordering the points based on a
    distance heuristic.

    Args:
        track (:obj:`Track`): Track to merge with
        pairings: iterable of (self_segment_index, track_segment_index, _)
            tuples pairing segments of both tracks.

    Returns:
        self
    """
    for (self_seg_index, track_seg_index, _) in pairings:
        self_s = self.segments[self_seg_index]
        ss_start = self_s.points[0]
        track_s = track.segments[track_seg_index]
        tt_start = track_s.points[0]
        tt_end = track_s.points[-1]
        d_start = ss_start.distance(tt_start)
        d_end = ss_start.distance(tt_end)
        # If the other segment's end is closer than its start, it runs in the
        # opposite direction; reverse a copy of it before merging.
        if d_start > d_end:
            track_s = track_s.copy()
            track_s.points = list(reversed(track_s.points))
        self_s.merge_and_fit(track_s)
    return self
distance heuristic
Args:
track (:obj:`Track`): Track to merge with
pairings
Returns:
:obj:`Segment`: self | juraj-google-style |
def get_metar(
        metar: typing.Union[str, 'CustomMetar']
) -> typing.Tuple[typing.Union[str, None], typing.Union['CustomMetar', None]]:
    """Builds a CustomMetar object from a CustomMetar object (returns it), an
    ICAO code or a METAR string.

    Args:
        metar: CustomMetar object, 4-letter ICAO string or METAR string

    Returns:
        Tuple of (error message or None, CustomMetar object or None).
    """
    error: typing.Optional[str] = None
    if isinstance(metar, CustomMetar):
        return None, metar
    if isinstance(metar, str):
        LOGGER.debug('building CustomMetar from: %s', metar)
        # A 4-character string is assumed to be an ICAO station code.
        if len(metar) == 4:
            LOGGER.debug('retrieving METAR from ICAO')
            metar = AWC.query_icao(metar).raw_metar
    else:
        error = f'expected a string or or a CustomMetar object, got: {type(metar)}'
    if error:
        return error, None
    try:
        return None, CustomMetar(metar_code=metar)
    except ParserError:
        return f'Unable to parse METAR: {metar}', None
Args:
metar: CustomMetar object, ICAO string or METAR string
Returns: CustomMetar object | juraj-google-style |
def _anonymize_table(cls, table_data, pii_fields):
    """Anonymize in `table_data` the fields in `pii_fields`.

    Args:
        table_data (pandas.DataFrame): Original dataframe/table.
        pii_fields (list[dict]): Metadata for the fields to transform.

    Returns:
        pandas.DataFrame: Anonymized table.
    """
    for field_meta in pii_fields:
        column_name = field_meta['name']
        transformer_class = cls.get_class(TRANSFORMERS['categorical'])
        transformer = transformer_class(field_meta)
        table_data[column_name] = transformer.anonymize_column(table_data)
    return table_data
Args:
table_data (pandas.DataFrame): Original dataframe/table.
pii_fields (list[dict]): Metadata for the fields to transform.
Result:
pandas.DataFrame: Anonymized table. | codesearchnet |
def _embedding_lookup_for_ragged_tensor(self, inp: ragged_tensor.RaggedTensor, weight: Optional[ragged_tensor.RaggedTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:
    """Embedding lookup for ragged tensor based on its feature config.

    Args:
      inp: a single rank 2 RaggedTensor input.
      weight: None or RaggedTensor which has the same shape of the input.
      table: a table variable.
      feature: a feature config.

    Returns:
      Embedding lookup result.

    Raises:
      ValueError: if input ragged tensor is not rank 2 or output shape set in
        the feature config doesn't match with the first dim size of the input.
    """
    if inp.shape.rank != 2:
        raise ValueError('Only rank 2 ragged tensor is supported, but got rank {}'.format(inp.shape.rank))
    batch_size = inp.shape[0]

    # Densification happens outside the TPU computation, via
    # outside_compilation, since ragged ops are not supported inside it.
    def ragged_to_dense_outside_compilation(inp, weight, batch_size, feature):
        # Default to uniform weights when none are supplied.
        if weight is None:
            weight = ragged_tensor.RaggedTensor.from_row_splits(array_ops.ones_like(inp.values, dtype=dtypes.float32), inp.row_splits)
        if not feature.output_shape and feature.max_sequence_length > 0:
            inp = inp.to_tensor(shape=(batch_size, feature.max_sequence_length))
            # Sequence features ignore the supplied weights.
            weight = array_ops.ones_like(inp, dtype=dtypes.float32)
        elif feature.output_shape:
            with ops.init_scope():
                output_batch_size = math_ops.reduce_prod(feature.output_shape).numpy()
            if output_batch_size == batch_size:
                inp, weight = (inp.to_tensor(), weight.to_tensor())
            elif output_batch_size > batch_size and output_batch_size % batch_size == 0:
                # NOTE(review): seq_length equals the full output batch size
                # here; confirm whether output_batch_size // batch_size was
                # intended.
                seq_length = output_batch_size
                inp = inp.to_tensor(shape=(batch_size, seq_length))
                weight = array_ops.ones_like(inp, dtype=dtypes.float32)
            else:
                raise ValueError('Output shape set in the FeatureConfig should be the factor of the input data batch size. But instead got output shape {}, input data batch size {}'.format(feature.output_shape, batch_size))
        else:
            inp, weight = (inp.to_tensor(), weight.to_tensor())
        return (inp, weight)
    inp, weight = tpu_replication.outside_compilation(ragged_to_dense_outside_compilation, inp=inp, weight=weight, batch_size=batch_size, feature=feature)
    embeddings = embedding_ops.embedding_lookup_v2(table, inp)
    weight = array_ops.expand_dims(weight, -1)
    embeddings *= weight
    if feature.output_shape:
        with ops.init_scope():
            output_batch_size = math_ops.reduce_prod(feature.output_shape).numpy()
        if output_batch_size == batch_size:
            embeddings = self._apply_combiner_to_embeddings(embeddings, weight, feature.table.combiner)
            embeddings = array_ops.reshape(embeddings, shape=feature.output_shape + [feature.table.dim])
    elif feature.max_sequence_length == 0:
        embeddings = self._apply_combiner_to_embeddings(embeddings, weight, feature.table.combiner)
    return embeddings
Args:
inp: a single rank 2 RaggedTensor input.
weight: None or RaggedTensor which has the same shape of the input.
table: a table variable.
feature: a feature config.
Returns:
Embedding lookup result.
Raises:
ValueError: if input ragged tensor is not rank 2 or output shape set in
the feature config doesn't match with the first dim size of the input. | github-repos |
def settings(package, reload_=False):
    """Returns the config settings for the specified package.

    Args:
        package (str): name of the python package to get settings for.
        reload_ (bool): when True, re-read the configuration even if it is
            already cached.

    Returns:
        CaseConfigParser: the parsed configuration for `package`.
    """
    # Module-level cache of parsed configurations, keyed by package name.
    global packages
    if package not in packages or reload_:
        # NOTE(review): `path` appears unused in this function.
        from os import path
        result = CaseConfigParser()
        # The package-specific config is read first; acorn's own config is
        # always read afterwards (presumably so later reads take precedence;
        # confirm intended override order).
        if package != "acorn":
            confpath = _package_path(package)
            _read_single(result, confpath)
        _read_single(result, _package_path("acorn"))
        packages[package] = result
    return packages[package]
Args:
package (str): name of the python package to get settings for. | juraj-google-style |
def ReadSerializedDict(cls, json_dict):
    """Reads an attribute container from serialized dictionary form.

    Args:
      json_dict (dict[str, object]): JSON serialized objects.

    Returns:
      AttributeContainer: attribute container or None.

    Raises:
      TypeError: if the serialized dictionary does not contain an
          AttributeContainer.
    """
    if not json_dict:
        return None
    json_object = cls._ConvertDictToObject(json_dict)
    if not isinstance(json_object, containers_interface.AttributeContainer):
        raise TypeError('{0:s} is not an attribute container type.'.format(type(json_object)))
    return json_object
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
AttributeContainer: attribute container or None.
Raises:
TypeError: if the serialized dictionary does not contain an
AttributeContainer. | codesearchnet |
def _to_live_trigger_log(self, **kwargs):
    """Make a new, non-archived :class:`.TriggerLog` instance with duplicate data.

    Args:
        **kwargs: Set as attributes of the new instance, overriding what would
            otherwise be copied from ``self``.

    Returns:
        The new (unpersisted) :class:`TriggerLog` instance.
    """
    attributes = {
        field.name: getattr(self, field.name)
        for field in TriggerLogAbstract._meta.get_fields()
    }
    # The live row must not reuse this record's primary key.
    attributes.pop('id')
    attributes.update(kwargs)
    return TriggerLog(**attributes)
Args:
**kwargs: Set as attributes of the new instance, overriding what would otherwise be
copied from ``self``.
Returns:
The new (unpersisted) :class:`TriggerLog` instance. | juraj-google-style |
def __init__(self, step_name, transform_id=None):
    """Creates a new step NameContext.

    Args:
      step_name: The name of the step.
      transform_id: Optional identifier of the transform this step belongs to.
    """
    self.step_name = step_name
    self.transform_id = transform_id
Args:
step_name: The name of the step. | github-repos |
def datasets_list(self, project_id=None, max_results=0, page_token=None):
    """Issues a request to list the datasets in the project.

    Args:
      project_id: the project id to use to fetch the results; use None for
          the default project.
      max_results: an optional maximum number of tables to retrieve;
          0 means no explicit limit.
      page_token: an optional token to continue the retrieval.

    Returns:
      A parsed result object.
    """
    if (project_id is None):
        project_id = self._project_id
    url = (Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, '')))
    args = {}
    if (max_results != 0):
        args['maxResults'] = max_results
    if (page_token is not None):
        args['pageToken'] = page_token
    return datalab.utils.Http.request(url, args=args, credentials=self._credentials)
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | codesearchnet |
def _get_descending_key(gettime=time.time):
    """Returns a key name lexically ordered by time descending.

    This lets us have a key name for use with Datastore entities which
    returns rows in time descending order when it is scanned in lexically
    ascending order, allowing us to bypass index building for descending
    indexes.

    Args:
      gettime: callable returning the current time; used for testing.

    Returns:
      A string with a time descending key.
    """
    remaining = int((_FUTURE_TIME - gettime()) * 100)
    # Disambiguate keys created at the same hundredth of a second.
    suffix = os.environ.get("REQUEST_ID_HASH") or str(random.getrandbits(32))
    return "%d%s" % (remaining, suffix)
This lets us have a key name for use with Datastore entities which returns
rows in time descending order when it is scanned in lexically ascending order,
allowing us to bypass index building for descending indexes.
Args:
gettime: Used for testing.
Returns:
A string with a time descending key. | juraj-google-style |
def editline_with_regex(self, regex_tgtline, to_replace):
    """Replace the first line matching a regular expression.

    Args:
        regex_tgtline (str): regular expression matched against the start of
            each line (via ``re.match``).
        to_replace (str): line used to replace the first match.
    """
    pattern = re.compile(regex_tgtline)
    for index, line in enumerate(self._swp_lines):
        if pattern.match(line):
            self._swp_lines[index] = to_replace
            return
Args:
regex_tgtline (str): regular expression used to match the target line
to_replace (str): line you wanna use to replace | juraj-google-style |
def emit_pid(self, name: str, pid: int) -> None:
    """Adds a process metadata event to the trace.

    Args:
      name: The process name as a string.
      pid: Identifier of the process as an integer.
    """
    self._metadata.append({
        'name': 'process_name',
        'ph': 'M',  # metadata event phase
        'pid': pid,
        'args': {'name': name},
    })
Args:
name: The process name as a string.
pid: Identifier of the process as an integer. | github-repos |
def convert_reshape(params, w_name, scope_name, inputs, layers, weights, names):
    """Convert reshape layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting reshape ...')
    if (names == 'short'):
        tf_name = ('RESH' + random_string(4))
    elif (names == 'keep'):
        tf_name = w_name
    else:
        tf_name = (w_name + str(random.random()))
    # Two inputs means the target shape is provided by a second tensor.
    if (len(inputs) > 1):
        if (layers[inputs[1]][0] == (- 1)):
            print('Cannot deduct batch size! It will be omitted, but result may be wrong.')
        reshape = keras.layers.Reshape(layers[(inputs[1] + '_np')], name=tf_name)
        layers[scope_name] = reshape(layers[inputs[0]])
    elif (inputs[0] in layers):
        # Keras Reshape excludes the batch dimension, hence shape[1:].
        reshape = keras.layers.Reshape(params['shape'][1:], name=tf_name)
        layers[scope_name] = reshape(layers[inputs[0]])
    else:
        print('Skip weight matrix transpose, but result may be wrong.')
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | codesearchnet |
def visit(self, visitor, visitor_arg):
    """Visitor method to visit all inner types of a composite type.

    Args:
      visitor: A callable invoked for all nodes in the type tree comprising
        a composite type. The visitor will be called with the node visited
        and the visitor argument specified here.
      visitor_arg: Visitor callback second argument.
    """
    visitor(self, visitor_arg)
    for inner in self._inner_types():
        if isinstance(inner, TypeConstraint):
            # Composite inner types recurse through their own visit().
            inner.visit(visitor, visitor_arg)
        else:
            visitor(inner, visitor_arg)
Args:
visitor: A callable invoked for all nodes in the type tree comprising
a composite type. The visitor will be called with the node visited
and the visitor argument specified here.
visitor_arg: Visitor callback second argument. | github-repos |
def _align_monomer(self, monomer, mon_vector, move_direction):
    """Rotate the monomer so that it is aligned along the move direction.

    Args:
        monomer (Molecule)
        mon_vector (numpy.array): molecule vector that starts from the
            start atom index to the end atom index
        move_direction (numpy.array): the direction of the polymer chain
            extension
    """
    # Rotation axis is perpendicular to both the monomer vector and the
    # desired direction; angle is the angle between them.
    axis = np.cross(mon_vector, move_direction)
    origin = monomer[self.start].coords
    angle = get_angle(mon_vector, move_direction)
    op = SymmOp.from_origin_axis_angle(origin, axis, angle)
    # Rotates the monomer in place.
    monomer.apply_operation(op)
Args:
monomer (Molecule)
mon_vector (numpy.array): molecule vector that starts from the
start atom index to the end atom index
move_direction (numpy.array): the direction of the polymer chain
extension | codesearchnet |
def copy_submission_to_destination(self, src_filename, dst_subdir, submission_id):
    """Copies submission to target directory.

    Args:
        src_filename: source filename of the submission
        dst_subdir: subdirectory of the target directory where submission
            should be copied to
        submission_id: ID of the submission, will be used as a new
            submission filename (before extension)
    """
    extension = [e for e in ALLOWED_EXTENSIONS if src_filename.endswith(e)]
    # Require exactly one recognized extension; otherwise skip the copy.
    if (len(extension) != 1):
        logging.error('Invalid submission extension: %s', src_filename)
        return
    dst_filename = os.path.join(self.target_dir, dst_subdir, (submission_id + extension[0]))
    cmd = ['gsutil', 'cp', src_filename, dst_filename]
    if (subprocess.call(cmd) != 0):
        logging.error("Can't copy submission to destination")
    else:
        logging.info('Submission copied to: %s', dst_filename)
Args:
src_filename: source filename of the submission
dst_subdir: subdirectory of the target directory where submission should
be copied to
submission_id: ID of the submission, will be used as a new
submission filename (before extension) | codesearchnet |
def make_ndarray(tensor):
  """Create a numpy ndarray from a tensor.

  Create a numpy ndarray with the same shape and data as the tensor.

  Args:
    tensor: A TensorProto.

  Returns:
    A numpy array with the tensor contents.

  Raises:
    TypeError: if tensor has unsupported type.
  """
  shape = [d.size for d in tensor.tensor_shape.dim]
  num_elements = np.prod(shape, dtype=np.int64)
  tensor_dtype = dtypes.as_dtype(tensor.dtype)
  dtype = tensor_dtype.as_numpy_dtype
  # Fast path: raw bytes were serialized directly.
  if tensor.tensor_content:
    return np.frombuffer(tensor.tensor_content, dtype=dtype).copy().reshape(shape)
  elif tensor_dtype == dtypes.float16 or tensor_dtype == dtypes.bfloat16:
    # 16-bit floats are stored as uint16 bit patterns; reinterpret in place.
    if len(tensor.half_val) == 1:
      tmp = np.array(tensor.half_val[0], dtype=np.uint16)
      tmp.dtype = tensor_dtype.as_numpy_dtype
      return np.repeat(tmp, num_elements).reshape(shape)
    else:
      tmp = np.fromiter(tensor.half_val, dtype=np.uint16)
      tmp.dtype = tensor_dtype.as_numpy_dtype
      return tmp.reshape(shape)
  elif tensor_dtype == dtypes.float32:
    # A single value means a splat: broadcast it over all elements.
    if len(tensor.float_val) == 1:
      return np.repeat(
          np.array(tensor.float_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float64:
    if len(tensor.double_val) == 1:
      return np.repeat(
          np.array(tensor.double_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)
  elif tensor_dtype in [
      dtypes.int32,
      dtypes.uint8,
      dtypes.uint16,
      dtypes.int16,
      dtypes.int8,
      dtypes.qint32,
      dtypes.quint8,
      dtypes.qint8,
      dtypes.qint16,
      dtypes.quint16,
  ]:
    if len(tensor.int_val) == 1:
      return np.repeat(
          np.array(tensor.int_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.int64:
    if len(tensor.int64_val) == 1:
      return np.repeat(
          np.array(tensor.int64_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.string:
    if len(tensor.string_val) == 1:
      return np.repeat(
          np.array(tensor.string_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.array([x for x in tensor.string_val], dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex64:
    # Complex values are serialized as interleaved (real, imag) pairs.
    it = iter(tensor.scomplex_val)
    if len(tensor.scomplex_val) == 2:
      return np.repeat(
          np.array(
              complex(tensor.scomplex_val[0], tensor.scomplex_val[1]), dtype=dtype
          ),
          num_elements,
      ).reshape(shape)
    else:
      return np.array(
          [complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype
      ).reshape(shape)
  elif tensor_dtype == dtypes.complex128:
    it = iter(tensor.dcomplex_val)
    if len(tensor.dcomplex_val) == 2:
      return np.repeat(
          np.array(
              complex(tensor.dcomplex_val[0], tensor.dcomplex_val[1]), dtype=dtype
          ),
          num_elements,
      ).reshape(shape)
    else:
      return np.array(
          [complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype
      ).reshape(shape)
  elif tensor_dtype == dtypes.bool:
    if len(tensor.bool_val) == 1:
      return np.repeat(
          np.array(tensor.bool_val[0], dtype=dtype), num_elements
      ).reshape(shape)
    else:
      return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)
  else:
    raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
Create a numpy ndarray with the same shape and data as the tensor.
Args:
tensor: A TensorProto.
Returns:
A numpy array with the tensor contents.
Raises:
TypeError: if tensor has unsupported type. | juraj-google-style |
def _get_next_task_from_raylet(self):
    """Get the next task from the raylet.

    Returns:
        A task from the raylet.
    """
    # The blocking wait for a task is recorded as idle time in the profiler.
    with profiling.profile('worker_idle'):
        task = self.raylet_client.get_task()
    # Restrict visible CUDA devices to the GPU IDs assigned to this worker.
    ray.utils.set_cuda_visible_devices(ray.get_gpu_ids())
    return task
Returns:
A task from the raylet. | codesearchnet |
def build_case(case, vcf_individuals=None, case_id=None, vcf_path=None, sv_individuals=None, vcf_sv_path=None, nr_variants=None, nr_sv_variants=None, profiles=None, matches=None, profile_path=None):
    """Build a Case from the given information.

    Args:
        case(ped_parser.Family): A family object
        vcf_individuals(list): Show the order of inds in vcf file
        case_id(str): If another name than the one in family file should be used
        vcf_path(str)
        sv_individuals(list): Show the order of inds in vcf file
        vcf_sv_path(str)
        nr_variants(int)
        nr_sv_variants(int)
        profiles(dict): The profiles for each sample in vcf
        matches(dict(list)): list of similar samples for each sample in vcf.
        profile_path(str)

    Returns:
        case_obj(models.Case)

    Raises:
        CaseError: if no case id can be determined, or an individual in the
            ped file is missing from the VCF.
    """
    individual_positions = get_individual_positions(vcf_individuals)
    sv_individual_positions = get_individual_positions(sv_individuals)
    family_id = None
    if case:
        if not case.affected_individuals:
            LOG.warning('No affected individuals could be found in ped file')
        family_id = case.family_id
    case_id = case_id or family_id
    if case_id is None:
        # BUGFIX: previously raised the bare exception class with no message.
        raise CaseError('Could not determine a case id')
    case_obj = Case(case_id=case_id)
    if vcf_path:
        case_obj['vcf_path'] = vcf_path
        case_obj['nr_variants'] = nr_variants
    if vcf_sv_path:
        case_obj['vcf_sv_path'] = vcf_sv_path
        case_obj['nr_sv_variants'] = nr_sv_variants
    if profile_path:
        case_obj['profile_path'] = profile_path
    ind_objs = []
    if case:
        # Prefer SNV VCF positions; fall back to SV positions when only a
        # SV VCF was provided.
        if individual_positions:
            _ind_pos = individual_positions
        else:
            _ind_pos = sv_individual_positions
        for ind_id in case.individuals:
            individual = case.individuals[ind_id]
            try:
                profile = profiles[ind_id] if profiles else None
                similar_samples = matches[ind_id] if matches else None
                ind_obj = Individual(ind_id=ind_id, case_id=case_id, ind_index=_ind_pos[ind_id], sex=individual.sex, profile=profile, similar_samples=similar_samples)
                ind_objs.append(dict(ind_obj))
            except KeyError:
                # BUGFIX: the message was previously passed unformatted
                # ('%s' plus a separate argument) to the exception, so the
                # individual id never appeared in the error text.
                raise CaseError('Ind %s in ped file does not exist in VCF' % ind_id)
    else:
        for ind_id in individual_positions:
            profile = profiles[ind_id] if profiles else None
            similar_samples = matches[ind_id] if matches else None
            ind_obj = Individual(ind_id=ind_id, case_id=case_id, ind_index=individual_positions[ind_id], profile=profile, similar_samples=similar_samples)
            ind_objs.append(dict(ind_obj))
    for ind_obj in ind_objs:
        if vcf_sv_path:
            case_obj['sv_individuals'].append(dict(ind_obj))
            case_obj['_sv_inds'][ind_obj['ind_id']] = dict(ind_obj)
        if vcf_path:
            case_obj['individuals'].append(dict(ind_obj))
            case_obj['_inds'][ind_obj['ind_id']] = dict(ind_obj)
    return case_obj
Args:
case(ped_parser.Family): A family object
vcf_individuals(list): Show the order of inds in vcf file
case_id(str): If another name than the one in family file should be used
vcf_path(str)
sv_individuals(list): Show the order of inds in vcf file
vcf_sv_path(str)
nr_variants(int)
nr_sv_variants(int)
profiles(dict): The profiles for each sample in vcf
matches(dict(list)): list of similar samples for each sample in vcf.
Returns:
case_obj(models.Case) | codesearchnet |
def any_sparse(classes):
    """Checks for sparse tensor.

    Args:
      classes: a structure of objects that identify the dataset item classes

    Returns:
      `True` if `classes` contains a sparse tensor type and `False` otherwise.
    """
    for item_class in nest.flatten(classes):
        if item_class is sparse_tensor.SparseTensor:
            return True
    return False
Args:
classes: a structure of objects that identify the dataset item classes
Returns:
`True` if `classes` contains a sparse tensor type and `False` otherwise. | github-repos |
def __init__(self, unique_identifier=None, usage_limits_count=None):
        """Construct a GetUsageAllocation request payload struct.

        Args:
            unique_identifier (string): The ID of the managed object (e.g.,
                a public key) to obtain a usage allocation for. Optional,
                defaults to None.
            usage_limits_count (int): The number of usage limits units that
                should be reserved for the object. Optional, defaults to None.
        """
        super(GetUsageAllocationRequestPayload, self).__init__(
            enums.Tags.REQUEST_PAYLOAD
        )
        self._unique_identifier = None
        self._usage_limits_count = None
        # Assign via the public properties (presumably performing
        # validation there — confirm against the property setters).
        self.unique_identifier = unique_identifier
        self.usage_limits_count = usage_limits_count
Args:
unique_identifier (string): The ID of the managed object (e.g.,
a public key) to obtain a usage allocation for. Optional,
defaults to None.
usage_limits_count (int): The number of usage limits units that
should be reserved for the object. Optional, defaults to None. | juraj-google-style |
def from_string(cls, cl_function, dependencies=()):
    """Parse the given CL function into a SimpleCLFunction object.

    Args:
        cl_function (str): the function we wish to turn into an object
        dependencies (list or tuple of CLLibrary): The list of CL libraries
            this function depends on

    Returns:
        SimpleCLFunction: the parsed function object
    """
    parts = split_cl_function(cl_function)
    return_type, function_name, parameter_list, body = parts
    return SimpleCLFunction(
        return_type, function_name, parameter_list, body,
        dependencies=dependencies)
Args:
cl_function (str): the function we wish to turn into an object
dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on
Returns:
SimpleCLFunction: the CL data type for this parameter declaration | juraj-google-style |
def write_int16(self, value, little_endian=True):
    """Pack the value as a signed 16-bit integer and write 2 bytes to the stream.

    Args:
        value: the integer value to write.
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    endian = '<' if little_endian else '>'
    return self.pack('%sh' % endian, value)
Args:
value:
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
int: the number of bytes written. | codesearchnet |
def _load_audio_list(self, path):
        """Load and filter the audio list.

        Args:
            path (str): Path to the audio list file.

        Returns:
            dict: Dictionary of filtered sentences
                (id : [username, license, attribution-url])
        """
        result = {}
        for entry in textfile.read_separated_lines_generator(path, separator='\t', max_columns=4):
            # '\N' is the file's null marker; normalize it to None.
            for i in range(len(entry)):
                if entry[i] == '\\N':
                    entry[i] = None
            # Pad short rows so every entry has exactly 4 columns.
            if len(entry) < 4:
                entry.extend([None] * (4 - len(entry)))
            # Column 2 is the license; apply the configured filters.
            if not self.include_empty_licence and entry[2] is None:
                continue
            if self.include_licenses is not None and entry[2] not in self.include_licenses:
                continue
            result[entry[0]] = entry[1:]
        return result
Args:
path (str): Path to the audio list file.
Returns:
dict: Dictionary of filtered sentences (id : username, license, attribution-url) | juraj-google-style |
def commit_signature(vcs, user_config, signature):
    """Add `signature` to the list of committed signatures.

    The signature must already be staged.

    Args:
        vcs (easyci.vcs.base.Vcs)
        user_config (dict)
        signature (basestring)

    Raises:
        NotStagedError
        AlreadyCommittedError
    """
    if signature not in get_staged_signatures(vcs):
        raise NotStagedError
    evidence_path = _get_committed_history_path(vcs)
    committed_signatures = get_committed_signatures(vcs)
    if signature in committed_signatures:
        raise AlreadyCommittedError
    committed_signatures.append(signature)
    # Keep only the most recent history_limit entries.
    string = '\n'.join(committed_signatures[-user_config['history_limit']:])
    with open(evidence_path, 'w') as f:
        f.write(string)
    unstage_signature(vcs, signature)
unstage_signature(vcs, signature) | Add `signature` to the list of committed signatures
The signature must already be staged
Args:
vcs (easyci.vcs.base.Vcs)
user_config (dict)
signature (basestring)
Raises:
NotStagedError
AlreadyCommittedError | juraj-google-style |
def get(self, entity, key):
    """Gets an item from the cache.

    Args:
        entity: The entity cache to use.
        key: The key to use to look up the cached item.

    Returns:
        The cached item, or None when the entity or key is unknown.
    """
    try:
        bucket = self._store[entity]
    except KeyError:
        return None
    # Keys are stored by their string representation.
    return bucket.get(str(key))
Args:
entity: The entity cache to use.
key: The key to use to lookup the cached item. | github-repos |
def ParseRecord(self, parser_mediator, key, structure):
    """Parse each record structure and return an EventObject if applicable.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      key (str): identifier of the structure of tokens.
      structure (pyparsing.ParseResults): structure of tokens derived from
          a line of a text file.

    Raises:
      ParseError: when the structure type is unknown.
    """
    if key == 'logline':
        self._ParseLogline(parser_mediator, structure)
    elif key == 'no_header_single_line':
        self._ParseNoHeaderSingleLine(parser_mediator, structure)
    else:
        raise errors.ParseError(
            'Unable to parse record, unknown structure: {0:s}'.format(key))
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown. | juraj-google-style |
def _verify_docker_image_size(self, image_name):
    """Verifies size of Docker image.

    Args:
        image_name: name of the Docker image.

    Returns:
        True if image size is within the limits, False otherwise.
    """
    # Pull first so `docker inspect` sees the image locally.
    shell_call(['docker', 'pull', image_name])
    try:
        image_size = subprocess.check_output(['docker', 'inspect', '--format={{.Size}}', image_name]).strip()
        image_size = int(image_size)
    except (ValueError, subprocess.CalledProcessError) as e:
        logging.error('Failed to determine docker image size: %s', e)
        return False
    logging.info('Size of docker image %s is %d', image_name, image_size)
    if (image_size > MAX_DOCKER_IMAGE_SIZE):
        logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE)
    return (image_size <= MAX_DOCKER_IMAGE_SIZE)
Args:
image_name: name of the Docker image.
Returns:
True if image size is within the limits, False otherwise. | codesearchnet |
def createURL(self, word, mode="phonefy"):
    """Create the URL by replacing the word in the appropriate template.

    BUGFIX: the bare ``except:`` clauses also swallowed SystemExit and
    KeyboardInterrupt; they are narrowed to ``except Exception``.

    Args:
        word: Word to be searched.
        mode: Mode to be executed.

    Returns:
        The URL to be queried, or None when no template matches. NOTE:
        for mode "base" with a word starting with "/", a (url, word)
        tuple is returned instead of a plain string, preserving the
        historical behaviour callers may rely on.
    """
    try:
        return self.modes[mode]["url"].format(
            placeholder=urllib.pathname2url(word))
    except Exception:
        # No per-mode template available; fall back to the legacy fields.
        if mode == "base":
            if word[0] == "/":
                return self.baseURL + word[1:], word
            else:
                return self.baseURL + word
        else:
            try:
                return self.url[mode].replace(
                    "<" + mode + ">", urllib.pathname2url(word))
            except Exception:
                pass
    return None
Args:
-----
word: Word to be searched.
mode: Mode to be executed.
Return:
-------
The URL to be queried. | juraj-google-style |
def encode(self, sequence):
    """Encodes a ``sequence``.

    Args:
        sequence (str): String ``sequence`` to encode.

    Returns:
        torch.Tensor: Encoding of the ``sequence``.
    """
    preprocessed = super().encode(sequence)
    tokens = self.tokenize(preprocessed)
    # Unknown tokens map to the unknown index.
    indices = [self.stoi.get(token, self.unknown_index) for token in tokens]
    if self.append_eos:
        indices.append(self.eos_index)
    return torch.tensor(indices)
Args:
sequence (str): String ``sequence`` to encode.
Returns:
torch.Tensor: Encoding of the ``sequence``. | juraj-google-style |
def delete(self, vid):
    """Deletes a VLAN from the running configuration.

    Args:
        vid (str): The VLAN ID to delete

    Returns:
        True if the operation was successful otherwise False
    """
    if not isvlan(vid):
        return False
    return self.configure('no vlan %s' % vid)
Args:
vid (str): The VLAN ID to delete
Returns:
True if the operation was successful otherwise False | juraj-google-style |
def get_device(ads, **kwargs):
    """Finds a unique AndroidDevice instance from a list that has specific
    attributes of certain values.

    Example:
        get_device(android_devices, label='foo', phone_number='1234567890')
        get_device(android_devices, model='angler')

    Args:
        ads: A list of AndroidDevice instances.
        kwargs: keyword arguments used to filter AndroidDevice instances.

    Returns:
        The target AndroidDevice instance.

    Raises:
        Error: None or more than one device is matched.
    """
    matched = get_devices(ads, **kwargs)
    if len(matched) == 1:
        return matched[0]
    serials = [ad.serial for ad in matched]
    raise Error('More than one device matched: %s' % serials)
attributes of certain values.
Example:
get_device(android_devices, label='foo', phone_number='1234567890')
get_device(android_devices, model='angler')
Args:
ads: A list of AndroidDevice instances.
kwargs: keyword arguments used to filter AndroidDevice instances.
Returns:
The target AndroidDevice instance.
Raises:
Error: None or more than one device is matched. | github-repos |
def __init__(self, in_features: int, lateral_widths: List[int], feature_size: int=256):
        """Feature Pyramid Network: given an input tensor and a set of feature
        maps of different feature/spatial size, it creates a list of feature
        maps with the same feature size.

        Args:
            in_features (`int`):
                The number of input features (channels).
            lateral_widths (`List[int]`):
                A list with the features (channels) size of each lateral
                connection.
            feature_size (int, *optional*, defaults to 256):
                The features (channels) of the resulting feature maps.
        """
        super().__init__()
        self.stem = MaskFormerFPNConvLayer(in_features, feature_size)
        # Lateral connections are consumed from the deepest feature map first,
        # hence the reversed order.
        self.layers = nn.Sequential(*[MaskFormerFPNLayer(feature_size, lateral_width) for lateral_width in lateral_widths[::-1]])
creates a list of feature maps with the same feature size.
Args:
in_features (`int`):
The number of input features (channels).
lateral_widths (`List[int]`):
A list with the features (channels) size of each lateral connection.
feature_size (int, *optional*, defaults to 256):
The features (channels) of the resulting feature maps. | github-repos |
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a CUPS IPP file-like object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): file-like object.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    self._last_charset_attribute = 'ascii'
    self._ParseHeader(parser_mediator, file_object)
    data_dict = {}
    time_dict = {}
    try:
      # Date/time attributes are collected separately so each one can be
      # emitted as its own event below.
      for (name, value) in self._ParseAttributesGroup(file_object):
        name = self._ATTRIBUTE_NAME_TRANSLATION.get(name, name)
        if (name in self._DATE_TIME_VALUE_NAMES):
          time_dict.setdefault(name, []).append(value)
        else:
          data_dict.setdefault(name, []).append(value)
    except (ValueError, errors.ParseError) as exception:
      parser_mediator.ProduceExtractionWarning('unable to parse attributes with error: {0!s}'.format(exception))
      return
    event_data = CupsIppEventData()
    event_data.application = self._GetStringValue(data_dict, 'application')
    event_data.computer_name = self._GetStringValue(data_dict, 'computer_name')
    event_data.copies = data_dict.get('copies', [0])[0]
    event_data.data_dict = data_dict
    event_data.doc_type = self._GetStringValue(data_dict, 'doc_type')
    event_data.job_id = self._GetStringValue(data_dict, 'job_id')
    event_data.job_name = self._GetStringValue(data_dict, 'job_name')
    event_data.user = self._GetStringValue(data_dict, 'user')
    event_data.owner = self._GetStringValue(data_dict, 'owner')
    event_data.printer_id = self._GetStringValue(data_dict, 'printer_id')
    event_data.uri = self._GetStringValue(data_dict, 'uri')
    # Some date/time attributes carry full date-time values, others carry
    # POSIX timestamps; both produce one event per collected value.
    for (name, usage) in iter(self._DATE_TIME_VALUES.items()):
      for date_time in time_dict.get(name, []):
        event = time_events.DateTimeValuesEvent(date_time, usage)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    for (name, usage) in iter(self._POSIX_TIME_VALUES.items()):
      for time_value in time_dict.get(name, []):
        date_time = dfdatetime_posix_time.PosixTime(timestamp=time_value)
        event = time_events.DateTimeValuesEvent(date_time, usage)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. | codesearchnet |
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Create a token-type-id mask for a sequence or sequence pair.

    RoBERTa-style models do not use token type ids, so every position —
    including the special tokens — is assigned type 0.

    Args:
        token_ids_0 (`List[int]`): List of IDs.
        token_ids_1 (`List[int]`, *optional*): Optional second list of IDs
            for sequence pairs.

    Returns:
        `List[int]`: List of zeros, one per token in the final sequence.
    """
    separator = [self.sep_token_id]
    classifier = [self.cls_token_id]
    # Single sequence: <cls> seq0 <sep>
    if token_ids_1 is None:
        return [0] * len(classifier + token_ids_0 + separator)
    # Sequence pair: <cls> seq0 <sep> <sep> seq1 <sep>
    return [0] * len(classifier + token_ids_0 + separator + separator + token_ids_1 + separator)
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not:
make use of token type ids, therefore a list of zeros is returned.
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros. | github-repos |
def _build_map(inputs, outputs):
    """Topologically sort nodes in order from inputs to outputs.

    Performs a depth-first search over the `_keras_history` connectivity
    metadata reachable from `outputs`.

    Args:
        inputs: the input tensors bounding the traversal.
        outputs: the output tensors whose _keras_history metadata should be
            walked. May be an arbitrary nested structure.

    Returns:
        A tuple `(ordered_nodes, operation_to_first_traversal_index)`:
        ordered_nodes is the list of history nodes sorted from inputs toward
        `outputs`; the dict maps each operation to the DFS index at which it
        was first seen (shared operations keep their *first* index).
    """
    finished = set()
    in_progress = set()
    ordered_nodes = []
    op_first_seen = {}
    # Walk every output tensor; the helper accumulates into the shared
    # bookkeeping structures so shared ancestry is visited only once.
    for flat_output in tree.flatten(outputs):
        _build_map_helper(inputs, flat_output, finished, in_progress, ordered_nodes, op_first_seen)
    return (ordered_nodes, op_first_seen)
It uses a depth-first search to topologically sort nodes that appear in the
_keras_history connectivity metadata of `outputs`.
Args:
outputs: the output tensors whose _keras_history metadata should be
walked. This may be an arbitrary nested structure.
Returns:
A tuple like (ordered_nodes, operation_to_first_traversal_index)
ordered_nodes: list of nodes appearing in the keras history,
topologically sorted from original inputs to the `outputs`.
(If outputs have different sets of ancestors, the inputs to one
output may appear after a different output).
operation_to_first_traversal_index:
A dict mapping operation to the traversal index in the DFS where it
is seen. Note: if a operation is shared by several nodes, the dict
will only store the index corresponding to the *first* time the
operation seen. | github-repos |
def is_spontaneous(gene, custom_id=None):
    """Check whether a COBRApy Gene's ID marks a spontaneous reaction gene.

    Args:
        gene (Gene): COBRApy Gene.
        custom_id (str): Optional custom spontaneous ID if it does not match
            the regular expression ``[Ss](_|)0001``.

    Returns:
        bool: True if the gene ID matches the spontaneous pattern or the
        custom ID, False otherwise.
    """
    spontaneous_pattern = re.compile("[Ss](_|)0001")
    # Pattern match anchors at the start of the ID (re.match semantics).
    if spontaneous_pattern.match(gene.id) is not None:
        return True
    return gene.id == custom_id
Args:
gene (Gene): COBRApy Gene
custom_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
bool: If gene ID matches spontaneous ID | juraj-google-style |
def backups(self):
    """Gets the Backup API client, constructing it on first access.

    Returns:
        Backups: the lazily-created, cached client instance.
    """
    # Return the cached client when one has already been built.
    if self.__backups:
        return self.__backups
    self.__backups = Backups(self.__connection)
    return self.__backups
Returns:
Backups: | codesearchnet |
def Pack(cls, obj, version):
    """Pack the given object using Ad Manager-specific logic.

    Args:
        obj: an object to be packed for SOAP using Ad Manager-specific
            logic, if applicable.
        version: the version of the current API, e.g. 'v201811'

    Returns:
        The given object packed with Ad Manager-specific logic for SOAP,
        if applicable. Otherwise, returns the given object unmodified.
    """
    # Only date/datetime values need Ad Manager-specific packing.
    needs_packing = isinstance(obj, (datetime.datetime, datetime.date))
    if needs_packing:
        return cls.AdManagerDateTimePacker(obj, version)
    return obj
Args:
obj: an object to be packed for SOAP using Ad Manager-specific logic, if
applicable.
version: the version of the current API, e.g. 'v201811'
Returns:
The given object packed with Ad Manager-specific logic for SOAP,
if applicable. Otherwise, returns the given object unmodified. | codesearchnet |
def write_config_files(self, host, hyperparameters, input_data_config):
    """Write the config files for one host's training container.

    Writes hyperparameters.json, resourceconfig.json and inputdataconfig.json
    under <container_root>/<host>/input/config.

    Args:
        host (str): Host to write the configuration for.
        hyperparameters (dict): Hyperparameters for training.
        input_data_config (dict): Training input channels to be used for
            training.

    Returns: None
    """
    config_path = os.path.join(self.container_root, host, 'input', 'config')

    resource_config = {'current_host': host, 'hosts': self.hosts}

    # Re-key the channel list by channel name, keeping only the fields the
    # container expects (input mode plus optional content type).
    json_input_data_config = {}
    for channel in input_data_config:
        channel_config = {'TrainingInputMode': 'File'}
        if 'ContentType' in channel:
            channel_config['ContentType'] = channel['ContentType']
        json_input_data_config[channel['ChannelName']] = channel_config

    _write_json_file(os.path.join(config_path, 'hyperparameters.json'), hyperparameters)
    _write_json_file(os.path.join(config_path, 'resourceconfig.json'), resource_config)
    _write_json_file(os.path.join(config_path, 'inputdataconfig.json'), json_input_data_config)
This method writes the hyperparameters, resources and input data configuration files.
Args:
host (str): Host to write the configuration for
hyperparameters (dict): Hyperparameters for training.
input_data_config (dict): Training input channels to be used for training.
Returns: None | juraj-google-style |
def get_pluggable_module_information(self, id_or_uri):
    """Gets all the pluggable module information.

    Args:
        id_or_uri: Can be either the interconnect id or uri.

    Returns:
        array: dicts of the pluggable module information.
    """
    resource_uri = '{}/pluggableModuleInformation'.format(self._client.build_uri(id_or_uri))
    return self._client.get(resource_uri)
Args:
id_or_uri: Can be either the interconnect id or uri.
Returns:
array: dicts of the pluggable module information. | codesearchnet |
def _get_manager(cluster_info, host, executor_id):
    """Return this executor's singleton multiprocessing.Manager connection.

    Looks up this executor's reservation in the cluster info and reconnects
    the per-python-worker TFManager if needed.

    Args:
        cluster_info: cluster node reservations.
        host: host IP address.
        executor_id: unique id per executor (created during initial run()).

    Returns:
        TFManager instance for this executor/python-worker.

    Raises:
        Exception: if no matching reservation is found for this node.
    """
    # Find this executor's reservation and connect to its manager.
    for node in cluster_info:
        if node['host'] == host and node['executor_id'] == executor_id:
            TFSparkNode.mgr = TFManager.connect(node['addr'], node['authkey'])
            break
    if TFSparkNode.mgr is None:
        raise Exception('No TFManager found on this node, please ensure that:\n'
                        '1. Spark num_executors matches TensorFlow cluster_size\n'
                        '2. Spark cores/tasks per executor is 1.\n'
                        '3. Spark dynamic allocation is disabled.')
    logging.info('Connected to TFSparkNode.mgr on {0}, executor={1}, state={2}'.format(host, executor_id, str(TFSparkNode.mgr.get('state'))))
    return TFSparkNode.mgr
Args:
:cluster_info: cluster node reservations
:host: host IP address
:executor_id: unique id per executor (created during initial call to run())
Returns:
TFManager instance for this executor/python-worker | codesearchnet |
def get_prep_value(self, value: LocalizedValue) -> dict:
    """Turns the specified value into something the database can store.

    Dictionaries are converted into :see:LocalizedValue instances. Any
    other non-LocalizedValue input is treated as empty (None); on fields
    that require a value, validation will then reject it.

    Arguments:
        value:
            The :see:LocalizedValue instance to serialize into a data
            type that the database can understand.

    Returns:
        A dictionary containing a key for every language, extracted from
        the specified value, or None when the value is empty.
    """
    # Promote plain dicts to LocalizedValue so they serialize uniformly.
    if isinstance(value, dict):
        value = LocalizedValue(value)
    # Any other truthy, non-LocalizedValue input is illegal; null it out
    # so it serializes as empty instead of crashing here.
    if ((not isinstance(value, LocalizedValue)) and value):
        value = None
    # Only clean/validate non-empty values; empty values pass through and
    # are stored as NULL.
    if value:
        cleaned_value = self.clean(value)
        self.validate(cleaned_value)
    else:
        cleaned_value = value
    return super(LocalizedField, self).get_prep_value((cleaned_value.__dict__ if cleaned_value else None))
can store.
If an illegal value (non-LocalizedValue instance) is
specified, we'll treat it as an empty :see:LocalizedValue
instance, on which the validation will fail.
Dictionaries are converted into :see:LocalizedValue instances.
Arguments:
value:
The :see:LocalizedValue instance to serialize
into a data type that the database can understand.
Returns:
A dictionary containing a key for every language,
extracted from the specified value. | codesearchnet |
def __init__(self, input_fn, input_workers, input_contexts, strategy):
    """Make an iterator for input provided via an input function.

    Currently implements PER_WORKER mode, in which the `input_fn` is
    called once on each worker.

    Args:
        input_fn: Input function that returns a `tf.data.Dataset` object
            or a callable.
        input_workers: an `InputWorkers` object.
        input_contexts: A list of `InputContext` instances to be passed to
            call(s) to `input_fn`. Length and order should match worker
            order in `worker_device_pairs`.
        strategy: a `tf.distribute.Strategy` object, used to run
            all-reduce to handle last partial batch.

    Raises:
        ValueError: if the number of input contexts does not match the
            number of input workers, or if `input_fn` returns an
            unsupported type.
    """
    assert isinstance(input_workers, input_lib.InputWorkers)
    # One InputContext is required per worker so each worker's input_fn
    # call can be configured independently.
    if input_workers.num_workers != len(input_contexts):
        raise ValueError('Number of input workers (%d) is not same as number of input_contexts (%d)' % (input_workers.num_workers, len(input_contexts)))
    iterators = []
    for i, ctx in enumerate(input_contexts):
        worker = input_workers.worker_devices[i]
        # Build each worker's iterator under that worker's device scope.
        with ops.device(worker):
            result = input_fn(ctx)
            devices = input_workers.compute_devices_for_worker(i)
            if isinstance(result, data_types.DatasetV2):
                iterator = _SingleWorkerDatasetIterator(result, worker, devices)
            elif callable(result):
                iterator = _SingleWorkerCallableIterator(result, worker, devices)
            else:
                raise ValueError('input_fn must return a tf.data.Dataset or a callable.')
            iterators.append(iterator)
    super(InputFunctionIterator, self).__init__(input_workers, iterators, strategy, cardinality=cardinality_lib.UNKNOWN, enable_get_next_as_optional=False)
    self._enable_get_next_as_optional = False
Currently implements PER_WORKER mode, in which the `input_fn` is called
once on each worker.
TODO(priyag): Add other replication modes.
Args:
input_fn: Input function that returns a `tf.data.Dataset` object.
input_workers: an `InputWorkers` object.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `input_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch. | github-repos |
def _right_pad(x, final_rank):
    """Pads the shape of x to the right to be of rank final_rank.

    Expands the dims of `x` to the right such that its rank equals
    `final_rank`. For example, if `x` has shape [1, 5, 7, 2] and
    `final_rank` is 7, the result has shape [1, 5, 7, 2, 1, 1, 1].

    Args:
        x: The tensor whose shape is to be padded.
        final_rank: Scalar int32 `Tensor` or Python `int`. The desired
            rank of x.

    Returns:
        padded_x: A tensor of rank final_rank.
    """
    # Dynamic fallback: append ones to the runtime shape until the rank
    # reaches final_rank.
    dynamic_shape = tf.concat(
        [tf.shape(input=x), tf.ones((final_rank - tf.rank(x)), dtype=tf.int32)],
        axis=0)
    # Prefer a fully static shape when both the input shape and final_rank
    # are known, so downstream shape inference keeps working.
    static_shape = None
    if x.shape.is_fully_defined() and isinstance(final_rank, int):
        static_shape = x.shape.as_list()
        missing_dims = final_rank - len(static_shape)
        static_shape.extend([1] * missing_dims)
    return tf.reshape(x, (static_shape or dynamic_shape))
Expands the dims of `x` to the right such that its rank is equal to
final_rank. For example, if `x` is of shape [1, 5, 7, 2] and `final_rank` is
7, we return padded_x, which is of shape [1, 5, 7, 2, 1, 1, 1].
Args:
x: The tensor whose shape is to be padded.
final_rank: Scalar int32 `Tensor` or Python `int`. The desired rank of x.
Returns:
padded_x: A tensor of rank final_rank. | codesearchnet |
def from_bytes(value):
    """Converts bytes to a string value, if necessary.

    Args:
        value (Union[str, bytes]): The value to be converted.

    Returns:
        str: The original value converted to unicode (if bytes) or as
            passed in if it started out as unicode.

    Raises:
        ValueError: If the value could not be converted to unicode.
    """
    # Drop the six py2/py3 shim: on Python 3, six.binary_type is bytes and
    # six.text_type is str, so the stdlib checks are exact equivalents.
    result = value.decode('utf-8') if isinstance(value, bytes) else value
    if isinstance(result, str):
        return result
    raise ValueError('{0!r} could not be converted to unicode'.format(value))
Args:
value (Union[str, bytes]): The value to be converted.
Returns:
str: The original value converted to unicode (if bytes) or as passed in
if it started out as unicode.
Raises:
ValueError: If the value could not be converted to unicode. | codesearchnet |
def set_servo_speed(self, goalspeed, led):
    """Set the Herkulex in continuous rotation mode.

    Args:
        goalspeed (int): the speed, range -1023 to 1023.
        led (int): the LED color
            0x00 LED off
            0x04 GREEN
            0x08 BLUE
            0x10 RED
    """
    # BUGFIX: the original branched on `goalspeed > 0` / `goalspeed < 0`,
    # so goalspeed == 0 left goalspeed_msb/goalspeed_lsb unbound and
    # raised NameError. Zero now takes the non-negative path (speed 0).
    if goalspeed >= 0:
        goalspeed_msb = (int(goalspeed) & 0xFF00) >> 8
        goalspeed_lsb = int(goalspeed) & 0xff
    else:
        # Negative speed: encode reverse direction in the high byte
        # (presumably the direction bit of the I_JOG speed field —
        # TODO confirm against the Herkulex manual).
        goalspeed_msb = 64 + (255 - ((int(goalspeed) & 0xFF00) >> 8))
        goalspeed_lsb = abs(goalspeed) & 0xff
    data = []
    data.append(0x0C)           # packet size
    data.append(self.servoid)
    data.append(I_JOG_REQ)
    data.append(goalspeed_lsb)
    data.append(goalspeed_msb)
    data.append(0x02 | led)     # speed-control mode flag combined with LED bits
    data.append(self.servoid)
    data.append(0x00)           # playtime
    send_data(data)
Args:
goalspeed (int): the speed, range -1023 to 1023
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED | juraj-google-style |
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant Bluetooth entries from a DeviceCache plist.

    Emits one event per timestamped update (inquiry, name, services) of
    each cached Bluetooth device, plus a paired-device event when the
    device appears in PairedDevices.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        match (Optional[dict[str: object]]): keys extracted from
            PLIST_KEYS.
    """
    device_cache = match.get('DeviceCache', {})
    for (device, value) in iter(device_cache.items()):
        name = value.get('Name', '')
        if name:
            name = ''.join(('Name:', name))
        # Note: a single event_data object is reused and mutated for every
        # event produced for this device.
        event_data = plist_event.PlistTimeEventData()
        event_data.root = '/DeviceCache'
        datetime_value = value.get('LastInquiryUpdate', None)
        if datetime_value:
            # filter(None, ...) drops the empty name so the description
            # has no trailing space.
            event_data.desc = ' '.join(filter(None, ('Bluetooth Discovery', name)))
            event_data.key = '{0:s}/LastInquiryUpdate'.format(device)
            event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
            # Paired devices get an extra event at the inquiry timestamp.
            if (device in match.get('PairedDevices', [])):
                event_data.desc = 'Paired:True {0:s}'.format(name)
                event_data.key = device
                event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
                parser_mediator.ProduceEventWithEventData(event, event_data)
        datetime_value = value.get('LastNameUpdate', None)
        if datetime_value:
            event_data.desc = ' '.join(filter(None, ('Device Name Set', name)))
            event_data.key = '{0:s}/LastNameUpdate'.format(device)
            event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
        datetime_value = value.get('LastServicesUpdate', None)
        if datetime_value:
            event_data.desc = ' '.join(filter(None, ('Services Updated', name)))
            event_data.key = '{0:s}/LastServicesUpdate'.format(device)
            event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. | codesearchnet |
def reactions_add(self, *, name: str, **kwargs) -> SlackResponse:
    """Adds a reaction to an item.

    Args:
        name (str): Reaction (emoji) name. e.g. 'thumbsup'
        channel (str): Channel where the message to add reaction to was
            posted. e.g. 'C1234567890'
        timestamp (str): Timestamp of the message to add reaction to.
            e.g. '1234567890.123456'
    """
    # Merge the required reaction name into the caller-supplied arguments.
    payload = dict(kwargs, name=name)
    return self.api_call('reactions.add', json=payload)
Args:
name (str): Reaction (emoji) name. e.g. 'thumbsup'
channel (str): Channel where the message to add reaction to was posted.
e.g. 'C1234567890'
timestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456' | codesearchnet |
def unsplat(f: Callable[([Iterable], A)]) -> Callable[(..., A)]:
    """Convert a function taking a single iterable argument into a function
    taking multiple arguments.

    Args:
        f: Any function taking a single iterable argument.

    Returns:
        A function that accepts multiple arguments. Each argument of this
        function is passed as an element of a tuple to ``f``.

    Example:
        $ def f(a):
        $     return a[0] + a[1] + a[2]
        $
        $ f([1, 2, 3])  # 6
        $ g = unsplat(f)
        $ g(1, 2, 3)    # 6
    """
    def wrapper(*varargs):
        # Bundle the positional arguments into a single tuple for f.
        return f(varargs)
    return wrapper
Args:
f: Any function taking a single iterable argument
Returns:
A function that accepts multiple arguments. Each argument of this function is passed as an element of an
iterable to ``f``.
Example:
$ def f(a):
$ return a[0] + a[1] + a[2]
$
$ f([1, 2, 3]) # 6
$ g = unsplat(f)
$ g(1, 2, 3) # 6 | codesearchnet |
def get_full_filename_by_suffixes(dir_src, suffixes):
    """Get full file names with the given suffixes in the given directory.

    Args:
        dir_src: directory path.
        suffixes: wanted suffixes.

    Returns:
        Full file names with the given suffixes as list, or None when no
        matching file names are found.
    """
    base_names = FileClass.get_filename_by_suffixes(dir_src, suffixes)
    if base_names is None:
        return None
    # Prefix each base name with the directory, preserving the original
    # dir + os.sep + name concatenation.
    return [dir_src + os.sep + base_name for base_name in base_names]
Args:
dir_src: directory path
suffixes: wanted suffixes
Returns:
full file names with the given suffixes as list | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.