code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting pooling ...')
if names == 'short':
tf_name = 'P' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
if 'kernel_shape'... | Convert Average pooling.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | juraj-google-style |
def __init__(self, interval=3600):
    """Initializes the handler with an interval.

    Args:
        interval (int): Interval at which to checkpoint in seconds.
            Defaults to 3600 (1 hr).
    """
    # Checkpoint counter starts at zero; incremented elsewhere — confirm usage.
    self.chk_counter = 0
    # Record creation time so elapsed time can be measured against it.
    self.start_time = datetime.datetime.now()
    self.interval = interval
def inv_logistic(y: Union[float, np.ndarray],
                 k: float,
                 theta: float) -> Optional[float]:
    r"""Inverse standard logistic function:

    .. math::

        x = ( \log( \frac {1} {y} - 1) / -k ) + \theta

    Args:
        y: :math:`y`
        k: :math:`k`
        theta: :math:`\theta`

    Returns:
        :math:`x`, or ``None`` if any argument is ``None``.
    """
    if any(arg is None for arg in (y, k, theta)):
        return None
    # Algebraically identical to (log(1/y - 1) / -k) + theta.
    return theta - np.log(1.0 / y - 1.0) / k
def sign(self, private_keys):
if ((private_keys is None) or (not isinstance(private_keys, list))):
raise TypeError('`private_keys` must be a list instance')
def gen_public_key(private_key):
public_key = private_key.get_verifying_key().encode()
return public_key.decode()
key_pairs = ... | Fulfills a previous Transaction's Output by signing Inputs.
Note:
This method works only for the following Cryptoconditions
currently:
- Ed25519Fulfillment
- ThresholdSha256
Furthermore, note that all keys required to fully sign the
Transaction have to be passed to this method. A subset of all
will cause this method t... | codesearchnet |
def begin_abort(self, root_pipeline_key, abort_message):
def txn():
pipeline_record = db.get(root_pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to abort root pipeline ID "%s" but it does not exist.',
root_pipeline_key.name())
raise db.Roll... | Kicks off the abort process for a root pipeline and all its children.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
abort_message: Message explaining why the abort happened, only saved
into the root pipeline.
Returns:
True if the abort signal was sent successfully; False otherwise. | juraj-google-style |
def base_name_from_image(image):
    """Extract the base name of the image to use as the 'algorithm name' for the job.

    Args:
        image (str): Image name.

    Returns:
        str: Algorithm name, as extracted from the image name.
    """
    # Optional registry/repo prefix, the name itself (no ':' or '/'),
    # and an optional ':tag' suffix.
    match = re.match(r'^(.+/)?([^:/]+)(:[^:]+)?$', image)
    if match:
        return match.group(2)
    return image
def get_stored_version(connection):
if connection.engine.name == 'sqlite':
version = connection.execute('PRAGMA user_version').fetchone()[0]
if version == 0:
raise VersionIsNotStored
return version
elif connection.engine.name == 'postgresql':
try:
r ... | Returns database version.
Args:
connection (sqlalchemy connection):
Raises: Assuming user_version pragma (sqlite case) and user_version table (postgresql case)
exist because they created with the database creation.
Returns:
int: version of the database. | juraj-google-style |
def get_metrics_namespace(self) -> str:
    """Returns:
      A namespace for metrics collected by the RunInference transform.
    """
    namespace = 'BeamML_TF_Tensor'
    return namespace
def __contains__(self, func):
    """Returns whether the function is stored anywhere in the middleware chain.

    This runs recursively though any subchains.

    Args:
        func (callable): A function which may be present in the chain

    Returns:
        bool: True if func is a function contained anywhere in the chain.
    """
    for mw in self.mw_list:
        # Identity match on this entry, or recursive membership when the
        # entry wraps a subchain (``in`` re-enters this method).
        if func is mw.func:
            return True
        if mw.is_subchain and func in mw.func:
            return True
    return False
def _remove_lines(self, lines, sublist_lengths, num_to_remove):
    """Utility function to remove num_to_remove lines from each sublist.

    Args:
        lines: list of items.
        sublist_lengths: list of integers representing length of sublist
            corresponding to each source file.
        num_to_remove: number of lines to remove from each sublist.

    Returns:
        remaining lines.
    """
    remaining = []
    position = 0
    for length in sublist_lengths:
        sublist_end = position + length
        # Drop up to num_to_remove items from the front of this sublist,
        # clamped so we never read past its end.
        keep_from = min(position + num_to_remove, sublist_end)
        remaining.extend(lines[keep_from:sublist_end])
        position = sublist_end
    return remaining
def add_transcript(self, transcript):
    """Add the information transcript.

    This adds a transcript dict to variant['transcripts'].

    Args:
        transcript (dict): A transcript dictionary
    """
    message = 'Adding transcript {0} to variant {1}'.format(
        transcript, self['variant_id'])
    logger.debug(message)
    self['transcripts'].append(transcript)
def create_assembly(self, did, wid, name='My Assembly'):
    """Creates a new assembly element in the specified document / workspace.

    Args:
        - did (str): Document ID
        - wid (str): Workspace ID
        - name (str, default='My Assembly')

    Returns:
        - requests.Response: Onshape response data
    """
    endpoint = '/api/assemblies/d/' + did + '/w/' + wid
    payload = {'name': name}
    return self._api.request('post', endpoint, body=payload)
def cds_score(self, x_te, y_te):
if type(x_te) == np.ndarray:
x_te, y_te = pd.Series(x_te.reshape(-1)), pd.Series(y_te.reshape(-1))
xd, yd = discretized_sequences(x_te, y_te, self.ffactor, self.maxdev)
cx = Counter(xd)
cy = Counter(yd)
yrange = sorted(cy.ke... | Computes the cds statistic from variable 1 to variable 2
Args:
x_te (numpy.ndarray): Variable 1
y_te (numpy.ndarray): Variable 2
Returns:
float: CDS fit score | juraj-google-style |
def remove_list_duplicates(lista, unique=False):
result = []
allready = []
for elem in lista:
if elem not in result:
result.append(elem)
else:
allready.append(elem)
if unique:
for elem in allready:
result = list(filter((elem).__ne__, res... | Remove duplicated elements in a list.
Args:
lista: List with elements to clean duplicates. | juraj-google-style |
def __init__(self, skype=None):
    """Create a new container object.

    The :attr:`synced` state and internal :attr:`cache` are initialised here.

    Args:
        skype (Skype): parent Skype instance
    """
    self.skype = skype
    # Nothing fetched yet: empty cache, not synced.
    self.cache = {}
    self.synced = False
def __init__(self, ad):
    """Initializes an Sl4aClient.

    Args:
        ad: AndroidDevice object.
    """
    super(Sl4aClient, self).__init__(app_name=_APP_NAME, ad=ad)
    self._ad = ad
    self._adb = ad.adb
    # Set to None here; presumably populated with an event dispatcher
    # later — confirm against the rest of the class.
    self.ed = None
def filesizes(images):
    """Stat filesize of files.

    Args:
        images: stream of marv image files

    Returns:
        Stream of filesizes
    """
    while True:
        img = yield marv.pull(images)
        if img is None:
            # Upstream exhausted; end the generator.
            return
        yield marv.push(img.size)
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(GetAttributeListRequestPayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(en... | Read the data encoding the GetAttributeList request payload and decode
it into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which th... | juraj-google-style |
def truncate_rationale(rationale, max_length=MAX_RATIONALE_SIZE_IN_EVENT):
    """Truncates the rationale for analytics event emission if necessary.

    Args:
        rationale (string): the string value of the rationale
        max_length (int): the max length for truncation

    Returns:
        truncated_value (string): the possibly truncated version of the rationale
        was_truncated (bool): True if the rationale was truncated
    """
    # `basestring` (used previously) only exists on Python 2; `str` keeps
    # this working on Python 3. Non-string rationales pass through untouched.
    if isinstance(rationale, str) and max_length is not None and len(rationale) > max_length:
        return rationale[:max_length], True
    return rationale, False
def greedy_coloring(adj):
coloring = {}
colors = {}
possible_colors = {n: set(range(len(adj))) for n in adj}
while possible_colors:
n = min(possible_colors, key=lambda n: len(possible_colors[n]))
color = min(possible_colors[n])
coloring[n] = color
... | Determines a vertex coloring.
Args:
adj (dict): The edge structure of the graph to be colored.
`adj` should be of the form {node: neighbors, ...} where
neighbors is a set.
Returns:
dict: the coloring {node: color, ...}
dict: the colors {color: [node, ...], ...}
Note:
This is a greedy heuristic: the resulting colorin... | juraj-google-style |
def _load_credentials_from_file(filename):
if (not os.path.exists(filename)):
raise exceptions.DefaultCredentialsError('File {} was not found.'.format(filename))
with io.open(filename, 'r') as file_obj:
try:
info = json.load(file_obj)
except ValueError as caught_exc:
... | Loads credentials from a file.
The credentials file must be a service account key or stored authorized
user credentials.
Args:
filename (str): The full path to the credentials file.
Returns:
Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
credentials and the project ID. Authorized user credentials ... | codesearchnet |
class PipedPipelineDataFormat(PipelineDataFormat):
def __iter__(self):
for line in sys.stdin:
if '\t' in line:
line = line.split('\t')
if self.column:
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
... | Read data from piped input to the python process. For multi columns data, columns should separated by
If columns are provided, then the output will be a dictionary with {column_x: value_x}
Args:
output_path (`str`): Where to save the outgoing data.
input_path (`str`): Where to look for the input data.
column (`str`):... | github-repos |
def write(self, output='jsonstat'):
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return get_dim_label(self, self['label'], 'dimension')
else:
raise ValueError("Allowed arguments are 'json... | Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter. | juraj-google-style |
def ParseTextToDicts(self, *args, **kwargs):
    """Calls ParseText and turns the result into list of dicts.

    List items are dicts of rows, dict key is column header and value is
    column value.

    Args:
        text: (str), Text to parse with embedded newlines.
        eof: (boolean), Set to False if we are parsing only part of the file.
            Suppresses triggering EOF state.
    """
    rows = self.ParseText(*args, **kwargs)
    return [dict(zip(self.header, row)) for row in rows]
def GetFileSystemReferenceCount(self, path_spec):
    """Retrieves the reference count of a cached file system object.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        int: reference count or None if there is no file system object for
            the corresponding path specification cached.
    """
    identifier = self._GetFileSystemCacheIdentifier(path_spec)
    cache_value = self._file_system_cache.GetCacheValue(identifier)
    if cache_value:
        return cache_value.reference_count
    return None
def ReceiveMessagesRelationalFlows(self, client_id, messages):
now = time.time()
unprocessed_msgs = []
message_handler_requests = []
dropped_count = 0
for (session_id, msgs) in iteritems(collection.Group(messages, operator.attrgetter('session_id'))):
leftover_msgs = self.HandleWellKnownFlows... | Receives and processes messages for flows stored in the relational db.
Args:
client_id: The client which sent the messages.
messages: A list of GrrMessage RDFValues. | codesearchnet |
def Insert(self, request, global_params=None):
    """Creates a new, empty table in the dataset.

    Args:
        request: (BigqueryTablesInsertRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
        (Table) The response message.
    """
    method_config = self.GetMethodConfig('Insert')
    return self._RunMethod(
        method_config, request, global_params=global_params)
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
artifact_filters = cls._ParseStringOption(options, 'artifact_filter_string')
artifact_filter... | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: if the required artifact definitions are not defined. | juraj-google-style |
def invitation_backend(backend=None, namespace=None):
    """Returns a specified invitation backend.

    Args:
        backend: dotted path to the invitation backend class
        namespace: URL namespace to use

    Returns:
        an instance of an InvitationBackend
    """
    path = backend or ORGS_INVITATION_BACKEND
    # Split "package.module.ClassName" into the module path and class name.
    module_path, _, class_name = path.rpartition(".")
    backend_class = getattr(import_module(module_path), class_name)
    return backend_class(namespace=namespace)
def __init__(self, direction, edge_name, optional=False, within_optional_scope=False):
super(Traverse, self).__init__(
direction, edge_name, optional=optional, within_optional_scope=within_optional_scope)
self.direction = direction
self.edge_name = edge_name
self.opt... | Create a new Traverse block in the given direction and across the given edge.
Args:
direction: string, 'in' or 'out'
edge_name: string obeying variable name rules (see validate_safe_string).
optional: optional bool, specifying whether the traversal to the given location
is optional (i.e. non-filtering) or mandatory (f... | juraj-google-style |
def main(argv=None):
args = parse_mobly_cli_args(argv)
test_class = _find_test_class()
if args.list_tests:
_print_test_names(test_class)
sys.exit(0)
test_configs = config_parser.load_test_config_file(args.config, args.test_bed)
tests = None
if args.tests:
tests = args.tes... | Execute the test class in a test module.
This is the default entry point for running a test script file directly.
In this case, only one test class in a test script is allowed.
To make your test script executable, add the following to your file:
.. code-block:: python
from mobly import test_runner
...
if __name__ =... | github-repos |
def nb_r_deriv(r, data_row):
    """Derivative of log-likelihood wrt r (formula from wikipedia).

    Args:
        r (float): the R parameter in the NB distribution
        data_row (array): 1d array of length cells
    """
    n = len(data_row)
    mean_val = np.mean(data_row)
    # sum_i psi(x_i + r) - n*psi(r) + n*log(r / (r + mean))
    return (np.sum(digamma(data_row + r))
            - n * digamma(r)
            + n * np.log(r / (r + mean_val)))
def transformer_text_encoder(inputs,
target_space,
hparams,
name=None):
with tf.variable_scope(name, default_name="transformer_text_encoder"):
inputs = common_layers.flatten4d3d(inputs)
[
encoder_input,
e... | Transformer text encoder over inputs with unmasked full attention.
Args:
inputs: Tensor of shape [batch, length, 1, hparams.hidden_size].
target_space: int. Used for encoding inputs under a target space id.
hparams: HParams.
name: string, variable scope.
Returns:
encoder_output: Tensor of shape [batch, length, hparam... | juraj-google-style |
def insert(self, index, item):
if not self:
list.append(self, item)
elif item.__class__ == self[0].__class__:
list.insert(self, index, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
... | Insert an item at the specified index.
Args:
index (int): Position to insert the item.
item: Item to be inserted.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored. | juraj-google-style |
def from_audio_encoder_config(cls, audio_encoder_config: "PretrainedConfig", **kwargs):
    """Instantiate a [`MoshiConfig`] (or a derived class) from an audio encoder configuration.

    Returns:
        [`MoshiConfig`]: An instance of a configuration object
    """
    encoder_dict = audio_encoder_config.to_dict()
    return cls(audio_encoder_config=encoder_dict, **kwargs)
def ensure_resource_data(self, update_data=False):
if not any(key in self.data for key in self.UNIQUE_IDENTIFIERS):
raise exceptions.HPOneViewMissingUniqueIdentifiers(MISSING_UNIQUE_IDENTIFIERS)
if not update_data:
return
resource_data = None
... | Retrieves data from OneView and updates resource object.
Args:
update_data: Flag to update resource data when it is required. | juraj-google-style |
def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: tf.Tensor

    Returns: tf.Tensor
    """
    # 1 for real tokens, 0 for padding positions.
    not_padding = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
    # Cumulative count of real tokens gives 1-based positions along axis 1;
    # shift by any cached sequence length, then zero the padding slots again.
    positions = (tf.math.cumsum(not_padding, axis=1) + past_key_values_length) * not_padding
    return positions + self.padding_idx
def get_change_point_config(params: Dict[str, Any]) -> ChangePointConfig:
return ChangePointConfig(min_runs_between_change_points=params.get('min_runs_between_change_points', constants._DEFAULT_MIN_RUNS_BETWEEN_CHANGE_POINTS), num_runs_in_change_point_window=params.get('num_runs_in_change_point_window', constants._... | Args:
params: Dict containing parameters to run change point analysis.
Returns:
ChangePointConfig object containing change point analysis parameters. | github-repos |
def __init__(self, file_object, delete_tempfile=True, journal_mode="DELETE"):
self.file_object = file_object
self.journal_mode = journal_mode
if hasattr(self.file_object, "name"):
self.name = self.file_object.name
self._delete_file = False
else:
self._delete_file =... | Init.
Args:
file_object: A file like object.
delete_tempfile: If we create a tempfile, should we delete it when
we're done.
journal_mode: If set to "WAL" a "Write-Ahead Log" is created. | juraj-google-style |
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):
    """Function to get LoggingTensorHook.

    Args:
        every_n_iter: `int`, print the values of `tensors` once every N local
            steps taken on the current worker.
        tensors_to_log: List of tensor names or dictionary mapping labels to
            tensor names. If not set, log _TENSORS_TO_LOG by default.
        **kwargs: accepted for call-site compatibility.
    """
    if tensors_to_log is None:
        tensors_to_log = _TENSORS_TO_LOG
    # NOTE(review): **kwargs is accepted but never forwarded to
    # LoggingTensorHook — confirm whether that is intentional.
    return tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=every_n_iter)
def recoverURL(self, url):
self.setUserAgent()
if ('https:
self.setProxy(protocol='https')
else:
self.setProxy(protocol='http')
if ('.onion' in url):
try:
pass
except:
pass
url = url.replace('.onion', '.onion.cab')
try:
recurso ... | Public method to recover a resource.
Args:
-----
url: The URL to be collected.
Returns:
--------
Returns a resource that has to be read, for instance, with html = self.br.read() | codesearchnet |
def skip(self, count, name=None) -> 'DatasetV2':
    """Creates a `Dataset` that skips `count` elements from this dataset.

    >>> dataset = tf.data.Dataset.range(10)
    >>> dataset = dataset.skip(7)
    >>> [a.item() for a in dataset.as_numpy_iterator()]
    [7, 8, 9]

    Args:
        count: A `tf.int64` scalar `tf.Tensor`, representing the number of
            elements of this dataset that should be skipped.
    """
    # Imported locally rather than at module top — presumably to avoid a
    # circular dependency between dataset-op modules; confirm.
    from tensorflow.python.data.ops import skip_op
    return skip_op._skip(self, count, name)
def create(self, request, desc, files, public=False):
    """Creates a gist.

    Arguments:
        request: an initial request object
        desc: the gist description
        files: a list of files to add to the gist
        public: a flag to indicate whether the gist is public or not

    Returns:
        The URL to the newly created gist.
    """
    body = {
        "description": desc,
        "public": public,
        "files": files,
    }
    request.data = json.dumps(body)
    response = self.send(request)
    return response.json()['html_url']
def compute_centroid(points):
    """Computes the centroid of set of points.

    Args:
        points (:obj:`list` of :obj:`Point`)

    Returns:
        :obj:`Point`
    """
    # Each point is indexed as (lon, lat); the resulting Point takes
    # (lat, lon, ...) in that order.
    latitudes = [point[1] for point in points]
    longitudes = [point[0] for point in points]
    return Point(np.mean(latitudes), np.mean(longitudes), None)
def get_asn_verbose_dns(self, asn=None):
if asn[0:2] != 'AS':
asn = 'AS{0}'.format(asn)
zone = '{0}.asn.cymru.com'.format(asn)
try:
log.debug('ASN verbose query for {0}'.format(zone))
data = self.dns_resolver.query(zone, 'TXT')
return... | The function for retrieving the information for an ASN from
Cymru via port 53 (DNS). This is needed since IP to ASN mapping via
Cymru DNS does not return the ASN Description like Cymru Whois does.
Args:
asn (:obj:`str`): The AS number (required).
Returns:
str: The raw ASN data.
Raises:
ASNLookupError: The ASN lookup... | juraj-google-style |
def _initialize_global_state(self, redis_address, redis_password=None, timeout=20):
self.redis_client = services.create_redis_client(redis_address, redis_password)
start_time = time.time()
num_redis_shards = None
redis_shard_addresses = []
while ((time.time() - start_time) < timeout):
num_re... | Initialize the GlobalState object by connecting to Redis.
It's possible that certain keys in Redis may not have been fully
populated yet. In this case, we will retry this method until they have
been populated or we exceed a timeout.
Args:
redis_address: The Redis address to connect.
redis_password: The password of th... | codesearchnet |
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False, output_router_logits: bool=False) -> torch.Tensor:
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = sel... | Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention h... | github-repos |
def _ReadRecordAttributeValueOffset(self, file_object, file_offset, number_of_attribute_values):
offsets_data_size = (number_of_attribute_values * 4)
offsets_data = file_object.read(offsets_data_size)
context = dtfabric_data_maps.DataTypeMapContext(values={'number_of_attribute_values': number_of_attribute_v... | Reads the record attribute value offsets.
Args:
file_object (file): file-like object.
file_offset (int): offset of the record attribute values offsets relative
to the start of the file.
number_of_attribute_values (int): number of attribute values.
Returns:
keychain_record_attribute_value_offsets: record attribute val... | codesearchnet |
def get_events_for_subscription(access_token, subscription_id, start_timestamp):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/microsoft.insights/eventtypes/management/values?api-version=',
INSIGHTS... | Get the insights evens for a subsctipion since the specific timestamp.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
start_timestamp (str): timestamp to get events from. E.g. '2017-05-01T00:00:00.0000000Z'.
Returns:
HTTP response. JSON body of insights even... | juraj-google-style |
def build_phenotype(phenotype_id, adapter):
    """Build a small phenotype object.

    Build a dictionary with phenotype_id and description.

    Args:
        phenotype_id (str): The phenotype id
        adapter (scout.adapter.MongoAdapter)

    Returns:
        phenotype_obj (dict):
            dict(
                phenotype_id = str,
                feature = str, # description of phenotype
            )
    """
    hpo_term = adapter.hpo_term(phenotype_id)
    if not hpo_term:
        # Unknown term: return an empty dict rather than raising.
        return {}
    return {
        'phenotype_id': hpo_term['hpo_id'],
        'feature': hpo_term['description'],
    }
def get_properties(properties_file='raw.properties.json', env=None, region=None):
with open(properties_file, 'rt') as file_handle:
properties = json.load(file_handle)
env_properties = properties.get(env, properties)
contents = env_properties.get(region, env_properties)
LOG.debug('Found pro... | Get contents of _properties_file_ for the _env_.
Args:
properties_file (str): File name of `create-configs` JSON output.
env (str): Environment to read optionally.
region (str): Region to get specific configs for.
Returns:
dict: JSON loaded Application properties for _env_.
None: Given _env_ was not found in `create-... | juraj-google-style |
def synthesize(self, duration, tick_frequency):
sr = self.samplerate.samples_per_second
tick = np.random.uniform(low=-1., high=1., size=int(sr * .1))
tick *= np.linspace(1, 0, len(tick))
samples = np.zeros(int(sr * (duration / Seconds(1))))
ticks_per_se... | Synthesize periodic "ticks", generated from white noise and an envelope
Args:
duration (numpy.timedelta64): The total duration of the sound to be
synthesized
tick_frequency (numpy.timedelta64): The frequency of the ticking
sound | juraj-google-style |
def _api_scrape(json_inp, ndx):
try:
headers = json_inp['resultSets'][ndx]['headers']
values = json_inp['resultSets'][ndx]['rowSet']
except KeyError:
try:
headers = json_inp['resultSet'][ndx]['headers']
values = json_inp['resultSet'][ndx]['rowSet']
except ... | Internal method to streamline the getting of data from the json
Args:
json_inp (json): json input from our caller
ndx (int): index where the data is located in the api
Returns:
If pandas is present:
DataFrame (pandas.DataFrame): data set from ndx within the
API's json
else:
A dictionary of both headers and values fro... | codesearchnet |
def _guess_format_from_extension(ext):
    """Guess the appropriate data type from file extension.

    Arguments:
        ext: The file extension (period optional)

    Returns:
        String. The format (without leading period),
        or False if none was found or couldn't be guessed
    """
    ext = ext.strip('.')
    matches = [fmt for fmt in FILE_FORMATS if ext in FILE_FORMATS[fmt]]
    # Both ambiguous (several candidate formats) and unknown extensions
    # report failure rather than guessing.
    if len(matches) != 1:
        return False
    return matches[0]
def block_view(self, mri):
    """Get a view of a block.

    Args:
        mri: The mri of the controller hosting the block

    Returns:
        Block: The block we control
    """
    controller = self.get_controller(mri)
    # Pass a weak proxy so the returned view does not keep this object alive.
    return controller.block_view(weakref.proxy(self))
def get_model_details(self, model_name):
    """Get details of the specified model from CloudML Service.

    Args:
        model_name: the name of the model. It can be a model full name
            ("projects/[project_id]/models/[model_name]") or just [model_name].

    Returns: a dictionary of the model details.
    """
    if model_name.startswith('projects/'):
        full_name = model_name
    else:
        # Qualify a bare model name with this client's project.
        full_name = 'projects/%s/models/%s' % (self._project_id, model_name)
    return self._api.projects().models().get(name=full_name).execute()
def render_dictionary(data, headers=None):
    """Return a dictionary list formatted as a HTML table.

    Args:
        data: the dictionary list
        headers: the keys in the dictionary to use as table columns, in order.
    """
    table_html = _html.HtmlBuilder.render_table(data, headers)
    return IPython.core.display.HTML(table_html)
def create_index(index_name, index_config, client):
    """Creates an index with a given configuration.

    Args:
        index_name (str): Name of the index you want to create
        index_config (dict): configuration for the index
        client (Elasticsearch.IndicesClient): the Elasticsearch client
    """
    client.create(body=index_config, index=index_name)
def StartMergeTaskStorage(self, task):
if (self._storage_type != definitions.STORAGE_TYPE_SESSION):
raise IOError('Unsupported storage type.')
if (not self._merge_task_storage_path):
raise IOError('Missing merge task storage path.')
merge_storage_file_path = self._GetMergeTaskStorageFilePath... | Starts a merge of a task storage with the session storage.
Args:
task (Task): task.
Returns:
StorageMergeReader: storage merge reader of the task storage.
Raises:
IOError: if the storage file cannot be opened or
if the storage type is not supported or
if the temporary path for the task storage does not exist or
if t... | codesearchnet |
def post(cls, payload):
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
payload = cls.set_id_in_fkeys(payload)
payload = cls.check_boolean_fields(payload)
payload = cls.add_model_name_to_payload(payl... | Posts the data to the specified record.
Args:
payload: `dict`. This will be JSON-formatted prior to sending the request.
Returns:
`dict`. The JSON formatted response.
Raises:
`Requests.exceptions.HTTPError`: The status code is not ok.
`RecordNotUnique`: The Rails server returned the exception ActiveRecord::RecordNot... | juraj-google-style |
def get_dimension_index(self, dimension):
if isinstance(dimension, int):
if ((dimension < (self.ndims + len(self.vdims))) or (dimension < len(self.dimensions()))):
return dimension
else:
return IndexError('Dimension index out of bounds')
dim = dimension_name(dimension)
... | Get the index of the requested dimension.
Args:
dimension: Dimension to look up by name or by index
Returns:
Integer index of the requested dimension | codesearchnet |
def process_subj_or_pred(self, component: Union[(URIRef, str)]) -> URIRef:
if ('http' in component):
prefix = self.find_prefix(component)
if prefix:
self.process_prefix(prefix)
return URIRef(component)
elif (':' in component):
(presumed_prefix, info) = component.split... | Adds viable uri from iri or expands viable qname to iri to be triple ready
Need to have a viable qualified name (qname) in order to use a qname. You can make it
viable by either add the namespace beforehand with add_namespace(s) or if its already
in the local common_namespaces preloaded.
Args:
component: entity subje... | codesearchnet |
def read_analysis(self, file_handle):
start = self.annotation['__header__']['analysis start']
end = self.annotation['__header__']['analysis end']
if start != 0 and end != 0:
file_handle.seek(start, 0)
self._analysis = file_handle.read(end - start)
else:
... | Read the ANALYSIS segment of the FCS file and store it in self.analysis.
Warning: This has never been tested with an actual fcs file that contains an
analysis segment.
Args:
file_handle: buffer containing FCS data | juraj-google-style |
def initialize(self, map_arr, start_point_label='S', end_point_label='G', wall_label='
np.set_printoptions(threshold=np.inf)
self.__agent_label = agent_label
self.__map_arr = map_arr
self.__start_point_label = start_point_label
start_arr_tuple = np.where((self.__map_arr == self.__start_point_label))... | Initialize map of maze and setup reward value.
Args:
map_arr: Map. the 2d- `np.ndarray`.
start_point_label: Label of start point.
end_point_label: Label of end point.
wall_label: Label of wall.
agent_label: Label of agent. | codesearchnet |
def _detect(self):
results = []
for contract in self.contracts:
shadows = self.detect_builtin_shadowing_definitions(contract)
if shadows:
for shadow in shadows:
shadow_type = shadow[0]
shadow_object = shadow[1]
local_variable_parent = s... | Detect shadowing of built-in symbols
Recursively visit the calls
Returns:
list: {'vuln', 'filename,'contract','func', 'shadow'} | codesearchnet |
def print_tensor(self, args, screen_info=None):
parsed = self._arg_parsers['print_tensor'].parse_args(args)
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(screen_info)
highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)
tensor_name, tensor_slicing = command_parser.pars... | Command handler for print_tensor.
Print value of a given dumped tensor.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object. | github-repos |
def move_file(src, dest):
try:
os.replace(src, dest)
except Exception as ex_replace:
logger.error(f"error moving file {src} to "
f"{dest}. {ex_replace}")
raise | Move source file to destination.
Overwrites dest.
Args:
src: str or path-like. source file
dest: str or path-like. destination file
Returns:
None.
Raises:
FileNotFoundError: out path parent doesn't exist.
OSError: if any IO operations go wrong. | juraj-google-style |
def load_pkl(filenames):
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
times = []
for name in filenames:
name = str(name)
with open(name, 'rb') as file:
loaded_obj = pickle.load(file)
if not isinstance(loaded_obj, Times):
... | Unpickle file contents.
Args:
filenames (str): Can be one or a list or tuple of filenames to retrieve.
Returns:
Times: A single object, or from a collection of filenames, a list of Times objects.
Raises:
TypeError: If any loaded object is not a Times object. | juraj-google-style |
def begin_operation(self, conn_or_internal_id, op_name, callback, timeout):
data = {'id': conn_or_internal_id, 'callback': callback, 'operation_name': op_name}
action = ConnectionAction('begin_operation', data, timeout=timeout, sync=False)
self._actions.put(action) | Begin an operation on a connection
Args:
conn_or_internal_id (string, int): Either an integer connection id or a string
internal_id
op_name (string): The name of the operation that we are starting (stored in
the connection's microstate)
callback (callable): Callback to call when this disconnection attempt either
succe... | codesearchnet |
def individual(self, ind_id=None):
for ind_obj in self.individual_objs:
if ind_obj.ind_id == ind_id:
return ind_obj
return None | Return a individual object
Args:
ind_id (str): A individual id
Returns:
individual (puzzle.models.individual) | juraj-google-style |
def on(self, event_name, *args, **kwargs):
def decorator(f):
self.add_event_handler(event_name, f, *args, **kwargs)
return f
return decorator | Decorator shortcut for add_event_handler.
Args:
event_name: An event to attach the handler to. Valid events are from :class:`~ignite.engine.Events` or
any `event_name` added by :meth:`~ignite.engine.Engine.register_events`.
*args: optional args to be passed to `handler`.
**kwargs: optional keyword args to be passed to... | juraj-google-style |
def NCHW_VECT_CToNHWC(input_shape_or_tensor: Union[tensor_lib.Tensor, list[int]]) -> Union[tensor_lib.Tensor, list[int]]:
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, tensor_lib.Tensor)
input_shape: list[int] = input_shape_or_tensor.shape.as_list()... | Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueErro... | github-repos |
def enhance_function_signatures(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
for func in spec_dict["functions"]["signatures"]:
for i, sig in enumerate(spec_dict["functions"]["signatures"][func]["signatures"]):
args = sig["arguments"]
req_args = []
pos_args = ... | Enhance function signatures
Add required and optional objects to signatures objects for semantic validation
support.
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: return enhanced bel specification dict | juraj-google-style |
def gfortran_search_path(library_dirs):
cmd = ("gfortran", "-print-search-dirs")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return_code = process.wait()
if return_code != 0:
return library_dirs
cmd_output = process.stdout.read().decode("utf-8")
search_lines =... | Get the library directory paths for ``gfortran``.
Looks for ``libraries: =`` in the output of ``gfortran -print-search-dirs``
and then parses the paths. If this fails for any reason, this method will
print an error and return ``library_dirs``.
Args:
library_dirs (List[str]): Existing library directories.
Returns:
Li... | juraj-google-style |
def copy(source, destination):
if os.path.isdir(source):
return __copytree(source, destination)
else:
return __copyfile2(source, destination) | Copy file or directory.
Args:
source (str): Source file or directory
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise. | juraj-google-style |
def _check_version(self, root):
version = self._get_version(root)
supported = [StrictVersion(x) for x in
self.supported_versions(root.tag)]
if version in supported:
return
error = "Document version ({0}) not in supported versions ({1})"
... | Ensure the root element is a supported version.
Args:
root (etree.Element)
Raises:
UnsupportedVersionError | juraj-google-style |
def validate_seeded_answers(answers, options, algo):
if (algo['name'] == 'simple'):
return validate_seeded_answers_simple(answers, options, algo)
elif (algo['name'] == 'random'):
return validate_seeded_answers_random(answers)
else:
raise UnknownChooseAnswerAlgorithm() | Validate answers based on selection algorithm
This is called when instructor setup the tool and providing seeded answers to the question.
This function is trying to validate if instructor provided enough seeds for a give algorithm.
e.g. we require 1 seed for each option in simple algorithm and at least 1 seed for rand... | codesearchnet |
def run(self, steps=None):
try:
while (self.instruction_pointer < len(self.code)):
self.step()
if (steps is not None):
steps -= 1
if (steps == 0):
break
except StopIteration:
pass
except EOFError:
pass
re... | Run threaded code in machine.
Args:
steps: If specified, run that many number of instructions before
stopping. | codesearchnet |
def is_displayed(target):
is_displayed = getattr(target, 'is_displayed', None)
if not is_displayed or not callable(is_displayed):
raise TypeError('Target has no attribute \'is_displayed\' or not callable')
if not is_displayed():
raise WebDriverException('element not visible') | Assert whether the target is displayed
Args:
target(WebElement): WebElement Object.
Returns:
Return True if the element is displayed or return False otherwise. | juraj-google-style |
def replace_variables(self, text):
variables = {'python-executable': str(((self._venv_path / 'bin') / 'python'))}
return text.format(**variables) | Replace variable placeholders in `text` with values from the virtual env.
The variables are:
- {python-executable}
Args:
text: The text to do replacment int.
Returns: The text after replacement. | codesearchnet |
def askInitial():
return inquirer.prompt([inquirer.Text('inputPath', message="What's the path of your input file (eg input.csv)"), inquirer.List('year', message='What year are you in', choices=[1, 2, 3, 4]), inquirer.Checkbox('whatToDo', message='What can I do for you (select with your spacebar)', choices=['Get you... | Asks the user for what it wants the script to do
Returns:
[dictionary] -- answers to the questions | codesearchnet |
def _create_dir_path(self, file_hash, path=None, hash_list=None):
if hash_list is None:
hash_list = list(file_hash)
if not hash_list:
raise IOError("Directory structure is too full!")
if not path:
path = os.path.join(
... | Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path. | juraj-google-style |
def search(pattern):
def match(napp):
'Whether a NApp metadata matches the pattern.'
username = napp.get('username', napp.get('author'))
strings = (['{}/{}'.format(username, napp.get('name')), napp.get('description')] + napp.get('tags'))
return any((pattern.match(string) for string ... | Search all server NApps matching pattern.
Args:
pattern (str): Python regular expression. | codesearchnet |
def fetched_records(self, max_records=None):
if (max_records is None):
max_records = self.config['max_poll_records']
assert (max_records > 0)
drained = collections.defaultdict(list)
records_remaining = max_records
while (records_remaining > 0):
if (not self._next_partition_records):
... | Returns previously fetched records and updates consumed offsets.
Arguments:
max_records (int): Maximum number of records returned. Defaults
to max_poll_records configuration.
Raises:
OffsetOutOfRangeError: if no subscription offset_reset_strategy
CorruptRecordException: if message crc validation fails (check_crcs
mus... | codesearchnet |
def print_search_results(self, search_results, buf=sys.stdout):
formatted_lines = self.format_search_results(search_results)
pr = Printer(buf)
for txt, style in formatted_lines:
pr(txt, style) | Print formatted search results.
Args:
search_results (list of `ResourceSearchResult`): Search to format. | juraj-google-style |
def _is_magical_field(self, model_instance, field, is_insert: bool):
old_value = getattr(model_instance, field.name, None)
field.pre_save(model_instance, is_insert)
new_value = getattr(model_instance, field.name, None)
return (old_value != new_value) | Verifies whether this field is gonna modify something
on its own.
"Magical" means that a field modifies the field value
during the pre_save.
Arguments:
model_instance:
The model instance the field is defined on.
field:
The field to get of whether the field is
magical.
is_insert:
Pretend whether this is an insert?
... | codesearchnet |
def delete(self, domain, type_name, search_command):
return self._request(domain, type_name, search_command, 'DELETE', None) | Delete entry in ThreatConnect Data Store
Args:
domain (string): One of 'local', 'organization', or 'system'.
type_name (string): This is a free form index type name. The ThreatConnect API will use
this resource verbatim.
search_command (string): Search command to pass to ES. | juraj-google-style |
def get_users(self, capacity=None):
users = list()
usersdicts = self.data.get('users')
if usersdicts is not None:
for userdata in usersdicts:
if capacity is not None and userdata['capacity'] != capacity:
continue
i... | Returns the organization's users.
Args:
capacity (Optional[str]): Filter by capacity eg. member, admin. Defaults to None.
Returns:
List[User]: Organization's users. | juraj-google-style |
def delete_keyvault(access_token, subscription_id, rgname, vault_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.KeyVault/vaults/', vault_name,
... | Deletes a key vault in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
vault_name (str): Name of the new key vault.
Returns:
HTTP response. 200 OK. | juraj-google-style |
async def setvolume(self, value):
self.logger.debug('volume command')
if (self.state != 'ready'):
return
logger.debug('Volume command received')
if (value == '+'):
if (self.volume < 100):
self.statuslog.debug('Volume up')
self.volume = ((10 * (self.volume
... | The volume command
Args:
value (str): The value to set the volume to | codesearchnet |
def archive(self, output_path):
if self.path is None:
raise ArgumentError("Cannot archive a recipe yet without a reference to its original yaml file in self.path")
outfile = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED)
outfile.write(self.path, arcname="recipe_s... | Archive this recipe and all associated files into a .ship archive.
Args:
output_path (str): The path where the .ship file should be saved. | juraj-google-style |
def ReadSystemConfigurationArtifact(
self, system_configuration, session_identifier=CURRENT_SESSION):
if system_configuration.code_page:
try:
self.SetCodepage(system_configuration.code_page)
except ValueError:
logger.warning(
'Unsupported codepage: {0:s}, defaultin... | Reads the knowledge base values from a system configuration artifact.
Note that this overwrites existing values in the knowledge base.
Args:
system_configuration (SystemConfigurationArtifact): system configuration
artifact.
session_identifier (Optional[str])): session identifier, where
CURRENT_SESSION represents the ... | juraj-google-style |
def populate_ast_nsarg_defaults(ast, belast, species_id=None):
if isinstance(ast, NSArg):
given_term_id = "{}:{}".format(ast.namespace, ast.value)
r = bel.terms.terms.get_normalized_terms(given_term_id)
ast.canonical = r["canonical"]
ast.decanonical = r["decanonical"]
... | Recursively populate NSArg AST entries for default (de)canonical values
This was added specifically for the BEL Pipeline. It is designed to
run directly against ArangoDB and not through the BELAPI.
Args:
ast (BEL): BEL AST
Returns:
BEL: BEL AST | juraj-google-style |
def update_load_balancer(access_token, subscription_id, resource_group, lb_name, body):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/loadBalanc... | Updates a load balancer model, i.e. PUT an updated LB body.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the new load balancer.
body (str): JSON body of an updated load balancer.
Retu... | juraj-google-style |
def from_dict(cls, d):
sites = [Site.from_dict(sd) for sd in d["sites"]]
charge = d.get("charge", 0)
spin_multiplicity = d.get("spin_multiplicity")
return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity) | Reconstitute a Molecule object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of Molecule.
Returns:
Molecule object | juraj-google-style |
def configure(self, sbi_config: str):
config_dict = json.loads(sbi_config)
self.debug_stream('SBI configuration:\n%s',
json.dumps(config_dict, indent=2))
try:
sbi = Subarray(self.get_name()).configure_sbi(config_dict)
except jsonsch... | Configure an SBI for this subarray.
Args:
sbi_config (str): SBI configuration JSON
Returns:
str, | juraj-google-style |
def sanger_variants(self, institute_id=None, case_id=None):
query = {'validation': {'$exists': True}}
if institute_id:
query['institute_id'] = institute_id
if case_id:
query['case_id'] = case_id
return self.variant_collection.find(query) | Return all variants with sanger information
Args:
institute_id(str)
case_id(str)
Returns:
res(pymongo.Cursor): A Cursor with all variants with sanger activity | codesearchnet |
def bessel_k0(x, name=None):
with ops.name_scope(name, 'bessel_k0', [x]):
return gen_special_math_ops.bessel_k0(x) | Computes the Bessel k0 function of `x` element-wise.
Modified Bessel function of order 0.
It is preferable to use the numerically stabler function `k0e(x)` instead.
>>> tf.math.special.bessel_k0([0.5, 1., 2., 4.]).numpy()
array([0.92441907, 0.42102444, 0.11389387, 0.01115968], dtype=float32)
Args:
x: A `Tensor` or ... | github-repos |
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
device_cache = match.get('DeviceCache', {})
for device, value in iter(device_cache.items()):
name = value.get('Name', '')
if name:
name = ''.join(('Name:', name))
event_data = plist_event.PlistTimeEventData()
... | Extracts relevant BT entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. | juraj-google-style |
def _call_unittest_assertion(assertion_method, *args, msg=None, extras=None, **kwargs):
my_msg = None
try:
assertion_method(*args, **kwargs)
except AssertionError as e:
my_msg = str(e)
if msg:
my_msg = f'{my_msg} {msg}'
if my_msg is not None:
raise signals.Tes... | Wrapper for converting a unittest assertion into a Mobly one.
Args:
assertion_method: unittest.TestCase assertion method to call.
*args: Positional arguments for the assertion call.
msg: A string that adds additional info about the failure.
extras: An optional field for extra information to be included in
test result.... | github-repos |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.