content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def _process_motion_command(command, opts):
    """
    Process motion command.

    Translates a robot command into a formatted motion statement by
    selecting the motion type (linear vs. PTP/CPTP) from the user options,
    collecting the numeric motion data, and filling the corresponding
    post-processor templates.

    :param command: Command tuple (expects .pose, .configuration,
        .external_axes, .axes attributes; any may be None)
    :param opts: UserOptions tuple (reads Use_linear_motion,
        Use_nonlinear_motion, Use_continuous_motion)
    :return: formatted motion string produced by postproc.fill_template
    :raises ValueError: if the command lacks the data the selected motion
        type requires, or if no motion type was selected in opts
    """
    motion_data_type = None
    motion_data = []  # empty data container
    # Interpret linear motion command
    if opts.Use_linear_motion:
        motion_type = MOVE_LIN
        if command.pose is not None:
            motion_data.extend(_convert_pose(command.pose))
            if command.configuration is not None:
                if command.external_axes is not None:
                    motion_data_type = E6POS
                    # Replace missing external-axis entries with 0.
                    external_axes = [axis if axis is not None else 0 for axis in command.external_axes]
                    motion_data.extend(external_axes)
                else:
                    motion_data_type = POS
                # NOTE(review): for E6POS the configuration values are appended
                # *after* the external axes here — confirm this matches the
                # field order expected by STRUCTURES[E6POS]/TEMPLATES[E6POS].
                motion_data.extend(_convert_configuration(command.configuration))
            else:
                motion_data_type = FRAME
        else:
            raise ValueError('Invalid command')
    # Interpret nonlinear motion command
    elif opts.Use_nonlinear_motion:
        if opts.Use_continuous_motion:
            motion_type = MOVE_CPTP
        else:
            motion_type = MOVE_PTP
        if command.axes is not None:
            motion_data.extend(command.axes)
            if command.external_axes is not None:
                motion_data_type = E6AXIS
                # Replace missing external-axis entries with 0.
                external_axes = [axis if axis is not None else 0 for axis in command.external_axes]
                motion_data.extend(external_axes)
            else:
                motion_data_type = AXIS
        elif command.pose is not None:
            motion_data_type = FRAME
            motion_data.extend(_convert_pose(command.pose))
        else:
            raise ValueError('Invalid command')
    else:  # User never supplied a motion type
        raise ValueError('Invalid motion type')
    # Format parameters into string
    motion_data = [general_utils.num_to_str(d, include_sign=False, precision=3)
                   for d in motion_data]
    # Structure and format data, command
    formatted_motion_data = postproc.fill_template(
        motion_data,
        STRUCTURES[motion_data_type],
        TEMPLATES[motion_data_type])
    formatted_motion = postproc.fill_template(
        formatted_motion_data,
        STRUCTURES[motion_type],
        TEMPLATES[motion_type])
    return formatted_motion
import re
def prepare_term_id(config, vocab_ids, term):
    """Resolve *term* to a taxonomy term ID for REST POST/PATCH operations.

    REST POST and PATCH operations require taxonomy term IDs, not term names.
    This function checks its 'term' argument to see if it's numeric (i.e., a
    term ID) and if it is, returns it as is. If it's not (i.e., a term name)
    it looks for the term name in the referenced vocabulary and returns its
    term ID (existing or newly created).

    :param config: workbench configuration dictionary, passed through to
        get_term_id_from_uri()/create_term()
    :param vocab_ids: list of vocabulary IDs linked to the target field
    :param term: term ID (numeric), Linked Data URI ('http...'), plain term
        name, or namespaced 'vocab_id:term name'
    :return: term ID as str/int, or None when the term cannot be resolved
        (URI with no matching term, or un-namespaced name in a
        multi-vocabulary field)
    """
    term = str(term)
    term = term.strip()
    if value_is_numeric(term):
        return term
    # Special case: if the term starts with 'http', assume it's a Linked Data URI
    # and get its term ID from the URI.
    elif term.startswith('http'):
        # Note: get_term_from_uri() will return False if the URI doesn't match
        # a term.
        tid_from_uri = get_term_id_from_uri(config, term)
        if value_is_numeric(tid_from_uri):
            return tid_from_uri
        # No matching term for the URI: nothing we can do.
        return None
    else:
        if len(vocab_ids) == 1:
            # Single vocabulary: create (or fetch) the term directly.
            tid = create_term(config, vocab_ids[0].strip(), term.strip())
            return tid
        else:
            # Term names used in mult-taxonomy fields. They need to be namespaced with
            # the taxonomy ID.
            #
            # If the field has more than one vocabulary linked to it, we don't know which
            # vocabulary the user wants a new term to be added to, and if the term name is
            # already used in any of the taxonomies linked to this field, we also don't know
            # which vocabulary to look for it in to get its term ID. Therefore, we always need
            # to namespace term names if they are used in multi-taxonomy fields.
            #
            # Split the namespace/vocab ID from the term name on the FIRST ':'
            # only, so term names containing colons still work.
            namespaced = re.search(':', term)
            if namespaced:
                # maxsplit=1: everything after the first colon is the term name.
                [vocab_id, term_name] = term.split(':', 1)
                tid = create_term(config, vocab_id.strip(), term_name.strip())
                return tid
            # Un-namespaced name in a multi-vocabulary field: unresolvable.
            return None
import re
def simple_string(value: str) -> str:
    """Normalize *value* into a simplified form for loose comparison."""
    if not value:
        return ""
    cleaned = strip_string(value)        # Remove quotes
    cleaned = cleaned.rstrip("\\")       # Remove trailing backslashes
    cleaned = cleaned.lower()            # Lowercase
    # Collapse every remaining non-alphanumeric character to '_'.
    return re.sub(r"[^a-z0-9]", "_", cleaned.strip())
def current_name() -> str:
    """Return the current dataset name; defaults to an empty name."""
    context = _current_name_context.get()
    return context.name
def open_zarr(path):
    """Open an xarray dataset regardless of where pytest was invoked.

    Resolves *path* against ``dir_`` so the same relative path works when
    running from the repository root or from inside the test directory.
    """
    full_path = dir_.joinpath(path)
    return xr.open_zarr(str(full_path))
def iid_log_probs(ids, batch_index, sequence_index, logp):
"""
Stacks the ids into a matrix that allows you to extract the corresponding logp
from the iid samples from the decoder.
:param ids: [B,T] tensor of ids in vocab
:param batch_index: [B,T] tensor of the batch size repeated for seq len
:param sequence_index: [B,T] tensor of range(0, seq len)
:param logp: the [B,T,V] tensor of logp from the decoder (softmax of logits)
:return: the corresponding log probabilities for the sampled IDs
"""
logp_new = tf.reshape(ids, [ids.get_shape().as_list()[1]])
index_tensor = tf.stack([batch_index, sequence_index, logp_new], axis=1)
logp_out = tf.gather_nd(logp, index_tensor) # finds log probs using indexing
return logp_out | 8a18e6652881fcad79ebd58ae0fec8f35be566f0 | 3,632,905 |
import re
def get_format_from_path(path):
    """Return a (format name, canonical extension, unpack function) tuple
    for the archive at *path*.

    Raises ArchiveError when the extension is not a recognized archive type.
    """
    # Ordered (pattern, result) dispatch table; first match wins.
    dispatch = (
        (r'(\.tar\.gz$)|(\.tgz$)', ('gztar', 'tgz', unpack_tar)),
        (r'\.zip$', ('zip', 'zip', unpack_zip)),
        (r'(\.tar\.bz2$)|(\.tbz2$)', ('bztar', 'tbz2', unpack_tar)),
        (r'(\.tar\.xz$)|(\.txz$)', ('xztar', 'txz', unpack_tar)),
        (r'\.tar$', ('tar', 'tar', unpack_tar)),
    )
    for pattern, result in dispatch:
        if re.search(pattern, path):
            return result
    raise ArchiveError('Unknown archive extension for path: %s' % path)
def tag(name, open=False, **options):
    """
    Returns an XHTML compliant tag of type ``name``.
    ``open``
        Set to True if the tag should remain open
    All additional keyword args become attribute/value's for the tag. To pass in Python
    reserved words, append _ to the name of the key. For attributes with no value (such as
    disabled and readonly), a value of True is permitted.
    Examples::
        >>> tag("br")
        '<br />'
        >>> tag("br", True)
        '<br>'
        >>> tag("input", type="text")
        '<input type="text" />'
        >>> tag("input", type='text', disabled=True)
        '<input disabled="disabled" type="text" />'
    """
    # tag_options() is only consulted when keyword options were supplied.
    attrs = (tag_options(**options) or '') if options else ''
    closing = '>' if open else ' />'
    return '<%s%s%s' % (name, attrs, closing)
import sys
def CMDarchive(parser, args):
    """Archives data to the server.
    If a directory is specified, a .isolated file is created the whole directory
    is uploaded. Then this .isolated file can be included in another one to run
    commands.
    The commands output each file that was processed with its content hash. For
    directories, the .isolated generated for the directory is listed as the
    directory entry itself.
    """
    add_isolate_server_options(parser)
    add_archive_options(parser)
    options, files = parser.parse_args(args)
    process_isolate_server_options(parser, options, True, True)
    server_ref = isolate_storage.ServerRef(
      options.isolate_server, options.namespace)
    # A lone '-' argument means: read the file list from stdin, one per line.
    if files == ['-']:
      files = (l.rstrip('\n\r') for l in sys.stdin)
    if not files:
      parser.error('Nothing to upload')
    # NOTE(review): .decode('utf-8') on each entry and .iteritems() below are
    # Python 2 APIs — this command appears to target Python 2 only.
    files = (f.decode('utf-8') for f in files)
    blacklist = tools.gen_blacklist(options.blacklist)
    try:
      with get_storage(server_ref) as storage:
        results, _cold, _hot = archive_files_to_storage(storage, files, blacklist)
    except (Error, local_caching.NoMoreSpace) as e:
      parser.error(e.args[0])
    # Print "<hash> <file>" for every archived file.
    print('\n'.join('%s %s' % (h, f) for f, h in results.iteritems()))
    return 0
import requests
import re
def get_ludo_user_id(ludo_username):
    """Return the numeric Ludopedia user id for *ludo_username*, or None
    when the id cannot be found in the profile page."""
    page = requests.Session().get(f'{LUDOPEDIA_USER_URL}/{ludo_username}')
    found = re.search(LUDOPEDIA_USER_ID_REGEX, page.text)
    # group(1) holds the captured user id.
    return found.group(1) if found else None
def operations_get_notification_list_post(inline_object19=None):  # noqa: E501
    """operations_get_notification_list_post
    # noqa: E501
    :param inline_object19:
    :type inline_object19: dict | bytes
    :rtype: TapiNotificationGetNotificationList
    """
    # Deserialize the JSON body into the generated model when present.
    if connexion.request.is_json:
        body = connexion.request.get_json()
        inline_object19 = InlineObject19.from_dict(body)  # noqa: E501
    return 'do some magic!'
def project_block_to_graph(block_sizes, block_level_vals):
    """
    Projects a set of values at the level of
    blocks to the nodes in the graph.

    Each value block_level_vals[k] is repeated block_sizes[k] times, so
    every node carries the value of the block it belongs to.

    :param block_sizes: node count of each block, aligned with block_level_vals
    :param block_level_vals: one value per block
    :return: 1-D float64 array of length sum(block_sizes)
    """
    # np.repeat does the whole projection in one vectorized call, replacing
    # the original O(n^2) loop of repeated np.concatenate calls. Cast to
    # float to keep the dtype the old code produced (its initial empty list
    # promoted every concatenation to float64).
    return np.repeat(np.asarray(block_level_vals, dtype=float),
                     np.asarray(block_sizes, dtype=int))
from ._common import generators
def _write_gener(parameters, simulator="tough"):
    """Write GENER block data.

    Serializes each entry of parameters["generators"] into fixed-format
    GENER records: a header record per generator, followed (for tabular
    generators) by time/rate/enthalpy tables and, for TOUGHREACT, optional
    conductivity tables.

    :param parameters: dict with a "generators" list of generator dicts
    :param simulator: target simulator name (currently unused in this body
        — NOTE(review): confirm whether it is consumed elsewhere)
    :return: list of formatted output lines
    :raises ValueError: on inconsistent table lengths
    :raises TypeError: on rates/enthalpy arrays of the wrong dimensionality
    """
    # Format: pick the label-width variant of the GENER record format.
    label_length = max(
        [
            len(generator["label"]) if "label" in generator else 0
            for generator in parameters["generators"]
        ]
    )
    label_length = max(label_length, 5)
    fmt = block_to_format["GENER"]
    fmt1 = str2format(fmt[label_length])
    fmt2 = str2format(fmt[0])
    out = []
    for v in parameters["generators"]:
        # Load data: start from the default generator template, then overlay.
        data = deepcopy(generators)
        data.update(v)
        # Table: ltab is the number of tabulated points (1 = scalar record).
        ltab = 1
        if data["times"] is not None and isinstance(
            data["times"], (list, tuple, np.ndarray)
        ):
            ltab = len(data["times"])
            # Rates/enthalpy must be scalars or match the times table length.
            for key in ["rates", "specific_enthalpy"]:
                if data[key] is not None:
                    if ltab == 1 and np.ndim(data[key]) == 1:
                        if len(data[key]) > 1:
                            raise ValueError()
                        data[key] = data[key][0]
                    else:
                        if np.ndim(data[key]) != 1:
                            raise TypeError()
                        if ltab != len(data[key]):
                            raise ValueError()
        elif data["type"] == "DELV" and data["n_layer"]:
            # DELV generators use the layer count instead of a time table.
            ltab = data["n_layer"]
        else:
            # No table: collapse any single-element sequences to scalars.
            for key in ["rates", "specific_enthalpy"]:
                if key in data and np.ndim(data[key]) > 0:
                    if len(data[key]) > 1:
                        raise ValueError()
                    data[key] = data[key][0]
        itab = (
            "1"
            if isinstance(data["specific_enthalpy"], (list, tuple, np.ndarray))
            else None
        )
        # TOUGHREACT: optional thermal-conductivity time/factor tables.
        ktab = None
        if (
            data["conductivity_times"] is not None
            and data["conductivity_factors"] is not None
        ):
            ktab = len(data["conductivity_times"])
            if len(data["conductivity_factors"]) != ktab:
                raise ValueError()
        # Record 1: generator header.
        values = [
            data["label"] if "label" in data else "",
            data["name"],
            data["nseq"],
            data["nadd"],
            data["nads"],
            ltab if ltab > 1 else None,
            None,
            data["type"],
            itab,
            None if ltab > 1 and data["type"] != "DELV" else data["rates"],
            None if ltab > 1 and data["type"] != "DELV" else data["specific_enthalpy"],
            data["layer_thickness"],
            ktab,
        ]
        out += write_record(values, fmt1)
        if ltab > 1 and data["type"] != "DELV":
            # Record 2: generation times.
            out += write_record(data["times"], fmt2, multi=True)
            # Record 3: generation rates.
            out += write_record(data["rates"], fmt2, multi=True)
            # Record 4: specific enthalpy (broadcast a scalar over the table).
            if data["specific_enthalpy"] is not None:
                if isinstance(data["specific_enthalpy"], (list, tuple, np.ndarray)):
                    specific_enthalpy = data["specific_enthalpy"]
                else:
                    specific_enthalpy = np.full(ltab, data["specific_enthalpy"])
                out += write_record(specific_enthalpy, fmt2, multi=True)
        # TOUGHREACT conductivity tables.
        if ktab:
            out += write_record(data["conductivity_times"], fmt2, multi=True)
            out += write_record(data["conductivity_factors"], fmt2, multi=True)
    return out
def _find(condition):
"""Returns indices where ravel(a) is true.
Private implementation of deprecated matplotlib.mlab.find
"""
return np.nonzero(np.ravel(condition))[0] | 34f0bbeda3c7e8309ab990579d2a57014c78c93e | 3,632,913 |
def get_weekends():
    """
    Gets weekends from user input.

    Prompts the user for their weekend days, matches the reply against the
    known weekday names, and asks again until confirm_input() accepts the
    selection.

    :return: list of matched weekday names
    """
    # Loop instead of recursing on rejection: the original called itself on
    # every "no", which could eventually hit the recursion limit.
    while True:
        user_reply = input("What days of the week are your weekends? ")
        # Case-insensitive substring match against each known weekday name.
        selected = [day for day in weekday_dict.keys()
                    if day.lower() in user_reply.lower()]
        if confirm_input(selected):
            return selected
def get_non_utf8_tables_columns(mysql_db_name, mysql_username, mysql_password):
"""Return two lists: the names of tables and columns that do not use the UTF-8 character set."""
sqlalchemy_url = 'mysql://%s:%s@localhost:3306/information_schema' % (mysql_username, mysql_password)
info_schema_engine = create_engine(sqlalchemy_url)
tables_table = Table('TABLES', meta, autoload=True, autoload_with=info_schema_engine)
columns_table = Table('COLUMNS', meta, autoload=True, autoload_with=info_schema_engine)
select = tables_table.select().\
where(tables_table.c.TABLE_SCHEMA == bindparam('mysql_db_name')).\
where(tables_table.c.TABLE_COLLATION != 'utf8_general_ci')
non_utf8_tables = [row['TABLE_NAME'] for row in
info_schema_engine.execute(select, {'mysql_db_name': mysql_db_name}).fetchall()]
select = columns_table.select().\
where(columns_table.c.TABLE_SCHEMA == bindparam('mysql_db_name')).\
where(columns_table.c.COLLATION_NAME != 'utf8_general_ci')
non_utf8_columns = [row['COLUMN_NAME'] for row in
info_schema_engine.execute(select, {'mysql_db_name': mysql_db_name}).fetchall()]
return non_utf8_tables, non_utf8_columns | 15452ff9737d765b3175c50a4758269b27c6da80 | 3,632,915 |
import os
import time
import urllib
import base64
import shutil
import hashlib
import warnings
def _fetch_file(url, data_dir=TEMP, uncompress=False, move=False,md5sum=None,
                username=None, password=None, mock=False, handlers=[], resume=True, verbose=0):
    """Load requested dataset, downloading it if needed or requested.
    This function retrieves files from the hard drive or download them from
    the given urls. Note to developpers: All the files will be first
    downloaded in a sandbox and, if everything goes well, they will be moved
    into the folder of the dataset. This prevents corrupting previously
    downloaded data. In case of a big dataset, do not hesitate to make several
    calls if needed.
    Parameters
    ----------
    url: string
        URL of the file to download
    data_dir: string, optional
        Path of the data directory. Used to force data storage in a specified
        location.
    uncompress: bool, optional
        If true, will uncompress zip
    move: str, optional
        If True, will move downloaded file to given relative path.
        NOTE: common usage is zip_file_id/zip_file.zip together
        with uncompress set to True
    md5sum: string, optional
        MD5 sum of the file. Checked if download of the file is required
    username: string, optional
        Username used for basic HTTP authentication
    password: string, optional
        Password used for basic HTTP authentication
    mock: bool, optional
        If true, create an empty placeholder instead of failing when the
        requested file was not provided by the download
    handlers: list of BaseHandler, optional
        urllib handlers passed to urllib.request.build_opener. Used by
        advanced users to customize request handling.
    resume: bool, optional
        If true, try to resume partially downloaded files
    verbose: int, optional
        verbosity level (0 means no message).
    Returns
    -------
    files: list of string
        Absolute paths of downloaded files on disk
    """
    # NOTE(review): this function mixes `urllib` and `_urllib` — `_urllib`
    # must be an alias imported elsewhere in the file; confirm it exists.
    # NOTE(review): `cPickle` (below) and `HTTPError` are not among the
    # imports visible here either.
    # TODO: move to global scope and rename
    def _fetch_helper(url, data_dir=TEMP, resume=True, overwrite=False,
                      md5sum=None, username=None, password=None, handlers=[],
                      verbose=1):
        # Download a single URL into data_dir, resuming partial *.part files
        # when possible; returns the absolute path of the downloaded file.
        if not os.path.isabs(data_dir):
            data_dir = _get_dataset_dir(data_dir)
        # Determine data path
        _makedirs(data_dir)
        # Determine filename using URL
        parse = _urllib.parse.urlparse(url)
        file_name = os.path.basename(parse.path)
        if file_name == '':
            file_name = md5_hash(parse.path)
        temp_file_name = file_name + ".part"
        full_name = os.path.join(data_dir, file_name)
        temp_full_name = os.path.join(data_dir, temp_file_name)
        if os.path.exists(full_name):
            if overwrite:
                os.remove(full_name)
            else:
                return full_name
        if os.path.exists(temp_full_name):
            if overwrite:
                os.remove(temp_full_name)
        t0 = time.time()
        local_file = None
        initial_size = 0
        try:
            # Download data
            url_opener = urllib.request.build_opener(*handlers)
            request = urllib.request.Request(url)
            request.add_header('Connection', 'Keep-Alive')
            if username is not None and password is not None:
                if not url.startswith('https'):
                    raise ValueError(
                        'Authentication was requested on a non secured URL (%s).'
                        'Request has been blocked for security reasons.' % url)
                # Note: HTTPBasicAuthHandler is not fitted here because it relies
                # on the fact that the server will return a 401 error with proper
                # www-authentication header, which is not the case of most
                # servers.
                encoded_auth = base64.b64encode(
                    (username + ':' + password).encode())
                request.add_header(b'Authorization', b'Basic ' + encoded_auth)
            if verbose > 0:
                displayed_url = url.split('?')[0] if verbose == 1 else url
                print('Downloading data from %s ...' % displayed_url)
            if resume and os.path.exists(temp_full_name):
                # Download has been interrupted, we try to resume it.
                local_file_size = os.path.getsize(temp_full_name)
                # If the file exists, then only download the remainder
                request.add_header("Range", "bytes=%s-" % (local_file_size))
                try:
                    data = url_opener.open(request)
                    content_range = data.info().get('Content-Range')
                    if (content_range is None or not content_range.startswith(
                            'bytes %s-' % local_file_size)):
                        raise IOError('Server does not support resuming')
                except Exception:
                    # A wide number of errors can be raised here. HTTPError,
                    # URLError... I prefer to catch them all and rerun without
                    # resuming.
                    if verbose > 0:
                        print('Resuming failed, try to download the whole file.')
                    return _fetch_helper(
                        url, data_dir, resume=False, overwrite=overwrite,
                        md5sum=md5sum, username=username, password=password,
                        handlers=handlers, verbose=verbose)
                local_file = open(temp_full_name, "ab")
                initial_size = local_file_size
            else:
                data = url_opener.open(request)
                local_file = open(temp_full_name, "wb")
            _chunk_read_(data, local_file, report_hook=(verbose > 0),
                         initial_size=initial_size, verbose=verbose)
            # temp file must be closed prior to the move
            if not local_file.closed:
                local_file.close()
            shutil.move(temp_full_name, full_name)
            dt = time.time() - t0
            if verbose > 0:
                print('...done. (%i seconds, %i min)' % (dt, dt // 60))
        except HTTPError as e:
            if verbose > 0:
                print('Error while fetching file %s. Dataset fetching aborted.' %
                      (file_name))
            if verbose > 1:
                print("HTTP Error: %s, %s" % (e, url))
            raise
        except _urllib.error.URLError as e:
            if verbose > 0:
                print('Error while fetching file %s. Dataset fetching aborted.' %
                      (file_name))
            if verbose > 1:
                print("URL Error: %s, %s" % (e, url))
            raise
        finally:
            if local_file is not None:
                if not local_file.closed:
                    local_file.close()
        if md5sum is not None:
            if (_md5_sum_file(full_name) != md5sum):
                raise ValueError("File %s checksum verification has failed."
                                 " Dataset fetching aborted." % local_file)
        return full_name
    if not os.path.isabs(data_dir):
        data_dir = _get_dataset_dir(data_dir)
    # There are two working directories here:
    # - data_dir is the destination directory of the dataset
    # - temp_dir is a temporary directory dedicated to this fetching call. All
    #   files that must be downloaded will be in this directory. If a corrupted
    #   file is found, or a file is missing, this working directory will be
    #   deleted.
    parse = _urllib.parse.urlparse(url)
    file_name = os.path.basename(parse.path)
    files_pickle = cPickle.dumps([(file_, url) for file_, url in zip([file_name], [url])])
    files_md5 = hashlib.md5(files_pickle).hexdigest()
    temp_dir = os.path.join(data_dir, files_md5)
    # Create destination dir
    _makedirs(data_dir)
    # Abortion flag, in case of error
    abort = None
    # 2 possibilities:
    # - the file exists in data_dir, nothing to do (we have to account for move parameter here)
    # - the file does not exists: we download it in temp_dir
    # Target file in the data_dir
    target_file = os.path.join(data_dir, file_name)
    # Change move so we always uncompress to some folder (this is important for
    # detecting already downloaded files)
    # Ex. glove.4B.zip -> glove.4B/glove.4B.zip
    if uncompress and not move:
        dirname, _ = os.path.splitext(file_name)
        move = os.path.join(dirname, os.path.basename(file_name))
    if (abort is None
            and not os.path.exists(target_file)
            and (not move or (move and uncompress and not os.path.exists(os.path.dirname(os.path.join(data_dir, move)))))
            or (move and not uncompress and not os.path.exists(os.path.join(data_dir, move)))):
        # Target file in temp dir
        temp_target_file = os.path.join(temp_dir, file_name)
        # We may be in a global read-only repository. If so, we cannot
        # download files.
        if not os.access(data_dir, os.W_OK):
            raise ValueError('Dataset files are missing but dataset'
                             ' repository is read-only. Contact your data'
                             ' administrator to solve the problem')
        if not os.path.exists(temp_dir):
            os.mkdir(temp_dir)
        dl_file = _fetch_helper(url, temp_dir, resume=resume,
                                verbose=verbose, md5sum=md5sum,
                                username=username,
                                password=password,
                                handlers=handlers)
        if (abort is None and not os.path.exists(target_file) and not
                os.path.exists(temp_target_file)):
            if not mock:
                # NOTE(review): `file_` is not defined in this scope (the
                # comprehension above does not leak it in Python 3) — this
                # warning line would raise NameError if ever reached.
                warnings.warn('An error occured while fetching %s' % file_)
                abort = ("Dataset has been downloaded but requested file was "
                         "not provided:\nURL:%s\nFile:%s" %
                         (url, target_file))
            else:
                _makedirs(os.path.dirname(temp_target_file))
                open(temp_target_file, 'w').close()
        if move:
            move = os.path.join(data_dir, move)
            move_dir = os.path.dirname(move)
            _makedirs(move_dir)
            shutil.move(dl_file, move)
            dl_file = move
            target_file = dl_file
        if uncompress:
            try:
                if os.path.getsize(dl_file) != 0:
                    _uncompress_file(dl_file, verbose=verbose)
                else:
                    os.remove(dl_file)
                target_file = os.path.dirname(target_file)
            except Exception as e:
                abort = str(e)
    else:
        if verbose > 0:
            print("File already downloaded, skipping")
        if move:
            target_file = os.path.join(data_dir, move)
        if uncompress:
            target_file = os.path.dirname(target_file)
    if abort is not None:
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
        raise IOError('Fetching aborted: ' + abort)
    # If needed, move files from temps directory to final directory.
    if os.path.exists(temp_dir):
        # XXX We could only moved the files requested
        # XXX Movetree can go wrong
        movetree(temp_dir, data_dir)
        shutil.rmtree(temp_dir)
    return target_file
def dataframe_from_dictionary(entry):
    """Build a pandas DataFrame from a list of record dictionaries."""
    frame = pd.DataFrame(data=entry)
    return frame
def label_skew_process(label_vocab, label_assignment, client_num, alpha, data_length):
    """
    Partition dataset indices across clients with Dirichlet label skew.

    params
    -------------------------------------------------------------------
    label_vocab : dict label vocabulary of the dataset
    label_assignment : 1d list a list of label, the index of list is the index associated to label
    client_num : int number of clients
    alpha : float similarity of each client, the larger the alpha the similar data for each client
    data_length : int total number of samples in the dataset
    -------------------------------------------------------------------
    return
    ------------------------------------------------------------------
    partition_result : 2d array list of partition index of each client
    ------------------------------------------------------------------
    """
    label_index_matrix = [[] for _ in label_vocab]
    label_proportion = []
    partition_result = [[] for _ in range(client_num)]
    client_length = 0
    print("client_num", client_num)
    # shuffle indexs and calculate each label proportion of the dataset
    for index, value in enumerate(label_vocab):
        label_location = np.where(label_assignment == value)[0]
        label_proportion.append(len(label_location) / data_length)
        np.random.shuffle(label_location)
        label_index_matrix[index].extend(label_location[:])
    print("proportion", label_proportion)
    # calculate size for each partition client
    label_index_tracker = np.zeros(len(label_vocab), dtype=int)
    total_index = data_length
    each_client_index_length = int(total_index / client_num)
    print("each index length", each_client_index_length)
    # Dirichlet concentration scaled by each label's global proportion.
    client_dir_dis = np.array([alpha * l for l in label_proportion])
    print("alpha", alpha)
    print("client dir dis", client_dir_dis)
    proportions = np.random.dirichlet(client_dir_dis)
    print("dir distribution", proportions[0])
    # add all the unused data to the client
    for client_id in range(len(partition_result)):
        each_client_partition_result = partition_result[client_id]
        # Fresh per-client label distribution; re-draw on NaN (can happen
        # when a concentration parameter underflows to 0).
        proportions = np.random.dirichlet(client_dir_dis)
        print(client_id, proportions)
        print(type(proportions[0]))
        while True in np.isnan(proportions):
            proportions = np.random.dirichlet(client_dir_dis)
        # Give this client the standard share, or everything that is left
        # when fewer than two shares remain.
        client_length = min(each_client_index_length, total_index)
        if total_index < client_length * 2:
            client_length = total_index
        total_index -= client_length
        client_length_pointer = client_length
        # for each label calculate the offset length assigned to by Dir distribution and then extend assignment
        for label_id, _ in enumerate(label_vocab):
            offset = round(proportions[label_id] * client_length)
            if offset >= client_length_pointer:
                offset = client_length_pointer
                client_length_pointer = 0
            else:
                if label_id == (len(label_vocab) - 1):
                    offset = client_length_pointer
                client_length_pointer -= offset
            start = int(label_index_tracker[label_id])
            end = int(label_index_tracker[label_id] + offset)
            label_data_length = len(label_index_matrix[label_id])
            # if the the label is assigned to a offset length that is more than what its remaining length
            if end > label_data_length:
                each_client_partition_result.extend(
                    label_index_matrix[label_id][start:]
                )
                label_index_tracker[label_id] = label_data_length
                # Borrow the shortfall from other labels (helper defined
                # elsewhere in this module).
                label_index_offset = dynamic_batch_fill(
                    label_index_tracker,
                    label_index_matrix,
                    end - label_data_length,
                    label_id,
                )
                for fill_label_id in label_index_offset.keys():
                    start = label_index_tracker[fill_label_id]
                    end = (
                        label_index_tracker[fill_label_id]
                        + label_index_offset[fill_label_id]
                    )
                    each_client_partition_result.extend(
                        label_index_matrix[fill_label_id][start:end]
                    )
                    label_index_tracker[fill_label_id] = (
                        label_index_tracker[fill_label_id]
                        + label_index_offset[fill_label_id]
                    )
            else:
                each_client_partition_result.extend(
                    label_index_matrix[label_id][start:end]
                )
                label_index_tracker[label_id] = label_index_tracker[label_id] + offset
        # if last client still has empty rooms, fill empty rooms with the rest of the unused data
        if client_id == len(partition_result) - 1:
            print("last id length", len(each_client_partition_result))
            print("Last client fill the rest of the unfilled lables.")
            for not_fillall_label_id in range(len(label_vocab)):
                if label_index_tracker[not_fillall_label_id] < len(
                    label_index_matrix[not_fillall_label_id]
                ):
                    print("fill more id", not_fillall_label_id)
                    start = label_index_tracker[not_fillall_label_id]
                    each_client_partition_result.extend(
                        label_index_matrix[not_fillall_label_id][start:]
                    )
                    label_index_tracker[not_fillall_label_id] = len(
                        label_index_matrix[not_fillall_label_id]
                    )
        partition_result[client_id] = each_client_partition_result
    return partition_result
def can_item_circulate(item_pid):
    """Return True if Item can circulate."""
    item = Item.get_record_by_pid(item_pid)
    # Unknown/missing items cannot circulate.
    return bool(item) and item["status"] == "CAN_CIRCULATE"
def GetFocusThreadPB(filename):
    """
    Get the focus thread for a pinball.
    If the pinball log file format is version 2.4, or lower, the focus thread
    info will be in a *.result file. However, as of version 2.5, this info is
    now in the *.global.log file.
    @return integer with focus thread
    @return -1 if no focus thread found
    """
    # First try the *.result file, then fall back to *.global.log.
    result = FindResultString(filename, 'focus_thread:')
    if not result[0]:
        if '.global.log' not in filename:
            filename += '.global.log'
        result = FindString(filename, 'focus_thread:')
    focus = result[0]
    return int(focus) if focus else -1
def refpoint(matrix, objectives, weights):
    """Execute reference point MOORA without any validation."""
    # Per-criterion extreme values across all alternatives.
    best = np.max(matrix, axis=0)
    worst = np.min(matrix, axis=0)
    # Use the max for maximization criteria and the min otherwise.
    maximize_mask = np.where(objectives == Objective.MAX.value, objectives, 0)
    reference_point = np.where(maximize_mask, best, worst)
    # Tchebycheff-style weighted distance to the reference point.
    distances = np.max(np.abs(weights * (matrix - reference_point)), axis=1)
    score = np.squeeze(np.asarray(distances))
    return rank.rank_values(score), score, reference_point
from typing import Optional
from typing import List
def init_fabric_device_step1(device_id: int, new_hostname: str, device_type: str,
                             neighbors: Optional[List[str]] = None,
                             job_id: Optional[str] = None,
                             scheduled_by: Optional[str] = None) -> NornirJobResult:
    """Initialize fabric (CORE/DIST) device for management by CNaaS-NMS.
    Args:
        device_id: Device to select for initialization
        new_hostname: Hostname to configure on this device
        device_type: String representing DeviceType
        neighbors: Optional list of hostnames of peer devices
        job_id: job_id provided by scheduler when adding job
        scheduled_by: Username from JWT.
    Returns:
        Nornir result object
    Raises:
        DeviceStateException
        ValueError
    """
    logger = get_logger()
    # Default changed from a mutable [] to None to avoid the shared mutable
    # default argument pitfall; callers passing nothing behave the same.
    if neighbors is None:
        neighbors = []
    if DeviceType.has_name(device_type):
        devtype = DeviceType[device_type]
    else:
        raise ValueError("Invalid 'device_type' provided")
    if devtype not in [DeviceType.CORE, DeviceType.DIST]:
        raise ValueError("Init fabric device requires device type DIST or CORE")
    with sqla_session() as session:
        dev = pre_init_checks(session, device_id)
        # Test update of linknets using LLDP data
        linknets = update_linknets(
            session, dev.hostname, devtype, ztp_hostname=new_hostname, dry_run=True)
        # Validate neighbors and their sync state before committing anything;
        # any exception here propagates to the caller unchanged.
        verified_neighbors = pre_init_check_neighbors(
            session, dev, devtype, linknets, neighbors)
        logger.debug("Found valid neighbors for INIT of {}: {}".format(
            new_hostname, ", ".join(verified_neighbors)
        ))
        check_neighbor_sync(session, verified_neighbors)
        dev.device_type = devtype
        session.commit()
        # If neighbor check works, commit new linknets
        # This will also mark neighbors as unsynced
        linknets = update_linknets(
            session, dev.hostname, devtype, ztp_hostname=new_hostname, dry_run=False)
        logger.debug("New linknets for INIT of {} created: {}".format(
            new_hostname, linknets
        ))
        # Select and reserve a new management and infra IP for the device
        ReservedIP.clean_reservations(session, device=dev)
        session.commit()
        mgmt_ip = cnaas_nms.confpush.underlay.find_free_mgmt_lo_ip(session)
        infra_ip = cnaas_nms.confpush.underlay.find_free_infra_ip(session)
        reserved_ip = ReservedIP(device=dev, ip=mgmt_ip)
        session.add(reserved_ip)
        dev.infra_ip = infra_ip
        session.commit()
        mgmt_variables = {
            'mgmt_ipif': str(IPv4Interface('{}/32'.format(mgmt_ip))),
            'mgmt_ip': str(mgmt_ip),
            'mgmt_prefixlen': 32,
            'infra_ipif': str(IPv4Interface('{}/32'.format(infra_ip))),
            'infra_ip': str(infra_ip),
        }
        device_variables = populate_device_vars(session, dev, new_hostname, devtype)
        device_variables = {
            **device_variables,
            **mgmt_variables
        }
        # Update device state
        dev.hostname = new_hostname
        session.commit()
        hostname = dev.hostname
    nr = cnaas_nms.confpush.nornir_helper.cnaas_init()
    nr_filtered = nr.filter(name=hostname)
    # step2. push management config
    nrresult = nr_filtered.run(task=push_base_management,
                               device_variables=device_variables,
                               devtype=devtype,
                               job_id=job_id)
    with sqla_session() as session:
        dev = session.query(Device).filter(Device.id == device_id).one()
        dev.management_ip = mgmt_ip
        dev.state = DeviceState.INIT
        # Remove the reserved IP since it's now saved in the device database instead
        reserved_ip = session.query(ReservedIP).filter(ReservedIP.device == dev).one_or_none()
        if reserved_ip:
            session.delete(reserved_ip)
    # Plugin hook, allocated IP
    try:
        pmh = PluginManagerHandler()
        pmh.pm.hook.allocated_ipv4(vrf='mgmt', ipv4_address=str(mgmt_ip),
                                   ipv4_network=None,
                                   hostname=hostname
                                   )
    except Exception as e:
        # Fixed: the original format string had no placeholder, so the
        # exception text was silently dropped from the log message.
        logger.exception("Error while running plugin hooks for allocated_ipv4: {}".format(str(e)))
    # step3. resync neighbors
    scheduler = Scheduler()
    sync_nei_job_id = scheduler.add_onetime_job(
        'cnaas_nms.confpush.sync_devices:sync_devices',
        when=1,
        scheduled_by=scheduled_by,
        kwargs={'hostnames': verified_neighbors, 'dry_run': False})
    logger.info(f"Scheduled job {sync_nei_job_id} to resynchronize neighbors")
    # step4. register apscheduler job that continues steps
    scheduler = Scheduler()
    next_job_id = scheduler.add_onetime_job(
        'cnaas_nms.confpush.init_device:init_device_step2',
        when=60,
        scheduled_by=scheduled_by,
        kwargs={'device_id': device_id, 'iteration': 1})
    logger.info("Init step 2 for {} scheduled as job # {}".format(
        new_hostname, next_job_id
    ))
    return NornirJobResult(
        nrresult=nrresult,
        next_job_id=next_job_id
    )
def architecture_is_32bit(arch):
    """
    Check if the architecture specified in *arch* is 32-bit.

    :param str arch: The value to check.
    :rtype: bool
    """
    # Membership testing already yields a bool; the extra bool() cast was redundant.
    return arch.lower() in ('i386', 'i686', 'x86')
def solve_captcha():
    """request the captcha solving from the website 2captcha.com

    Submits the reCAPTCHA identified by DATA_SITEKEY to the 2captcha API
    (authenticated with CAP_KEY) and polls every 5 seconds until a token
    is available.

    :return: the solved captcha token, or the string 'MISSING_KEY' when a
        key is empty or the submission request is rejected.
    """
    # Uses the API Key stored in the .env file (If you are not the developer you need to insert it)
    load_dotenv()
    data_sitekey = getenv('DATA_SITEKEY')
    cap_key = getenv('CAP_KEY')
    # NOTE(review): getenv returns None (not '') for unset variables, so a
    # completely missing key slips past this check — confirm the .env file
    # always defines both entries.
    if cap_key == '' or data_sitekey == '':
        return 'MISSING_KEY'
    url = 'https://2captcha.com/in.php?' \
          'key=' + cap_key + \
          '&method=userrecaptcha' \
          '&pageurl=https://www.inipec.gov.it/cerca-pec/-/pecs/companies' \
          '&googlekey=' + data_sitekey
    # the site will return the id of the captcha element
    response = request.urlopen(url).read().decode("ascii")
    if response[0:5] == 'ERROR':
        return 'MISSING_KEY'
    captcha_id = response.split("|")[1]
    # Actual solving of the captcha
    url = 'https://2captcha.com/res.php?' \
          'key=' + cap_key + \
          '&action=get' \
          '&id=' + captcha_id
    token = request.urlopen(url).read().decode('ascii')
    # The computing will take a while so I request the result every 5 second
    # NOTE(review): this loop has no timeout and polls forever if the captcha
    # is never solved — consider a retry cap.
    while token == 'CAPCHA_NOT_READY':
        sleep(5)
        # print("still waiting") use this to be sure that it is still waiting for the answer
        token = request.urlopen(url).read().decode('ascii')
    print('Captcha token obtained: ' + token[0:20] + '...')
    return token.split("|")[1]
def StandardMajScale(frequency):
    """Build a major scale starting at *frequency*.

    The chromatic scale for the given root is generated with
    ``Create12TETChromatic`` and the eight major-scale degrees are
    picked out of it.

    :param frequency: root frequency of the scale
    :return: list of 8 frequencies forming the major scale
    """
    chromatic = Create12TETChromatic(frequency)
    major_steps = (0, 2, 4, 5, 7, 9, 11, 12)
    return [chromatic[step] for step in major_steps]
def interpolate_observing_conditions(
    timestamps: np.ndarray,
    df: pd.DataFrame,
    parameter_key: str,
) -> np.ndarray:
    """
    Take the values of the observing conditions in the data frame
    ``df`` and interpolate them temporally so that we get values for
    the timestamp of each frame.

    The interpolation procedure is based on Cubic splines.
    See https://stats.stackexchange.com/a/511394 for the original idea.

    Args:
        timestamps: A 1D numpy array of floats, containing the UTC
            timestamps of the frames in the stack.
        df: A data frame containing the result from querying one of
            the ESO archives (e.g., :func:`query_meteo`).
        parameter_key: The key under which a parameter is available from
            the respective archive. (See also :func:`query_archive`).

    Returns:
        A 1D numpy array with shape `(n_frames, )` which contains an
        interpolated value of the target parameter for every frame.
    """
    # Define shortcuts
    avg = df[parameter_key].values
    x = df['timestamp'].values
    dx = x[1] - x[0]

    # Remove NaNs, because they break the spline interpolation
    nan_idx = np.isnan(avg)
    avg = avg[~nan_idx]
    x = x[~nan_idx]

    # Compute y, which is essentially the cumulative sum of the parameter.
    # Vectorized form of the recurrence y[i] = y[i-1] + avg[i-1] * (x[i] - x[i-1]);
    # np.cumsum accumulates left-to-right, so the result is identical to the loop.
    y = np.concatenate(([0.0], np.cumsum(avg[:-1] * np.diff(x))))

    # Set up an interpolation using splines. We use the first derivative
    # here, because the cumulative sum above is basically an integral.
    interpolator = CubicSpline(x, y).derivative(1)

    # Evaluate the interpolator at the time of each frame. The additional
    # "+ dx" seems necessary based on visual comparison of the original
    # and the interpolated time series?
    return np.asarray(interpolator(timestamps + dx))
def _mn_min_ ( self ,
               maxcalls = 5000 ,
               tolerance = 0.01 ,
               method = 'MIGRADE' ) :
    """Perform the actual MINUIT minimization:
    >>> m = ... #
    >>> m.fit() ## run migrade!
    >>> m.migrade () ## ditto
    >>> m.fit ( method = 'MIN' )

    :param maxcalls:  maximum number of function calls allowed
    :param tolerance: convergence tolerance passed to the minimizer
    :param method:    MINUIT command to execute (default 'MIGRADE')
    :return: whatever ``_mn_exec_`` returns for the executed command
    """
    #
    # Delegate to the generic MINUIT command executor with the chosen
    # minimizer command, call budget and tolerance.
    return _mn_exec_ ( self , method , maxcalls , tolerance )
def old_func4(self, x):
    """Return *x* unchanged.

    Kept as a simple pass-through; the argument is echoed back as-is.
    """
    result = x
    return result
def sensible_pname(egg_name):
    """Guess Debian package name from Egg name."""
    prefix = 'python-'
    # Normalize: PEP-503-style safe name with underscores turned into dashes.
    normalized = safe_name(egg_name).replace('_', '-')
    # Avoid a doubled "python-python-" prefix.
    if normalized.startswith(prefix):
        normalized = normalized[len(prefix):]
    return prefix + normalized.lower()
def shuffle_split_data(X, y):
    """Shuffle the data and split it into 70% training and 30% testing
    subsets, returning (X_train, y_train, X_test, y_test)."""
    # Fixed random_state keeps the split reproducible across runs.
    split = crossval.train_test_split(X, y, test_size=0.30, random_state=101)
    X_train, X_test, y_train, y_test = split
    return X_train, y_train, X_test, y_test
import copy
def get_agent_params():
    """Gets parameters passed to the agent via kernel cmdline or vmedia.

    Parameters can be passed using either the kernel commandline or through
    virtual media. If boot_method is vmedia, merge params provided via vmedia
    with those read from the kernel command line.

    Although it should never happen, if a variable is both set by vmedia and
    kernel command line, the setting in vmedia will take precedence.

    :returns: a dict of potential configuration parameters for the agent
    """
    # Serve from the cache when possible, so /proc and the virtual media
    # floppy are not re-read on every call.
    params = _get_cached_params()
    if not params:
        params = _read_params_from_file('/proc/cmdline')
        # If the node booted over virtual media, the parameters are passed
        # in a text file within the virtual media floppy.
        if params.get('boot_method') == 'vmedia':
            # Virtual-media values override kernel-cmdline ones.
            params.update(_get_vmedia_params())
        # Cache the parameters so that it can be used later on.
        _set_cached_params(params)
    # Warn about any deprecated parameter names still in use.
    deprecated_params = {'lldp-timeout': 'ipa-lldp-timeout'}
    for old_param, new_param in deprecated_params.items():
        if params.get(old_param) is not None:
            LOG.warning("The parameter '%s' has been deprecated. Please "
                        "use %s instead.", old_param, new_param)
    # Deep copy so callers cannot mutate the cached dict.
    return copy.deepcopy(params)
def wrap_maya_ui(mayaname):
    """Given the name of a Maya UI element of any type,
    return the corresponding QWidget or QAction.
    If the object does not exist, returns None

    :param mayaname: the maya ui element
    :type mayaname: str
    :returns: the wraped object
    :rtype: QObject | None
    :raises: None
    """
    # Try each lookup in turn: control first, then layout, then menu item.
    finders = (apiUI.MQtUtil.findControl,
               apiUI.MQtUtil.findLayout,
               apiUI.MQtUtil.findMenuItem)
    for find in finders:
        ptr = find(mayaname)
        if ptr is not None:
            return wrap(long(ptr))
def FloatSpin(parent, value=0, action=None, tooltip=None,
              size=(100, -1), digits=1, increment=1, **kws):
    """FloatSpin with action and tooltip"""
    # Normalize None to 0 so the widget always gets a numeric start value.
    initial = 0 if value is None else value
    ctrl = fspin.FloatSpin(parent, -1, size=size, value=initial,
                           digits=digits, increment=increment, **kws)
    if action is not None:
        ctrl.Bind(fspin.EVT_FLOATSPIN, action)
    if tooltip is not None:
        ctrl.SetToolTip(tooltip)
    return ctrl
def generate_config(context):
    """ Entry point for the deployment resources. """
    project_id = context.env['project']
    # Fall back to the deployment name when no explicit bucket name is given.
    bucket_name = context.properties.get('name') or context.env['name']

    # Base bucket resource; optional settings are merged in below.
    bucket_props = {
        'project': project_id,
        'name': bucket_name
    }
    for prop in ('location', 'versioning', 'storageClass', 'predefinedAcl',
                 'predefinedDefaultObjectAcl', 'logging', 'lifecycle',
                 'labels', 'website'):
        if prop in context.properties:
            bucket_props[prop] = context.properties[prop]

    resources = [
        {
            'name': bucket_name,
            'type': 'storage.v1.bucket',
            'properties': bucket_props
        }
    ]

    # If IAM policy bindings are defined then those bindings need to be applied
    bindings = context.properties.get('bindings', [])
    if bindings:
        resources.append({
            'name': bucket_name + '-iampolicy',
            'action': 'gcp-types/storage-v1:storage.buckets.setIamPolicy',
            'properties': {
                'bucket': '$(ref.' + bucket_name + '.name)',
                'project': project_id,
                'bindings': bindings
            }
        })

    return {
        'resources': resources,
        'outputs': [
            {
                'name': 'storageBucketSelfLink',
                'value': '$(ref.{}.selfLink)'.format(bucket_name)
            },
            {
                'name': 'storageBucketURL',
                'value': 'gs://' + bucket_name + '/'
            }
        ]
    }
def get_vaccine_admin_summary():
    """Returns DataFrame about COVID-19 vaccine administration in Italy (summary version)

    Parameters
    ----------
    None

    Raises
    ------
    ItaCovidLibConnectionError
        Raised when there are issues with Internet connection.

    Returns
    -------
    pandas.core.frame.DataFrame
        Pandas DataFrame with requested data.

    DataFrame Columns
    -----------------
    date : datetime (index)
        Date of administration
    region_code : str
        Region code
    total : int64
        Total amount of doses
    males : int64
        Number of male individuals who have been given a vaccine dose
    females : int64
        Number of female individuals who have been given a vaccine dose
    first_dose : int64
        Number of first doses (excluding previously infected individuals)
    second_dose : int64
        Number of second doses
    previously_infected : int64
        Number of vaccine administrations to individuals who have already been infected by SARS-CoV-2 between 3 and 6 months before and as such completing the vaccination cycle with just one dose
    extra_dose : int64
        Number of extra doses administered to individuals requiring it
    booster_dose : int64
        Number of booster doses administered to individuals requiring it
    NUTS1_code : str
        European classification of territorial units NUTS: level NUTS1
    NUTS2_code : str
        European classification of territorial units NUTS: level NUTS2
    ISTAT_region_code : int64
        ISTAT region code
    region : str
        Official region name

    See Also
    --------
    get_vaccine_admin : a complete version of this function with more data"""
    # Fetch the official open-data CSV from the italia/covid19-opendata-vaccini repo.
    data = icl_b._get("https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/master/dati/somministrazioni-vaccini-summary-latest.csv")
    if data is not None:
        # column names must be translated from Italian
        data.rename(columns={"data_somministrazione":"date","area":"region_code","totale":"total","sesso_maschile":"males","sesso_femminile":"females","prima_dose":"first_dose","seconda_dose":"second_dose","pregressa_infezione":"previously_infected","dose_aggiuntiva":"extra_dose","dose_booster":"booster_dose","codice_NUTS1":"NUTS1_code","codice_NUTS2":"NUTS2_code","codice_regione_ISTAT":"ISTAT_region_code","nome_area":"region"}, inplace=True)
        # dates in column date must be parsed into datetime objects
        data["date"] = pd.to_datetime(data["date"])
        # for proper indexing
        data.sort_values(by="date", inplace=True)
        data.set_index("date", inplace=True)
    # NOTE(review): when icl_b._get returns None this function also returns
    # None, while the docstring only mentions ItaCovidLibConnectionError —
    # confirm which behavior is intended.
    return data
from typing import IO
def _get_atlassian_plugin_xml_from_jar_bytes(jar_bytes: IO[bytes]) -> str:
"""Opens the jar on the provided path and tries to find the
atlassian-plugin.xml in this file
Args:
path (pathlib.Param): the path to the jar file
Returns:
str: the content of atlassian_plugin.xml
Raises:
FileNotFoundError: If the file of path is not found
zipfile.BadZipFile: If the provided file of path is not a zip file
KeyError: If no atlassian_plugin.xml is existing inside the zip/jar
"""
with ZipFile(jar_bytes) as jar:
with jar.open("atlassian-plugin.xml") as atlassian_plugin_xml:
return atlassian_plugin_xml.read() | 550120b6fad9f70dda6f101d5edbb930d1826590 | 3,632,936 |
def get_objects(si, args):
    """
    Return a dict containing the necessary objects for deployment.
    """
    # Datacenter: use the named one when requested, otherwise the first found.
    datacenters = si.content.rootFolder.childEntity
    if args.datacenter_name:
        datacenter = get_obj_in_list(args.datacenter_name, datacenters)
    else:
        datacenter = datacenters[0]

    # Datastore: named, else first available; abort when none exist.
    datastores = datacenter.datastoreFolder.childEntity
    if args.datastore_name:
        datastore = get_obj_in_list(args.datastore_name, datastores)
    elif len(datastores) > 0:
        datastore = datastores[0]
    else:
        print("No datastores found in DC (%s)." % datacenter.name)
        exit(1)

    # Cluster: named, else first available; abort when none exist.
    clusters = datacenter.hostFolder.childEntity
    if args.cluster_name:
        cluster = get_obj_in_list(args.cluster_name, clusters)
    elif len(clusters) > 0:
        cluster = clusters[0]
    else:
        print("No clusters found in DC (%s)." % datacenter.name)
        exit(1)

    # The resource pool comes straight from the selected cluster.
    return {"datacenter": datacenter,
            "datastore": datastore,
            "resource pool": cluster.resourcePool}
def isPulledMayaReference(dagPath):
    """
    Verifies if the DAG path refers to a pulled prim that is a Maya reference.
    """
    # Only the prim component (4th element) of the pulled info is relevant.
    prim = getPulledInfo(dagPath)[3]
    return prim and prim.GetTypeName() == 'MayaReference'
import re
def _read_record7(fid, key1, key2, line, data):
    """
    Saves metadata to ``data.stations[key]`` and ``data.recording[key]`` that
    is used to preallocate arrays for data recording for ``fort.7#`` type
    ADCIRC output files

    :param fid: :class:``file`` object
    :param string key1: ADCIRC Output File Type sans ``.``
    :param string key2: ADCIRC Output File Type sans ``.``
    :param line: array of parameters read from ``fort.15`` file
    :type line: :class:``numpy.ndarray``
    :param data: object to store mesh specific data
    :type data: :class:``~polyadcirc.run_framework.domain``
    :rtype: string
    :returns: station type description
    """
    # Parse the four control values: output flag, start/stop (days), spool interval.
    nout, touts, toutf, nspool = np.fromstring(line[0].strip(), sep=' ')
    # Clamp the recording window to the simulation window.
    touts = max(touts, data.time.statim)
    toutf = min(toutf, data.time.rnday+data.time.statim)
    description = None
    if nout != 0 and nspool != 0:
        # observations = window length in seconds / timestep / spool interval
        total_obs = int((toutf - touts) * 24.0 * 60 * 60 / data.time.dt/ nspool)
    else:
        total_obs = 0
    if filetype[key1][0]:
        # Station-based output: read the station count and each station's
        # coordinates from the following lines.
        line = fid.readline().partition('!')
        meas_locs = int(line[0].strip())
        stations = []
        description = line[-1]
        # NOTE(review): xrange and np.fromstring are Python-2 / legacy-NumPy
        # constructs — confirm the intended runtime before modernizing.
        for i in xrange(meas_locs):
            line = fid.readline()
            line = line.partition('!')
            # Extract the two coordinates (possibly signed floats).
            line = re.findall(r"[-*\d\.\d]+", line[0].strip())
            stations.append(basic.location(float(line[0]),
                                           float(line[-1])))
        data.stations[key1] = stations
        data.stations[key2] = stations
    else:
        # Global output: one record per mesh node.
        meas_locs = data.node_num
    data.recording[key1] = (meas_locs, total_obs, filetype[key1][1])
    data.recording[key2] = (meas_locs, total_obs, filetype[key2][1])
    return description
def tw_mock():
    """Returns a mock terminal writer"""

    class TWMock:
        # Sentinel tagging entries recorded through write().
        WRITE = object()

        fullwidth = 80

        def __init__(self):
            self.lines = []
            self.is_writing = False

        def sep(self, sep, line=None):
            # Separators are stored as (separator, title) tuples.
            self.lines.append((sep, line))

        def write(self, msg, **kw):
            self.lines.append((self.WRITE, msg))

        def line(self, line, **kw):
            self.lines.append(line)

        def _write_source(self, lines, indents=()):
            if not indents:
                indents = [""] * len(lines)
            for indent, line in zip(indents, lines):
                self.line(indent + line)

        def markup(self, text, **kw):
            # Markup is a no-op in the mock; text passes through unchanged.
            return text

        def get_write_msg(self, idx):
            flag, msg = self.lines[idx]
            assert flag == self.WRITE
            return msg

    return TWMock()
import sqlite3
def sql_get_user(mitarbeiter_id):
    """
    SQL module for compiling user information. Name, Surname [and Mail Address]

    :param mitarbeiter_id: primary key of the mitarbeiter row to look up
    :return: "<name> <nachname>" for the matching row, or None when the id
        does not exist (the previous version raised NameError in that case)
    """
    conn = sqlite3.connect(db)
    try:
        c = conn.cursor()
        c.execute("SELECT name, nachname FROM mitarbeiter WHERE id_mitarbeiter=?",
                  (mitarbeiter_id,))
        mailuser = c.fetchall()
        c.close()
    finally:
        # The original leaked the connection; always release it.
        conn.close()
    if not mailuser:
        return None
    # Keep the original semantics: the last fetched row wins.
    name, nachname = mailuser[-1]
    return name + " " + nachname
def matsubtraction(A, B):
    """
    Subtracts matrix B from matrix A and returns difference

    Unlike the previous version, the inputs are left untouched (``A`` used
    to be overwritten in place) and an empty matrix no longer raises
    IndexError.

    :param A: The first matrix (list of equal-length rows)
    :param B: The second matrix, same dimensions as ``A``
    :return: new matrix ``A - B``, or the string "Subtraction not possible"
        when the dimensions differ
    """
    # Dimension check covers row count and every row length (also guards
    # ragged and empty matrices).
    if len(A) != len(B) or any(len(ra) != len(rb) for ra, rb in zip(A, B)):
        return "Subtraction not possible"
    return [[a - b for a, b in zip(row_a, row_b)]
            for row_a, row_b in zip(A, B)]
from typing import OrderedDict
def order_keys(order):
    """
    Order keys for JSON readability when not using json_log=True
    """
    def processor(logger, method_name, event_dict):
        # Only ordered mappings can be reordered; pass anything else through.
        if not isinstance(event_dict, OrderedDict):
            return event_dict
        # Walk the wanted order back-to-front so the first entry of
        # ``order`` ends up at the very front of the mapping.
        for wanted in reversed(order):
            if wanted in event_dict:
                event_dict.move_to_end(wanted, last=False)
        return event_dict
    return processor
import os
def get_backend():
    """
    Returns the currently used backend. Default is tensorflow unless the
    VXM_BACKEND environment variable is set to 'pytorch'.
    """
    backend = os.environ.get('VXM_BACKEND')
    if backend == 'pytorch':
        return 'pytorch'
    # Any other value (or no value at all) means the default backend.
    return 'tensorflow'
def _combine_concat_plans(plans, concat_axis: int):
    """
    Combine multiple concatenation plans into one.

    existing_plan is updated in-place.

    Yields ``(placement, [unit, ...])`` pairs describing how the join units
    of all input plans line up along the concatenation axis.
    """
    if len(plans) == 1:
        # Single plan: pass entries through, wrapping each unit in a list
        # so the output shape matches the multi-plan branches.
        for p in plans[0]:
            yield p[0], [p[1]]
    elif concat_axis == 0:
        # Stacking along axis 0: shift each plan's placements by the total
        # length of the plans that came before it.
        offset = 0
        for plan in plans:
            last_plc = None
            for plc, unit in plan:
                yield plc.add(offset), [unit]
                last_plc = plc
            if last_plc is not None:
                offset += last_plc.as_slice.stop
    else:
        # singleton list so we can modify it as a side-effect within _next_or_none
        num_ended = [0]
        def _next_or_none(seq):
            retval = next(seq, None)
            if retval is None:
                num_ended[0] += 1
            return retval
        plans = list(map(iter, plans))
        next_items = list(map(_next_or_none, plans))
        while num_ended[0] != len(next_items):
            if num_ended[0] > 0:
                # Some plans ran out while others still have units left.
                raise ValueError("Plan shapes are not aligned")
            placements, units = zip(*next_items)
            lengths = list(map(len, placements))
            min_len, max_len = min(lengths), max(lengths)
            if min_len == max_len:
                # All current units span the same range: emit them together.
                yield placements[0], units
                next_items[:] = map(_next_or_none, plans)
            else:
                # Spans differ: emit the shortest common prefix now and trim
                # the longer units so their remainder is revisited next round.
                yielded_placement = None
                yielded_units = [None] * len(next_items)
                for i, (plc, unit) in enumerate(next_items):
                    yielded_units[i] = unit
                    if len(plc) > min_len:
                        # _trim_join_unit updates unit in place, so only
                        # placement needs to be sliced to skip min_len.
                        next_items[i] = (plc[min_len:], _trim_join_unit(unit, min_len))
                    else:
                        yielded_placement = plc
                        next_items[i] = _next_or_none(plans[i])
                yield yielded_placement, yielded_units
def burn(lower_rgb, upper_rgb):
    """Apply burn blending mode of a layer on an image.

    The lower layer is darkened based on the upper layer; the result is
    clamped at zero from below.
    """
    # Epsilon keeps a fully-dark lower layer from producing exactly 1/upper.
    eps = np.finfo(np.float64).eps
    burned = 1.0 - (((1.0 + eps) - lower_rgb) / upper_rgb)
    return np.maximum(burned, 0.0)
from typing import Callable
from re import T
# NOTE(review): ``T`` is imported from ``re`` (the TEMPLATE flag) yet used as
# a type variable in the annotation below — likely meant typing.TypeVar; confirm.
import inspect
def paramCheck(function: Callable[..., T], allow_none: bool = True) -> Callable[..., T]:
    """
    Return a decorator that performs runtime checks on the input types.

    :param function: function to be checked against its typing annotations
    :param allow_none: whether None values are allowed
    :return: wrapped function
    """
    @wraps(function)
    def check(*arguments, **kwargs):
        # Reflect on the wrapped function's signature: defaults, annotations
        # and the positional parameter names.
        default_values = inspect.getfullargspec(function).defaults
        annotations = inspect.getfullargspec(function).annotations
        # Positional parameters without default values (defaults always
        # belong to the trailing parameters of the argspec).
        _args = (
            inspect.getfullargspec(function).args[: -len(default_values)]
            if default_values
            else inspect.getfullargspec(function).args
        )
        non_default_args = [
            arg for arg in _args if (arg in annotations.keys() and arg != "self")
        ]
        default_args = (
            inspect.getfullargspec(function).args[-len(default_values) :]
            if default_values
            else []
        )
        # Map every parameter name to its expected type and default value.
        # NOTE(review): ``union`` is an external helper that merges two dicts —
        # confirm its precedence rules match what is intended here.
        arg_dict = union(
            {
                argument: {"type": annotations[argument], "value": None}
                for argument in non_default_args
            },
            {
                argument: {
                    "type": annotations.get(argument)
                    if annotations.get(argument)
                    else type(default_values[index]),
                    "value": default_values[index],
                }
                for index, argument in enumerate(default_args)
            },
        )
        NoneType = type(None)
        # Validate each actual argument (positional or keyword) against the
        # annotated type; fall back to the default when it was not passed.
        for index, arg in enumerate(inspect.getfullargspec(function).args):
            if arg != "self":
                argIn = (
                    arguments[index]
                    if index < len(arguments)
                    else kwargs.get(arg, arg_dict[arg]["value"])
                )
                if argIn is None:
                    if allow_none is False:
                        raise ValueError(f"{arg} cannot be None")
                else:
                    if (not isinstance(argIn, arg_dict[arg]["type"])) and (
                        arg_dict[arg]["type"] != NoneType
                    ):
                        raise TypeError(
                            f"{arg} parameter must be of type {str(arg_dict[arg]['type'])}"
                        )
        return function(*arguments, **kwargs)
    return check
def fixture_org(context: RBContext, org_id: str) -> RBOrganization:
    """Get RBOrganization.

    Thin factory wrapper: builds an RBOrganization handle from the given
    context and organization id.
    """
    return RBOrganization(context, org_id)
import csv
def load_proxies_from_csv(path_to_list):
    """
    Load proxies from a CSV file into a list.

    Input: path to a CSV file describing proxies with the fields
    'ip', 'port' and 'protocol'.
    Output: list of proxies stored as named tuples.
    """
    Proxy = namedtuple('Proxy', ['ip', 'port', 'protocol'])
    proxies = []
    with open(path_to_list, 'r') as csv_file:
        for record in csv.DictReader(csv_file):
            proxies.append(Proxy(record['ip'], record['port'], record['protocol']))
    return proxies
def mpls_label_group_id(sub_type, label):
    """
    MPLS Label Group Id

    sub_type:
    - 1: L2 VPN Label
    - 2: L3 VPN Label
    - 3: Tunnel Label 1
    - 4: Tunnel Label 2
    - 5: Swap Label
    """
    # Layout: 0x9 type nibble | 4-bit sub_type | 24-bit MPLS label.
    base = 0x90000000
    sub_type_bits = (sub_type << 24) & 0x0f000000
    label_bits = label & 0x00ffffff
    return base + sub_type_bits + label_bits
import logging
def compare_results(out_dict, known_problems_dict, compare_warnings):
    """Compare the number of problems and warnings found with the allowed
    number"""
    ret = 0
    for key, allowed in known_problems_dict.items():
        try:
            found = out_dict[key]
            if found['problems'] > allowed['problems']:
                logging.info("More problems found than expected %d > %d",
                             found['problems'],
                             allowed['problems'])
                ret = 1
            if compare_warnings and (found['warnings'] >
                                     allowed['warnings']):
                logging.info("More warnings found than expected %d > %d",
                             found['warnings'],
                             allowed['warnings'])
                ret = 1
        except KeyError:
            # Either the key is missing from out_dict or a counter is absent.
            logging.info("Expected key %s not found in dictionary %s %s", key,
                         str(known_problems_dict), str(out_dict))
            ret = 1
    logging.info("Results: " + str(out_dict))
    return ret
def compute_accuracy(ground_truth, predictions, display=False, mode='per_char'):
    """
    Computes accuracy
    :param ground_truth: sequence of label sequences
    :param predictions: sequence of predicted sequences, aligned by index
        with ``ground_truth``
    :param display: Whether to print values to stdout
    :param mode: if 'per_char' is selected then
                 single_label_accuracy = correct_predicted_char_nums_of_single_sample / single_label_char_nums
                 avg_label_accuracy = sum(single_label_accuracy) / label_nums
                 if 'full_sequence' is selected then
                 single_label_accuracy = 1 if the prediction result is exactly the same as label else 0
                 avg_label_accuracy = sum(single_label_accuracy) / label_nums
    :return: avg_label_accuracy
    :raises NotImplementedError: for any other ``mode`` value
    """
    if mode == 'per_char':

        accuracy = []
        for index, label in enumerate(ground_truth):
            prediction = predictions[index]
            total_count = len(label)
            correct_count = 0
            try:
                # Count positions where prediction matches the label; a short
                # prediction triggers IndexError and ends the comparison early.
                for i, tmp in enumerate(label):
                    if tmp == prediction[i]:
                        correct_count += 1
            except IndexError:
                continue
            finally:
                # NOTE: this finally block runs even on the IndexError/continue
                # path, so a partially-compared label still contributes its ratio.
                try:
                    accuracy.append(correct_count / total_count)
                except ZeroDivisionError:
                    # Empty label: count as fully correct only when the
                    # prediction is empty too.
                    if len(prediction) == 0:
                        accuracy.append(1)
                    else:
                        accuracy.append(0)

        avg_accuracy = np.mean(np.array(accuracy).astype(np.float32), axis=0)
    elif mode == 'full_sequence':
        try:
            correct_count = 0
            # A sample counts only when the whole sequence matches exactly.
            for index, label in enumerate(ground_truth):
                prediction = predictions[index]
                if prediction == label:
                    correct_count += 1
            avg_accuracy = correct_count / len(ground_truth)
        except ZeroDivisionError:
            # Empty ground truth: perfect score only when there are also no
            # predictions.
            if not predictions:
                avg_accuracy = 1
            else:
                avg_accuracy = 0
    else:
        raise NotImplementedError('Other accuracy compute mode has not been implemented')

    if display:
        print('Mean accuracy is {:5f}'.format(avg_accuracy))

    return avg_accuracy
import re
def is_decodable(s1):
    """
    try hard to decode the input chemical formula
    useful for recognizing those strings from nist database
    """
    # Strip bond symbols first.
    for bond in ('-', '=', '#'):
        s1 = remove_element(s1, bond)
    # Expand parenthesised functional groups, e.g. (CH3)2 -> CH3CH3.
    if ('(' in s1) and (')' in s1):
        while '(' in s1:
            try:
                # group content and its (optional) repeat count
                fg, N = re.search('\(([A-Za-z0-9]+)\)(\d?)', s1).groups()
                if N == '':
                    s1 = ''.join(s1.split('(%s)' % fg))
                else:
                    s1 = (fg * int(N)).join(s1.split('(%s)%s' % (fg, N)))
            except:  # e.g. O(^3P): group content is not plain alphanumerics
                break
    # Decodable iff the remaining string parses into element symbols.
    try:
        get_symbols(s1)
    except:
        return False
    return True
def ensure_int_vector(I, require_order = False):
    """Checks if the argument can be converted to an array of ints and does that.

    Parameters
    ----------
    I: int or iterable of int
    require_order : bool
        If False (default), an unordered set is accepted. If True, a set is not accepted.

    Returns
    -------
    arr : ndarray(n)
        numpy array with the integers contained in the argument
    """
    # Already an int array: return it untouched.
    if is_int_vector(I):
        return I
    # Scalar int: wrap in a one-element array.
    if is_int(I):
        return np.array([I])
    # Ordered int containers convert directly.
    if is_list_of_int(I):
        return np.array(I)
    if is_tuple_of_int(I):
        return np.array(I)
    # Sets are unordered, so they are only accepted when order is not required.
    if isinstance(I, set):
        if require_order:
            raise TypeError('Argument is an unordered set, but I require an ordered array of integers')
        as_list = list(I)
        if is_list_of_int(as_list):
            return np.array(as_list)
        # Mirrors the original fall-through for sets of non-ints.
        return None
    raise TypeError('Argument is not of a type that is convertible to an array of integers.')
def lgt_to_gt(lgt, la):
    """A method for transforming Local GT and Local Alleles into the true GT

    :param lgt: local genotype call; its two allele indices index into ``la``
    :param la: local-to-global allele index mapping
    :return: the rebuilt call (``hl.call``) with globally-indexed alleles
    """
    return hl.call(la[lgt[0]], la[lgt[1]])
def ascat(scan_nb, scan_points=None):
    """ASCAT make two scans one to the left and one to the right of the
    sub-satellite track.

    :param scan_nb: number of scan lines to generate
    :param scan_points: optional sample indices within a scan
        (defaults to all 42 samples)
    :return: a ScanGeometry with the instrument angles and sample times
    :raises ValueError: when fewer than two scan points are given
    """
    if scan_points is None:
        scan_len = 42  # samples per scan
        scan_points = np.arange(42)
    else:
        scan_len = len(scan_points)
    scan_angle_inner = -25.0  # swath, degrees
    scan_angle_outer = -53.0  # swath, degrees
    scan_rate = 3.74747474747  # single scan, seconds
    if scan_len < 2:
        raise ValueError("Need at least two scan points!")
    sampling_interval = scan_rate / float(np.max(scan_points) + 1)
    # build the Metop/ascat instrument scan line angles
    scanline_angles_one = np.linspace(-np.deg2rad(scan_angle_outer),
                                      -np.deg2rad(scan_angle_inner), 21)
    scanline_angles_two = np.linspace(np.deg2rad(scan_angle_inner),
                                      np.deg2rad(scan_angle_outer), 21)
    scan_angles = np.concatenate(
        [scanline_angles_one, scanline_angles_two])[scan_points]
    inst = np.vstack((scan_angles, np.zeros(scan_len * 1,)))
    # np.int was removed in NumPy 1.24; the builtin int is the drop-in replacement.
    inst = np.tile(inst[:, np.newaxis, :], [1, int(scan_nb), 1])
    # building the corresponding times array
    offset = np.arange(scan_nb) * scan_rate
    times = (np.tile(scan_points * sampling_interval,
                     [int(scan_nb), 1]) + np.expand_dims(offset, 1))
    return ScanGeometry(inst, times)
from typing import Optional
from typing import Mapping
from typing import Any
def ensure_csv(
    key: str,
    *subkeys: str,
    url: str,
    name: Optional[str] = None,
    force: bool = False,
    download_kwargs: Optional[Mapping[str, Any]] = None,
    read_csv_kwargs: Optional[Mapping[str, Any]] = None,
):
    """Download a CSV and open as a dataframe with :mod:`pandas`.

    :param key: The module name
    :param subkeys:
        A sequence of additional strings to join. If none are given,
        returns the directory for this module.
    :param url:
        The URL to download.
    :param name:
        Overrides the name of the file at the end of the URL, if given. Also
        useful for URLs that don't have proper filenames with extensions.
    :param force:
        Should the download be done again, even if the path already exists?
        Defaults to false.
    :param download_kwargs: Keyword arguments to pass through to :func:`pystow.utils.download`.
    :param read_csv_kwargs: Keyword arguments to pass through to :func:`pandas.read_csv`.
    :return: A pandas DataFrame
    :rtype: pandas.DataFrame

    Example usage::

    >>> import pystow
    >>> import pandas as pd
    >>> url = 'https://raw.githubusercontent.com/pykeen/pykeen/master/src/pykeen/datasets/nations/test.txt'
    >>> df: pd.DataFrame = pystow.ensure_csv('pykeen', 'datasets', 'nations', url=url)
    """
    # Resolve (creating it if needed) the module directory, then delegate
    # the download-and-read to the module instance.
    return Module.from_key(key, ensure_exists=True).ensure_csv(
        *subkeys,
        url=url,
        name=name,
        force=force,
        download_kwargs=download_kwargs,
        read_csv_kwargs=read_csv_kwargs,
    )
def energy_scan(sim_func, sim_kwargs, energies, parallel=False):
    """
    This function provides a convenient way to repeat the same simulation for a number of different
    electron beam energies. This can reveal variations in the charge state balance due to
    weakly energy dependent ionisation cross sections or
    even resonant phenomena like dielectronic recombination.

    Parameters
    ----------
    sim_func : callable
        The function handle for the simulation e.g. ebisim.simulation.basic_simulation.
    sim_kwargs : dict
        A dictionary containing all the required and optional parameters of the simulations
        (except for the kinetic electron energy) as key value pairs.
        This is unpacked in the function call.
    energies : list or numpy.array
        A list or array of the energies at which the simulation should be performed.
    parallel : bool, optional
        Determine whether multiple simulations should be run in parallel using pythons
        multiprocessing.pool. This may accelerate the scan when performing a large number of
        simulations.
        By default False.

    Returns
    -------
    ebisim.simulation.EnergyScanResult
        An object providing convenient access to the generated scan data.
    """
    sim_kwargs = sim_kwargs.copy()

    if "e_kin" in sim_kwargs:
        del sim_kwargs["e_kin"]
        warn("sim_kwargs contains a value for e_kin, this item will be ignored.")

    # Copy the nested dict as well: .copy() above is shallow, so writing
    # dense_output into a caller-supplied solver_kwargs would leak the change
    # back to the caller. This also tolerates solver_kwargs=None.
    sim_kwargs["solver_kwargs"] = dict(sim_kwargs.get("solver_kwargs") or {})
    sim_kwargs["solver_kwargs"]["dense_output"] = True  # need dense output for interpolation

    # cast element to Element if necessary
    sim_kwargs["element"] = Element.as_element(sim_kwargs["element"])

    energies = np.array(energies)
    energies.sort()

    proc = _EScanProcessor(sim_func, sim_kwargs)

    if parallel:
        with Pool() as pool:
            results = pool.map(proc, energies)
    else:
        results = list(map(proc, energies))

    return EnergyScanResult(sim_kwargs, energies, results)
import random
def random_walk_memory(world_state, pose, visited):
    """ Returns a random valid neighboring cell that is not recently visited.
    Can return visited cells if there is no other option """
    neighbors = get_orthogonal_nbors(world_state, pose)
    # Prefer cells we have not been to recently (set difference dedups).
    candidates = list(set(neighbors) - set(visited))
    if candidates:
        return random.choice(candidates)
    # Everything nearby was visited recently; fall back to any neighbor.
    return random.choice(neighbors)
def run_file_mask(fmask, fname, fbase=0):
"""extract temporal data from file name
"""
if fbase and fname.startswith(fbase):
fname = fname[fname.index(fbase) + len(fbase) + 1:]
output = {
"year": "".join([x for x,y in zip(fname, fmask) if y == 'Y' and x.isdigit()]),
"month": "".join([x for x,y in zip(fname, fmask) if y == 'M' and x.isdigit()]),
"day": "".join([x for x,y in zip(fname, fmask) if y == 'D' and x.isdigit()])
}
return output | f13bff19ae7b3a3bbef258c7bf5830a6b1114b1a | 3,632,960 |
import torch
def get_spin_interp(zeta: torch.Tensor) -> torch.Tensor:
    """Compute spin interpolation function from fractional polarization `zeta`."""
    p = 4.0 / 3
    # Normalization chosen so that f(0) = 0 and f(+/-1) = 1.
    norm = 1.0 / (2.0 ** p - 2.0)
    up = (1.0 + zeta) ** p
    dn = (1.0 - zeta) ** p
    return (up + dn - 2.0) * norm
import inspect
def get_interpolator(func, varname, df, default_time, docstring):
    """Creates time interpolator with custom signature

    Rewrites the source of the module-level ``time_interpolator`` template:
    the placeholder docstring, the ``timekwarg`` marker and the function
    name are substituted, then the new source is exec'd and the resulting
    function is returned under ``varname``.

    :param func: NOTE(review) — never used in this body; confirm whether it
        can be dropped at the call sites.
    :param varname: name given to the generated interpolator function
    :param df: data frame captured in the generated function's namespace
    :param default_time: default value for the generated ``t`` keyword
    :param docstring: docstring injected into the generated source
    :return: the freshly generated interpolator function
    """
    #extract source code from time_interpolator
    src = inspect.getsource(time_interpolator)
    #create variable-dependent signature
    new_src = (src \
        .format(docstring = docstring)
        .replace('timekwarg', "t = default_time")
        .replace('time_interpolator', varname))
    # default_time = self._instrument.data.index
    # exec the rewritten source in a namespace that carries the values the
    # generated function closes over, then pull the function back out.
    loc = dict(default_time = default_time, df = df)
    exec(new_src, loc)
    return loc[varname]
def inverse_cdf_coupling(logits_1, logits_2):
    """Constructs the matrix for an inverse CDF coupling."""
    dim, = logits_1.shape  # also asserts the logits are one-dimensional
    probs_a = jnp.exp(logits_1)
    probs_b = jnp.exp(logits_2)
    # CDF bin edges for each distribution: [0, c1, c1+c2, ...].
    edges_a = jnp.concatenate([jnp.array([0.]), jnp.cumsum(probs_a)])
    edges_b = jnp.concatenate([jnp.array([0.]), jnp.cumsum(probs_b)])
    # Entry (i, j) is the length of the overlap between CDF bin i of the
    # first distribution and CDF bin j of the second; disjoint bins give 0.
    lower = jnp.maximum(edges_a[:-1, None], edges_b[None, :-1])
    upper = jnp.minimum(edges_a[1:, None], edges_b[None, 1:])
    return jnp.maximum(upper - lower, 0.0)
import os
def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):
    """
    For every entry in all_logdirs,
        1) check if the entry is a real directory and if it is,
           pull data from it;
        2) if not, check to see if the entry is a prefix for a
           real directory, and pull data from that.

    :param all_logdirs: list of directory paths or path prefixes.
    :param legend: optional list of legend titles, one per resolved logdir.
    :param select: optional list of substrings; keep only logdirs containing all.
    :param exclude: optional list of substrings; drop logdirs containing any.
    :return: concatenated list of datasets from get_datasets().
    """
    logdirs = []
    for logdir in all_logdirs:
        if osp.isdir(logdir) and logdir[-1] == '/':
            # Entry is an existing directory (trailing slash required).
            logdirs += [logdir]
        else:
            # Treat the last path component as a prefix and collect every
            # sibling directory whose name contains it.
            basedir = osp.dirname(logdir)
            fulldir = lambda x: osp.join(basedir, x)
            prefix = logdir.split('/')[-1]
            listdir = os.listdir(basedir)
            logdirs += sorted([fulldir(x) for x in listdir if prefix in x])
    """
    Enforce selection rules, which check logdirs for certain substrings.
    Makes it easier to look at graphs from particular ablations, if you
    launch many jobs at once with similar names.
    """
    if select is not None:
        logdirs = [log for log in logdirs if all(x in log for x in select)]
    if exclude is not None:
        logdirs = [log for log in logdirs if all(not (x in log) for x in exclude)]
    # Verify logdirs
    print('Plotting from...\n' + '=' * DIV_LINE_WIDTH + '\n')
    for logdir in logdirs:
        print(logdir)
    print('\n' + '=' * DIV_LINE_WIDTH)
    # Make sure the legend is compatible with the logdirs
    assert not (legend) or (len(legend) == len(logdirs)), \
        "Must give a legend title for each set of experiments."
    # Load data from logdirs
    data = []
    if legend:
        for log, leg in zip(logdirs, legend):
            data += get_datasets(log, leg)
    else:
        for log in logdirs:
            data += get_datasets(log)
    return data
def certificate():
    """ Certificates Controller

    REST controller for HRM certificates. Access is denied for restricted
    HRM session modes; optionally non-admin users only see their own root
    organisation's certificates.
    """
    # mode is None for full HRM access; any other value is a restricted
    # (e.g. personal) mode that must not manage certificates.
    mode = session.s3.hrm.mode
    def prep(r):
        if mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep
    # Optionally filter certificates by the user's root organisation
    # (admins always see everything).
    if settings.get_hrm_filter_certificates() and \
       not auth.s3_has_role(ADMIN):
        s3.filter = auth.filter_by_root_org(s3db.hrm_certificate)
    return s3_rest_controller("hrm", resourcename,
                              rheader=s3db.hrm_rheader,
                              csv_template=("hrm", "certificate"),
                              csv_stylesheet=("hrm", "certificate.xsl"),
                              )
import asyncio
import logging
async def _run_given_tasks_async(tasks, event_loop=None, executor=None):
    """
    Given list of Task objects, this method executes all tasks in the given event loop (or default one)
    and returns list of the results.
    The list of the results are in the same order as the list of the input.
    Ex; If we say we are going to execute
    Task1, Task2 and Task3; and their results are Result1, Result2 and Result3.
    If the input of the task array is; [Task1, Task2, Task3]
    The result of this operation would be; [Result1, Result2, Result3]
    If any of the tasks throws an exception, this method raise the exception to the caller
    Parameters
    ----------
    tasks : list of Task
        The list of tasks that will be executed
    event_loop: EventLoop, optional
        The EventLoop instance that will be used for execution. If None, the
        current event loop is resolved at call time.
    executor: ThreadPoolExecutor
        The Executor that will be used by the EventLoop to execute the input tasks
    Returns
    --------
    results : list of results that is returned by the list of Tasks in order. Raises an exception if the underlying
    task is thrown an exception during its execution
    """
    if event_loop is None:
        # BUG FIX: the signature previously used `event_loop=asyncio.get_event_loop()`,
        # a default evaluated once at import time. That pinned whatever loop existed
        # when the module was imported and broke callers running on a different or
        # newly-created loop. Resolve the loop lazily, per call, instead.
        event_loop = asyncio.get_event_loop()
    async_tasks = []
    results = []
    LOG.debug("Async execution started")
    for task in tasks:
        # loop in all tasks and pass them to the executor
        LOG.debug("Invoking function %s", str(task))
        async_tasks.append(event_loop.run_in_executor(executor, task))
    LOG.debug("Waiting for async results")
    for result in await asyncio.gather(*async_tasks, return_exceptions=True):
        # for each task, wait for them to complete
        if isinstance(result, Exception):
            LOG.debug("Exception raised during the execution")
            # if the result is a type of Exception, raise it back to the caller
            raise result
        results.append(result)
    LOG.debug("Async execution completed")
    # flush all loggers which is printed during async execution
    if logging.root and logging.root.handlers:
        for handler in logging.root.handlers:
            handler.flush()
    return results
def fileno():
    """Return the file number of the current file.

    Delegates to the active input() state, which reports -1 when no file
    is currently opened. Raises RuntimeError when input() is not active.
    """
    if _state:
        return _state.fileno()
    raise RuntimeError("no active input()")
def word_show(vol, guess, store):
    """Build the display string for a hangman-style guess.

    param vol: str, the hidden word
    param guess: str, the letter the user guessed ('' before the first guess)
    param store: str, the previously revealed display string
    return: str, the updated display string
    """
    if guess == '':
        # No guess yet: one dash per hidden letter.
        return '-' * len(vol)
    return ''.join(
        guess if letter == guess else store[pos]
        for pos, letter in enumerate(vol)
    )
def convert_yt_music(input_url: str) -> str:
    """
    Convert a YouTube Music link to a YouTube link.
    YouTube Music videos share the same `v` URL parameter as their YouTube counterparts
    and hence can be processed like YouTube URLs after making changes to the URL
    This function replaces the `music.youtube.com` part of the URL with `youtube.com`
    As an example:
    `input_url`: https://music.youtube.com/watch?v=J7p4bzqLvCw&list=RDAMVMJ7p4bzqLvCw
    Returns: https://youtube.com/watch?v=J7p4bzqLvCw&list=RDAMVMJ7p4bzqLvCw
    """
    parsed = parse.urlparse(input_url)
    # Swap only the network location; every other URL component is preserved.
    return parse.urlunparse(parsed._replace(netloc=YOUTUBE_HOST))
import torch
def get_feature_attributions(args, model_list, data_x_list, data_y_list,
                             col_names, is_mgmc):
    """Get feature attributions.
    Get feature attributions for given list of saved Pipeline models and
    output dataset lists from get_train_test_dataset_list().

    Uses Integrated Gradients and averages the absolute attributions over
    samples, producing one pandas Series (indexed by `col_names`) per model.

    Args:
        args: namespace with at least `num_class` — number of output classes.
        data_x_list: list of per-model input datasets (MGMC datasets or arrays).
        data_y_list: list of per-model one-hot label arrays.
        col_names: feature names used as the index of each returned Series.
        is_mgmc: True when models are MGMC graph models needing list inputs.
    Returns:
        list of pandas Series, one per model, of mean |attribution| per feature.
    """
    list_importances = []
    for k, i in enumerate(model_list):
        print("Calculating feature importances for Model {}".format(k))
        # Get model
        # model = i._final_estimator.best_estimator_.module_
        # NOTE(review): device is hard-coded to the second GPU — confirm this
        # is intentional and available in the target environment.
        device_ = "cuda:1"
        # if "MGMC" in i._final_estimator.best_estimator_.module_.__class__\
        #         .__name__:
        if is_mgmc:
            # MGMC path: the model takes a list input [feat_matrix, *extras].
            model = i._final_estimator.module_
            model.to(device_)
            targets = torch.tensor(data_y_list[k].argmax(1)).to(
                torch.device(device_))
            # Single batch containing the whole dataset (batch size taken
            # from the first dataset's feature matrix).
            data_loader = torch.utils.data.DataLoader(data_x_list[k],
                                                      batch_size=data_x_list[
                                                          0].feat_data.shape[0])
            for batch in data_loader:
                batch[0] = [x.to(device_) for x in batch[0]]
                input_ = batch[0]
            def model_forward(feat_matrix, input_list):
                # Re-assemble the list input expected by the MGMC model and
                # keep only the classification logits (last num_class columns).
                format_input = [feat_matrix] + input_list
                out = model(format_input)
                out = out[0][:, -args.num_class:]
                return out
            attr = IntegratedGradients(model_forward, )
            fa_list = []
            fa_train = attr.attribute(input_[0], target=targets,
                                      additional_forward_args=input_[1:])
            fa_train = torch.abs(fa_train)
            mean_fa = fa_train.mean(0)
            # Drop the trailing label columns appended to the feature matrix.
            mean_fa = mean_fa[:-args.num_class]
        else:
            # Plain tensor-input model selected by grid search.
            model = i._final_estimator.best_estimator_.module_
            model.to(device_)
            attr = IntegratedGradients(model)
            input_ = torch.tensor(data_x_list[k]).to(torch.device(device_))
            targets = torch.tensor(data_y_list[k].argmax(1)).to(
                torch.device(device_))
            baseline = input_ * 0.
            fa_train = attr.attribute(input_, baseline, target=targets)
            fa_train = torch.abs(fa_train)
            mean_fa = fa_train.mean(0)
        mean_fa = mean_fa.cpu().numpy()
        cur_item = pd.Series(mean_fa, index=col_names)
        list_importances.append(cur_item)
    return list_importances
def bspline_fit(x,y,order=3,knots=None,everyn=20,xmin=None,xmax=None,w=None,bkspace=None):
    """ bspline fit to x,y
    Should probably only be called from func_fit

    Parameters
    ----------
    x: ndarray
    y: ndarray
    order: int
        Order (degree) of the spline. Default=3 (cubic)
    knots: ndarray, optional
        Internal knots only. External ones are added by scipy.
        When given, `everyn` and `bkspace` are ignored.
    everyn: int, optional
        Place a knot every `everyn` good pixels (used when `knots` and
        `bkspace` are not given)
    xmin: float, optional
        Minimum value in the array [both must be set to normalize]
    xmax: float, optional
        Maximum value in the array [both must be set to normalize]
    w: ndarray, optional
        weights to be used in the fitting (weights = 1/sigma);
        points with w <= 0 are excluded from the fit
    bkspace: float, optional
        Spacing of breakpoints in units of x (takes precedence over `everyn`)

    Returns
    -------
    tck : tuple
        describes the bspline (as returned by scipy.interpolate.splrep)
    """
    task = 0 # Default of splrep
    # Select the good (positively weighted) points.
    if w is None:
        ngd = x.size
        gd = np.arange(ngd)
        weights = None
    else:
        gd = np.where(w > 0.)[0]
        weights = w[gd]
        ngd = len(gd)
    # Make the knots
    if knots is None:
        if bkspace is not None:
            # Evenly spaced breakpoints covering the span of the good data.
            xrnge = (np.max(x[gd]) - np.min(x[gd]))
            startx = np.min(x[gd])
            nbkpts = max(int(xrnge/bkspace) + 1,2)
            tempbkspace = xrnge/(nbkpts-1)
            knots = np.arange(1, nbkpts-1)*tempbkspace + startx
            # Remove cases where two knots have no data between them
            keep_knots = np.array([True]*len(knots))
            for ii in range(1,len(knots)): # Ugly for loop..
                if not np.any((x[gd] > knots[ii-1]) & (x[gd] < knots[ii])):
                    keep_knots[ii] = False
            knots = knots[keep_knots]
        elif everyn is not None:
            # A knot every good N pixels
            idx_knots = np.arange(everyn//2, ngd-everyn//2, everyn)
            knots = x[gd[idx_knots]]
        else:
            msgs.error("No method specified to generate knots")
    else:
        # User-supplied knots: tell splrep to use them verbatim (task=-1).
        task = -1
    # Generate spline
    try:
        tck = interpolate.splrep(x[gd], y[gd], w=weights, k=order, xb=xmin, xe=xmax, t=knots, task=task)
    except ValueError:
        # Knot problem (usually)
        msgs.warn("Problem in the bspline knot")
        raise ValueError("Crashing out of bspline fitting")
    return tck
def request(s):
    """
    Parse a request line string into a :class:`Request` tuple.

    :param str s: The string containing the request line to parse
    :returns: A :class:`Request` tuple (method, url or None for '*', protocol)
    :raises ValueError: when method, protocol, or URL are missing
    """
    try:
        method, remainder = s.split(' ', 1)
    except ValueError:
        raise ValueError('Request line is missing a space separated method')
    try:
        remainder, protocol = remainder.rsplit(' ', 1)
    except ValueError:
        raise ValueError('Request line is missing a space separated protocol')
    target = remainder.strip()
    if not target:
        raise ValueError('Request line URL cannot be blank')
    # '*' is the special asterisk-form target (e.g. OPTIONS *); no URL then.
    return Request(method, None if target == '*' else url(target), protocol)
def _gradual_sequence(start, end, multiplier=3):
"""Custom nodes number generator
The function gives the list of exponentially increase/decrease
integers from both 'start' and 'end' params, which can be later used as
the number of nodes in each layer.
_gradual_sequence(10, 7000, multiplier=5) gives [50, 250, 1250, 6250],
_gradual_sequence(6000, 10, multiplier=5) gives [1250, 250, 50]
as a return sequence.
Args:
start: lower limit(exclusive)
end: upper limit
Returns:
num_nodes_list: list of integers
or:
reversed(num_nodes_list)
"""
mode = 'incremental'
if end < start:
mode = 'decremental'
start, end = end, start
num_nodes_list = [start*(multiplier**x) for x in range(10)
if start*(multiplier**x) < end]
if mode == 'incremental':
return num_nodes_list
else:
return reversed(num_nodes_list) | 8b42931600cb14b84621f6619ef695f1adee641c | 3,632,973 |
import requests
def get_group_clusters(group_name):
    """
    Returns list of clusters administered by group

    :param group_name: name of the SLATE group to query
    :return: list (decoded JSON response from the SLATE API)
    """
    # Authenticate against the SLATE API with the current session's token.
    access_token = get_user_access_token(session)
    query = {'token': access_token}
    group_clusters = requests.get(
        slate_api_endpoint + '/v1alpha3/groups/' + group_name + '/clusters', params=query)
    # NOTE(review): no HTTP status check — a failed request will surface as a
    # JSON decode error or an error payload; confirm callers handle that.
    group_clusters = group_clusters.json()
    return group_clusters
import array
def Q_continuous_white_noise(dim, dt=1., spectral_density=1.):
    """ Returns the Q matrix for the Discretized Continuous White Noise
    Model. dim may be either 2 or 3, dt is the time step, and
    spectral_density scales the noise.

    Parameters
    ----------
    dim : int (2 or 3)
        dimension for Q, where the final dimension is (dim x dim)
    dt : float, default=1.0
        time step in whatever units your filter is using for time. i.e. the
        amount of time between innovations
    spectral_density : float, default=1.0
        spectral density for the continuous process
    """
    assert dim == 2 or dim == 3
    if dim == 2:
        rows = [[(dt**4)/3, (dt**2)/2],
                [(dt**2)/2, dt]]
        q = array(rows)
    else:
        rows = [[(dt**5)/20, (dt**4)/8, (dt**3)/6],
                [ (dt**4)/8, (dt**3)/3, (dt**2)/2],
                [ (dt**3)/6, (dt**2)/2, dt]]
        q = array(rows, dtype=float)
    # Scale the unit-spectral-density matrix by the requested density.
    return spectral_density * q
import time
def timeit(func):
    """Decorator that reports the wall-clock time a function call takes.

    Prints 'function <name> took <secs> s' after each call and returns the
    wrapped function's result unchanged.
    """
    import functools  # local import: keeps the block self-contained

    # BUG FIX: without functools.wraps the decorated function lost its
    # __name__/__doc__, which also made the printed name wrong ('wrapper').
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        output = func(*args, **kwargs)
        end = time.time()
        print('function {0} took {1:0.3f} s'.format(
            func.__name__, end - start))
        return output
    return wrapper
from typing import Tuple
def make_test_label_and_intensity_images_no_internal_2d() -> Tuple[
    np.ndarray, np.ndarray, np.ndarray, np.ndarray
]:
    """Create 2D test data where label 2 has no internal pixels"""
    labels = np.zeros((40, 40), dtype=int)
    # Three square labels; label 2's 2x2 square is entirely boundary,
    # so it contributes no internal pixels.
    squares = ((1, 10, 20), (2, 25, 27), (3, 32, 39))
    for value, lo, hi in squares:
        labels[lo:hi, lo:hi] = value
    intensity, boundary, internal = _make_test_intensity_image(labels)
    return labels, intensity, boundary, internal
import os
def get_mock_image():
    """
    Return a canned test image (1 band of original NetCDF raster)
    """
    nc_path = os.path.join(
        script_dir,
        'resources/HadGHCND_TXTN_anoms_1950-1960_15052015.nc')
    # Read the whole file as raw bytes.
    with open(nc_path, 'rb') as nc_file:
        return nc_file.read()
from typing import Optional
def get_database_acl(instance_id: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseAclResult:
    """
    Gets information about the RDB instance network Access Control List.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_scaleway as scaleway
    my_acl = scaleway.get_database_acl(instance_id="fr-par/11111111-1111-1111-1111-111111111111")
    ```
    :param str instance_id: The RDB instance ID.
    :param opts: Options controlling how the invoke is executed.
    """
    # Marshal arguments for the provider invoke call.
    __args__ = dict()
    __args__['instanceId'] = instance_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke of the provider data source.
    __ret__ = pulumi.runtime.invoke('scaleway:index/getDatabaseAcl:getDatabaseAcl', __args__, opts=opts, typ=GetDatabaseAclResult).value
    return AwaitableGetDatabaseAclResult(
        acl_rules=__ret__.acl_rules,
        id=__ret__.id,
        instance_id=__ret__.instance_id,
        region=__ret__.region)
import sys
import subprocess
def run_suite(project, suite_name):
    """Run a suite. This is used when running suites from the GUI.

    Spawns `golem run <project> <suite>` as a detached subprocess and
    returns the timestamp that identifies the run.
    """
    script_name = sys.argv[0]
    # When not launched via the 'golem' entry point (e.g. from the web GUI),
    # locate the golem executable explicitly.
    if script_name[-5:] != 'golem' and script_name[-9:] != 'golem.exe':
        if sys.platform == 'win32':
            script_name = 'golem'
        else:
            which_golem = subprocess.run(['which', 'golem'], stdout=subprocess.PIPE)
            script_name = which_golem.stdout.decode('utf-8').strip()
    # The timestamp doubles as the run identifier for report lookup.
    timestamp = utils.get_timestamp()
    param_list = [
        script_name,
        '--golem-dir',
        session.testdir,
        'run',
        project,
        suite_name,
        '--timestamp',
        timestamp
    ]
    # Fire and forget: the suite runs in the background.
    subprocess.Popen(param_list)
    return timestamp
def get_cancellations(es_cfg):
    """Calls external scheduler and returns task cancellations."""
    # Build the request addressed to this scheduler instance.
    request = plugin_pb2.GetCancellationsRequest()
    request.scheduler_id = es_cfg.id
    client = _get_client(es_cfg.address)
    response = client.GetCancellations(request, credentials=_creds())
    return response.cancellations
def removeDuplicates(self, nums):
    """
    Remove duplicates from a sorted list in place.

    :type nums: List[int]
    :rtype: int, the count of unique elements (kept in nums[:count])
    """
    if not nums:
        return 0
    # Two-pointer compaction: `write` marks the last unique slot.
    write = 0
    for value in nums:
        if nums[write] != value:
            write += 1
            nums[write] = value
    return write + 1
def GetUserGender(user_url: str) -> int:
    """Get a user's gender.

    Args:
        user_url (str): URL of the user's profile page

    Returns:
        int: the user's gender — 0 unknown, 1 male, 2 female
    """
    AssertUserUrl(user_url)
    AssertUserStatusNormal(user_url)
    json_obj = GetUserJsonDataApi(user_url)
    result = json_obj["gender"]
    # Some accounts without a gender set report 3 — suspected to be a
    # leftover of the Jianshu platform; treat it as unknown.
    if result == 3:
        result = 0  # 3 also means "gender unknown"
    return result
def do_menu_action(action):
    """Execute menu action!"""
    # Dispatch table mapping menu keywords to their handlers; unknown
    # actions fall back to a no-op returning ''.
    handlers = {
        'help': help,
        'inventory': print_inventory,
        'game': print_game_status,
        'quit': save_game,
    }
    handler = handlers.get(action, lambda: '')
    return handler()
def test_mixed_optimization():
    """
    Checks if variables with mixed constraints are optimized correctly together.
    """
    def get_loss(mean):
        # Quadratic loss pulling x + y + z towards `mean`.
        def loss(x, y, z):
            return (x + y + z - mean) ** 2
        return loss
    # Fix seed
    tf.random.set_seed(42)
    # Create variables, one per constraint type: unconstrained, positive,
    # and bounded to [3, 4].
    v1 = UnconstrainedVariable(tf.random.uniform(shape=()))
    v2 = PositiveVariable(1 + 3 * tf.random.uniform(shape=()))
    v3 = BoundedVariable(3 + tf.random.uniform(shape=()), lower=3, upper=4)
    # Get loss targeting a zero sum (reachable: v1 can go negative).
    loss = get_loss(0.)
    total_loss, converged, _ = ntfo.minimize(loss,
                                             vs=[v1, v2, v3])
    # Test if the optimization converges
    assert converged
    assert_near(total_loss, 0.)
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN
    Implements right "same" padding for even kernel sizes
    Args:
        x: input tensor
        filters: num of filters in pointwise convolution
        prefix: prefix before name
        stride: stride at depthwise conv
        kernel_size: kernel size for depthwise convolution
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & poinwise convs
        epsilon: epsilon to use in BN layer
    Returns:
        output tensor of the depthwise-separable conv + BN stack
    """
    if stride == 1:
        depth_padding = 'same'
    else:
        # Manual symmetric padding so strided convs keep "same"-style output
        # size even with even effective kernel sizes (atrous included).
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'
    # Pre-activation when no activation is used between the two convs.
    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    # 1x1 pointwise convolution mixes channels.
    x = Conv2D(filters, (1, 1), padding='same',
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    return x
from functools import reduce
def dot_prod_numpy(T):
    """Calculate dot product over last two axis of a multi dimensional matrix"""
    # For each entry along axis 0, multiply its matrices (axis 1) together
    # right-to-left: T[n, -1] @ T[n, -2] @ ... @ T[n, 0], matching the
    # original reversed reduction over the domain axis.
    products = []
    for mats in T:
        prod = mats[-1]
        for m in mats[-2::-1]:
            prod = np.dot(prod, m)
        products.append(prod)
    return np.array(products)
def create_pretrain_mask(tokens, mask_cnt, vocab_list):
    """
    Mask subwords for BERT-style masked-LM pretraining (15% of subwords).
    - mask_cnt: masking budget, len(subwords) * 0.15
    - [MASK]: 80% of masking candidate tokens
    - original token: 10% of masking candidate tokens
    - random token: 10% of masking candidate tokens

    Note: `tokens` is modified in place.

    Returns:
        (tokens, mask_idx, mask_label): the token list with masked positions
        replaced, the sorted masked indices, and the original tokens at those
        indices.
    """
    # Group subword indices into whole-word units: a token that does not
    # contain U+2581 (LOWER ONE EIGHTH BLOCK, SentencePiece's word marker)
    # continues the previous word.
    candidate_idx = []
    for i, token in enumerate(tokens):
        if token == '[CLS]' or token == '[SEP]':
            continue
        if 0 < len(candidate_idx) and token.find(u'\u2581') < 0:
            candidate_idx[-1].append(i)
        else:
            candidate_idx.append([i])
    np.random.shuffle(candidate_idx)
    mask_lms = []
    for idx_set in candidate_idx:
        if len(mask_lms) >= mask_cnt:
            break
        # Whole-word masking: skip a word that would exceed the budget.
        if len(mask_lms) + len(idx_set) > mask_cnt:
            continue
        for sub_idx in idx_set:
            # Choose the replacement: [MASK] (80%), keep original (10%),
            # or a random vocabulary token (10%).
            if np.random.uniform() < 0.8:
                masked_token = '[MASK]'
            elif np.random.uniform() < 0.5:
                masked_token = tokens[sub_idx]
            else:
                masked_token = np.random.choice(vocab_list)
            mask_lms.append({'idx': sub_idx, 'label': tokens[sub_idx]})
            tokens[sub_idx] = masked_token
    mask_lms = sorted(mask_lms, key=lambda x: x['idx'])
    mask_idx = [mask_dict['idx'] for mask_dict in mask_lms]
    mask_label = [mask_dict['label'] for mask_dict in mask_lms]
    # BUG FIX: previously returned the undefined name `k_label` (NameError)
    # and left a debug print in place; return mask_label instead.
    return tokens, mask_idx, mask_label
def string2token(t, nl, nt):
    """
    Convert a string into a typed token tuple.

    A token is a tuple (type_code, value, nl, nt) where type_code marks
    the type of the parsed value:
        i - integer
        f - float/real
        c - complex
        s - string
    and, for navigation, nl is the line number the token came from and nt
    the token number within that line.
    """
    # Try integer first.
    try:
        i_val = int(t)
        # Toldiff must treat -0 and 0 as equal; normalise explicitly since a
        # text comparison would not do so.
        if i_val == 0:
            i_val = 0
        return ("i", i_val, nl, nt)
    except ValueError:
        pass
    # Fortran prints double-precision exponents with D/d (e.g. 1.0D+01);
    # map them to 'e' so Python can parse the literal.
    z = t.replace("d", "e").replace("D", "e")
    # Try float.
    try:
        f_val = float(z)
        # Normalise -0.0 to 0.0 for the same text-comparison reason.
        if f_val == 0.0:
            f_val = 0.0
        return ("f", f_val, nl, nt)
    except ValueError:
        pass
    # Try complex. This rarely fires in practice: Fortran prints complex
    # numbers as (1.0,2.0) rather than 1.0+2.0j, which makes it impossible
    # to reliably distinguish a complex from a pair of reals.
    try:
        c_val = complex(z)
        # Normalise signed zeros in both components.
        if c_val.real == 0.0:
            c_val = complex(0.0, c_val.imag)
        if c_val.imag == 0.0:
            c_val = complex(c_val.real, 0.0)
        return ("c", c_val, nl, nt)
    except ValueError:
        pass
    # Nothing numeric: keep it as a plain string token.
    return ("s", t, nl, nt)
def fitness_func(loci, **kwargs):
    """
    Return how fit the locus is to describe a quarter of circle.

    This is a minimisation problem with a theoretical best score of 0.

    Returns
    -------
    float
        Sum of squared distances between the tip locus bounding box edges
        and a fixed reference square.
    """
    # The tip locus is the last joint of each linkage (mast/pin in order).
    tip_locus = tuple(x[-1] for x in loci)
    box = pl.bounding_box(tip_locus)
    # Reference bounding box, ordered (min_y, max_x, max_y, min_x).
    target_box = (0, 5, 2, 3)
    return sum((edge - target) ** 2 for edge, target in zip(box, target_box))
def get_div(integer):
    """
    Return the sorted divisors of integer that are >= 2, plus the
    integer itself (1 is deliberately excluded).

    :param integer: int
    :return: list
    """
    # Collect divisor pairs up to sqrt(integer); the set removes the
    # duplicate that appears for perfect squares.
    found = {integer}
    for candidate in range(2, int(integer ** 0.5) + 1):
        if integer % candidate == 0:
            found.add(candidate)
            found.add(integer // candidate)
    return sorted(found)
def ifuse(inputs):
    """Fuse iterators

    Collapses a sequence of (index, extent) pairs into a single
    (fused_index, fused_extent) pair using mixed-radix flattening.
    """
    fused_index, fused_extent = 0, 1
    for index, extent in inputs:
        fused_index = fused_index * extent + index
        fused_extent *= extent
    return (fused_index, fused_extent)
def filtered_list_gen(raw_response, term=None, partial_match=True):
    """
    Yield items from a Nexus search response whose `path` attribute is a
    valid string and matches the given term.

    For every yielded item it is validated that:

    1. the `path` dict key is a str
    2. the `path` value matches `term` (if provided): a prefix match when
       `partial_match` is True, otherwise an exact match.

    :param raw_response: an iterable that yields one element of a nexus
        search response at a time, such as the one returned by
        _paginate_get().
    :type raw_response: iterable
    :param term: if defined, only items whose `path` matches it are
        returned.
    :param partial_match: if True, include items whose artefact path starts
        with the given term.
    :return: a generator that yields items that matched the filter.
    :rtype: iterable
    """
    for artefact in raw_response:
        path = artefact.get('path')
        if path is None or not validate_strings(path):
            continue
        if term is None:
            yield artefact
        elif partial_match and path.startswith(term):
            yield artefact
        elif not partial_match and path == term:
            yield artefact
import random
def occlude_with_pascal_objects(im, occluders):
    """Returns an augmented version of `im`, containing some occluders from the
    Pascal VOC dataset.

    The input image is not modified; 1-7 randomly chosen occluders are
    pasted at random positions and scales onto a copy.
    """
    result = im.copy()
    width_height = np.asarray([im.shape[1], im.shape[0]])
    # Scale occluders relative to a 256-pixel reference image size.
    im_scale_factor = min(width_height) / 256
    # Between 1 and 7 occluders (randint upper bound is exclusive).
    count = np.random.randint(1, 8)
    # logger.debug(f'Number of augmentation objects: {count}')
    for _ in range(count):
        occluder = random.choice(occluders)
        # Uniformly random paste position anywhere in the image.
        center = np.random.uniform([0, 0], width_height)
        random_scale_factor = np.random.uniform(0.2, 1.0)
        scale_factor = random_scale_factor * im_scale_factor
        # logger.debug(f'occluder size: {occluder.shape},
        # scale_f: {scale_factor}, img_scale: {im_scale_factor}')
        occluder = resize_by_factor(occluder, scale_factor)
        paste_over(im_src=occluder, im_dst=result, center=center)
    return result
def check_job_access_permission(request, job_id):
    """
    Decorator ensuring that the user has access to the job submitted to Oozie.

    Arg: Oozie 'workflow', 'coordinator' or 'bundle' ID.
    Return: the Oozie workflow, coordinator or bundle or raise an exception

    Notice: its gets an id in input and returns the full object in output (not an id).
    """
    # NOTE: this module uses Python 2 except syntax ('except E, e').
    if job_id is not None:
        # Oozie job IDs encode their type in the suffix: W=workflow,
        # C=coordinator, otherwise a bundle.
        if job_id.endswith('W'):
            get_job = get_oozie(request.user).get_job
        elif job_id.endswith('C'):
            get_job = get_oozie(request.user).get_coordinator
        else:
            get_job = get_oozie(request.user).get_bundle
    try:
        oozie_job = get_job(job_id)
    except RestException, ex:
        raise PopupException(_("Error accessing Oozie job %s.") % (job_id,),
                             detail=ex._headers['oozie-error-message'])
    # Access is granted to superusers, the job owner, or users with
    # dashboard-jobs access.
    if request.user.is_superuser \
        or oozie_job.user == request.user.username \
        or has_dashboard_jobs_access(request.user):
        return oozie_job
    else:
        message = _("Permission denied. %(username)s does not have the permissions to access job %(id)s.") % \
            {'username': request.user.username, 'id': oozie_job.id}
        access_warn(request, message)
        raise PopupException(message)
from typing import get_origin
def is_dict_type(tp):
    """Return True if tp is a Dict"""
    # Matches typing.Dict (subscripted or not); builtin dict and
    # dict[...] generic aliases carry no '_name' and therefore do not match.
    if get_origin(tp) is not dict:
        return False
    return getattr(tp, '_name', None) == 'Dict'
def shape(pyshp_shpobj):
    """Convert a pyshp geometry object to a flopy geometry object.

    Parameters
    ----------
    pyshp_shpobj : shapefile._Shape instance

    Returns
    -------
    shape : flopy.utils.geometry Polygon, Linestring, or Point

    Notes
    -----
    Currently only regular Polygons, LineStrings and Points (pyshp types 5, 3, 1) supported.
    Other shape types raise KeyError.

    Examples
    --------
    >>> import shapefile as sf
    >>> from flopy.utils.geometry import shape
    >>> sfobj = sf.Reader('shapefile.shp')
    >>> flopy_geom = shape(list(sfobj.iterShapes())[0])
    """
    # Map pyshp shapeType codes to flopy geometry classes.
    types = {5: Polygon,
             3: LineString,
             1: Point}
    flopy_geometype = types[pyshp_shpobj.shapeType]
    return flopy_geometype(pyshp_shpobj.points)
def compute_nbr(image, sensor):
    """
    Compute nbr index
    NBR = (NIR-SWIR2)/(NIR+SWIR2)

    Returns an image with an 'NBR' band plus a constant 'yearday' band
    holding the acquisition date as a decimal year.
    """
    # Resolve the sensor-specific band names for NIR and SWIR2.
    bands = cp.sensors[sensor]["bands"]
    nir = image.select(bands["nir"])
    swir2 = image.select(bands["swir2"])
    # Decimal year: year + day-of-year / 365.
    doy = ee.Algorithms.Date(ee.Number(image.get("system:time_start")))
    yearday = ee.Number(doy.get("year")).add(
        ee.Number.parse(doy.format("D")).divide(365)
    )
    # create an image out of the yearday value
    yearday = ee.Image.constant(yearday).float().rename("yearday")
    nbr = nir.subtract(swir2).divide(nir.add(swir2)).rename("NBR")
    return nbr.addBands(yearday)
def unbroadcast(array):
    """
    Given an array, return a new array that is the smallest subset of the
    original array that can be re-broadcasted back to the original array.

    See http://stackoverflow.com/questions/40845769/un-broadcasting-numpy-arrays
    for more details.
    """
    if array.ndim == 0:
        return array
    # A stride of 0 means the axis was produced by broadcasting; collapse
    # each such axis to length 1.
    collapsed = tuple(
        1 if stride == 0 else length
        for stride, length in zip(array.strides, array.shape)
    )
    return as_strided(array, shape=collapsed)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.