| content (string, 22 to 815k chars) | id (int64, 0 to 4.91M) |
|---|---|
def check_if_module_installed(module_name):
"""Check if a module is installed.
:param module_name: Name of the module
:return: True if the module is installed, False otherwise
"""
# assumes `sys` and `from importlib.metadata import DistributionFinder` are imported at module level
distribution_finders = filter(None, (getattr(finder, 'find_distributions', None) for finder in sys.meta_path))
for find_distributions in distribution_finders:
dists = find_distributions(DistributionFinder.Context(name=module_name))
dist = next(iter(dists), None)
if dist is not None:
return True
# only report "not installed" once every finder has been checked
return False
| 8,600
|
def __virtual__():
"""Only load if grafana4 module is available"""
return "grafana4.get_org" in __salt__
| 8,601
|
def setup( key=None, force=False ):
"""Do setup by creating and populating the directories
This incredibly dumb script is intended to let you unpack
the Tcl/Tk library Togl from SourceForge into your
PyOpenGL 3.0.1 (or above) distribution.
Note: will not work with win64, both because there is no
win64 package and because we don't have a url defined
for it.
"""
if key is None:
key = '%s%s'%( sys.platform,suffix )
log.info( 'Doing setup for platform key: %s', key )
target_directory = os.path.join(
os.path.dirname( OpenGL.__file__ ),
'Tk',
'togl-%s'%( key, ),
)
log.info( 'Target directory: %s', target_directory )
if key not in urls:
log.error(
"""URL for platform key %s is not present, please update script""",
key,
)
sys.exit( 1 )
if os.path.exists( target_directory ):
return False
url = urls[key]
log.info( 'Downloading: %s', url )
filename,headers = urllib.urlretrieve( url )
log.info( 'Downloaded to: %s', filename )
if not os.path.isdir( target_directory ):
log.warn( 'Creating directory: %s', target_directory )
try:
os.makedirs( target_directory )
except OSError, err:
log.error( "Unable to create directory: %s", target_directory )
sys.exit( 2 )
if '.tar.gz' in url:
log.info( 'Opening TarFile' )
fh = tarfile.open( filename, 'r:gz')
def getnames():
return fh.getnames()
def getfile( name ):
return fh.extractfile( name )
elif '.zip' in url:
log.info( 'Opening ZipFile' )
fh = zipfile.ZipFile( filename )
def getnames():
return fh.namelist()
def getfile( name ):
return fh.open( name )
try:
for name in getnames():
log.debug( 'Found file: %s', name )
if fnmatch.fnmatch( name, WANTED_FILES ):
if not name.endswith( '/' ):
log.info( 'Found wanted file: %s', name )
source = getfile( name )
try:
new = os.path.join(
target_directory,
os.path.basename( name ),
)
log.info( 'Writing file: %s', new )
open( new,'wb' ).write( source.read() )
finally:
if hasattr( source, 'close' ):
source.close()
finally:
fh.close()
if filename != url:
os.remove( filename )
return True
| 8,602
|
def update_gateway_information(GatewayARN=None, GatewayName=None, GatewayTimezone=None):
"""
Updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.
See also: AWS API Documentation
Examples
Updates a gateway's metadata, which includes the gateway's name and time zone.
Expected Output:
:example: response = client.update_gateway_information(
GatewayARN='string',
GatewayName='string',
GatewayTimezone='string'
)
:type GatewayARN: string
:param GatewayARN: [REQUIRED]
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type GatewayName: string
:param GatewayName: The name you configured for your gateway.
:type GatewayTimezone: string
:param GatewayTimezone:
:rtype: dict
:return: {
'GatewayARN': 'string',
'GatewayName': 'string'
}
"""
pass
| 8,603
|
def categories_report(x):
"""Returns value counts report.
Parameters
----------
x: pd.Series
The series with the values
Returns
-------
string
The value counts report.
str1 = False 22 | True 20 | nan 34
str2 = False (22) | True (20) | nan (34)
"""
# Do counting and sorting
counts = x.value_counts(dropna=False)
counts.index = counts.index.map(str)
counts = counts.sort_index()
# Create different strings
str1 = ' | '.join(str(counts).split("\n")[:-1])
str2 = ' | '.join("%s (%s)" % (i, counts[i]) for i in counts.index)
# Return
return str2
| 8,604
|
def train(total_loss, global_step, train_num_examples):
"""Train model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
train_num_examples: Number of training examples (currently unused; the decay schedule uses the DECAY_STEPS constant).
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
# num_batches_per_epoch = train_num_examples / FLAGS.batch_size
# decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
decay_steps = DECAY_STEPS
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
| 8,605
|
def config_module_add():
"""Add module for configuration
Add an available module to the config file. POST json object structure:
{"action": "add", "value": {
"module": "modulename",
"moduleprop": ...
}}
On success, an object with this structure is returned:
{"_meta": {"id": <new ID>}, "module": "modulename"}
Otherwise an error message and code.
"""
config = read_config()
action = request.json["action"]
if action == "add":
if "value" not in request.json:
return _ret_invalid_request("value")
if "module" not in request.json["value"]:
return _ret_invalid_request("value/module")
moduletype = request.json["value"]["module"]
if moduletype not in _get_available_modules():
return _ret_unknown_module(moduletype)
newid = max([x["_meta"]["id"] for x in config["modules"]]) + 1
newmodule = request.json["value"]
if "_meta" not in newmodule:
newmodule["_meta"] = {}
if "_order" not in newmodule["_meta"]:
newmodule["_meta"]["order"] = 0
newmodule["_meta"]["id"] = newid
config["modules"].append(newmodule)
write_config(config)
ret = {"_meta": {"id": newid}, "module": moduletype}
return jsonify(ret)
else:
return _ret_unknown_action(action)
| 8,606
|
def command_add(fname, ctype, **kwa):
"""returns (str) command to add consatants from file to the DB,
ex.: cdb add -e testexper -d testdet_1234 -c test_ctype -r 123 -f cm-confpars.txt -i txt -l DEBUG
"""
exp = kwa.get('experiment', None)
det = kwa.get('detname', None)
runnum = kwa.get('runnum', None)
timestamp = kwa.get('timestamp', None)
time_sec = kwa.get('time_sec', None)
version = kwa.get('version', None)
dtype = kwa.get('dtype', None)
comment = kwa.get('comment', None)
loglev = kwa.get('loglev', None)
confirm = kwa.get('cdbadd', True)
cmd = 'cdb add'
if exp is not None: cmd += ' -e %s' % exp
if det is not None: cmd += ' -d %s' % det
if ctype is not None: cmd += ' -c %s' % ctype.ljust(12)
if dtype is not None: cmd += ' -i %s' % dtype
if runnum is not None: cmd += ' -r %s' % str(runnum)
if timestamp is not None: cmd += ' -t %s' % timestamp
if fname is not None: cmd += ' -f %s' % fname
if loglev is not None: cmd += ' -l %s' % loglev
if version is not None: cmd += ' -v %s' % version
if comment is not None: cmd += ' -m %s' % comment
if time_sec is not None: cmd += ' -s %s' % str(time_sec)
if confirm: cmd += ' -C'
logger.debug('command: %s' % cmd)
return cmd
| 8,607
|
def ustobj2songobj(
ust: up.ust.Ust, d_table: dict, key_of_the_note: int = None) -> up.hts.Song:
"""
Process the Ust object note by note and convert it for HTS.
Japanese lyrics are assumed, so each note carries one syllable. Watch out for sokuon (geminate consonants).
ust: Ust object
d_table: Japanese-to-romaji conversion table
key_of_the_note:
key of the song, which cannot be determined from the UST.
In Sinsy it is 0 ~ 11 or 'xx'.
"""
song = up.hts.Song()
ust_notes = ust.notes
# Convert each note object to the HTS note type
for ust_note in ust_notes:
hts_note = ustnote2htsnote(ust_note, d_table, key_of_the_note=key_of_the_note)
song.append(hts_note)
# Automatically fill in note lengths, positions, etc.
song.autofill()
# Set phonation start and end times according to the note lengths
song.reset_time()
return song
| 8,608
|
def detect_ol(table):
"""Detect ordered list"""
if not len(table):
return False
for tr in table:
if len(tr)!=2:
return False
td1 = tr[0]
# Only keep plausible ordered lists
if td1.text is None:
return False
text = td1.text.strip()
if not text or len(text)>3:
return False
if text[-1] not in ('.', ')'):
return False
if not text[:-1].isalpha() and not text[:-1].isdigit():
return False
if len(td1):
return False
return True
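# Hypothetical usage sketch (not part of the original snippet): build a tiny
# two-column table whose first cells look like ordered-list markers. The function
# was presumably written for lxml elements; xml.etree behaves the same for the
# operations used here (iteration, len, indexing, .text).
import xml.etree.ElementTree as ET
table = ET.fromstring("<table><tr><td>1.</td><td>first</td></tr><tr><td>2.</td><td>second</td></tr></table>")
assert detect_ol(table) is True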
| 8,609
|
def read_dataset_test(data_dir, transforms=None):
"""
Read the test split of the Mini-ImageNet dataset.
Args:
data_dir: directory containing Mini-ImageNet.
transforms: optional transforms applied when reading the classes.
Returns:
A one-element tuple containing the sequence of ImageNetClass
instances for the test split.
"""
return tuple([_read_classes(os.path.join(data_dir, 'test'), transforms)])
| 8,610
|
def analyze_syntax(text):
"""Use the NL API to analyze the given text string, and returns the
response from the API. Requests an encodingType that matches
the encoding used natively by Python. Raises an
errors.HTTPError if there is a connection problem.
"""
credentials = GoogleCredentials.get_application_default()
scoped_credentials = credentials.create_scoped(
['https://www.googleapis.com/auth/cloud-platform'])
http = httplib2.Http()
scoped_credentials.authorize(http)
service = discovery.build(
'language', 'v1beta1', http=http)
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': get_native_encoding_type(),
}
request = service.documents().annotateText(body=body)
return request.execute()
| 8,611
|
def _parseArgs(args: List[str]) -> Arguments:
"""
Parse the arguments. Terminates the script if errors are found.
"""
argLen = len(args)
# Initialize the argument values
inputPath: str = None
sheetName: Optional[str] = None
outputPath: Optional[str] = None
# Check if the input path was specified
if argLen < 2:
raise ArgsError('The input file was not specified.')
# Check if the input file exists
if not os.path.exists(args[1]):
raise ArgsError(f'The file "{args[1]}" does not exist.')
inputPath = args[1]
argIdx = 2
# Check each optional argument
while argIdx < argLen:
# Check the sheet argument
if args[argIdx] in ('-s', '--sheet'):
if argIdx + 1 == argLen:
raise ArgsError('Sheet name was not specified.')
sheetName = args[argIdx + 1]
argIdx += 2
# Check the outputPath argument
elif args[argIdx] in ('-o', '--output'):
if argIdx + 1 == argLen:
raise ArgsError('Output path was not specified.')
outputPath = args[argIdx + 1]
argIdx += 2
# If the argument is unrecognized
else:
raise ArgsError(f'The argument "{args[argIdx]}" is unrecognized.')
return Arguments(inputPath,
sheetName,
outputPath)
| 8,612
|
def test_repr_linear(model_class, model_class_repr):
"""Check __repr__ method of LinearPerSegmentModel and LinearMultiSegmentModel."""
kwargs = {"copy_X": True, "positive": True}
kwargs_repr = "copy_X = True, positive = True"
model = model_class(fit_intercept=True, normalize=False, **kwargs)
model_repr = model.__repr__()
true_repr = f"{model_class_repr}(fit_intercept = True, normalize = False, {kwargs_repr}, )"
assert model_repr == true_repr
| 8,613
|
def kabsch_numpy(X, Y):
""" Kabsch alignment of X into Y.
Assumes X,Y are both (Dims x N_points). See below for wrapper.
"""
# center X and Y to the origin
X_ = X - X.mean(axis=-1, keepdims=True)
Y_ = Y - Y.mean(axis=-1, keepdims=True)
# calculate the covariance matrix
C = np.dot(X_, Y_.transpose())
# Optimal rotation matrix via SVD
V, S, W = np.linalg.svd(C)
# determinant sign for direction correction
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = S[-1] * (-1)
V[:, -1] = V[:, -1] * (-1)
# Create Rotation matrix U
U = np.dot(V, W)
# calculate rotations
X_ = np.dot(X_.T, U).T
# return centered and aligned
return X_, Y_
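# Hypothetical usage sketch (assumption, not from the original snippet): rotate a
# random 3-D point cloud and check that kabsch_numpy recovers the alignment.
import numpy as np
rng = np.random.default_rng(0)
pts = rng.random((3, 10))  # (Dims x N_points)
theta = 0.3
rot = np.array([[np.cos(theta), -np.sin(theta), 0.0], [np.sin(theta), np.cos(theta), 0.0], [0.0, 0.0, 1.0]])
X_aligned, Y_centered = kabsch_numpy(rot @ pts, pts)
assert np.allclose(X_aligned, Y_centered)  # the centered clouds coincide after alignment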
| 8,614
|
def save_trade_history_hdf(df, market, update):
"""
Saves a dataframe of the trade history for a market.
"""
filename = TRADE_DATA_DIR + market + '.hdf5'
if update:
df.to_hdf(filename, 'data', mode='a', complib='blosc', complevel=9, format='table', append=True)
else:
df.to_hdf(filename, 'data', mode='w', complib='blosc', complevel=9, format='table')
| 8,615
|
def _decode(value):
"""
Base64 decode, restoring the "=" padding.
Remember to add back the stripped "=": Docker drops the padding when it issues the token.
:param value:
:return:
"""
length = len(value) % 4
if length in (2, 3,):
value += (4 - length) * "="
elif length != 0:
raise ValueError("Invalid base64 string")
if not isinstance(value, six.binary_type):
value = value.encode()
return base64.urlsafe_b64decode(value)
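# Hypothetical usage sketch: "hello" encodes to "aGVsbG8=", and the issuer strips
# the trailing "="; _decode restores the padding before decoding.
assert _decode("aGVsbG8") == b"hello"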
| 8,616
|
def header_elements(fieldname, fieldvalue):
"""Return a sorted HeaderElement list from a comma-separated header string.
"""
if not fieldvalue:
return []
result = []
for element in RE_HEADER_SPLIT.split(fieldvalue):
if fieldname.startswith('Accept') or fieldname == 'TE':
hv = AcceptElement.from_str(element)
else:
hv = HeaderElement.from_str(element)
result.append(hv)
return list(reversed(sorted(result)))
| 8,617
|
def p_expression_logical_op(p):
"""Parser
expression : expression AND expression
| expression OR expression
"""
result, arg1, op, arg2 = p
if op == '&' or op == ',':
result = lambda: arg1() and arg2()
elif op == '|':
result = lambda: arg1() or arg2()
p[0] = SubexpressionWrapper(result)
| 8,618
|
def line2dict(st):
"""Convert a line of key=value pairs to a
dictionary.
:param st: comma-separated string of key=value pairs
:returns: a dictionary mapping keys to values (integer-looking values are cast to int)
:rtype: dict
"""
elems = st.split(',')
dd = {}
for elem in elems:
elem = elem.split('=')
key, val = elem
try:
int_val = int(val)
dd[key] = int_val
except ValueError:
dd[key] = val
return dd
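# Hypothetical usage sketch: integer-looking values are converted to int,
# everything else stays a string.
assert line2dict("a=1,b=two,c=3") == {'a': 1, 'b': 'two', 'c': 3}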
| 8,619
|
def transform(walls, spaces):
"""svg coords are in centimeters from the (left, top) corner,
while we want metres from the (left, bottom) corner"""
joint = np.concatenate([np.concatenate(walls), np.concatenate(spaces)])
(left, _), (_, bot) = joint.min(0), joint.max(0)
def tr(ps):
x, y = ps[..., 0], ps[..., 1]
return np.stack([x - left, bot - y], -1)/SCALE + MARGIN
return tr(walls), [tr(s) for s in spaces]
| 8,620
|
def test_irls_init():
"""
test that all distributions have irls initalizers
"""
dists = [Gamma, Gaussian, InverseGaussian, Binomial, NegativeBinomial,
Poisson]
for dist in dists:
assert hasattr(dist, 'irls_init')
| 8,621
|
def get_json(partition: str,
start: float = 0.0,
end: float = 1.0,
return_data: bool = False) -> Union[Path, Dict[Any, Any]]:
"""path, gender, age, result
result=-1 for test set
Example
-------
```
JSON_TRAIN = get_json('train', start=0.0, end=0.8)
JSON_VALID = get_json('train', start=0.8, end=1.0)
```
"""
wav = WAV_FILES[partition]
wav_meta = WAV_META[partition]
# === 1. prepare meta
meta = defaultdict(dict)
for k, tab in META_DATA[partition].items():
tab: pd.DataFrame
for _, row in tab.iterrows():
meta[row['uuid']].update({
i: eval(j) if isinstance(j, string_types) and '[' in j else j
for i, j in row.items()})
# === 2. load wav
data = []
for f in sorted(wav):
name = os.path.basename(f)
uuid = name.replace('.wav', '')
row: dict = meta[uuid]
dur, sr = wav_meta[f]
row['duration'] = dur
row['sr'] = sr
data.append((uuid, dict(path=f, meta=row)))
# === 3. shuffle and split
rand = np.random.RandomState(seed=DATA_SEED)
rand.shuffle(data)
n = len(data)
start_idx = int(n * start)
end_idx = int(n * end)
data = data[start_idx:end_idx]
data = dict(data)
if return_data:
return data
# === 4. save to JSON
path = os.path.join(CACHE_PATH,
f'{partition}_{start:g}_{end:g}_{DATA_SEED:d}.json')
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f)
return Path(path)
| 8,622
|
def isolate(result_file, isolate_file, mode, variables, out_dir, error):
"""Main function to isolate a target with its dependencies.
Arguments:
- result_file: File to load or save state from.
- isolate_file: File to load data from. Can be None if result_file contains
the necessary information.
- mode: Action to do. See file level docstring.
- variables: Variables to process, if necessary.
- out_dir: Output directory where the result is stored. Its use depends on
|mode|.
Some arguments are optional, depending on |mode|. See the corresponding
MODE<mode> function for the exact behavior.
"""
# First, load the previous stuff if it was present. Namely, "foo.result" and
# "foo.state".
complete_state = CompleteState.load_files(result_file, out_dir)
isolate_file = isolate_file or complete_state.saved_state.isolate_file
if not isolate_file:
error('A .isolate file is required.')
if (complete_state.saved_state.isolate_file and
isolate_file != complete_state.saved_state.isolate_file):
error(
'%s and %s do not match.' % (
isolate_file, complete_state.saved_state.isolate_file))
try:
# Then process options and expands directories.
complete_state.load_isolate(isolate_file, variables, error)
# Regenerate complete_state.result.files.
complete_state.process_inputs(LEVELS[mode])
# Finally run the mode-specific code.
result = VALID_MODES[mode](out_dir, complete_state)
except run_test_from_archive.MappingError, e:
error(str(e))
# Then store the result and state.
complete_state.save_files()
return result
| 8,623
|
def check_structure(struct):
"""
Return True if the monophyly structure represented by struct is
considered "meaningful", i.e. encodes something other than an
unstructured polytomy.
"""
# First, transform e.g. [['foo'], [['bar']], [[[['baz']]]]], into simply
# ['foo','bar','baz'].
def denester(l):
if type(l) != list:
return l
if len(l) == 1:
return denester(l[0])
return [denester(x) for x in l]
struct = denester(struct)
# Now check for internal structure
if not any([type(x) == list for x in struct]):
# Struct is just a list of language names, with no internal structure
return False
return True
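# Hypothetical usage sketch: a flat list of names carries no internal structure,
# while a nested grouping does.
assert check_structure(['foo', 'bar', 'baz']) is False
assert check_structure([['foo', 'bar'], ['baz']]) is True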
| 8,624
|
def batchify_with_label(input_batch_list, gpu, volatile_flag=False):
"""
input: list of sentences of various lengths, each [chars, bichars, gaz, labels]
chars: char ids for one sentence. (sent_len)
bichars: bigram char ids for one sentence. (sent_len)
output:
zero padding for chars and bichars, together with their batch lengths
gaz_list: gazetteer entries reordered to match the sorted batch, with volatile_flag appended
char_seq_tensor: (batch_size, max_sent_len) Variable
bichar_seq_tensor: (batch_size, max_sent_len) Variable
char_seq_lengths: (batch_size,) Tensor, sorted in descending order
char_seq_recover: (batch_size,) indices that recover the original sentence order
label_seq_tensor: (batch_size, max_sent_len)
mask: (batch_size, max_sent_len)
"""
batch_size = len(input_batch_list)
chars = [sent[0] for sent in input_batch_list]
bichars = [sent[1] for sent in input_batch_list]
gazs = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
char_seq_lengths = torch.LongTensor(list(map(len, chars)))
max_seq_len = char_seq_lengths.max().item()
with torch.no_grad():
# torch.zeros(*sizes, out=None) → Tensor
# returns a tensor filled with the scalar 0, with shape defined by the sizes arguments
# sizes (int...) – sequence of integers defining the output shape
# out (Tensor, optional) – the output tensor
char_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len))).long()
bichar_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len))).long()
label_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len))).long()
mask = autograd.Variable(torch.zeros((batch_size, max_seq_len))).byte()
for idx, (seq, biseq, label, seqlen) in enumerate(zip(chars, bichars, labels, char_seq_lengths)):
# torch.Tensor is a multi-dimensional matrix containing elements of a single data type
# 64-bit integer (signed) torch.LongTensor torch.cuda.LongTensor
char_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
bichar_seq_tensor[idx, :seqlen] = torch.LongTensor(biseq)
label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)
mask[idx, :seqlen] = torch.Tensor([1] * seqlen.item())
char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx]
bichar_seq_tensor = bichar_seq_tensor[char_perm_idx]
label_seq_tensor = label_seq_tensor[char_perm_idx]
mask = mask[char_perm_idx]
_, char_seq_recover = char_perm_idx.sort(0, descending=False)
# keep the gaz_list in original order
gaz_list = [gazs[i] for i in char_perm_idx]
gaz_list.append(volatile_flag)
if gpu:
char_seq_tensor = char_seq_tensor.cuda()
bichar_seq_tensor = bichar_seq_tensor.cuda()
char_seq_lengths = char_seq_lengths.cuda()
char_seq_recover = char_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
mask = mask.cuda()
return gaz_list, char_seq_tensor, bichar_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask
| 8,625
|
def symbolicMatrix(robot):
"""
Denavit - Hartenberg parameters for n - th rigid body
theta: rotation on «z» axis
d: translation on «z» axis
a: translation on «x» axis
alpha: rotation on «x» axis
"""
return np.array([[0, 0, 0, 0],
[robot.symbolicJointsPositions[0, 0], robot.symbolicLinksLengths[0], 0, np.pi / 2],
[robot.symbolicJointsPositions[1, 0], 0, robot.symbolicLinksLengths[1], 0],
[robot.symbolicJointsPositions[2, 0], 0, 0, np.pi / 2],
[robot.symbolicJointsPositions[3, 0], robot.symbolicLinksLengths[2], 0, 0]])
| 8,626
|
def magnitude_list(data: List) -> List:
"""
:param data: list (or JSON string) of vectors
:return: list of L2-norm magnitudes, one per vector
"""
if data is None or len(data) == 0:
return []
if isinstance(data, str):
try:
data = json.loads(data)
except:
data = data
try:
input_data = np.array([i for i in data])
data = norm(input_data, axis=1).tolist()
except Exception as e:
print("Error in calculating magnigutude ----> ")
print("Data: ", data)
print("NP Array: ", input_data)
print(e)
raise Exception
return data
| 8,627
|
def generate_keypair(passphrase):
""" Create a pair of keys with the passphrase as part of the key names """
keypath = '/tmp/test_{}_key'.format(passphrase)
command = 'ssh-keygen -t rsa -b 4096 -C "{p}" -P "{p}" -f {k} -q'
command = command.format(p=passphrase,
k=keypath)
subprocess.check_call(command, shell=True)
return keypath, keypath + '.pub'
| 8,628
|
def main():
"""
Produces the figure and saves it as a PDF.
"""
plt.clf()
axes = setup_axes()
visuals.plot_output_4axes(axes,
"../../simulations/sudden_5Gyr_5e9Msun_schmidt", "crimson", "O")
visuals.plot_output_4axes(axes,
"../../simulations/sudden_5Gyr_5e9Msun_ts1p0_schmidt", "deepskyblue",
"O")
visuals.plot_output_4axes(axes, "../../simulations/sudden_5Gyr_5e9Msun",
"black", "O", second_linestyle = ':')
visuals.plot_track_points_intervals(axes[2],
vice.history("../../simulations/sudden_5Gyr_5e9Msun"))
visuals.sfr_ifr_legend(axes[0])
visuals.legend(axes[2], ["black", "crimson", "deepskyblue"],
[r"$\tau_*\propto M_\text{g}^0$ \qquad$\tau_\text{s}$ = 0",
r"$\tau_*\propto M_\text{g}^{-1/2}$\quad$\tau_\text{s}$ = 0",
r"$\tau_*\propto M_\text{g}^{-1/2}$\quad$\tau_\text{s}$ = 1 Gyr"])
plot_ifr(axes[0], "../../simulations/sudden_5Gyr_5e9Msun_schmidt",
"crimson")
plot_ifr(axes[0], "../../simulations/sudden_5Gyr_5e9Msun_ts1p0_schmidt",
"deepskyblue")
plot_ifr(axes[0], "../../simulations/sudden_5Gyr_5e9Msun",
"black")
plt.tight_layout()
visuals.yticklabel_formatter(axes[3])
plt.savefig(sys.argv[1])
plt.clf()
| 8,629
|
def lookup_axis1(x, indices, fill_value=0):
"""Return values of x at indices along axis 1,
returning fill_value for out-of-range indices.
"""
# Save shape of x and flatten
ind_shape = indices.shape
a, b = x.shape
x = tf.reshape(x, [-1])
legal_index = indices < b
# Convert indices to legal indices in flat array
indices = tf.clip_by_value(indices, 0., b - 1.)
indices = indices + b * tf.range(a, dtype=float_type())[:, o, o]
indices = tf.reshape(indices, shape=(-1,))
indices = tf.dtypes.cast(indices, dtype=int_type())
# Do indexing
result = tf.reshape(tf.gather(x,
indices),
shape=ind_shape)
# Replace illegal indices with fill_value, cast to float explicitly
return tf.cast(tf.where(legal_index,
result,
tf.zeros_like(result) + fill_value),
dtype=float_type())
| 8,630
|
async def test_full_flow(
hass: HomeAssistant,
hass_client_no_auth: Callable[[], Awaitable[TestClient]],
aioclient_mock: AiohttpClientMocker,
current_request_with_host: None,
mock_geocaching_config_flow: MagicMock,
mock_setup_entry: MagicMock,
) -> None:
"""Check full flow."""
assert await setup_geocaching_component(hass)
# Ensure integration is discovered when manual implementation is configured
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert "context" in flows[0]
assert flows[0]["context"]["source"] == SOURCE_INTEGRATION_DISCOVERY
assert flows[0]["context"]["unique_id"] == DEFAULT_DISCOVERY_UNIQUE_ID
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert "flow_id" in result
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": REDIRECT_URI,
},
)
assert result.get("type") == RESULT_TYPE_EXTERNAL_STEP
assert result.get("step_id") == "auth"
assert result.get("url") == (
f"{CURRENT_ENVIRONMENT_URLS['authorize_url']}?response_type=code&client_id={CLIENT_ID}"
f"&redirect_uri={REDIRECT_URI}"
f"&state={state}&scope=*"
)
client = await hass_client_no_auth()
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == HTTPStatus.OK
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
CURRENT_ENVIRONMENT_URLS["token_url"],
json={
"access_token": "mock-access-token",
"token_type": "bearer",
"expires_in": 3599,
"refresh_token": "mock-refresh_token",
},
)
await hass.config_entries.flow.async_configure(result["flow_id"])
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert len(mock_setup_entry.mock_calls) == 1
| 8,631
|
def delete_compute_job():
"""
Deletes the current compute job.
---
tags:
- operation
consumes:
- application/json
parameters:
- name: agreementId
in: query
description: agreementId
type: string
- name: jobId
in: query
description: Id of the job.
type: string
- name: owner
in: query
description: owner
type: string
"""
#since op-engine handles this, there is no need for this endpoint. Will just keep it here for backwards compat
return jsonify(""), 200
| 8,632
|
def default_init_weights(module, scale=1, nonlinearity="relu"):
"""
Initialize the Conv2d weights of `module` with MSRA (Kaiming) normal init, scaled by `scale`; nonlinearity is e.g. "relu" or "leaky_relu".
"""
for m in module.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_in", nonlinearity=nonlinearity)
m.weight *= scale
if m.bias is not None:
M.init.zeros_(m.bias)
else:
pass
| 8,633
|
def cli_modules_remove(
ctx: typer.Context,
module_name: str = typer.Argument(
...,
help="The module name to remove",
),
):
"""
Remove a module.
"""
p = Path("./modules") / Path(module_name)
if not p.is_dir():
typer.echo(
f"{ctx.obj['style']['error']} The module `{module_name}` doesn't exist !"
)
raise typer.Exit(code=1)
typer.echo(f"{ctx.obj['style']['info']} Uninstalling `{module_name}` ...")
shutil.rmtree(p, onerror=_on_rmtree_error)
typer.echo(
f"{ctx.obj['style']['success']} `{module_name}` has been correctly removed."
)
| 8,634
|
def load_dict(dict_path):
"""
Load a dict mapping each line index to the stripped line content (term).
"""
result_dict = {}
for idx, line in enumerate(io.open(dict_path, "r", encoding='utf8')):
terms = line.strip("\n")
result_dict[idx] = terms
return result_dict
| 8,635
|
def project(x, n):
""" http://www.euclideanspace.com/maths/geometry/elements/plane/lineOnPlane/"""
l = np.linalg.norm(x)
a = normalize(x)
b = normalize(n)
axb = np.cross(a,b)
bxaxb = np.cross(b, axb)
return l * bxaxb
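# Hypothetical usage sketch; assumes `normalize(v)` returns v / ||v|| as in the
# original module (restated here so the sketch runs standalone).
import numpy as np
normalize = lambda v: v / np.linalg.norm(v)
x = np.array([1.0, 2.0, 3.0])
n = np.array([0.0, 0.0, 1.0])
p = project(x, n)
assert abs(np.dot(p, n)) < 1e-12  # the projected vector lies in the plane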
| 8,636
|
def topologicalSort(roots, getParents):
"""Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node
"""
results = []
visited = set()
# Use iterative version to avoid stack limits for large datasets
stack = [(node,0) for node in roots]
while stack:
current, state = stack.pop()
if state == 0: # before recursing
if current not in visited:
visited.add(current)
stack.append((current,1))
stack.extend((parent,0) for parent in getParents(current))
else: # after recursing
assert current in visited
results.append(current)
return results
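# Hypothetical usage sketch: in the returned order every node appears after its
# parents (dependencies), here a -> b -> c.
graph = {'c': ['a', 'b'], 'b': ['a'], 'a': []}
order = topologicalSort(['c'], lambda node: graph[node])
assert order.index('a') < order.index('b') < order.index('c')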
| 8,637
|
def resources(ctx, job, gpu):
"""Get experiment or experiment job resources.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples for getting experiment resources:
\b
```bash
$ polyaxon experiment -xp 19 resources
```
For GPU resources
\b
```bash
$ polyaxon experiment -xp 19 resources --gpu
```
Examples for getting experiment job resources:
\b
```bash
$ polyaxon experiment -xp 19 resources -j 1
```
For GPU resources
\b
```bash
$ polyaxon experiment -xp 19 resources -j 1 --gpu
```
"""
def get_experiment_resources():
try:
message_handler = Printer.gpu_resources if gpu else Printer.resources
PolyaxonClient().experiment.resources(
user, project_name, _experiment, message_handler=message_handler)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def get_experiment_job_resources():
try:
message_handler = Printer.gpu_resources if gpu else Printer.resources
PolyaxonClient().experiment_job.resources(user,
project_name,
_experiment,
_job,
message_handler=message_handler)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get resources for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if job:
_job = get_experiment_job_or_local(job)
get_experiment_job_resources()
else:
get_experiment_resources()
| 8,638
|
def get_headers(metric_resource: MetricResource):
"""
Get the headers to be used in the REST query for the given metric.
"""
headers = {}
# no headers will be used
if metric_resource.spec.headerTemplates is None:
return headers, None
# initialize headers dictionary
for item in metric_resource.spec.headerTemplates:
headers[item.name] = item.value
# if authType is None, interpolation is not attempted
if metric_resource.spec.authType is None:
return headers, None
# if authType is Basic, interpolation is not attempted
if metric_resource.spec.authType == AuthType.BASIC:
return headers, None
# if there is no secret referenced, interpolation is not attempted
if metric_resource.spec.secret is None:
return headers, None
# args contain decoded secret data for header template interpolation
args, err = get_secret_data_for_metric(metric_resource)
if err is None:
for key in headers:
headers[key], err = interpolate(headers[key], args)
if err is not None:
return None, err
return headers, None
return None, err
| 8,639
|
def visualize(images: list, images_originals: list, max_images: int = 5):
"""
Generate the bokeh plot of the input batch transformation
:param images: list of transformed items (dict of image and other objects)
:param images_originals: list of original items (dict of image and other objects)
:param max_images: max number of tabs to be shown
"""
tabs = []
image_labels, heatmap_labels, mask_types, points_types, other_types = _get_image_types(images[0])
# loop through the input elements to create the tabs
for index, (data, data_original) in enumerate(zip(images, images_originals)):
# Restart palette of colors to have the same colors in each image
if index == max_images:
break
_restart_color_palette()
p = generate_item_tab(data=data, data_original=data_original, heatmap_labels=heatmap_labels,
mask_types=mask_types, points_types=points_types, other_types=other_types)
title = 'image ' + str(index)
tab = Panel(child=p, title=title)
tabs.append(tab)
# Generate output document
layout = Tabs(tabs=tabs)
title = generate_title_template()
layout = column(title, layout)
curdoc().title = "Batch visualization"
curdoc().add_root(layout)
# Run bokeh server to show the visualization window
if not os.environ.get('READTHEDOCS'):
command = 'bokeh serve --show ' + sys.argv[0]
os.system(command)
| 8,640
|
def indra_upstream_ora(
client: Neo4jClient, gene_ids: Iterable[str], **kwargs
) -> pd.DataFrame:
"""
Calculate a p-value for each entity in the INDRA database
based on the set of genes that it regulates and how
they compare to the query gene set.
"""
count = count_human_genes(client=client)
return _do_ora(
get_entity_to_targets(client=client), gene_ids=gene_ids, count=count, **kwargs
)
| 8,641
|
def log(*args, host="127.0.0.1", port=3001, surround=3, **kwargs) -> bool:
"""
Create `Log` object and send to codeCTRL server in cbor format.
The codectrl.log function collects and formats information about
the file/function/line of code it got called on and sends it to
the codeCTRL server, if available.
Usage:
The function takes any number of positional
or keyword arguments of all types.
All positional arguments get included in the log `message`
using str() or json.dumps(obj, indent=4) in case of dicts.
Keyword arguments, other than `reserved` ones, get appended
to the logs as {key}={value}
Reserved arguments:
* host:
By default set to `127.0.0.1`, this argument
holds the address of the codeCTRL server.
* port:
By default set to `3001`, this is the port
the codeCTRL server should be contacted at.
* surround:
By default `3`, this argument specifies the
number of lines of code that should be displayed
around the call to `codectrl.log`.
"""
# This makes it easier for users of the library
# to debug errors they caused.
assert isinstance(host, str), "host variable has to be a string"
assert isinstance(port, int), "port variable has to be an integer"
assert isinstance(surround, int), "surround variable has to be an integer"
# Try connect to the server.
try:
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.connect((host, port))
except Exception as err: # pylint: disable=broad-except # Could be many things.
print(f"[codeCTRL] Could not reach codeCTRL server. {err}", file=sys.stderr)
return False
# Collect logging data
log_obj: Log = Log(surround, *args, **kwargs)
# Send logging data to server
soc.send(log_obj.cbor())
# s.send(b'\0')
# close socket
soc.close()
return True
| 8,642
|
def _take_photo(gopro_instance, interval_secs = 600):
""" Take a photo, this function still in dev """
try:
img = gopro_instance.take_photo();
return img
except TypeError:
tl.send_alert()
return False
except:
tl.send_alert( message = \
'🆘*E️rror desconocido*, se requiere soporte técnico urgente!' )
return False
#time.sleep(interval_secs)
#time_lapse(gopro_instance, interval_secs)
| 8,643
|
def evaluate(
forecaster, cv, y, X=None, strategy="refit", scoring=None, return_data=False
):
"""Evaluate forecaster using cross-validation
Parameters
----------
forecaster : sktime.forecaster
Any forecaster
cv : sktime.SlidingWindowSplitter or sktime.ExpandingWindowSplitter
Splitter of how to split the data into test data and train data
y : pd.Series
Target time series to which to fit the forecaster.
X : pd.DataFrame, optional (default=None)
Exogenous variables
strategy : str, optional
Must be "refit" or "update", by default "refit". The strategy defines
whether forecaster is only fitted on the first train window data and
then updated or always refitted.
scoring : object of class MetricFunctionWrapper from
sktime.performance_metrics, optional. Example scoring=sMAPE().
Used to get a score function that takes y_pred and y_test as arguments,
by default None (if None, uses sMAPE)
return_data : bool, optional
Returns three additional columns in the DataFrame, by default False.
The cells of the columns contain each a pd.Series for y_train,
y_pred, y_test.
Returns
-------
pd.DataFrame
DataFrame that contains several columns with information regarding each
refit/update and prediction of the forecaster.
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.performance_metrics.forecasting import evaluate
>>> from sktime.forecasting.model_selection import ExpandingWindowSplitter
>>> from sktime.forecasting.naive import NaiveForecaster
>>> y = load_airline()
>>> forecaster = NaiveForecaster(strategy="drift", sp=12)
>>> cv = ExpandingWindowSplitter(
initial_window=24,
step_length=12,
fh=[1,2,3,4,5,6,7,8,9,10,11,12]
)
>>> evaluate(forecaster=forecaster, y=y, cv=cv)
"""
cv = check_cv(cv)
y = check_y(y)
_check_strategies(strategy)
scoring = check_scoring(scoring)
results = pd.DataFrame()
cv.start_with_window = True
for i, (train, test) in enumerate(cv.split(y)):
# get initial window, if required
if i == 0 and cv.initial_window and strategy == "update":
train, test = cv.split_initial(y)
# this might have to be directly handled in split_initial()
test = test[: len(cv.fh)]
# create train/test data
y_train = y.iloc[train]
y_test = y.iloc[test]
X_train = X.iloc[train] if X is not None else None
X_test = X.iloc[test] if X is not None else None
# fit/update
start_fit = time.time()
if strategy == "refit" or i == 0:
forecaster.fit(
y=y_train,
X=X_train,
fh=ForecastingHorizon(y_test.index, is_relative=False),
)
else:
# strategy == "update" and i != 0:
forecaster.update(y=y_train, X=X_train)
fit_time = time.time() - start_fit
# predict
start_pred = time.time()
y_pred = forecaster.predict(
fh=ForecastingHorizon(y_test.index, is_relative=False), X=X_test
)
pred_time = time.time() - start_pred
# save results
results = results.append(
{
"test_" + scoring.__class__.__name__: scoring(y_pred, y_test),
"fit_time": fit_time,
"pred_time": pred_time,
"len_train_window": len(y_train),
"cutoff": forecaster.cutoff,
"y_train": y_train if return_data else np.nan,
"y_test": y_test if return_data else np.nan,
"y_pred": y_pred if return_data else np.nan,
},
ignore_index=True,
)
# post-processing of results
if not return_data:
results = results.drop(columns=["y_train", "y_test", "y_pred"])
results["len_train_window"] = results["len_train_window"].astype(int)
return results
| 8,644
|
def euler(step, y0):
"""
Implements Euler's method for the differential equation dy/dx = 1/(2(y-1)) on the interval [0,4]
"""
x = [0]
index_x = 0
while x[index_x] < 4:
x.append(x[index_x] + step)
index_x += 1
index_y = 0
y = [y0]
def yprime(y):
yprime = 1 / (2 * (y - 1))
return yprime
while index_y < index_x:
y.append(y[index_y] + step * yprime(y[index_y]))
index_y += 1
return x, y
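# Hypothetical usage sketch: approximate y(x) on [0, 4] with step 0.5 and y(0) = 2.
# For dy/dx = 1/(2(y-1)) with y(0) = 2 the exact solution is y = 1 + sqrt(x + 1),
# so the endpoint estimate should land near y(4) = 1 + sqrt(5).
xs, ys = euler(0.5, 2.0)
print(xs[-1], ys[-1])  # xs[-1] is 4.0; ys[-1] is about 3.28 vs. the exact 3.24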
| 8,645
|
def download_wikipedia_pageids(config, out_file):
"""download and save a summary of all wikipedia pages in a category
:param config: argparse object.
Must have config.wikipedia_category and config.wikipedia_sleep
:param out_file: string, file to save page ids into
"""
category = config.wikipedia_category
if type(category) is not str:
category = " ".join(category)
result = _download_category_list(category, config.wikipedia_sleep)
with gzip.open(out_file, "wt") as out:
out.write(_make_page_str(None))
for x in result:
out.write(_make_page_str(x))
| 8,646
|
def cd_chdir():
"""Sample pytest fixture. """
os.chdir(Path(__file__).parent)
| 8,647
|
def get_fileinfo(url: str, proxy: str = '', referer: str = '') -> (str, str, requests.Response):
"""
Gets information about the file to be downloaded.
:param url: file URL
:param proxy: proxy address
:param referer: referer header, used to bypass anti-scraping checks
:return: real URL, file name, HTTP response (header keys and values lowercased)
"""
import re
import os
proxies = {
'http': 'http://'+proxy,
'https': 'https://'+proxy
} if proxy else {}
if referer:
headers['referer'] = referer
try:
res = requests.head(url, headers=headers, proxies=proxies)
except Exception as e:
return '', repr(e), None
while res.status_code in [301, 302]:
url = {i[0]: i[1] for i in res.headers.lower_items()}['location']
res = requests.head(url, headers=headers, proxies=proxies)
res.headers = {i[0]: i[1] for i in res.headers.lower_items()}
if 'content-disposition' in res.headers:
try:
filename = re.findall('filename=(.*?);', res.headers['content-disposition'])[0]
except IndexError:
from urllib.parse import urlparse
filename = os.path.basename(urlparse(url).path.strip('/'))
else:
from urllib.parse import urlparse
filename = os.path.basename(urlparse(url).path.strip('/'))
return url, re.sub(r"^\W+|\W+$", "", filename), res
| 8,648
|
def generate_random_solution():
"""generate_random_solution() Generates a random solution of random characters from [ ,!,..A..Z..a..z...~]."""
global answer
#codes for chars [ ,!..A..Z..a..z..~]
chars = list(range(32,127))
solution = []
while len(solution) < len(answer): #generate random solutions to length of the true answer
solution.append(random.choice(chars))
return solution
| 8,649
|
def entrypoint_module(config):
"""Lazily returns the entrypoint module defined in a qaboard config"""
import importlib.util
entrypoint = config.get('project', {}).get('entrypoint')
if not entrypoint:
click.secho(f'ERROR: Could not find the entrypoint', fg='red', err=True, bold=True)
click.secho(f'Add to qaboard.yaml:\n```\nproject:\n entrypoint: my_main.py\n```', fg='red', err=True, dim=True)
return FailingEntrypoint()
else:
entrypoint = Path(entrypoint)
try:
name = f'qaboard-entrypoint'
# https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
spec = importlib.util.spec_from_file_location(name, entrypoint)
module = importlib.util.module_from_spec(spec)
sys.path.insert(0, str(entrypoint.parent))
spec.loader.exec_module(module)
# sys.path.pop(0)
# spec = importlib.util.spec_from_loader(name, importlib.machinery.SourceFileLoader(name, str(entrypoint)))
# spec.submodule_search_locations = [str(entrypoint.parent)]
# with cached versions of the entrypoint.... An option could be importlib.reload(module)
# FIXME: at some points I had issues with sys.path, but no more (?)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
click.secho(f'ERROR: Error importing the entrypoint ({entrypoint}).', fg='red', err=True, bold=True)
click.secho(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)), fg='red', err=True)
click.secho(
f'{entrypoint} must implement a `run(context)` function, and optionally `postprocess` / `metadata` / `iter_inputs`.\n'
'Please read the tutorial at https://samsung.github.com/qaboard/docs\n',
dim=True, err=True)
return FailingEntrypoint()
return module
| 8,650
|
def getTV_Info():
"""
Get the TeamViewer ID and password.
Use Spy++ to inspect the child windows and control classes of the target program,
then use the win32 API to read the contents of the text boxes.
Notes:
# FindWindowEx() only finds direct child windows, so the search has to descend level by level
# its second argument specifies the child window after which to continue searching, needed when two children share the same class name
Reference:
https://github.com/wuxc/pywin32doc/blob/master/md/win32gui.md#win32guifindwindowex
"""
# Get the handles of the ID and password text boxes
id_hwnd, pwd_hwnd = get_Hwnd()
ID = get_Text(id_hwnd)
# If the data has not been generated yet, read it again
while len(ID) < 6:
# assumes TeamViewer itself is running normally
id_hwnd, pwd_hwnd = get_Hwnd()
ID = get_Text(id_hwnd)
Password = get_Text(pwd_hwnd)
print("ID:",ID, "Password:",Password)
return ID, Password
| 8,651
|
def tokenize_words(text):
"""Word segmentation"""
output = []
sentences = split_2_short_text(text, include_symbol=True)
for sentence, idx in sentences:
if is_chinese_string(sentence):
import jieba
output.extend(jieba.lcut(sentence))
else:
output.extend(whitespace_tokenize(sentence))
return output
| 8,652
|
def p_expr(p):
"""
expr : comparer
"""
p[0] = p[1]
| 8,653
|
def segmentation_model_func(output_channels, backbone_name, backbone_trainable=True):
""" Creates a segmentation model with the tf.keras functional api.
Args:
output_channels: number of output_channels (classes)
backbone_name: name of backbone; either: 'vgg19', 'resnet50', 'resnet50v2', 'mobilenetv2', 'resnet101'
backbone_trainable: whether the backbone weights are trainable
Returns:
tf.keras functional model
"""
down_stack = create_backbone(name=backbone_name, set_trainable=backbone_trainable)
skips = [down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][0]).output,
down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][1]).output,
down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][2]).output,
down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][3]).output,
down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][4]).output]
up_stack_filters = [64, 128, 256, 512]
x = skips[-1]
skips = reversed(skips[:-1])
up_stack_filters = reversed(up_stack_filters)
# Upsampling and establishing the skip connections
for skip, filters in zip(skips, up_stack_filters):
x = simple_upblock(x, filters, 3, 'up_stack' + str(filters))
x = tf.keras.layers.Concatenate()([x, skip])
# x = simple_upblock_func(x, 32, 3, 'up_stack' + str(32))
x = tf.keras.layers.UpSampling2D(2)(x)
x = tf.keras.layers.Conv2D(32, 3, activation='relu', padding='same')(x)
x = tf.keras.layers.Conv2D(output_channels, 1, activation='softmax', padding='same', name='final_output')(x)
return tf.keras.Model(inputs=down_stack.layers[0].input, outputs=x)
| 8,654
|
def EventAddKwargs(builder, kwargs):
"""This method is deprecated. Please switch to AddKwargs."""
return AddKwargs(builder, kwargs)
| 8,655
|
def generate_sidecar(events, columns_selected):
""" Generate a JSON sidecar template from a BIDS-style events file.
Args:
events (EventInput): An events input object to generate sidecars from.
columns_selected (dict): A dictionary of columns selected.
Returns:
dict: A dictionary of results in standard format including either the generated sidecar string or errors.
"""
columns_info = BidsTsvSummary.get_columns_info(events.dataframe)
hed_dict = {}
for column_name, column_type in columns_selected.items():
if column_name not in columns_info:
continue
if column_type:
column_values = list(columns_info[column_name].keys())
else:
column_values = None
hed_dict[column_name] = generate_sidecar_entry(column_name, column_values=column_values)
display_name = events.name
file_name = generate_filename(display_name, name_suffix='_generated', extension='.json')
return {base_constants.COMMAND: base_constants.COMMAND_GENERATE_SIDECAR,
base_constants.COMMAND_TARGET: 'events',
'data': json.dumps(hed_dict, indent=4),
'output_display_name': file_name, 'msg_category': 'success',
'msg': 'JSON sidecar generation from event file complete'}
| 8,656
|
def start_rasa_server(run_event):
""" write endpoints file and start rasa server """
print('START RASA SERVER')
if os.getenv('RASA_ACTIONS_URL') and len(
os.getenv('RASA_ACTIONS_URL')) > 0:
# ensure rasa endpoints file matches RASA_ACTIONS_URL env var
endpoints_file = open(
os.path.join(
os.path.dirname(__file__),
'../rasa/endpoints.yml'),
"r")
endpoints = yaml.load(endpoints_file.read(), Loader=yaml.FullLoader)
endpoints['action_endpoint'] = {"url": os.getenv('RASA_ACTIONS_URL')}
# write updates
with open(os.path.join(os.path.dirname(__file__), '../rasa/endpoints.yml'), 'w') as outfile:
yaml.dump(endpoints, outfile, default_flow_style=False)
cmd = ['rasa', 'run', '--enable-api']
process2 = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
shell=False,
cwd=os.path.join(
os.path.dirname(__file__),
'../rasa'))
while run_event.is_set():
time.sleep(1)
process2.terminate()
process2.wait()
| 8,657
|
def eurosense_to_unified(eurosense: IO, unified: IO):
"""
Do the XML conversion from the Eurosense format to the Unified format. Note
that this only deals with XML and doesn't convert other things like synset
ids. For the full conversion pipeline see eurosense2unified in
`pipeline.py`.
"""
write_header(unified, "eurosense")
for sent_id, sent_elem in iter_sentences_eurosense(eurosense):
unified.write('<sentence id="{}">\n'.format(sent_id))
trie = pygtrie.StringTrie(separator=" ")
anns = sent_elem.xpath(".//annotation")
for ann in anns:
trie[ann.attrib["anchor"]] = (ann.text, ann.attrib["lemma"])
sent = sent_elem.xpath("text")[0].text
cursor = 0
while cursor < len(sent):
match_anchor, match_val = trie.longest_prefix(sent[cursor:])
if match_anchor:
sense_key, lemma = match_val
pos = WN_UNI_POS_MAP[sense_key[-1]]
unified.write(
'<instance lemma="{}" pos="{}" key="{}">{}</instance>\n'.format(
lemma, pos, sense_key, match_anchor
)
)
cursor += len(match_anchor) + 1
else:
end_pos = sent.find(" ", cursor)
if end_pos == -1:
break
unified.write("<wf>{}</wf>\n".format(escape(sent[cursor:end_pos])))
cursor = end_pos + 1
unified.write("</sentence>\n")
unified.write("</text>\n")
unified.write("</corpus>\n")
| 8,658
|
def _filter_artifacts(artifacts, relationships):
"""
Remove artifacts from the main list if they are a child package of another package.
Package A is a child of Package B if all of Package A's files are managed by Package B per its file manifest.
The most common examples are python packages that are installed via dpkg or rpms.
:param artifacts:
:param relationships:
:return:
"""
def filter_fn(artifact):
# some packages are owned by other packages (e.g. a python package that was installed
# from an RPM instead of with pip), filter out any packages that are not "root" packages.
if _filter_relationships(
relationships, child=dig(artifact, "id"), type="ownership-by-file-overlap"
):
return False
return True
return [a for a in artifacts if filter_fn(a)]
| 8,659
|
def get_augmented_image_palette(img, nclusters, angle):
"""
Return tuple of (Image, Palette) in LAB space
color shifted by the angle parameter
"""
lab = rgb2lab(img)
ch_a = lab[...,1]
ch_b = lab[...,2]
theta = np.deg2rad(angle)
rot = np.array([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])
hue_rotate = lambda ab: np.dot(rot, [ab[0], ab[1]])
ab = np.asarray(list(map(hue_rotate, zip(ch_a, ch_b)))).transpose((0, 2, 1))
lab = np.dstack((lab[...,0], ab[...,0], ab[...,1]))
palette = kmeans_get_palette(lab, nclusters)
return (lab, palette)
| 8,660
|
def autodetect(uri: str, **kwargs) -> intake.source.DataSource:
"""
Autodetect intake source given URI.
Keyword arguments are passed to the source constructor.
If no other source is more suitable, it returns an instance of :class:`intake_io.source.ImageIOSource`, which uses
`imageio <https://github.com/imageio/imageio>`_.
This function doesn't check whether the data can actually be loaded.
:param uri:
URI (e.g. file system path or URL)
:param kwargs:
Arguments passed to the source constructor
:return:
Data source
"""
luri = uri.lower()
lext = os.path.splitext(luri)[-1]
if lext == ".nrrd":
return source.NrrdSource(uri, **kwargs)
elif lext in (".tif", ".tiff"):
return source.TifSource(uri, **kwargs)
elif luri.endswith(".nii.gz") or lext == ".nii":
return source.NiftiSource(uri, **kwargs)
elif lext in (".dicom", ".dcm"):
return source.DicomSource(uri, **kwargs)
elif luri.endswith(".dicom.zip") or luri.endswith(".dcm.zip"):
return source.DicomZipSource(uri, **kwargs)
elif lext == ".klb":
return source.KlbSource(uri, **kwargs)
elif luri.endswith(".ome.tif") or luri.endswith(".ome.tiff") \
or lext not in (".tif", ".tiff", ".png", ".jpg", ".gif", ".mp4"):
return source.BioformatsSource(uri, **kwargs)
else:
return source.ImageIOSource(uri, **kwargs)
| 8,661
|
def test_send_activation_token_to_user(default_settings, user):
""" Deliver a contact email. """
with current_app.test_request_context():
with mail.record_messages() as outbox:
send_activation_token(user)
assert len(outbox) == 1
assert "/auth/activate" in outbox[0].body # from /auth/activate/<token>
assert "/auth/activate" in outbox[0].html
| 8,662
|
def squeeze__default(ctx, g, self, dim=None):
"""Register default symbolic function for `squeeze`.
squeeze might be exported with IF node in ONNX, which is not supported in
lots of backend.
"""
if dim is None:
dims = []
for i, size in enumerate(self.type().sizes()):
if size == 1:
dims.append(i)
else:
dims = [sym_help._get_const(dim, 'i', 'dim')]
return g.op('Squeeze', self, axes_i=dims)
| 8,663
|
def _call_create_pref(a, t, e):
"""
Handler for pref() and user_pref() calls in defaults/preferences/*.js files
to ensure that they don't touch preferences outside of the "extensions."
branch.
"""
if not t.im_self.filename.startswith("defaults/preferences/") or len(a) == 0:
return
value = str(t(a[0]).get_literal_value())
from predefinedentities import BANNED_PREF_BRANCHES, BANNED_PREF_REGEXPS
for banned in BANNED_PREF_BRANCHES:
if value.startswith(banned):
return ("Extensions should not alter preferences in the '%s' "
"preference branch" % banned)
for banned in BANNED_PREF_REGEXPS:
if re.match(banned, value):
return ("Extensions should not alter preferences matching /%s/"
% banned)
if not value.startswith("extensions.") or value.rindex(".") < len("extensions."):
return ("Extensions should not alter preferences outside of the "
"'extensions.' preference branch. Please make sure that "
"all of your extension's preferences are prefixed with "
"'extensions.add-on-name.', where 'add-on-name' is a "
"distinct string unique to and indicative of your add-on.")
| 8,664
|
def pig_action_utility(state, action, utility):
"""The expected value of choosing action in state.Assumes opponent also plays with optimal strategy.
An action is one of ["roll", "hold", "accept", decline", "double"]
"""
if action == 'roll':
one = iter([1])
rest = iter([2, 3, 4, 5, 6])
return (-utility(do(action, state, one)) + sum(utility(do(action, state, rest)) for _ in range(5))) / 6.0
else:
return -utility(do(action, state, fair_die_rolls()))
| 8,665
|
def to_canonical_url(url):
"""
Converts a url into a "canonical" form, suitable for hashing. Keeps only scheme,
domain and path. Ignores url query, fragment, and all other parts of the url.
:param url: a string
:return: a string
"""
parsed_url = urlparse(url)
return urlunparse([
parsed_url.scheme,
parsed_url.netloc,
parsed_url.path,
'',
'',
''
])
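# Hypothetical usage sketch: query and fragment are dropped, scheme/host/path kept.
assert to_canonical_url("https://example.com/a/b?q=1#frag") == "https://example.com/a/b"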
| 8,666
|
def find_and_get_references(arg: Any) -> tuple[OutputReference, ...]:
"""
Find and extract output references.
This function works on nested inputs. For example, lists or dictionaries
(or combinations of list and dictionaries) that contain output references.
Parameters
----------
arg
The argument to search for references.
Returns
-------
tuple[OutputReference]
The output references as a tuple.
"""
from pydash import get
from jobflow.utils.find import find_key_value
if isinstance(arg, OutputReference):
# if the argument is a reference then stop there
return tuple([arg])
elif isinstance(arg, (float, int, str, bool)):
# argument is a primitive, we won't find a reference here
return tuple()
arg = jsanitize(arg, strict=True, enum_values=True)
# recursively find any reference classes
locations = find_key_value(arg, "@class", "OutputReference")
# deserialize references and return
return tuple(OutputReference.from_dict(get(arg, loc)) for loc in locations)
| 8,667
|
def get_fdr_thresh(p_values, alpha=0.05):
"""
Calculate the false discovery rate (FDR) multiple comparisons correction threshold for a list of p-values.
:param p_values: list of p-values
:param alpha: the uncorrected significance level being used (default = 0.05)
:type p_values: numpy array
:type alpha: float
:returns: The FDR correction threshold
:rtype: float
"""
sn = np.sort(p_values)
sn = sn[np.isfinite(sn)]
for i in range(len(sn)):
p_crit = alpha * float(i+1) / float(len(sn))
if sn[i] <= p_crit:
continue
else:
break
return sn[i]
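# Hypothetical usage sketch with made-up p-values and alpha = 0.05.
import numpy as np
pvals = np.array([0.001, 0.008, 0.039, 0.041, 0.20, 0.74])
print(get_fdr_thresh(pvals, alpha=0.05))  # p-values below the returned threshold pass the FDR correction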
| 8,668
|
def ProcessHighlightColorsRandomOption():
"""Process highlight colors random option"""
OptionsInfo["HighlightColorsRandom"] = None
OptionsInfo["HighlightColorsRandomType"] = None
OptionsInfo["HighlightColorsRandomList"] = None
HighlightColors = "colorclass,table-primary,table-success,table-danger,table-info,table-warning,table-secondary"
if not re.match("^auto$", Options["--highlightColorsRandom"], re.I):
HighlightColors = Options["--highlightColorsRandom"].strip()
if MiscUtil.IsEmpty(HighlightColors):
MiscUtil.PrintError("The value specified using \"--highlightColorsRandom\" is empty.")
OptionsInfo["HighlightColorsRandom"] = re.sub(" ", "", HighlightColors)
HighlightColorsList = [Color.lower() for Color in OptionsInfo["HighlightColorsRandom"].split(",")]
if len(HighlightColorsList) <= 1:
MiscUtil.PrintError("The number of comma delimited paramater names and values, %d, specified using \"--highlightColorsRandom\" option must be > 1." % (len(HighlightColorsList)))
ColorsType = HighlightColorsList[0]
ColorsList = HighlightColorsList[1:]
if not re.match("^(colorclass|colorspec)$", ColorsType, re.I):
MiscUtil.PrintError("The color type, %s, specified using \"--highlightColorsRandim\" option is not valid. Supported values: colorclass or colorspec." % ColorsType)
if re.match("^colorclass$", ColorsType, re.I):
CheckOptionTableClassColorValues("--highlightColorsRandom", ColorsList)
OptionsInfo["HighlightColorsRandomList"] = ColorsList
OptionsInfo["HighlightColorsRandomType"] = ColorsType
| 8,669
|
def _rescue_filter(
flags: RescueRenderFlags, platform_filter: typing.Optional[Platforms], rescue: Rescue
) -> bool:
"""
determine whether the `rescue` object is one we care about
Args:
flags: render flags selecting which rescues to include
platform_filter: optional platform to restrict the listing to
rescue: the rescue being checked
Returns:
bool
"""
filters = []
if flags.filter_unassigned_rescues:
# return whether any rats are assigned
# either properly or via unidentified rats
filters.append(not (bool(rescue.rats) or bool(rescue.unidentified_rats)))
# use the active bool on rescue if we don't want inactives, otherwise True
if flags.filter_active_rescues:
filters.append(rescue.active)
if flags.filter_inactive_rescues:
filters.append(not rescue.active)
if platform_filter: # if we are filtering on platform
filters.append(rescue.platform is platform_filter)
return not all(filters)
| 8,670
|
def penalty(precision, alpha, beta, psi):
"""Penalty for time-varying graphical lasso."""
if isinstance(alpha, np.ndarray):
obj = sum(a[0][0] * m for a, m in zip(alpha, map(l1_od_norm, precision)))
else:
obj = alpha * sum(map(l1_od_norm, precision))
if isinstance(beta, np.ndarray):
obj += sum(b[0][0] * m for b, m in zip(beta, map(psi, precision[1:] - precision[:-1])))
else:
obj += beta * psi(precision[1:] - precision[:-1])
return obj
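
# Hypothetical usage sketch.  The function above relies on a module-level
# l1_od_norm helper that is not shown here; a plausible stand-in (the l1 norm of
# the off-diagonal entries) is defined so the call runs.
import numpy as np

def l1_od_norm(A):
    return np.abs(A).sum() - np.abs(np.diag(A)).sum()

K = np.stack([np.eye(3), 1.1 * np.eye(3)])  # precision matrices at two time steps
print(penalty(K, alpha=0.1, beta=0.5, psi=lambda d: np.abs(d).sum()))  # ~0.15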
| 8,671
|
def all_ctd_descriptors(
G: nx.Graph, aggregation_type: Optional[List[str]] = None
) -> nx.Graph:
"""
    Calculate all CTD descriptors based on seven different properties of AADs.
:param G: Protein Graph to featurise
:type G: nx.Graph
:param aggregation_type: Aggregation types to use over chains
:type aggregation_type: List[Optional[str]]
:return: Protein Graph with ctd_descriptors feature added.
G.graph["ctd_descriptors_{chain | aggregation_type}"]
:rtype: nx.Graph
"""
from propy.CTD import CalculateCTD
func = CalculateCTD
feature_name = "ctd_descriptors"
return compute_propy_feature(
G,
func=func,
feature_name=feature_name,
aggregation_type=aggregation_type,
)
| 8,672
|
def get_report_hash(report: Report, hash_type: HashType) -> str:
""" Get report hash for the given diagnostic. """
hash_content = None
if hash_type == HashType.CONTEXT_FREE:
hash_content = __get_report_hash_context_free(report)
elif hash_type == HashType.PATH_SENSITIVE:
hash_content = __get_report_hash_path_sensitive(report)
elif hash_type == HashType.DIAGNOSTIC_MESSAGE:
hash_content = __get_report_hash_diagnostic_message(report)
else:
raise Exception("Invalid report hash type: " + str(hash_type))
return __str_to_hash('|||'.join(hash_content))
| 8,673
|
def parse_json_file(json_file_path, allow_non_standard_comments=False):
"""
Parse a json file into a utf-8 encoded python dictionary
:param json_file_path: The json file to parse
:param allow_non_standard_comments: Allow non-standard comment ('#') tags in the file
:return: Dictionary representation of the json file
"""
def _decode_list(list_data):
rv = []
for item in list_data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(dict_data):
rv = {}
for key, value in dict_data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
try:
if allow_non_standard_comments:
# If we are reading non-standard json files where we are accepting '#' as comment tokens, then the
# file must have CR/LF characters and will be read in line by line.
with open(json_file_path) as json_file:
json_lines = json_file.readlines()
json_file_content = ""
for json_line in json_lines:
comment_index = json_line.find('#')
literal_pound_index = json_line.find('##')
                if comment_index >= 0 and comment_index != literal_pound_index:
                    processed_line = json_line.split('#')[0].strip()
                else:
                    if literal_pound_index >= 0:
                        processed_line = json_line.replace('##', '#').strip()
else:
processed_line = json_line.strip()
json_file_content += processed_line
else:
with open(json_file_path) as json_file:
json_file_content = json_file.read()
json_file_data = json.loads(json_file_content, object_hook=_decode_dict)
return json_file_data
except Exception as e:
raise ValueError('Error reading {}: {}'.format(json_file_path, e.message))
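
# Hypothetical usage sketch of the comment convention handled above (written for
# Python 2, like the helper itself): '#' starts a comment, '##' escapes a literal '#'.
demo = '\n'.join([
    '{',
    '  "name": "demo",   # trailing comments are stripped',
    '  "progress": "50 ## done"',
    '}',
])
with open('demo.json', 'w') as fh:
    fh.write(demo)
print(parse_json_file('demo.json', allow_non_standard_comments=True))
# {'name': 'demo', 'progress': '50 # done'}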
| 8,674
|
def update_softwaretitle_packages(api, jssid, pkgs):
"""
Update packages of software title
:param jssid: Patch Software Title ID
:param pkgs: dict of {version: package, ...}
:returns: None
"""
logger = logging.getLogger(__name__)
data = api.get(f"patchsoftwaretitles/id/{jssid}")
title = data['patch_software_title']
title_name = title['name']
logger.info(f"updating patch software title: {title_name} ({jssid})")
# single version (dict), multiple versions (list)
version = title['versions']['version']
_modified = False
try:
# access key of single version and count on TypeError being raised
v = version['software_version']
if v in pkgs.keys():
version['package'] = {'name': pkgs[v]}
_modified = True
except TypeError:
# looks like it was actually a list
for _version in version:
v = _version['software_version']
if v in pkgs.keys():
_version['package'] = {'name': pkgs[v]}
_modified = True
if _modified:
result = api.put(f"patchsoftwaretitles/id/{jssid}", data)
logger.info(f"succesfully updated: {title_name}")
return result
else:
logger.info(f"software title was not modified")
| 8,675
|
def get_balances(session: Session, redis: Redis, user_ids: List[int]):
"""Gets user balances.
Returns mapping { user_id: balance }
Enqueues in Redis user balances requiring refresh.
"""
# Find user balances
query: List[UserBalance] = (
(session.query(UserBalance)).filter(UserBalance.user_id.in_(user_ids)).all()
)
# Construct result dict from query result
result = {
user_balance.user_id: {
"owner_wallet_balance": user_balance.balance,
"associated_wallets_balance": user_balance.associated_wallets_balance,
"associated_sol_wallets_balance": user_balance.associated_sol_wallets_balance,
"waudio_balance": user_balance.waudio,
"total_balance": str(
int(user_balance.balance)
+ int(user_balance.associated_wallets_balance)
+ int(user_balance.associated_sol_wallets_balance)
* 10 ** WAUDIO_DECIMALS
+ int(user_balance.waudio) * 10 ** WAUDIO_DECIMALS
),
}
for user_balance in query
}
# Find user_ids that don't yet have a balance
user_ids_set = set(user_ids)
fetched_user_ids_set = {x.user_id for x in query}
needs_balance_set = user_ids_set - fetched_user_ids_set
# Add new balances to result set
no_balance_dict = {
user_id: {
"owner_wallet_balance": "0",
"associated_wallets_balance": "0",
"associated_sol_wallets_balance": "0",
"total_balance": "0",
"waudio_balance": "0",
}
for user_id in needs_balance_set
}
result.update(no_balance_dict)
# Get old balances that need refresh
needs_refresh = [
user_balance.user_id
for user_balance in query
if does_user_balance_need_refresh(user_balance)
]
# Enqueue new balances to Redis refresh queue
# 1. All users who need a new balance
# 2. All users who need a balance refresh
enqueue_lazy_balance_refresh(redis, list(needs_balance_set) + needs_refresh)
return result
| 8,676
|
def set_pixel(pixel_num, brightness):
"""Set one pixel in both 16-pixel rings. Pass in pixel index (0 to 15)
and relative brightness (0.0 to 1.0). Actual resulting brightness
will be a function of global brightness and gamma correction."""
# Clamp passed brightness to 0.0-1.0 range,
# apply global brightness and gamma correction
brightness = max(min(brightness, 1.0), 0.0) * PIXEL_BRIGHTNESS
brightness = pow(brightness, PIXEL_GAMMA) * 255.0
# local_color is adjusted brightness applied to global PIXEL_COLOR
local_color = (
int(PIXEL_COLOR[0] * brightness + 0.5),
int(PIXEL_COLOR[1] * brightness + 0.5),
int(PIXEL_COLOR[2] * brightness + 0.5))
# Roll over pixel_num as needed to 0-15 range, then store color
pixel_num_wrapped = (pixel_num + RING_1_OFFSET) & 15
PIXELS[pixel_num_wrapped] = local_color
# Determine corresponding pixel for second ring. Mirror direction if
# configured for such, correct for any rotational difference, then
# perform similar roll-over as above before storing color.
if RING_2_FLIP:
pixel_num = 15 - pixel_num
pixel_num_wrapped = 16 + ((pixel_num + RING_2_OFFSET) & 15)
PIXELS[pixel_num_wrapped] = local_color
| 8,677
|
def kmeans(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
"""Partition observations into k clusters.
Parameters
----------
observations : ndarray, `shape (N, 2)` or `shape (N, 3)`
An array of observations (x, y) to be clustered.
Data should be provided as:
`[(x, y), (x, y), (x, y), ...]`
or
`[(x, y, z), (x, y, z), (x, y, z), ...]`
k : int, optional
        Number of clusters to partition observations into, by default 5
Returns
-------
center : ndarray, `shape (k, 2)` or `shape (k, 3)`
An array of positions to center of each cluster.
count : ndarray, `shape (k, )`
Array of counts of datapoints closest to the center of its cluster.
Examples
-------
>>> observations = [[6, 1], [-4, -4], [1, -7], [9, -2], [6, -6]]
>>> center, count = kmeans_2d(observations, k=2)
>>> center
    [[-4, -4], [5, -3]]
>>> count
[1, 4]
"""
if not isinstance(observations, ndarray):
raise TypeError("Observations must be a ndarray.")
if observations.shape[-1] == 3:
return kmeans_3d(observations, k)
elif observations.shape[-1] == 2:
return kmeans_2d(observations, k)
    else:
        raise ValueError("Observations must have shape (N, 2) or (N, 3).")
| 8,678
|
def encloses(coord, points):
""" """
sc = constants.CLIPPER_SCALE
coord = st(coord.to_list(), sc)
points = st(points, sc)
return pyclipper.PointInPolygon(coord, points) != 0
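
# Hypothetical sketch of the same test using pyclipper directly, assuming the
# module-level `st` above is pyclipper.scale_to_clipper and CLIPPER_SCALE is the
# usual integer scaling factor.
import pyclipper

square = pyclipper.scale_to_clipper([(0, 0), (10, 0), (10, 10), (0, 10)])
point = pyclipper.scale_to_clipper([(5, 5)])[0]
print(pyclipper.PointInPolygon(point, square) != 0)  # True -> inside or on boundary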
| 8,679
|
def cli_transpose_mat_vec(mat_shape, vec_shape, optimize, **kwargs):
"""``A -> A^T, A^Tx = b``."""
i, j = dimensions('i j')
A = Function(name='A', shape=mat_shape, dimensions=(i, j))
x = Function(name='x', shape=vec_shape, dimensions=(j,))
b = Function(name='b', shape=vec_shape, dimensions=(i,))
transpose_mat_vec(A, x, b, optimize)
| 8,680
|
def arpls(y, lam, ratio=1e-6, niter=1000, progressCallback=None):
"""
Return the baseline computed by asymmetric reweighted penalized least squares smoothing, arPLS.
Ref: Baseline correction using asymmetrically reweighted penalized least squares smoothing
Sung-June Baek, Aaron Park, Young-Jin Ahn and Jaebum Choo
Analyst, 2015, 140, 250-257. DOI: 10.1039/C4AN01061B
In this implementation, W is not squared so p carries the same meaning as in AsLS.
Parameters:
y: one spectrum to correct, or multiple as an array of shape (spectrum, wavenumber)
lam: lambda, the smoothness parameter
ratio: convergence criterion; target relative change in weights between iterations
niter: maximum number of iterations
    progressCallback(int a, int b): callback function called to indicate that the processing
is complete to a fraction a/b.
Returns: baseline of the spectrum, measured at the same points
"""
L = y.shape[-1]
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
D = lam * D.dot(D.T)
def arpls_one(yy):
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
z = sparse.linalg.spsolve(W + D, w * yy)
d = yy - z
dn = d[d < 0]
s = dn.std()
wt = 1. / (1 + np.exp(2 / s * (d - (2*s-dn.mean()))))
if np.linalg.norm(w - wt) / np.linalg.norm(w) < ratio:
break
w = wt
return z
return mp_bgcorrection(arpls_one, y, progressCallback=progressCallback)
| 8,681
|
async def test_function_raised_exception(dut):
"""
Test that exceptions thrown by @function coroutines can be caught
"""
@cocotb.function
async def func():
raise ValueError()
@external
def ext():
return func()
with pytest.raises(ValueError):
await ext()
| 8,682
|
def dismiss_notification_mailbox(notification_mailbox_instance, username):
"""
    Dismisses a Notification Mailbox entry.
    It deletes the Mailbox entry for the given user.
Args:
notification_mailbox_instance (NotificationMailBox): notification_mailbox_instance
username (string)
Return:
bool: Notification Mailbox Dismissed
"""
profile_instance = get_self(username)
NotificationMailBox.objects.filter(target_profile=profile_instance, pk=notification_mailbox_instance.id).delete()
return True
| 8,683
|
def knn(x, y, k, predict_x):
"""
knn算法实现,使用欧氏距离
:param x: 样本值
:param y: 标签
:param k: 个数
:return:
"""
assert isinstance(y, np.ndarray)
y = y.flatten('F')
def cal_distance(a, b):
return np.sqrt(np.sum(np.power(a - b, 2), axis=0))
    dists = {}
    for (index, sample) in enumerate(x):
        dists[index] = cal_distance(sample, predict_x)
    # keep the k training samples closest to predict_x
    k_sample = sorted(dists.items(), key=operator.itemgetter(1))[:k]
    k_labels = y[[key for (key, value) in k_sample]]
    # count the votes for each label among the k neighbours
    counters = {}
    for label in k_labels:
        counters[label] = counters.get(label, 0) + 1
    # return the (label, count) pair with the most votes
    return sorted(counters.items(), key=operator.itemgetter(1), reverse=True)[0]
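
# Hypothetical usage sketch for the kNN helper above (with the majority-vote
# return): two clusters, query point near the second one.
import operator
import numpy as np

x = np.array([[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.5, 8.2]])
y = np.array([0, 0, 1, 1])
label, votes = knn(x, y, k=3, predict_x=np.array([7.8, 8.1]))
print(label, votes)  # 1 2 -> class 1 wins with two of the three nearest neighbours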
| 8,684
|
def piano():
"""A piano instrument."""
return lynames.Instrument('Piano', abbr='Pno.', transposition=None,
keyboard=True, midi='acoustic grand',
family='percussion', mutopianame='Piano')
| 8,685
|
def extract_fields(obj: Any) -> Dict[str, Any]:
"""A recursive function that extracts all fields in a Django model, including related fields (e.g. many-to-many)
:param obj: A Django model
:return: A dictionary containing fields and associated values
"""
sub_content = {}
if obj is not None:
# Gets a list of any Django model fields
fields = type(obj)._meta.get_fields()
for field in fields:
if issubclass(field.__class__, ForeignKey):
sub_content[field.name] = extract_fields(getattr(obj, field.name))
elif issubclass(field.__class__, RelatedField):
sub_content[field.name] = [extract_fields(sub_obj) for sub_obj in list(getattr(obj, field.name).all())]
elif issubclass(field.__class__, Field):
sub_content[field.name] = getattr(obj, field.name)
return sub_content
| 8,686
|
def test_student_group(student_group):
""" Студенческая группа """
assert str(student_group) == 'Б-ИВТ-19-1'
assert student_group.get_education_year(4040) == 1
assert student_group.get_education_year(4041) == 2
| 8,687
|
def test_non_empty_group(test_file_name,compression_kwargs):
""" Test if attempting to dump to a group with data fails """
hickle.dump(None, test_file_name,**compression_kwargs)
with pytest.raises(ValueError):
dump(None, test_file_name, 'r+',**compression_kwargs)
| 8,688
|
def decomposeM(modified):
"""Auxiliary in provenance filtering: split an entry into name and date."""
splits = [m.rsplit(ON, 1) for m in modified]
return [(m[0], dtm(m[1].replace(BLANK, T))[1]) for m in splits]
| 8,689
|
def main():
"""
This function is where the program begins.
It is the general menu for the hotel.
"""
clear()
print("Welcome. This is Summer Square Hotel\n")
while True:
print("\nWhat may we offer you today?\n")
print("1. View our rooms")
print("2. Look at our prices")
print("3. Check out the discounts")
print("4. Login")
print("5. Register")
print("6. Exit")
choice = input("Option: ")
choice = int(choice)
if choice == 1:
clients.view_rooms()
elif choice == 2:
clients.check_prices()
elif choice == 3:
clients.check_discounts()
elif choice == 4:
login()
elif choice == 5:
register()
elif choice == 6:
exit()
else:
print("Wrong input\n")
| 8,690
|
def update(data):
"""
TODO:
    find a way to call collection.findOneAndUpdate(); currently pymodm's .update()
    only returns the number of updated records.
"""
try:
required_fields = ['id']
validator.validate_required_fields(required_fields, data)
cleaned_data = user_prevalidation(data)
updated_data = {key: val for key, val in cleaned_data.items()
if val is not None}
db_id = updated_data.pop('id')
_get_user({'id': db_id}) # call to validate if user exist
user_entitymanager.update(db_id, updated_data)
user = _get_user({'id': db_id}) # call to get the updated data.
return flask_helper.ResponseHelper(user.to_dict(), http_status_code.OK)
except Exception as e:
logging.error(e)
raise
| 8,691
|
def get_2d_peaks_coords(
data: np.ndarray, size: int = None, threshold: float = 0.5
) -> np.ndarray:
"""Detect peaks in image data, return coordinates.
    If the neighborhood size is None, the default value is the highest value
    between 50 pixels and 1/40th of the smallest image dimension.
    The detection threshold is relative to the difference between data maximum and minimum.
"""
if size is None:
size = max(min(data.shape) // 40, 50)
data_max = spf.maximum_filter(data, size)
data_min = spf.minimum_filter(data, size)
data_diff = data_max - data_min
abs_threshold = (data_diff.max() - data_diff.min()) * threshold
diff = (data_max - data_min) > abs_threshold
maxima = data == data_max
maxima[diff == 0] = 0
labeled, _num_objects = spi.label(maxima)
slices = spi.find_objects(labeled)
coords = []
for dy, dx in slices:
x_center = int(0.5 * (dx.start + dx.stop - 1))
y_center = int(0.5 * (dy.start + dy.stop - 1))
coords.append((x_center, y_center))
if len(coords) > 1:
# Eventually removing duplicates
dist = distance_matrix(coords)
for index in reversed(np.unique(np.where((dist < size) & (dist > 0))[1])):
coords.pop(index)
return np.array(coords)
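
# Hypothetical usage sketch with a single synthetic Gaussian peak.  The function
# relies on module-level aliases `spf`/`spi` (apparently scipy.ndimage) and a local
# `distance_matrix` helper; with only one peak the de-duplication branch that
# needs `distance_matrix` is never reached.
import numpy as np
import scipy.ndimage as spf
import scipy.ndimage as spi

yy, xx = np.mgrid[0:200, 0:200]
img = np.exp(-((xx - 120) ** 2 + (yy - 80) ** 2) / (2 * 15.0 ** 2))
print(get_2d_peaks_coords(img))  # roughly [[120  80]]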
| 8,692
|
def border_positions_from_texts(texts, direction, only_attr=None):
"""
From a list of textboxes in <texts>, get the border positions for the respective direction.
For vertical direction, return the text boxes' top and bottom border positions.
For horizontal direction, return the text boxes' left and right border positions.
<direction> must be DIRECTION_HORIZONTAL or DIRECTION_VERTICAL from pdftabextract.common.
optional <only_attr> must be either 'low' (only return 'top' or 'left' borders) or 'high' (only return 'bottom' or
'right').
Border positions are returned as sorted NumPy array.
"""
if direction not in (DIRECTION_HORIZONTAL, DIRECTION_VERTICAL):
raise ValueError("direction must be DIRECTION_HORIZONTAL or DIRECTION_VERTICAL (see pdftabextract.common)")
if only_attr is not None and only_attr not in ('low', 'high'):
raise ValueError("only_attr must be either 'low' or 'high' if not set to None (default)")
if direction == DIRECTION_VERTICAL:
attr_lo = 'top'
attr_hi = 'bottom'
else:
attr_lo = 'left'
attr_hi = 'right'
positions = []
for t in texts:
if only_attr is None or only_attr == 'low':
positions.append(t[attr_lo])
if only_attr is None or only_attr == 'high':
positions.append(t[attr_hi])
return np.array(sorted(positions))
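
# Hypothetical usage sketch with two dict-style textboxes, assuming the direction
# constants are imported from pdftabextract.common as the docstring describes.
import numpy as np
from pdftabextract.common import DIRECTION_HORIZONTAL, DIRECTION_VERTICAL

texts = [
    {'top': 10, 'bottom': 22, 'left': 5, 'right': 60},
    {'top': 30, 'bottom': 42, 'left': 5, 'right': 60},
]
print(border_positions_from_texts(texts, DIRECTION_VERTICAL))  # [10 22 30 42]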
| 8,693
|
def progress(self):
"""Check if foo can send to corge"""
return True
| 8,694
|
def write_sample_sdf(input_file_name, valid_list):
"""
Function for writing a temporary file with a subset of pre-selected
structures
:param input_file_name: name of input file
:param valid_list: list of indexes of pre-selected structures
:return: name of subsampled file
"""
sample_file_name = '{}_sample.sdf'.format(input_file_name.split('.')[0])
sample_file = open(sample_file_name, 'w')
mol = []
i = 0
for line in open(input_file_name):
mol.append(line)
if line[:4] == '$$$$':
i += 1
if i in valid_list:
for mol_line in mol:
sample_file.write(mol_line)
valid_list.remove(i)
mol = []
else:
mol = []
sample_file.close()
return sample_file_name
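
# Hypothetical usage sketch: write a tiny two-record SDF-like file and keep only
# the first record.  Note that the function mutates the list passed as valid_list.
records = "molA\n  fake block A\n$$$$\nmolB\n  fake block B\n$$$$\n"
with open('tiny.sdf', 'w') as fh:
    fh.write(records)
print(write_sample_sdf('tiny.sdf', [1]))  # 'tiny_sample.sdf', containing only molA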
| 8,695
|
def rotx(theta, unit="rad"):
"""
ROTX gives rotation about X axis
:param theta: angle for rotation matrix
:param unit: unit of input passed. 'rad' or 'deg'
:return: rotation matrix
rotx(THETA) is an SO(3) rotation matrix (3x3) representing a rotation
of THETA radians about the x-axis
rotx(THETA, "deg") as above but THETA is in degrees
"""
check_args.unit_check(unit)
if unit == "deg":
theta = theta * math.pi / 180
ct = math.cos(theta)
st = math.sin(theta)
mat = np.matrix([[1, 0, 0], [0, ct, -st], [0, st, ct]])
mat = np.asmatrix(mat.round(15))
return mat
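
# Quick check of the rotation helper above.  It calls a module-level
# `check_args.unit_check` validator that is not shown here, so a permissive
# stand-in is provided to make the sketch self-contained.
import math
import types
import numpy as np

check_args = types.SimpleNamespace(unit_check=lambda unit: None)
print(rotx(90, unit="deg"))
# [[ 1.  0.  0.]
#  [ 0.  0. -1.]
#  [ 0.  1.  0.]]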
| 8,696
|
def _table(*rows: Sequence) -> str:
"""
>>> _table(['a', 1, 'c', 1.23])
'|a|1|c|1.23|'
>>> _table(['foo', 0, None])
'|foo|||'
>>> print(_table(['multiple', 'rows', 0], ['each', 'a', 'list']))
|multiple|rows||
|each|a|list|
"""
return '\n'.join([
'|'.join(['', *[str(cell or '') for cell in row], '']) for row in rows
])
| 8,697
|
def test_workflow_preprocessors_not_list_error() -> None:
"""
preprocessors should be of type [`PluginFn`](../../plugin/test_plugins.html)
"""
with pytest.raises(TypeError):
_ = Workflow(preprocessors=10, postprocessors=[])
| 8,698
|
def trunc(s, n):
"""
Truncate a string to N characters, appending '...' if truncated.
trunc('1234567890', 10) -> '1234567890'
trunc('12345678901', 10) -> '1234567890...'
"""
if not s:
return s
return s[:n] + '...' if len(s) > n else s
| 8,699
|