content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def scan_year(visit, studyid='TON'):
    """
    Retrieve the year in which a scan was collected.

    Parameters
    ----------
    visit : str or int
        Visit number.  Strings are interpreted by their last character
        (e.g. 'visit2' -> 2).
    studyid : str, optional
        Specifies the study from which files will be retrieved. Valid
        values are 'THD' and 'TON'.

    Returns
    -------
    sc_year : int
        Actual scan year.

    Raises
    ------
    IndexError
        If the visit number is outside the study's span of years.
    """
    # Accept both '2' and 'visit2'-style strings by using the last character
    # (isinstance instead of the non-idiomatic `type(...) is str`).
    if isinstance(visit, str):
        visit = int(visit[-1:])
    # TON spans 2012-2014; any other study id falls back to THD's 2008-2011.
    years = [2012, 2013, 2014] if studyid == 'TON' else [2008, 2009, 2010, 2011]
    return years[visit - 1]
from typing import List
def shell_line_to_o_files_list(line: str) -> List[str]:
    """Extract the object-file (.o) entries from a whitespace-separated shell line."""
    object_files = []
    for token in line.split():
        if token.endswith(".o"):
            object_files.append(token)
    return object_files
def append_offset(name, offset):
    """
    Return a filename with the given offset inserted before the extension.

    Used for assigning a name with an offset when a file with the same
    name already exists.

    Example:
        append_offset('python.py', '2')  ->  'python_2.py'

    Parameters
    ----------
    name : str
        Original filename; expected to contain at least one dot.
    offset : str
        Offset to insert before the extension.

    Returns
    -------
    str
        Filename of the form '<stem>_<offset>.<extension>'.
    """
    # rsplit with maxsplit=1 keeps earlier dots inside the stem intact;
    # the original name.split('.') raised ValueError on names like 'a.b.py'.
    fname, extension = name.rsplit('.', 1)
    return ''.join([fname, '_', offset, '.', extension])
def _analyse_operator(operator, param_name='operator'):
"""Checks the properties of an operator and extracts the number of qubits.
Args:
operator: the operator to be analysed.
param_name: the parameter name as displayed in potential error messages.
Returns:
returns: number of qubits.
Raises:
ValueError: if operator does not have a valid shape or if it is not
unitary.
"""
if operator.ndim != 2:
raise ValueError(
'%s must be a 2D array (found: ndim=%d)'
%(param_name, operator.ndim)
)
rows, cols = operator.shape
if rows != cols:
raise ValueError(
'%s must be a square matrix [found: shape=(%d, %d)]'
%(param_name, rows, cols)
)
num_qubits = rows.bit_length()-1
if rows != 2 ** num_qubits:
raise ValueError(
'dimension of %s must be a power of 2 (found: dim=%d)'
%(param_name, rows)
)
return num_qubits | b2725dcef28351918647188f57fd36d072c7ed0d | 50,623 |
def get_data_directory_files(root_directory):
    """Recursively collect data files under the provided root directory.

    Walks the tree for .js and .txt files and returns their paths relative
    to the root, formatted as listed in a module manifest.

    Args:
        root_directory: pathlib.Path root directory to recursively walk through.

    Returns:
        A list of properly formatted relative filepath strings.
    """
    collected = []
    # Manifest files only contain .js or .txt files.
    for pattern in ('**/*.js', '**/*.txt'):
        for path in root_directory.glob(pattern):
            # Filter manifest or README files from the result set.
            if path.stem and path.stem not in ('manifest', 'README'):
                # Match the expected file format listed within the manifest.
                collected.append(str(path.relative_to(root_directory)))
    return collected
import sys
def parse_mixed_args(parser, args=None):
    """Parse args, allowing option/positional intermixing on Python >= 3.7."""
    version_code = 10 * sys.version_info.major + sys.version_info.minor
    if version_code >= 37:
        return parser.parse_intermixed_args(args)
    return parser.parse_args(args)
from typing import Any
from typing import Tuple
def _get_dict(
obj: Any,
) -> Tuple[bool, dict]:
""" Hack to work around the lack of __dict__ when __slots__ is used
Returns:
(has slots, dictionary of object fields)
"""
if isinstance(obj, dict):
return False, obj
has_slots = hasattr(obj, '__slots__')
if has_slots:
d = {k:getattr(obj, k) for k in obj.__slots__}
else:
d = obj.__dict__
return has_slots, d | f414aa3c5728722bc1995879d8ee2494f8c01b1f | 50,627 |
def comma_separated_str_to_list(s: str) -> list:
    """
    Split a comma separated string into a list.

    Args:
        s: string to convert

    Returns: list value
    """
    parts = s.split(',')
    return parts
def get_most_popular_talks_by_like_ratio(videos):
    """Return the pycon video list sorted by most likes relative to
    number of views, so 10 likes on 175 views ranks higher than
    12 likes on 300 views.  The dislikeCount is discounted from the
    likeCount before dividing by viewCount.

    Parameters
    ----------
    videos : iterable
        Video objects exposing a ``metrics`` mapping with (string) values
        for 'likeCount', 'dislikeCount' and 'viewCount'.

    Returns
    -------
    list
        Videos sorted by descending (likes - dislikes) / views ratio.
    """
    def like_ratio(video):
        # Metrics are stored as strings; convert before computing the ratio.
        metrics = video.metrics
        net_likes = int(metrics['likeCount']) - int(metrics['dislikeCount'])
        return net_likes / int(metrics['viewCount'])

    # Unreachable `pass` after the original return removed; lambda replaced
    # by a named helper for readability.
    return sorted(videos, key=like_ratio, reverse=True)
def check_buzz(number: int) -> str:
    """Return 'buzz' when *number* is divisible by five, else ''.

    Args:
        number (int): integer to check for divisibility by five

    Returns:
        str: 'buzz' if divisible by five, otherwise the empty string

    Examples:
        >>> check_buzz(3)
        ''
        >>> check_buzz(5)
        'buzz'
    """
    if number % 5 == 0:
        return "buzz"
    return ""
def pick(y, column):
    """A helper method to pick a specific output from the simulator node output.

    Allows one to create separate nodes for specific outputs, e.g. number of clusters.

    :param y: indexable simulator output (e.g. sequence or mapping)
    :param column: key or index of the desired output
    :return: the selected element ``y[column]``
    """
    return y[column]
def get_filename_without_ending(file_path):
    """
    Return the final path component without its last extension.

    :param file_path: forward-slash separated path or bare filename
    :return: filename with the trailing '.<ext>' removed, if any
    """
    # Take the component after the last '/' (a bare name passes through).
    filename = file_path.rsplit('/', 1)[-1]
    # Strip only the last extension ('a.tar.gz' -> 'a.tar').
    return filename.rsplit('.', 1)[0]
import torch
def compute_negative_entropy(
    inputs, attention_mask: torch.Tensor, return_values=False
):
    """Compute the negative entropy across layers of a network for given inputs.

    Args:
        inputs: tuple of length num_layers; each item in the form BHSS
            (batch, heads, seq_len, seq_len).
        attention_mask: tensor with dim BS; non-zero entries mark real tokens.
        return_values: if True, also return the per-sample token-level
            negative entropies (detached).

    Returns:
        Mean over the batch of the per-sample negative entropy summed across
        layers; optionally also the list of per-sample tensors.
    """
    inputs = torch.stack(inputs)  # LayersBatchHeadsSeqlenSeqlen
    assert inputs.ndim == 5, "Here we expect 5 dimensions in the form LBHSS"

    # average over attention heads
    pool_heads = inputs.mean(2)

    batch_size = pool_heads.shape[1]
    samples_entropy = list()
    neg_entropies = list()
    for b in range(batch_size):
        # get inputs from non-padded tokens of the current sample
        mask = attention_mask[b]
        sample = pool_heads[:, b, mask.bool(), :]
        sample = sample[:, :, mask.bool()]

        # get the negative entropy for each non-padded token
        neg_entropy = (sample.softmax(-1) * sample.log_softmax(-1)).sum(-1)
        if return_values:
            neg_entropies.append(neg_entropy.detach())

        # get the "average entropy" that traverses the layer
        mean_entropy = neg_entropy.mean(-1)

        # store the sum across all the layers
        samples_entropy.append(mean_entropy.sum(0))

    # average over the batch (debug print statements removed from original)
    final_entropy = torch.stack(samples_entropy).mean()
    if return_values:
        return final_entropy, neg_entropies
    else:
        return final_entropy
def is_rule_in_set(rule, rule_list):
    """Check if the given rule is present in the rule_list.

    :param rule: new rule to be added (dict)
    :param rule_list: list of existing rules in dictionary format
    :return boolean: True when a rule matching on source, destination,
        action and priority already exists
    """
    compared_keys = ('source', 'destination', 'action', 'priority')
    return any(
        all(rule[key] == existing[key] for key in compared_keys)
        for existing in rule_list
    )
def parse_word_comment_meta(text):
    """Split a dictionary entry into (word, comment, meta) parts.

    Comments appear either in square brackets before the word ('[anyag]: ...')
    or in angle brackets after it ('<vkihez>'); anything else following the
    word (e.g. '<em>...</em>' markup or '(mn)') is treated as meta.

    >>> parse_word_comment_meta('antialkoholista')
    ('antialkoholista', '', '')
    >>> parse_word_comment_meta('absztinens <em>val</em>')
    ('absztinens', '', '<em>val</em>')
    >>> parse_word_comment_meta('bornemissza <em>reg</em>')
    ('bornemissza', '', '<em>reg</em>')
    >>> parse_word_comment_meta('bornemissza (mn)')
    ('bornemissza', '', '(mn)')
    >>> parse_word_comment_meta(' [anyag]: elettelen')
    ('elettelen', 'anyag', '')
    >>> parse_word_comment_meta('')
    ('', '', '')
    >>> parse_word_comment_meta(' ragaszkodik <vkihez>')
    ('ragaszkodik', 'vkihez', '')
    """
    text = text.strip()
    if text:
        word = meta = comment = ''
        if ']:' in text:
            comment, text = text.split(']:')
            comment = comment.replace('[', '').strip()
        elif '<' in text and '<em>' not in text:
            # Angle-bracket comment such as '<vkihez>'.  The '<em>' guard is
            # the bug fix: the original matched any '<' here, which made the
            # '</em>:' branch below unreachable and mangled '<em>...</em>'
            # markup (breaking the doctests above).
            text, comment = text.split('<')[:2]
            comment = comment.replace('>', '').strip()
        elif '</em>:' in text:
            meta, text = text.split(':')
            text = text.strip()
        if text:
            word = text.split()[0]
        if text and not meta:
            # Whatever follows the word (markup, part-of-speech tag) is meta.
            meta = text[len(word)+1:].strip()
        return word.strip(), comment, meta
    else:
        return '', '', ''
def gen_locale(locale):  # type: (str) -> str
    """Return the generated initializer-list line for one locale."""
    # Locale codes are expected to contain only letters, digits and hyphens.
    assert locale.replace('-', '').isalnum(), locale
    # clang-format enforces a four-space indent for initializer lists.
    return '    PLATFORM_LOCALE({locale})'.format(locale=locale)
def numbersonly(line):
    """Return *line* unchanged if it consists only of numeric characters.

    Raises:
        ValueError: if *line* is not numeric.

    NOTE(review): the original docstring claimed bad lines are reported to
    stderr and the process exits with code 2; this function actually raises
    ValueError and leaves any exit handling to the caller.
    """
    if not line.isnumeric():
        raise ValueError('{} is not a number'.format(line))
    return line
def role_vars(host):
    """Load the standard role variables (defaults merged with vars)."""
    defaults_files = "file=./defaults/main.yml name=role_defaults"
    vars_files = "file=./vars/main.yml name=role_vars"
    merged = host.ansible("include_vars", defaults_files)["ansible_facts"]["role_defaults"]
    overrides = host.ansible("include_vars", vars_files)["ansible_facts"]["role_vars"]
    merged.update(overrides)
    return merged
from typing import List
import math
def check_if_min_height(array: List[int], node) -> bool:
    """
    Check whether the node's height equals the theoretical minimum height
    for a tree built from *array*, estimated as ceil(log2(len(array))).
    (Original author's note: experimental use of log base 2.)
    """
    minimal_height = math.ceil(math.log2(len(array)))
    return minimal_height == node.find_height(node)
import os
def _file_size(fname):
"""Get the file size in bytes."""
with open(fname, 'rb') as f:
f.seek(0, os.SEEK_END)
return f.tell() | ca8306bb5b38db8238a2c7299d3e8b44cfdd8e98 | 50,642 |
import torch
def bin_acc(y_true, y_pred, sigmoid: bool = False):
    """
    Returns accuracy per batch as a fraction, i.e. if you get 8/10 right
    this returns 0.8, NOT 8.

    When ``sigmoid`` is True, predictions are squashed through a sigmoid
    before rounding to the nearest integer.
    """
    scores = torch.sigmoid(y_pred) if sigmoid else y_pred
    predictions = torch.round(scores)
    hits = (predictions == y_true).float()
    return hits.sum() / len(hits)
def clts_comparison(inv_a, inv_b, name_a, name_b, prefix=None):
    """
    Collect comparison info for two BIPA inventories.

    Builds a flat dict ("column") holding sizes, similarity scores, shared
    and exclusive sounds of the two inventories, keyed by name labels.

    :param inv_a: first inventory; assumes it exposes ``sounds`` (mapping of
        'consonant'/'vowel' to sound collections) and ``similar()`` — TODO
        confirm against the inventory class.
    :param inv_b: second inventory, same interface.
    :param name_a: label for *inv_a* used in the output keys.
    :param name_b: label for *inv_b* used in the output keys.
    :param prefix: optional key prefix; an underscore is appended when given.
    :return: dict mapping metric names to counts, scores and sound strings.
    """
    # Set prefix and separator, if any
    if prefix:
        prefix = f"{prefix}_"
    else:
        prefix = ""
    column = {}
    # get counts of consonants/vowels per inventory
    inv_a_cons = len(inv_a.sounds["consonant"])
    inv_a_vowl = len(inv_a.sounds["vowel"])
    inv_b_cons = len(inv_b.sounds["consonant"])
    inv_b_vowl = len(inv_b.sounds["vowel"])
    column[f"{prefix}size_{name_a}_all"] = inv_a_cons + inv_a_vowl
    column[f"{prefix}size_{name_b}_all"] = inv_b_cons + inv_b_vowl
    column[f"{prefix}size_{name_a}_cons"] = inv_a_cons
    column[f"{prefix}size_{name_b}_cons"] = inv_b_cons
    column[f"{prefix}size_{name_a}_vowl"] = inv_a_vowl
    column[f"{prefix}size_{name_b}_vowl"] = inv_b_vowl
    # compute similarities (strict once; approximate in both directions,
    # since the approximate metric is presumably asymmetric — TODO confirm)
    aspect_groups = {"all": None, "consonant": ["consonant"], "vowel": ["vowel"]}
    for aspect_label, aspects in aspect_groups.items():
        column[f"{prefix}strict-{aspect_label}"] = inv_a.similar(
            inv_b, metric="strict", aspects=aspects
        )
        column[f"{prefix}appr-{name_a}-{name_b}-{aspect_label}"] = inv_a.similar(
            inv_b, metric="approximate", aspects=aspects
        )
        column[f"{prefix}appr-{name_b}-{name_a}-{aspect_label}"] = inv_b.similar(
            inv_a, metric="approximate", aspects=aspects
        )
    # Collect consonants, vowels from both inventories, for overlap
    sounds_a = {
        aspect: sorted(list(inv_a.sounds[aspect])) for aspect in ["consonant", "vowel"]
    }
    sounds_b = {
        aspect: sorted(list(inv_b.sounds[aspect])) for aspect in ["consonant", "vowel"]
    }
    common_cons = [
        cons for cons in sounds_a["consonant"] if cons in sounds_b["consonant"]
    ]
    common_vowl = [vowl for vowl in sounds_a["vowel"] if vowl in sounds_b["vowel"]]
    # full inventories and shared/exclusive sounds, space-joined for output
    column[f"{prefix}inv_{name_a}"] = " ".join(
        sounds_a["consonant"] + sounds_a["vowel"]
    )
    column[f"{prefix}inv_{name_b}"] = " ".join(
        sounds_b["consonant"] + sounds_b["vowel"]
    )
    column[f"{prefix}shared_cons"] = " ".join(common_cons)
    column[f"{prefix}shared_vowl"] = " ".join(common_vowl)
    column[f"{prefix}size_shared_cons"] = len(common_cons)
    column[f"{prefix}size_shared_vowl"] = len(common_vowl)
    column[f"{prefix}size_shared_all"] = len(common_cons) + len(common_vowl)
    column[f"{prefix}exclusive_{name_a}_cons"] = " ".join(
        [cons for cons in sounds_a["consonant"] if cons not in common_cons]
    )
    column[f"{prefix}exclusive_{name_b}_cons"] = " ".join(
        [cons for cons in sounds_b["consonant"] if cons not in common_cons]
    )
    column[f"{prefix}exclusive_{name_a}_vowl"] = " ".join(
        [vowl for vowl in sounds_a["vowel"] if vowl not in common_vowl]
    )
    column[f"{prefix}exclusive_{name_b}_vowl"] = " ".join(
        [vowl for vowl in sounds_b["vowel"] if vowl not in common_vowl]
    )
    return column
def build_score(_matrix, weight, priority):
    """
    Calculate curtailment scores from the normalized matrix and weights.

    Each device's weighted criteria sum is multiplied by ``priority``.
    Returns a zip of (score, device_key) pairs in the matrix's iteration
    order (note: not sorted, despite the original wording).

    :param _matrix: mapping of device key -> criteria values
    :param weight: per-criterion weight vector
    :param priority: scalar multiplier applied to every score
    :return: zip of (score, key) pairs
    """
    scores = [
        sum(value * w for value, w in zip(criteria, weight)) * priority
        for criteria in _matrix.values()
    ]
    return zip(scores, _matrix.keys())
def is_int_type_malicious_score(confidence_score, params):
    """
    Determine whether an integer-type confidence score meets the configured
    malicious threshold in reputation_params.

    :param confidence_score: score to test; only int values can match.
    :param params: reputation params mapping; the
        'override_confidence_score_malicious_threshold' entry is the
        threshold (a falsy value disables the check).
    :return: True when the threshold is set, the score is an int and the
        score reaches the threshold; False otherwise.  (Fix: the original
        returned the raw falsy threshold value — e.g. None — instead of
        False.)
    """
    threshold = params['override_confidence_score_malicious_threshold']
    return bool(
        threshold
        and isinstance(confidence_score, int)
        and int(threshold) <= confidence_score
    )
def check_no_number_numeric(alnum_text):
    """
    Check whether the text is a single non-alphanumeric character.

    :param alnum_text: text to inspect
    :type alnum_text: str
    :return: no_number_numeric
    :rtype: bool
    """
    return len(alnum_text) == 1 and not alnum_text.isalnum()
def getMolParamIDToAtomIndex( oemol, ff):
    """Map SMIRNOFF parameter IDs to the atom groups they label in a molecule.

    Takes an OEMol and a SMIRNOFF forcefield object and returns a dictionary,
    keyed by parameter ID, where each entry is a tuple
    ``(smirks, [[atom1, ... atomN], ...])`` giving the SMIRKS pattern of that
    parameter and the atom index groups it is applied to.

    Parameters
    ----------
    oemol : OEMol
        OpenEye OEMol with the molecule to investigate.
    ff : ForceField
        SMIRNOFF ForceField object (obtained from an ffxml via
        ForceField(ffxml)) containing the FF of interest.

    Returns
    -------
    param_usage : dictionary
        Keyed by parameter ID; each value is
        ``(smirks, [[atom1, ... atomN], ...])``.
    """
    # labelMolecules returns, per molecule, a mapping of force type to
    # (atom_indices, parameter_id, smirks) triples.
    labels = ff.labelMolecules([oemol])
    param_usage = {}
    for mol_entry in range(len(labels)):
        for force in labels[mol_entry].keys():
            for (atom_indices, pid, smirks) in labels[mol_entry][force]:
                # First time a pid is seen, record its smirks; afterwards
                # just accumulate the additional atom groups.
                if not pid in param_usage:
                    param_usage[pid] = (smirks, [atom_indices])
                else:
                    param_usage[pid][1].append( atom_indices )
    return param_usage
def get_pulsar_consumer_stage(pipeline_builder, topic, initial_offset):
    """Create and return a Pulsar Consumer origin stage depending on execution mode for the pipeline."""
    consumer = pipeline_builder.add_stage('Pulsar Consumer', type='origin')
    consumer.set_attributes(
        data_format='TEXT',
        batch_wait_time_in_ms=20000,
        topic=topic,
        consumer_name='consumer',
        initial_offset=initial_offset,
    )
    return consumer
def flatnonzero(a):
    """Return indices that are non-zero in the flattened version of *a*.

    Equivalent to ``a.ravel().nonzero()[0]``.

    >>> from numpy import arange
    >>> flatnonzero(arange(-2, 3))
    array([0, 1, 3, 4])
    """
    flattened = a.ravel()
    return flattened.nonzero()[0]
import random
def get_random_color(num_color: int):
    """
    Return *num_color* random hex color strings (e.g. '#A1B2C3') as a list.

    :param num_color: number of colors to generate
    :return: list of '#RRGGBB' strings
    """
    hex_digits = '0123456789ABCDEF'
    colors = []
    for _ in range(num_color):
        colors.append("#" + ''.join(random.choice(hex_digits) for _ in range(6)))
    return colors
import configparser
def parse_options(options_file_path, options_section):
    """Parse the given options file and return the requested section.

    libfuzzer options are rendered as '-key=value' pairs joined by spaces;
    any other (sanitizer) section as 'key=value' pairs joined by ':'.
    Returns None when the section is missing.
    """
    config = configparser.ConfigParser()
    config.read(options_file_path)
    if not config.has_section(options_section):
        return None
    items = config[options_section].items()
    if options_section == 'libfuzzer':
        return ' '.join('-%s=%s' % pair for pair in items)
    # Sanitizer options.
    return ':'.join('%s=%s' % pair for pair in items)
import random
def choose_the_word(vocabulary_array):
    """Return the word that was conceived from vocabulary for the game.

    Keyword arguments:
    vocabulary_array - array of words from which to choose
    """
    chosen_index = random.randint(0, len(vocabulary_array) - 1)
    return vocabulary_array[chosen_index]
def rounded_down(value, granularity):
    """Returns `value` rounded down to nearest multiple of `granularity`.

    Fractional granularities in (0, 1) are handled by scaling into
    integer-granularity space first, rounding there, and scaling back,
    which avoids floating-point modulo artifacts.

    >>> rounded_down(3, 5)
    0
    >>> rounded_down(8, 5)
    5
    >>> rounded_down(-3, 5)
    -5
    >>> rounded_down(0.97, 0.01)
    0.97
    >>> rounded_down(0.97, 0.05)
    0.95
    >>> rounded_down(-0.97, 0.05)
    -1.0
    """
    if 0 < granularity < 1:
        scale = 1.0 / granularity
        return rounded_down(value * scale, 1.0) / scale
    return value - (value % granularity)
import os
def file_list(index_start, index_end):
    """Construct a list of JSON file paths on S3.

    The base address is read from the S3ADDRESS environment variable; file
    names are zero-padded 12-digit indices with a '.json' suffix.
    """
    base_path = os.environ['S3ADDRESS']
    return [
        '{base}{index:012}.json'.format(base=base_path, index=i)
        for i in range(index_start, index_end)
    ]
def handle_missing_threshold(df, prop_required_column = .3, prop_required_row = .9):
    """
    Drop columns, then rows, whose count of non-missing values falls below
    the required proportion.  Mutates *df* in place and returns it.
    """
    min_per_column = int(round(prop_required_column * len(df.index), 0))
    df.dropna(axis=1, thresh=min_per_column, inplace=True)
    # Note: the row threshold uses the column count *after* column dropping.
    min_per_row = int(round(prop_required_row * len(df.columns), 0))
    df.dropna(axis=0, thresh=min_per_row, inplace=True)
    return df
def eliminateStopwordsDoc(document, stopList):
    """
    Eliminate stopwords in a single document and return the cleaned text.
    """
    kept_words = [word for word in document.split() if word not in stopList]
    return ' '.join(kept_words)
import random
def isPrime(number, certainty = 12):
    """
    Miller-Rabin probabilistic primality test.

    :type number: int
    :type certainty: int
    :param certainty: number of random witness rounds
    :return: boolean
    """
    assert isinstance(number, int) and (number >= 0), \
        "'number' must been an int and positive"
    if number < 2:
        return False
    # 2 is the only even prime.
    if number != 2 and (number & 1) == 0:
        return False
    # Strip factors of two from number-1 to get the odd part.
    odd_part = number - 1
    while (odd_part & 1) == 0:
        odd_part >>= 1
    for _ in range(certainty):
        witness = random.randrange(number - 1) + 1
        exponent = odd_part
        residue = pow(witness, exponent, number)
        # Square until we hit +/-1 or exhaust the exponent doublings.
        while exponent != number - 1 and residue != 1 and residue != number - 1:
            residue = (residue * residue) % number
            exponent <<= 1
        if residue != number - 1 and (exponent & 1) == 0:
            return False
    return True
def bounds_contains(bounds, x):
    """
    Return True if `x` is contained in the bounds, and False otherwise.

    Parameters
    ----------
    bounds : numpy.ndarray
        Array of shape (d, 2); row dd holds [low, high] for dimension dd,
        representing the cartesian product
        [x0, y0] X [x1, y1] X ... X [xd, yd] in R^d.
    x : numpy.ndarray
        Array of shape (d,)
    """
    for dd in range(x.shape[0]):
        low, high = bounds[dd, 0], bounds[dd, 1]
        # Same comparison form as the original (keeps NaN handling intact).
        if x[dd] < low or x[dd] > high:
            return False
    return True
def get_api_key():
    """Read TT's API key from data/secrets/tt_api_key.txt and return it."""
    with open("data/secrets/tt_api_key.txt", "r") as key_file:
        return key_file.read()
def is_user_op(node):
    """Return True when the node's ``op_type`` oneof is set to 'user_conf'.

    Per the original note this identifies the intermediate (user-defined)
    ops of the graph; the exact oneof semantics come from the node's
    protobuf message definition elsewhere — TODO confirm.
    """
    return node.WhichOneof("op_type") == "user_conf"
def concat_track(logical_sectors):
    """Return a single bytes object with the data of all 16 sectors in order.

    Sectors missing from *logical_sectors* are filled with 256 zero bytes.
    """
    chunks = []
    for sector in range(16):
        if sector in logical_sectors:
            chunks.append(logical_sectors[sector].decoded)
        else:
            chunks.append(bytearray(256))
    return b''.join(chunks)
def subreddit_search_key(sr):
    """Search key for a subreddit: its name.

    (A commented-out variant that also included the title was removed as
    dead code.)
    """
    return sr['name']
import itertools
def algo3GenerateNewActiveSets(newly_dropped, prev_processed):
    """This function does Algorithm 3 in the paper.

    Args:
        newly_dropped: A newly dropped set of size k, stored as a frozenset.
            This is s in the paper.
        prev_processed: the set of previously processed sets, stored as a
            set of frozensets.  This is delta in the paper.

    Returns:
        Z, the new active sets, stored as a set of frozensets.

    Improvements over the original: ``len(list(x))`` replaced with
    ``len(x)``, and the redundant per-iteration re-conversion of delta_k's
    members to frozensets (they already are frozensets) was removed from
    the alpha loop.
    """
    new_active_sets = set()  # this is Z in the paper
    size_newly_dropped = len(newly_dropped)  # this is k in the paper
    set_newly_dropped = set(newly_dropped)

    # Step 3: delta_k contains all size-k members of delta, plus s itself.
    delta_k = {fs for fs in prev_processed if len(fs) == size_newly_dropped}
    delta_k.add(newly_dropped)

    # Step 4: rho is all the covariates contained in sets in delta_k.
    rho = set()
    for member in delta_k:
        rho.update(member)

    # Step 5: s_e is the support of covariate e in delta_k, for covars in rho.
    s_e = {covar: sum(1 for member in delta_k if covar in member) for covar in rho}

    # Step 6: omega is all covariates not in s that have enough support.
    omega = {covar for covar, support in s_e.items() if support >= size_newly_dropped}
    omega -= set_newly_dropped

    # Step 7: every covariate in s must itself have enough support in delta_k.
    for covar, support in s_e.items():
        if covar in newly_dropped and support < size_newly_dropped:
            return new_active_sets

    # Steps 8-11: r = s + {alpha} becomes a new active set iff every
    # size-k subset of r is already in delta_k.
    for alpha in omega:
        r = set_newly_dropped | {alpha}
        subsets_size_k = {
            frozenset(combo)
            for combo in itertools.combinations(r, size_newly_dropped)
        }
        if subsets_size_k <= delta_k:
            new_active_sets.add(frozenset(r))
    return new_active_sets
from typing import Sequence
def pool_modulo(pool: Sequence[int], divisor: int) -> tuple[int, ...]:
    """Return a tuple with each pool member reduced modulo *divisor*."""
    reduced = []
    for member in pool:
        reduced.append(member % divisor)
    return tuple(reduced)
def getIndexOfMinVal(lst):
    """Return the index of the smallest value in a non-empty list.

    Ties resolve to the first occurrence.
    """
    best_index = 0
    best_value = lst[0]
    for index, value in enumerate(lst):
        # Strict < keeps the earliest minimum on ties.
        if value < best_value:
            best_index, best_value = index, value
    return best_index
import pandas as pd
import os
import urllib.request
def get_crm_data(cache_path='.', preprocess=True):
    """
    Load the example cloud-resolving model data, downloading if not present.

    :param str cache_path: Path to load/store the data
    :param bool preprocess: Whether or not to clean and concatenate the data
    :return: one concatenated DataFrame when *preprocess* is True, otherwise
        the (20cdnc, 200cdnc) shallow DataFrames as a tuple.
    """
    base_url = "https://zenodo.org/record/4323300/files/"
    # Download every CSV that is not already cached (the non-shal files are
    # fetched for parity with the original, though only the *_shal files
    # are read below).
    cache_files = {}
    for name in ('NARVAL1_1hr_200cdnc.csv',
                 'NARVAL1_1hr_20cdnc.csv',
                 'NARVAL1_1hr_20cdnc_shal.csv',
                 'NARVAL1_1hr_200cdnc_shal.csv'):
        cached = os.path.join(cache_path, name)
        if not os.path.isfile(cached):
            urllib.request.urlretrieve(base_url + name + "?download=1", cached)
        cache_files[name] = cached

    N1_20_shal_cache = cache_files['NARVAL1_1hr_20cdnc_shal.csv']
    N1_200_shal_cache = cache_files['NARVAL1_1hr_200cdnc_shal.csv']
    if preprocess:
        df20 = pd.read_csv(N1_20_shal_cache).set_index('time').drop(columns='plev')
        df200 = pd.read_csv(N1_200_shal_cache).set_index('time').drop(columns='plev')
        return pd.concat([df20, df200]).reset_index().drop(columns='time')
    # Bug fix: the original read bare filenames here, ignoring cache_path,
    # which failed whenever cache_path was not the working directory.
    df20 = pd.read_csv(N1_20_shal_cache)
    df200 = pd.read_csv(N1_200_shal_cache)
    return df20, df200
def all_inputs_of_later_op(block, begin_idx):
    """
    Find all inputs of ops after an idx; used to determine the logical
    output of a cuda graph section.

    :param block: framework.Block, the original block
    :param begin_idx: int, from which idx (not included) to find later ins
    :return: a de-duplicated list of input names for all ops behind begin_idx
    """
    names = set()
    for idx, op in enumerate(block.ops):
        if idx > begin_idx:
            names.update(op.input_arg_names)
    return list(names)
import numpy
def lorentz(theta_bragg_deg, return_what=0):
    """
    Return the Lorentz factor, polarization factor (unpolarized beam),
    geometric factor, or a combination of them.

    :param theta_bragg_deg: Bragg angle in degrees
    :param return_what: A flag indicating the returned variable:
        0: (default) PolFac*lorentzFac
        1: PolFac
        2: lorentzFac
        3: geomFac
        4: PolFac*lorentzFac*geomFac
    :return: a scalar value
    :raises ValueError: for an unrecognized *return_what* flag (the
        original silently returned None in that case).
    """
    tr = theta_bragg_deg * numpy.pi / 180.
    polarization_factor = 0.5 * (1.0 + (numpy.cos(2.0 * tr))**2)
    lorentz_factor = 1.0 / numpy.sin(2.0 * tr)
    geometrical_factor = 1.0 * numpy.cos(tr) / numpy.sin(2.0 * tr)
    if return_what == 0:
        return polarization_factor * lorentz_factor
    elif return_what == 1:
        return polarization_factor
    elif return_what == 2:
        return lorentz_factor
    elif return_what == 3:
        return geometrical_factor
    elif return_what == 4:
        return polarization_factor * lorentz_factor * geometrical_factor
    raise ValueError('invalid return_what flag: %r' % (return_what,))
def safeint(value):
    """Safely convert *value* to an int, or None when conversion fails.

    Accepts anything ``float()`` accepts (e.g. '3.7' -> 3).  Besides
    ValueError, the original missed TypeError (None, lists, ...) and
    OverflowError (infinities), which made "safe" conversion raise.
    """
    try:
        return int(float(value))
    except (ValueError, TypeError, OverflowError):
        return None
import torch
def weighted_mean_loss(x, weights, eps=1e-6):
    """Weighted mean of *x* with *weights* normalized to sum to 1 per sample.

    Args:
        x (B, ...): values to average; the (B, 1, 1, 1) reshape below
            implies 4D inputs are expected — TODO confirm against callers.
        weights (B, ...): weights, same layout as *x*.
        eps: lower clamp on the per-sample weight sum (avoids div-by-zero).

    Returns:
        Tensor of shape (B,) with one weighted sum per batch element.
        NOTE(review): the original docstring said "a scalar", but the final
        reduction is over dim=1 only, so a per-sample vector is returned.
    """
    assert x.ndimension() == weights.ndimension() and x.shape[0] == weights.shape[0]
    # normalize to sum=1
    B = weights.shape[0]
    weights_sum = torch.sum(weights.view(B, -1), dim=-1).view(B, 1, 1, 1)
    weights_sum = torch.clamp(weights_sum, min=eps)
    weights_n = weights / weights_sum
    return torch.sum((weights_n * x).reshape(B, -1), dim=1)
def choose_assassin():
    """Return the Assassin role name."""
    role = "Assassin"
    return role
import os
def find_file(filename, include_paths):
    """Search include_paths for *filename* and return the first readable hit.

    include_paths MUST CONTAIN "" in order to search the local directory.
    Returns None when include_paths is None or no readable match exists;
    unreadable matches are reported with a warning and skipped.
    """
    if include_paths is None:
        return None
    for directory in include_paths:
        candidate = os.path.join(directory, filename)
        if not os.path.exists(candidate):
            continue
        if os.access(candidate, os.R_OK):
            return candidate
        print("warning: file \"%s\" found but it is not readable" % \
              candidate)
    return None
def build_controllers(controller_definitions):
    """
    Instantiate each controller definition and return the controllers list.
    """
    return [definition() for definition in controller_definitions]
def split_arbitrary_thickness_section(key, value):
    """
    Split a 'T(n)'/'CORE(n)'-style key and its value into (index, payload).

    >>> key = 'T(11)'
    >>> value = '[1.2,PT=(123,204)]'
    >>> index, out = split_arbitrary_thickness_section(key, value)
    >>> index
    11
    >>> out
    [1.2, [123, 204]]
    """
    assert key.endswith(')'), 'key=%r' % key
    # T(3), CORE(3)
    key_id = int(key[:-1].split('(', 1)[1])

    if isinstance(value, (int, float)):
        return key_id, value

    value = value.replace(' ', '')
    if 'PT' not in value:
        return key_id, float(value)

    # '[thickness,PT=(p1,p2)]' form
    bracketed = value.strip('[]')
    thickness_str, pt_part = bracketed.split(',', 1)
    pt_value = pt_part.split('=')
    assert pt_value[0] == 'PT', pt_value
    points = pt_value[1].strip('()').split(',')
    assert len(points) == 2, pt_value
    return key_id, [float(thickness_str), [int(p) for p in points]]
def get_extension(file_path: str):
    """
    Return the file's extension (text after the last dot), or None if
    there is no dot.

    :param str file_path: path or filename to inspect
    """
    if '.' not in file_path:
        return None
    return file_path.rsplit('.', 1)[1]
def get_task_name(options):
    """
    Given a dictionary of command options, return the name of the task —
    the first key whose value stringifies to 'True'.

    :param options: Options passed to the handle method of the management command
    :return: The task name (str)
    :raises IndexError: when no option value is 'True'
    """
    options_dict = dict(options)
    true_keys = [key for key in options_dict if str(options_dict[key]) == 'True']
    return true_keys[0]
def sgn(data):
    """
    Unit step function: 1 for non-negative input, 0 for negative input.

    NOTE(review): despite the name, this is not the mathematical sign
    function (which would return -1 for negatives); here sgn(0) == 1 and
    sgn(-x) == 0.

    :param data: numeric value to test
    :return: 1 if data >= 0 else 0
    """
    if data >= 0 :
        return 1
    else :
        return 0
def _getRightmost(categories):
"""
Get rightmost toplevel category.
categories -- list of Category, all category from database.
"""
rightmost = None
for cat in categories:
if not rightmost or cat.getRight() > rightmost.getRight():
rightmost = cat
return rightmost | e3d7f835b4d85ecfdf8cc6e519b76661f3dac90c | 50,688 |
def reraise(error):
    """Return a callable that raises the given error whenever invoked."""
    def _raiser(*args, **kwargs):
        raise error
    return _raiser
import inspect
import os
def get_script_filepath():
    """
    Returns the filesystem path of the Python script running the Client.

    Walks up the call stack until it finds the first frame that does not
    belong to the ``verta`` package and returns that frame's file path.

    Returns
    -------
    str

    Raises
    ------
    OSError
        If the calling script cannot be identified.
    """
    for frame_info in inspect.stack():
        frame_module = inspect.getmodule(frame_info[0])
        is_verta_frame = (frame_module is not None
                          and frame_module.__name__.split('.', 1)[0] == "verta")
        if not is_verta_frame:
            path = frame_info[1]
            if not os.path.exists(path):  # e.g. Jupyter fakes the filename for cells
                break  # continuing might end up returning a built-in
            return path
    raise OSError("unable to find script file")
def findin_slovo(list):
    """
    Prompt the user for an animal name and report whether it appears in
    the given list.

    Returns a (found, word) tuple where *word* is the lower-cased input.
    (The parameter name shadows the builtin ``list``; kept for interface
    compatibility with existing callers.)
    """
    while True:
        slovo = input("Napis zvire, ktere si myslíš ze je ve seznamu:\n")
        # Accept only names between 3 and 14 characters long.
        if 2 < len(slovo) < 15:
            break
        print("Napsal jsi nejakou blbost zkus to znova")
    guess = slovo.lower()
    for zvire in list:
        if guess == zvire:
            return (True, guess)
    return (False, guess)
def lookup_dict(entities_file: str):
    """
    Build a lookup of entity annotations keyed by abstract id + entity id.

    Reads a tab-separated entities file (abstract, entity id, entity type,
    ..., entity name) and, as a side effect, writes the set of abstract
    PMIDs to 'pmids.txt' in the working directory.

    :param entities_file: path to the tab-separated entities file
    :return: (dict mapping abstract+entity_id -> [entity_type, entity_name],
              set of abstract ids)
    """
    # `with` guarantees the handles are closed even on error (the original
    # left both files dangling if a line was malformed).
    with open(entities_file, 'r', encoding='utf-8') as handle:
        entities = handle.readlines()

    abstracts = []
    dict_entities = {}
    for line in entities:
        fields = line.split('\t')
        abstract = fields[0]
        entity_id = fields[1]
        entity_type = fields[2]
        entity_name = fields[-1][:-1]  # last field, trailing newline removed
        abstracts.append(abstract)
        dict_entities[abstract + entity_id] = [entity_type, entity_name]

    unique_abstracts = set(abstracts)
    with open('pmids.txt', 'w', encoding='utf-8') as output_pmids:
        for abstract in unique_abstracts:
            output_pmids.write(abstract + '\n')
    return dict_entities, unique_abstracts
import time
from datetime import datetime
def control_1_4_rotated_keys(credreport):
    """CIS 1.4: Ensure access keys are rotated every 90 days or less.

    Args:
        credreport (list[dict]): Parsed IAM credential report rows.

    Returns:
        dict: Control result with offenders and console links.
    """
    result = True
    failReason = ""
    offenders = []
    offenders_links = []
    control = "1.4"
    description = "Ensure access keys are rotated every 90 days or less"
    scored = True
    # Current time in the credential report's timestamp format.
    now = time.strftime('%Y-%m-%dT%H:%M:%S+00:00', time.gmtime(time.time()))
    frm = "%Y-%m-%dT%H:%M:%S+00:00"
    link = 'https://console.aws.amazon.com/iam/home#/users/{user}?section=security_credentials'

    def _check_key(entry, num):
        """Flag access key *num* ('1' or '2') of one report row if it is
        stale (>90 days since rotation) or unused since its last rotation."""
        nonlocal result, failReason
        if entry['access_key_%s_active' % num] != "true":
            return
        try:
            delta = datetime.strptime(now, frm) - datetime.strptime(
                entry['access_key_%s_last_rotated' % num], frm)
            # Verify keys have rotated in the last 90 days.
            if delta.days > 90:
                result = False
                failReason = "Key rotation >90 days or not used since rotation"
                offenders.append(str(entry['arn']) + ":unrotated key%s" % num)
                offenders_links.append(link.format(user=entry['user']))
        except (KeyError, ValueError):
            # Missing or non-date fields (e.g. 'N/A') mean no finding here.
            pass
        try:
            last_used = datetime.strptime(
                entry['access_key_%s_last_used_date' % num], frm)
            last_rotated = datetime.strptime(
                entry['access_key_%s_last_rotated' % num], frm)
            # Verify keys have been used since rotation.
            if last_used < last_rotated:
                result = False
                failReason = "Key rotation >90 days or not used since rotation"
                offenders.append(str(entry['arn']) + ":unused key%s" % num)
                offenders_links.append(link.format(user=entry['user']))
        except (KeyError, ValueError):
            pass

    # The original duplicated this logic verbatim for key1 and key2.
    for entry in credreport:
        _check_key(entry, '1')
        _check_key(entry, '2')
    return {'Result': result, 'failReason': failReason, 'Offenders': offenders, 'OffendersLinks': offenders_links, 'ScoredControl': scored, 'Description': description, 'ControlId': control}
import os
def get_cwd():
    """Dummy helper: print a greeting and the working directory, return 42."""
    greeting = "\nHello World!"
    print(greeting)
    print("Working directory:", os.getcwd(), "\n")
    return 42
import logging
def _num_groups(num_tokens: int,
                max_group_size: int,
                num_experts: int,
                num_expert_replicas: int,
                strict_group_size: bool = False) -> int:
    """Returns the number of token routing groups.

    Note: For pjit-based training, all quantities are global.

    We select the smallest num_groups such that:
    - num_groups >= num_tokens / max_group_size (ensuring the group size is
      no larger than max_group_size),
    - num_tokens % num_groups = 0 (ensuring that the group size evenly
      divides into the num_tokens),
    - num_groups % (num_expert_replicas * num_experts) = 0 (ensuring that
      the number of groups can be split across the total number of experts).

    Args:
        num_tokens: Number of tokens from input batch.
        max_group_size: Maximum size of each token routing group. Actual
            group size may end up being smaller.
        num_experts: Total number of unique experts.
        num_expert_replicas: Number of copies of each expert.
        strict_group_size: If True, fail if unable to set the token group
            size equal to max_group_size.

    Returns:
        Number of token routing groups.

    Raises:
        ValueError if we cannot find a group_size satisfying the above
        requirements.
    """
    total_experts = num_expert_replicas * num_experts

    def _is_viable(candidate):
        """True iff *candidate* satisfies both divisibility requirements."""
        return num_tokens % candidate == 0 and candidate % total_experts == 0

    # For pjit-based partitioning, we manipulated arrays globally. The number
    # of experts must evenly divide the number of (global) groups.
    num_groups = max(num_tokens // max_group_size, total_experts)
    # Increase the number of groups (and decrease the group size) until we
    # have a viable number of groups.
    while num_groups < num_tokens and not _is_viable(num_groups):
        num_groups += 1
    if num_tokens % num_groups > 0:
        raise ValueError(
            'Group size and the number of experts must divide evenly into the '
            f'global number of tokens, but num_tokens={num_tokens}, while '
            f'num_groups={num_groups} for max_group_size={max_group_size} '
            f'and num_experts={num_experts}, each with {num_expert_replicas} '
            'replicas')
    group_size = num_tokens // num_groups
    logging.info(
        'Selected group_size=%d and num_groups=%d for input num_tokens=%d, '
        'max_group_size=%d, num_experts=%d and num_expert_replicas=%d',
        group_size, num_groups, num_tokens, max_group_size, num_experts,
        num_expert_replicas)
    if strict_group_size and group_size != max_group_size:
        raise ValueError(
            f'Selected group_size={group_size} is less than the '
            f'max_group_size={max_group_size}. Exiting because strict mode is '
            'active (strict_group_size=True)')
    return num_groups
def str_to_bool(s):
    """String to bool used to read config file.

    Parameters
    ----------
    s : str
        String to convert; must be exactly 'True' or 'False'.

    Returns
    -------
    bool
        Boolean value of input string.

    Raises
    ------
    ValueError
        If *s* is neither 'True' nor 'False'.
    """
    if s == 'True':
        return True
    if s == 'False':
        return False
    # The original raised a bare ValueError with no hint about the input.
    raise ValueError("expected 'True' or 'False', got %r" % (s,))
import subprocess
def run_cmd_shell(cmd):
    """
    run cmd on frontend machine with the shell

    Runs *cmd* through the shell and returns its stdout as bytes; raises
    subprocess.CalledProcessError on a non-zero exit status.

    SECURITY NOTE(review): shell=True executes *cmd* via the shell, so
    callers must never pass untrusted input here (shell-injection risk).
    """
    return subprocess.check_output(cmd,shell=True)
def extract_python_code(filepath):
    """Return the file's contents with the first 21 lines (the license
    header) removed.

    :param filepath: path of the Python script to read
    :return: the remaining source as one string ('' for short files)
    """
    # `with` guarantees the handle is closed (the original leaked it), and
    # join avoids quadratic string concatenation in the loop.
    with open(filepath, "r") as python_file:
        lines = python_file.readlines()
    return "".join(lines[21:])
def IR(spot: list, m=1):
    """
    IR(): Calculate the Single Effective Interest Rate from an array of spot rates.

    :param spot: An array/List of Spot rates
    :type spot: list
    :param m: Frequency of Interest Calculation (eg: 2 for Semi-annually), defaults to 1.
    :type m: float
    :return: the final spot rate; None for m <= 0 or an empty list.
    :rtype: float
    """
    invalid_input = m <= 0 or not spot
    return None if invalid_input else spot[-1]
def Gamma_phi_fn(site, p):
    """
    Return [Gamma, phi] for *site* according to which region its
    x-coordinate (site.pos[0]) falls in.

    Parameters
    ----------
    site : object whose .pos[0] is the x position.
    p : Namespace class containing the parameters of the system
        (left/middle/right region bounds plus GammaL/phiL and GammaR/phiR).

    Raises
    ------
    ValueError
        If the x-coordinate lies in none of the three regions.
    """
    x = site.pos[0]
    if x <= p.left[-1]:
        # Left lead.
        return [p.GammaL, p.phiL]
    if p.middle[0] <= x <= p.middle[-1]:
        # Central (scattering) region: no coupling.
        return [0, 0]
    if p.right[0] <= x <= p.right[-1]:
        # Right lead.
        return [p.GammaR, p.phiR]
    raise ValueError("In Gamma_phi_fn: site.pos[0] was in neither parts of the system. Cannot assign Gamma- and phi-values.")
def check_bouncy(n: int) -> bool:
    """
    Returns True if number is bouncy, False otherwise.

    A number is bouncy when its digit string is neither fully ascending
    nor fully descending (the '-' of a negative number takes part in the
    comparison, matching the historical behaviour).

    >>> check_bouncy(6789)
    False
    >>> check_bouncy(132475)
    True
    >>> check_bouncy(-6548)
    True
    >>> check_bouncy(6.74)
    Traceback (most recent call last):
    ...
    ValueError: check_bouncy() accepts only integer arguments
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    digits = str(n)
    ascending = "".join(sorted(digits))
    descending = ascending[::-1]
    return digits not in (ascending, descending)
def bestOf(predictorList):
    """Return the most succesful predictor(s) of the list. The
    return value is a list of one or more predictors. The latter
    is the case if there are several equally good best predictors
    in the list.
    """
    assert predictorList != [], "Predictor list is empty!"
    top_rate = max(p.successRate for p in predictorList)
    if top_rate < -1.0:
        # Mirrors the original -1.0 sentinel: rates below it match nothing.
        return []
    return [p for p in predictorList if p.successRate == top_rate]
def unique(value):
    """Return the distinct elements of *value* in first-seen order.

    The previous ``list(set(...))`` form returned the elements in an
    arbitrary, hash-dependent order; ``dict.fromkeys`` deduplicates while
    keeping the output deterministic.
    """
    return list(dict.fromkeys(value))
def infile(arg):
    """Decorate an argument as an input.

    :parameter arg: Argument to designate as an input file.
    :return: a ('in', arg) tag tuple.
    """
    tagged = ('in', arg)
    return tagged
def search_area_landscape(mesh, domain, landscape, factor=3):
    """Select the mesh cell centers inside the search region.

    The region is bounded laterally by *domain*, below by the domain floor,
    and above by the landscape top plus *factor* minimum z cell widths.
    """
    cells = mesh.cell_centers
    min_dz = min(mesh.h[2])  # minimum cell width in z-direction
    x_min, x_max = domain[0][0], domain[0][1]
    y_min, y_max = domain[1][0], domain[1][1]
    z_min = domain[2][0]
    z_max = max(landscape[:, 2]) + factor * min_dz
    in_x = (cells[:, 0] > x_min) & (cells[:, 0] < x_max)
    in_y = (cells[:, 1] > y_min) & (cells[:, 1] < y_max)
    in_z = (cells[:, 2] > z_min) & (cells[:, 2] < z_max)
    return cells[in_x & in_y & in_z]
def _get_provenance_record(attributes, ancestor_files):
    """Create the provenance record dictionary.

    Inputs:
    attributes = dictionary of ensembles/models used, the region bounds
        and years of data used.
    ancestor_files = list of data files used by the diagnostic.

    Outputs:
    record = dictionary of provenance records.
    """
    # BUG FIX: .format(**attributes) previously bound only to the final
    # string fragment (operator precedence), so {lat_south}, {ensembles},
    # etc. were never substituted. Parenthesizing the implicit literal
    # concatenation applies the format to the whole caption.
    caption = (
        "Timeseries of ESA CCI LST difference to mean of "
        "model ensembles calculated over region bounded by latitude "
        "{lat_south} to {lat_north}, longitude {lon_west} to {lon_east} "
        "and for model/ensembles {ensembles}. "
        "Shown for years {start_year} to {end_year}.".format(**attributes)
    )
    record = {
        'caption': caption,
        'statistics': ['mean', 'stddev'],
        'domains': ['reg'],
        'plot_types': ['times'],
        'authors': ['king_robert'],
        # 'references': [],
        'ancestors': ancestor_files
    }
    return record
import inspect
def is_rpc_method(object):
    """Returns true if the given object is a method marked with @rpc."""
    return inspect.ismethod(object) and hasattr(object, 'rpc')
def get_explicit_dynamic_libraries(ctx, cc_deps):
    """The explicit shared C library dependencies for this target.

    These libraries must be linked explicitly with "-l".

    Args:
      ctx: The current rule context.
      cc_deps: A struct of all C++ dependencies for this target,
        as stored in HaskellInfo.transitive.cc.

    Returns:
      A list of Files.
    """
    libraries = []
    for lib in cc_deps.libs.to_list():
        # "Forwarding" C libraries without any srcs of their own don't
        # specify a dynamic_library; they add no new symbols, so skip them.
        if lib.dynamic_library:
            libraries.append(lib.dynamic_library)
    return libraries
def extrep_frac(lst1, lst2):
    """Returns the fraction of items in lst1 that are in lst2"""
    if not lst1:
        return 0
    shared = sum(1 for item in lst1 if item in lst2)
    return shared / len(lst1)
from bs4 import BeautifulSoup
def print_children(soup: "BeautifulSoup") -> None:
    """
    Print information about all children and descendents.

    Args:
        soup (BeautifulSoup): parsed document (anything exposing
            ``children`` and ``descendants`` iterables works).

    Note: the annotation is a lazy string so defining this function no
    longer requires bs4 to be importable at module load time — the type
    was only used for annotation purposes.
    """
    print('\n')
    print('Number of children and descendants of main soup object:\n')
    print('No. children: ', len(list(soup.children)))
    print('\nThe children are printed below:')
    print('\n', list(soup.children))
    print('\nNo. descendants:', len(list(soup.descendants)))
    print('\nThe descendants are printed below:')
    print('\n', list(soup.descendants))
    print('\n')
    return None
def calculate_initial_position_of_sliding_window(num_seeds):
    """
    Calculate the initial position where the sliding window will start
    its calculations.

    The left edge of the first interval satisfies i - num_seeds / 2 = 1,
    so the start is i = 1 + num_seeds / 2 (truncated toward zero).
    """
    half_window = int(num_seeds / 2)
    return 1 + half_window
import string
def tensor_for_label(label):
    """Fake embedding based on occurrence of 26 ASCII letters in the label."""
    def weight(letter):
        # +0.1 when the letter occurs in the label, -0.1 otherwise.
        return 0.1 if letter in label else -0.1
    return tuple(weight(letter) for letter in string.ascii_lowercase)
from pathlib import Path
def development_parse_input(
    path_project, name_method,
    path_database_literatur, path_database_inorganics, path_database_predictions,
    path_database_metabolites, path_database_pathways,
    organisms, pathways, drop_sumpeaks, drop_sumpeaks_no
):
    """
    Parse user input into a single settings dictionary for easy access.

    Parameters
    ----------
    path_project : str
        Raw string to results folder.
    name_method : str
        Name of the analysis method.
    path_database_literatur, path_database_inorganics,
    path_database_predictions, path_database_metabolites,
    path_database_pathways : str
        Raw strings to the respective data locations.
    organisms : list
        List of KEGG organism identifiers.
    pathways : list
        Pathway selection.
    drop_sumpeaks : bool
        Drop convoluted metabolite mass transitions.
    drop_sumpeaks_no : int
        If drop_sumpeaks, drop transitions convoluted >= this many times.

    Returns
    -------
    inp : dict
        Dictionary with user input, including fixed plotting defaults.
    """
    inp = {
        # Paths
        'path_project': Path(path_project),
        'name_method': name_method,
        'path_literature': Path(path_database_literatur),
        'path_inorganics': Path(path_database_inorganics),
        'path_predictions': Path(path_database_predictions),
        'path_metabolites': Path(path_database_metabolites),
        'path_pathways': Path(path_database_pathways),
        # Parameters
        'organisms': organisms,
        'pathways': pathways,
        'drop_sumpeaks': drop_sumpeaks,
        'drop_sumpeaks_no': drop_sumpeaks_no,
        # Plotting defaults
        'figsize': (6, 5),
        'labelsize': 14,
    }
    return inp
def __get_os_creds_dict(var_config_values, os_creds_dict):
    """
    Returns the associated OS credentials as a dict.

    :param var_config_values: the configuration dictionary
    :param os_creds_dict: dict of creds where the key is the username
    :return: the value dict, or None when no matching credentials exist
    """
    if 'creds_name' in var_config_values:
        # BUG FIX: `os_creds_dict.get[...]` subscripted the bound method
        # object itself, which always raised TypeError. Use .get() so a
        # missing name falls through to None like the 'admin-creds' branch.
        os_creds = os_creds_dict.get(var_config_values['creds_name'])
    else:
        os_creds = os_creds_dict.get('admin-creds')
    if os_creds:
        return os_creds.to_dict()
def dataDictUnfold(inputdict):
    """Return a dictionary with the first layer of keys removed.

    Note: only suited to dictionaries with 2 layers of singleton keys,
    common when importing using the dataParse functions defined above.
    """
    flattened = {}
    for inner in inputdict.values():
        inner_key = next(iter(inner.keys()))
        flattened[inner_key] = inner[inner_key]
    return flattened
def OmahaCertificateTag(env, target, source):
    """Adds a superfluous certificate with a magic signature to an EXE or MSI.

    The file must be signed with Authenticode in order for Certificate
    Tagging to succeed.

    Args:
      env: The environment.
      target: Name of the certificate-tagged file.
      source: Name of the file to be certificate-tagged.

    Returns:
      Output node list from env.Command().
    """
    go_binary = '"' + env['ENV']['GOROOT'] + '/bin/go.exe' + '"'
    tag_tool = (go_binary + ' run ' +
                '$MAIN_DIR/../common/certificate_tag/certificate_tag.go')
    magic_bytes = 'Gact2.0Omaha'
    # Tag value plus 2 length bytes plus 8 KiB of padding.
    padded_length = len(magic_bytes) + 2 + 8192
    action = (tag_tool + ' -set-superfluous-cert-tag=' + magic_bytes +
              ' -padded-length=' + str(padded_length) + ' -out $TARGET $SOURCE')
    return env.Command(target=target, source=source, action=action)
def abbrv(num):
    """
    Shorten *num* with a place-value suffix (e.g. 1,500 -> '1.5K'),
    up to trillions. Values below 1,000 return None (matching the
    original behaviour).
    """
    suffixes = {"T": 1_000_000_000_000, "B": 1_000_000_000, "M": 1_000_000, "K": 1000}
    for suffix, magnitude in suffixes.items():
        if num / magnitude >= 1:
            # BUG FIX: str(...).strip(".0") removed the CHARACTERS '.' and
            # '0' from both ends, so e.g. 10,000 became '1K'. '%g' drops a
            # trailing '.0' without mangling significant digits.
            return ('%g' % round(num / magnitude, 2)) + suffix
def set_params(object, kw, warn=1):
    """Given an object and a dictionary of keyword arguments,
    set only those object properties that are already instance
    variables of the given object. Returns a new dictionary
    without the key,value pairs that have been used. If
    all keywords have been used, afterwards, len(kw)==0.
    """
    kw = kw.copy()
    # BUG FIX: iterate over a snapshot — deleting from the dict while
    # iterating kw.items() raises RuntimeError on Python 3.
    for k, v in list(kw.items()):
        if hasattr(object, k):
            setattr(object, k, v)
            del kw[k]
    return kw
import os
def isA_subdirOfB_orAisB(A, B):
    """True when directory A equals B or lies underneath B.

    It is assumed that A is a directory.
    """
    relative = os.path.relpath(os.path.realpath(A), os.path.realpath(B))
    escapes_b = relative == os.pardir or relative.startswith(os.pardir + os.sep)
    return not escapes_b
def is_op(call, op):
    """
    :param call: The specific operator instance (a method call)
    :param op: The the operator we are testing against
    :return: isinstance(call, op), but faster
    """
    try:
        return call.get_id() == op.get_id()
    except Exception:
        # Anything without a comparable get_id() simply doesn't match.
        return False
def dfs(grid, row, col):
    """
    Flood-fill DFS: sink the land cell at (row, col) and everything
    connected to it by marking visited cells '0'.

    :type grid: List[List[str]]
    :type row : int
    :type col : int
    :rtype : int -- 1 if (row, col) started on unvisited land, else 0
    """
    in_bounds = 0 <= row < len(grid) and 0 <= col < len(grid[row])
    if not in_bounds or grid[row][col] == '0':
        return 0
    grid[row][col] = '0'  # mark visited
    # Recurse into the four orthogonal neighbours.
    for d_row, d_col in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        dfs(grid, row + d_row, col + d_col)
    return 1
def merge_traces(mtraces):
    """Merge MultiTrace objects.

    Parameters
    ----------
    mtraces : list of MultiTraces
        Each instance should have unique chain numbers.

    Raises
    ------
    ValueError
        If any traces have overlapping chain numbers.

    Returns
    -------
    A MultiTrace instance with merged chains (the first input, mutated).
    """
    merged, *rest = mtraces
    for mtrace in rest:
        for chain, strace in mtrace._straces.items():
            if chain in merged._straces:
                raise ValueError("Chains are not unique.")
            merged._straces[chain] = strace
    return merged
import torch
def tbh2bht(t: torch.Tensor) -> torch.Tensor:
    """Reorder a (T, B, H) tensor to (B, H, T) and make it contiguous."""
    reordered = t.permute(1, 2, 0)
    return reordered.contiguous()
import glob
import os
import random
def populate_train_list(lowlight_images_path, image_type):
    """
    Collect every ``*.{image_type}`` file in the given directory and
    return the list in randomly shuffled order (for training).
    """
    pattern = os.path.join(lowlight_images_path, f"*.{image_type}")
    train_list = glob.glob(pattern)
    random.shuffle(train_list)
    return train_list
import os
import json
def notelists():
    """Return a JSON array of {'filename': ...} for every entry in ./html.

    The previous version also hand-built a second, malformed JSON string
    (unquoted keys/values) and printed debug output, all of which was
    discarded — only the json.dumps result was ever returned.
    """
    filenames = [{'filename': name} for name in os.listdir("html")]
    return json.dumps(filenames, indent=4)
def get_resource_dict(package_id, resource_id, acl_xml):
    """
    Derives a resource_dict dictionary from the supplied package ID,
    resource ID, and access control XML values.
    """
    return {
        "package_id": package_id,
        "resource_id": resource_id,
        "acl_xml": acl_xml,
    }
def get_bags_contained(rules, color):
    """Count (recursively) how many bags the specified bag contains.

    :param rules: mapping color -> {inner_color: count} or None
    :param color: color of bag
    :return: int
    """
    contents = rules[color]
    if contents is None:
        # A bag with no contents contributes nothing.
        return 0
    # Each inner bag counts itself plus everything nested inside it.
    return sum(count * (1 + get_bags_contained(rules, inner))
               for inner, count in contents.items())
import argparse
import os
import getpass
def process_argv():
    """Parse command-line arguments and prompt for the account password.

    :return: (args, password) where args is an instance of
        :attr:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(prog=os.path.basename(__file__))
    parser.add_argument('--pop3address', '-a',
                        required=True,
                        help='The address of the pop3 server.')
    parser.add_argument('--port', '-p',
                        type=int,
                        default=110,
                        help="The port number for pop3")
    parser.add_argument('--username', '-u',
                        required=True,
                        help='The user name of this email account')
    parser.add_argument('--no-tls', dest='tls',
                        action='store_false',
                        default=True,
                        help='Disable TLS/SSl')
    # The password is read interactively (never from argv) so it does not
    # leak into shell history or process listings.
    password = getpass.getpass("Enter your password: ")
    return (parser.parse_args(), password)
from typing import List
def groupThePeople1(groupSizes: List[int]) -> List[List[int]]:
    """Group person indices so that person i lands in a group of size
    groupSizes[i]; people requiring the same size are batched together
    in ascending-index order.
    """
    # Sort (size, person) pairs so equal sizes cluster together. O(n log n)
    ordered = sorted((size, person) for person, size in enumerate(groupSizes))
    groups = []
    start = 0
    while start < len(ordered):
        size = ordered[start][0]
        groups.append([person for _, person in ordered[start:start + size]])
        start += size
    return groups
def tile(a, *arg):
    """Tile tensor *a* like tf.tile: repeat dimension i arg[i] times,
    applied one dimension at a time from the last entry of *arg* back to
    the first."""
    last = len(arg) - 1
    for axis in range(last, -1, -1):
        reps = [1] * a.dim()
        reps[axis] = arg[axis]
        a = a.repeat(*reps)
    return a
def walk_down(subgraph, node):
    """Return *node* plus every node reachable below it (pre-order)."""
    collected = [node]
    for edge in subgraph.out_edges(node.name):
        collected += walk_down(subgraph, edge.to_node)
    return collected
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.