| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def ht_pairtree(request):
"""Override django settings for **HATHI_DATA**and create a temporary
directory structure mirroring top-level of hathi pairtree data
(currently has no pairtree content). Sets list of `hathi_prefixes`
on the calling class."""
# create temporary directories mocking hathi pairtree structure
with tempfile.TemporaryDirectory(prefix="ht_data_") as tmpdir_name:
prefixes = ["ab", "cd", "ef1", "g23"]
request.cls.hathi_prefixes = prefixes
with override_settings(HATHI_DATA=tmpdir_name):
for prefix in prefixes:
os.mkdir(os.path.join(tmpdir_name, prefix))
# use yield to ensure tests operate with the overridden settings
yield prefixes
| 22,200
|
def summarize_single_OLS(regression, col_dict, name, is_regularized=False):
"""Return dataframe aggregating over-all stats from a dictionary-like object containing OLS result objects."""
reg = regression
try:
col_dict['rsquared'][name] = reg.rsquared
except AttributeError:
col_dict['rsquared'][name] = 'NA'
try:
col_dict['rsquared_adj'][name] = reg.rsquared_adj
except AttributeError:
col_dict['rsquared_adj'][name] = 'NA'
col_dict['f_pvalue'][name] = reg.f_pvalue
col_dict['condition_number'][name] = reg.condition_number
col_dict['regularized'][name] = is_regularized
if not is_regularized:
outliers = reg.outlier_test(method='fdr_bh')['fdr_bh(p)'] <= 0.05
col_dict['n_outliers'][name] = (outliers).sum()
col_dict['outliers'][name] = ','.join(outliers.index[outliers].values)
else:
col_dict['n_outliers'][name] = "NA"
col_dict['outliers'][name] = "NA"
col_dict['aic'][name] = reg.aic
return col_dict
| 22,201
|
def simplify(polynom):
"""Simplifies a function with binary variables
"""
polynom = Poly(polynom)
new_polynom = 0
variables = list(polynom.free_symbols)
for var_i in variables:
coefficient_i = polynom.as_expr().coeff(var_i)/2
coefficient_i += polynom.as_expr().coeff(var_i ** 2)
new_polynom += coefficient_i.as_coefficients_dict()[1] * var_i
for var_j in variables:
if var_j != var_i:
coefficient_j = coefficient_i.coeff(var_j)
new_polynom += coefficient_j.as_coefficients_dict()[1] *\
var_i * var_j
return new_polynom + polynom.as_expr().as_coefficients_dict()[1]
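# A minimal usage sketch (not from the original source), assuming sympy's
# `symbols` is importable alongside the Poly used above. For binary variables
# x**2 == x, so squared terms fold back into the linear ones.
from sympy import symbols

x, y = symbols("x y")
expr = 2 * x ** 2 + 3 * x * y + 1
print(simplify(expr))  # expected: an expression equivalent to 2*x + 3*x*y + 1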
| 22,202
|
def get_universe_planets_planet_id(*, planet_id, if_none_match=None):
"""
:param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag
:param planet_id: planet_id integer
Get information on a planet
---
Alternate route: `/dev/universe/planets/{planet_id}/`
Alternate route: `/legacy/universe/planets/{planet_id}/`
Alternate route: `/v1/universe/planets/{planet_id}/`
---
This route expires daily at 11:05
"""
    return ESI_request.request(if_none_match=if_none_match,
                               planet_id=planet_id,
                               data_source='tranquility',
                               version='latest',
                               HTTP_method='GET',
                               path=f'/universe/planets/{planet_id}/')
| 22,203
|
def process_login():
"""Log user into site.
Find the user's login credentials located in the 'request',
look up the user, and store them in the session.
"""
    user_login = request.get_json()
    # Look the user up once and reuse the result instead of querying twice.
    current_user = crud.get_user_by_email(user_login['email'])
    if current_user:
        print(current_user)
        if current_user.password == user_login['password']:
            session['user'] = current_user.user_name
            flash("You've logged in successfully. Welcome to your Shelve-It account.")
            return jsonify({'status': "ok. you are logged in!", "user": current_user.user_name})
        else:
            session['user'] = 'unknown'
            return jsonify({'status': "incorrect password"})
    else:
        session['user'] = 'needs_to_register'
        flash("No account with that email exists. Please create one or try again")
        return jsonify({'status': "no user with that email"})
| 22,204
|
def cli():
"""A small tool for download HRSL data"""
pass
| 22,205
|
def verify_sha256sum_stream(ioobj, target_hash):
"""Verify the SHA256 hash sum of a file like object"""
verify_hashsum_stream(ioobj, target_hash, hashlib.sha256())
| 22,206
|
def is_int(var):
"""
    Is this an integer (i.e., not a float)?
"""
return isinstance(var, int)
| 22,207
|
def returnstringpacket(pkt):
"""Returns a packet as hex string"""
myString = ""
for c in pkt:
myString += "%02x" % c
return myString
| 22,208
|
def GetIndicesMappingFromTree( tree ):
"""
GetIndicesMappingFromTree
=========================
    Reuses Bill's idea to give the indices of all nodes (be they
    a subtree or a single leaf); it gives a list of indices of every sublist.
    To do that, I add one thing: the last element of an index is the length
    of the present list. e.g.
- get_indices_mapping_from_tree([1,2,3,4,5,6,7,8,9])
gives: [([0], 9)]
- get_indices_mapping_from_tree([1,[2,3],4,5,6,7,8,9])
gives: [([0], 8), ([1], 2)]
- get_indices_mapping_from_tree([1,[2,3,7],4,5,6,7,8,9])
gives: [([0], 8), ([1], 3)]
- get_indices_mapping_from_tree([1,[2,3,7],4,[5,[6,[7,8,9]]]])
gives: [([0], 4), ([1], 3), ([3], 2), ([3, 1], 2), ([3, 1, 1], 3)]
@param tree: a nested list representing a tree
@return: a nested list representing the indexes of the nested lists by depth
"""
q = deque([ ([],tree) ])
list_of_index_lists = [([0],len(tree))]
while q:
(indices, sub_tree) = q.popleft()
list_of_index_lists.append((indices,len(sub_tree)))
for (ordinal, sst) in enumerate( sub_tree[1:] ):
if isinstance( sst, list ):
idxs = indices[:]
idxs.append(ordinal+1)
q.append( (idxs, sst) )
list_of_index_lists.pop(1)
return list_of_index_lists
| 22,209
|
def preprocess_labs(lab_df: pd.DataFrame, material_to_include: list = ['any_blood'],
verbose: bool = True) -> pd.DataFrame:
"""
Preprocess the labs dataframe
:param lab_df:
:param material_to_include: list of materials to include where material is one of the following: 'any_blood', 'urine'
:param verbose: print preprocessing safety details
:return:
"""
lab_df = lab_df.copy()
lab_df['patient_admission_id'] = lab_df['patient_id'].astype(str) + '_' + lab_df['begin_date'].apply(
lambda bd: ''.join(bd.split(' ')[0].split('.')))
lab_df.drop(columns_to_drop, axis=1, inplace=True)
lab_names = set([c.split('_')[0] for c in lab_df.columns if c not in identification_columns])
new_lab_column_headers = set(
['_'.join(c.split('_')[1:]) for c in lab_df.columns if c not in identification_columns])
print('Labs measured:', lab_names)
# split lab df into individual lab dfs for every lab name
lab_df_split_by_lab_name = []
for _, lab_name in enumerate(lab_names):
selected_columns = identification_columns + [c for c in lab_df.columns if c.split('_')[0] == lab_name]
individual_lab_df = lab_df[selected_columns].dropna(subset=[f'{lab_name}_value'])
individual_lab_df.columns = identification_columns + ['_'.join(c.split('_')[1:]) for c in
individual_lab_df.columns if c.startswith(lab_name)]
individual_lab_df['lab_name'] = lab_name
lab_df_split_by_lab_name.append(individual_lab_df)
reorganised_lab_df = pd.concat(lab_df_split_by_lab_name, ignore_index=True)
equalized_reorganised_lab_df = reorganised_lab_df.copy()
for equivalence_list in equivalence_lists:
equalized_reorganised_lab_df.loc[
reorganised_lab_df['dosage_label'].isin(equivalence_list[1:]), 'dosage_label'] = equivalence_list[0]
equalized_reorganised_lab_df = equalized_reorganised_lab_df[
~equalized_reorganised_lab_df['dosage_label'].isin(dosage_labels_to_exclude)]
# check that units correspond
for dosage_label in equalized_reorganised_lab_df['dosage_label'].unique():
units_for_dosage_label = \
equalized_reorganised_lab_df[equalized_reorganised_lab_df['dosage_label'] == dosage_label][
'unit_of_measure'].unique()
print(dosage_label, units_for_dosage_label)
if len(units_for_dosage_label) > 1:
warnings.warn(f'{dosage_label} has different units: {units_for_dosage_label}')
raise ValueError(f'{dosage_label} has different units: {units_for_dosage_label}')
# fixing material equivalents and materials to exclude
# raise error if pO2, pCO2 or pH come from arterial and venous blood
for dosage_label in ['pO2', 'pCO2', 'pH']:
dosage_label_materials = \
equalized_reorganised_lab_df[equalized_reorganised_lab_df['dosage_label'].str.contains(dosage_label)][
'material_label'].unique()
if 'sga' in dosage_label_materials and len(dosage_label_materials) > 1:
raise ValueError(f'{dosage_label} has arterial and other materials: {dosage_label_materials}')
equalized_reorganised_lab_df.loc[
reorganised_lab_df['material_label'].isin(blood_material_equivalents), 'material_label'] = 'any_blood'
equalized_reorganised_lab_df = equalized_reorganised_lab_df[
~equalized_reorganised_lab_df['material_label'].isin(material_to_exclude)]
equalized_reorganised_lab_df = equalized_reorganised_lab_df[
equalized_reorganised_lab_df['material_label'].isin(material_to_include)]
# correct non numeric values
equalized_reorganised_lab_df = correct_non_numerical_values(equalized_reorganised_lab_df)
# remove non numerical values in value column
equalized_reorganised_lab_df = equalized_reorganised_lab_df[
~equalized_reorganised_lab_df['value'].isin(non_numerical_values_to_remove)]
equalized_reorganised_lab_df.dropna(subset=['value'], inplace=True)
remaining_non_numerical_values = \
equalized_reorganised_lab_df[pd.to_numeric(equalized_reorganised_lab_df['value'], errors='coerce').isnull()][
'value'].unique()
print('Remaining non-numerical values:', remaining_non_numerical_values)
if len(remaining_non_numerical_values) > 0:
raise ValueError(f'Remaining non-numerical values: {remaining_non_numerical_values}')
equalized_reorganised_lab_df['value'] = pd.to_numeric(equalized_reorganised_lab_df['value'], errors='coerce')
# correct negative values
# set negative values for dosage label 'hémoglobine' to NaN (NaN values will be removed later)
equalized_reorganised_lab_df.loc[(equalized_reorganised_lab_df['dosage_label'] == 'hémoglobine') & (
equalized_reorganised_lab_df['value'] < 0), 'value'] = np.NAN
# set negative values for dosage label 'glucose' to NaN (NaN values will be removed later)
equalized_reorganised_lab_df.loc[(equalized_reorganised_lab_df['dosage_label'] == 'glucose') & (
equalized_reorganised_lab_df['value'] < 0), 'value'] = np.NAN
equalized_reorganised_lab_df.dropna(subset=['value'], inplace=True)
# warn if negative values are still present
if len(equalized_reorganised_lab_df[equalized_reorganised_lab_df['value'] < 0]) > 0:
warnings.warn('Negative values are present. Check data.')
# remove all french accents and cedillas
equalized_reorganised_lab_df = remove_french_accents_and_cedillas_from_dataframe(equalized_reorganised_lab_df)
# restrict to possible value ranges
possible_value_ranges_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'possible_ranges_for_variables.xlsx')
possible_value_ranges = pd.read_excel(possible_value_ranges_file)
for variable in possible_value_ranges['variable_label'].dropna().unique():
possible_value_ranges_for_variable = possible_value_ranges[
possible_value_ranges['variable_label'] == variable]
equalized_reorganised_lab_df.loc[(equalized_reorganised_lab_df['dosage_label'] == variable)
& (~equalized_reorganised_lab_df['value'].between(
possible_value_ranges_for_variable['Min'].values[0],
possible_value_ranges_for_variable['Max'].values[0])), 'value'] = np.NAN
if verbose:
print(f'Excluding {equalized_reorganised_lab_df["value"].isna().sum()} observations because out of range')
equalized_reorganised_lab_df.dropna(subset=['value'], inplace=True)
# get mean number of values per dosage label patient admission id
median_observations_per_patient_admission_id = \
equalized_reorganised_lab_df.groupby(['patient_admission_id', 'dosage_label'])['value'].count().reset_index()
if verbose:
print(median_observations_per_patient_admission_id.groupby('dosage_label').median())
print(equalized_reorganised_lab_df.groupby('dosage_label')['value'].describe())
return equalized_reorganised_lab_df
| 22,210
|
def find_option(command, name):
"""
Helper method to find command option by its name.
:param command: string
:param name: string
:return: CommandOption
"""
# TODO: use all_options
if command in COMMAND_OPTIONS:
if name == 'help':
return OPTION_HELP
for opt in COMMAND_OPTIONS[command]:
if name in [opt.short_name, opt.long_name]:
return opt
return None
| 22,211
|
def generate_pairwise(params, n_comparisons=10):
"""Generate pairwise comparisons from a Bradley--Terry model.
This function samples comparisons pairs independently and uniformly at
random over the ``len(params)`` choose 2 possibilities, and samples the
corresponding comparison outcomes from a Bradley--Terry model parametrized
by ``params``.
Parameters
----------
params : array_like
Model parameters.
n_comparisons : int
Number of comparisons to be returned.
Returns
-------
    data : tuple of (int, int)
Pairwise-comparison samples (see :ref:`data-pairwise`).
"""
n = len(params)
items = tuple(range(n))
params = np.asarray(params)
data = list()
for _ in range(n_comparisons):
# Pick the pair uniformly at random.
a, b = random.sample(items, 2)
if compare((a, b), params) == a:
data.append((a, b))
else:
data.append((b, a))
return tuple(data)
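# Hedged usage sketch: numpy and the `compare` helper referenced above are
# assumed to be importable in this module (a choix-style setup).
import numpy as np

params = np.log([1.0, 2.0, 4.0])   # log-strengths of three items
data = generate_pairwise(params, n_comparisons=5)
print(data)  # e.g. ((2, 0), (1, 0), ...) -- the winner of each pair is listed first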
| 22,212
|
def render_json(fun):
"""
Decorator for views which return a dictionary that encodes the dictionary
into a JSON string and sets the mimetype of the response to
application/json.
"""
@wraps(fun)
def wrapper(request, *args, **kwargs):
response = fun(request, *args, **kwargs)
try:
return JSONResponse(response)
except TypeError:
# The response isn't JSON serializable.
return response
return wrapper
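# Hedged usage sketch: `stats_view` is a hypothetical view, and JSONResponse is
# assumed to be the project's JSON-encoding response class used by the decorator.
@render_json
def stats_view(request):
    # A plain dict is encoded to JSON; anything that is not JSON-serializable
    # (e.g. an already-built HttpResponse) is passed through unchanged.
    return {"status": "ok", "count": 3}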
| 22,213
|
def count_smileys_concise(arr: List[str]) -> int:
"""
    Another person's implementation. Turns the list into a string,
    then uses findall() on that string.
    Turning the result into a list makes it possible to return the length of that list.
    So this version is more concise, but uses more space. O(n) where n is the length of arr.
"""
return len(list(findall(r"[:;][-~]?[)D]", " ".join(arr))))
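# Quick sanity check of the regex, assuming `from re import findall` and
# `from typing import List` at module level as the code above implies.
print(count_smileys_concise([':)', ';(', ';}', ':-D']))  # 2 -- only ':)' and ':-D' match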
| 22,214
|
def transform_dead_op_vars(graph, translator=None):
"""Remove dead operations and variables that are passed over a link
but not used in the target block. Input is a graph."""
return transform_dead_op_vars_in_blocks(list(graph.iterblocks()),
[graph], translator)
| 22,215
|
def test_reconstruction_torch():
"""Test that input reconstruction via backprop has decreasing loss."""
if skip_all:
return None if run_without_pytest else pytest.skip()
if cant_import('torch'):
return None if run_without_pytest else pytest.skip()
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
J = 6
Q = 8
N = 1024
n_iters = 30
jtfs = TimeFrequencyScattering1D(J, N, Q, J_fr=4, average_fr=False,
frontend='torch', out_type='array',
sampling_filters_fr=('exclude', 'resample'),
max_pad_factor=1, max_pad_factor_fr=2,
pad_mode_fr='conj-reflect-zero',
).to(device)
y = torch.from_numpy(echirp(N, fmin=1).astype('float32')).to(device)
Sy = jtfs(y)
div = Sy.max()
Sy /= div
torch.manual_seed(0)
x = torch.randn(N, device=device)
x /= torch.max(torch.abs(x))
x.requires_grad = True
optimizer = torch.optim.SGD([x], lr=140000, momentum=.9, nesterov=True)
loss_fn = torch.nn.MSELoss()
losses, losses_recon = [], []
for i in range(n_iters):
optimizer.zero_grad()
Sx = jtfs(x)
Sx /= div
loss = loss_fn(Sx, Sy)
loss.backward()
optimizer.step()
losses.append(float(loss.detach().cpu().numpy()))
xn, yn = x.detach().cpu().numpy(), y.detach().cpu().numpy()
losses_recon.append(float(rel_l2(yn, xn)))
# unsure why CPU's worse
th = 1e-5 if device == 'cuda' else 2e-5
th_end_ratio = 50 if device == 'cuda' else 30
th_recon = 1.05
end_ratio = losses[0] / losses[-1]
assert end_ratio > th_end_ratio, end_ratio
assert min(losses) < th, "{:.2e} > {}".format(min(losses), th)
assert min(losses_recon) < th_recon, "{:.2e} > {}".format(min(losses_recon),
th_recon)
if metric_verbose:
print(("\nReconstruction (torch):\n(end_start_ratio, min_loss, "
"min_loss_recon) = ({:.1f}, {:.2e}, {:.2f})").format(
end_ratio, min(losses), min(losses_recon)))
| 22,216
|
def to_pixels(Hinv, loc):
"""
Given H^-1 and (x, y, z) in world coordinates, returns (c, r) in image
pixel indices.
"""
loc = to_image_frame(Hinv, loc).astype(int)
return (loc[1], loc[0])
| 22,217
|
def fetch(params, seqid=None, db=None, update=False):
"""
Obtains data from NCBI
"""
for p in params:
# Skip if exists (or not update).
if p.json and not update:
continue
# The JSON representation of the data.
json_name = resolve_fname(name=p.acc, format="json")
# GenBank representation of the data.
gbk_name = resolve_fname(name=p.acc, format="gb")
# Fetch and store genbank from remote site.
ncbi_efetch(p.acc, db=db, gbk_name=gbk_name)
# Convert genbank to JSON.
data = jsonrec.parse_file(fname=gbk_name, seqid=seqid)
# Save JSON file.
save_json_file(fname=json_name, data=data)
| 22,218
|
def temporary_upload(request):
"""
Accepts an image upload to server and saves it in a temporary folder.
"""
    if 'image' not in request.FILES:
        return HttpResponse(simplejson.dumps({'status': 'no image uploaded'}))
    filename = request.FILES['image']._get_name().strip().lower()
    imgdata = StringIO(request.FILES['image'].read())
    imgdata.seek(0)
    try:
        im = Image.open(imgdata)
        # Compare width and height separately: a plain tuple comparison is
        # lexicographic and would wrongly accept e.g. a 500x100 image.
        if im.size[0] < 480 or im.size[1] < 250:
            return HttpResponse(simplejson.dumps({'status': "Image size should be minimum 480 width and 250 height.\n\nYours is %dx%d." % (im.size[0], im.size[1]) }))
    except Exception:
        return HttpResponse(simplejson.dumps({'status': 'couldn\'t open the image'}))
local_dir = os.path.join(settings.MEDIA_ROOT, 'tmp', request.session.session_key)
try:
shutil.rmtree(local_dir, onerror=lambda f, p, e: None)
os.makedirs(local_dir)
except IOError:
pass
local_filename = os.path.join(local_dir, filename)
url = os.path.join(settings.MEDIA_URL, 'tmp', request.session.session_key, filename)
f = open(local_filename, 'wb')
f.write(imgdata.getvalue())
f.close()
request.session['temporary_filename'] = local_filename
ret = simplejson.dumps({'status': 'ok', 'link': url, 'filename': local_filename})
return HttpResponse(ret)
| 22,219
|
def pin_to_pinata(filename: str, config: tp.Dict[str, tp.Dict[str, tp.Any]]) -> None:
"""
:param filename: full name of a recorded video
:type filename: str
:param config: dictionary containing all the configurations
:type config: dict
pinning files in pinata to make them broadcasted around ipfs
"""
pinata_api = config["pinata"]["pinata_api"] # pinata credentials
pinata_secret_api = config["pinata"]["pinata_secret_api"]
if pinata_api and pinata_secret_api:
pinata = PinataPy(pinata_api, pinata_secret_api)
        # Send the entire file to Pinata, not just its hash; the hash will be the
        # same as if the file were published locally, because the content is the same.
        pinata.pin_file_to_ipfs(filename)
        logging.info("File published to Pinata")
| 22,220
|
def adf_test(path):
"""
Takes a csv file path as input (as a string)
This file must have one heading as Dates and the other as Close
This csv file will be converted into a series and then the ADF test will be
completed using data from that csv file
(Optional:
will plot the data using matplotlib as a line graph)
"""
if not os.path.exists(path):
raise Exception("The path specified does not exist")
df = pd.read_csv(path, parse_dates=['Time'])
series = df.loc[:, 'Close'].values
# # Plotting the graph of the date against the close
# df.plot(figsize=(14,8), label="Close Price", title='Series', marker=".")
# plt.ylabel("Close Prices")
# plt.legend()
# plt.show()
# ADF test
result = adfuller(series, autolag="AIC")
print(f"ADF Statistic = {result[0]}")
print(f"p_value = {result[1]}")
print(f"usedlags = {result[2]}")
# Result 4 is a dictionary that contains the critical values
for k, v in result[4].items():
print(f"Critical Values are:\n {k}, {v}")
print(result)
return result[0], result[1], result[2]
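# Hedged usage sketch; the CSV path below is hypothetical and must contain
# 'Time' and 'Close' columns as the function expects.
adf_stat, p_value, used_lags = adf_test("data/prices.csv")
if p_value < 0.05:
    print("Unit-root null rejected at the 5% level; the series looks stationary.")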
| 22,221
|
def add_dependency(key, value, dict):
"""
Add dependency to appropriate dictionary
ToDo: check if defaultdict would eliminate this function
"""
if key in dict:
if value not in dict[key]:
dict[key].append(value)
else:
dict[key] = [value]
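# On the ToDo above: a defaultdict(list) removes the key-existence branch,
# though the membership check is still needed to keep values unique.
# `add_dependency_dd` is a hypothetical sketch, not part of the original code.
from collections import defaultdict

def add_dependency_dd(key, value, deps):
    """Same behaviour as add_dependency, relying on defaultdict's auto-insert."""
    if value not in deps[key]:
        deps[key].append(value)

deps = defaultdict(list)
add_dependency_dd("a", "b", deps)
add_dependency_dd("a", "b", deps)  # duplicate is ignored
print(dict(deps))  # {'a': ['b']}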
| 22,222
|
def calc_distance_between_points_two_vectors_2d(v1, v2):
"""calc_distance_between_points_two_vectors_2d [pairwise distance between vectors points]
Arguments:
v1 {[np.array]} -- [description]
v2 {[type]} -- [description]
Raises:
ValueError -- [description]
ValueError -- [description]
ValueError -- [description]
Returns:
[type] -- [description]
testing:
>>> v1 = np.zeros((2, 5))
>>> v2 = np.zeros((2, 5))
>>> v2[1, :] = [0, 10, 25, 50, 100]
>>> d = calc_distance_between_points_two_vectors_2d(v1.T, v2.T)
"""
# Check dataformats
if not isinstance(v1, np.ndarray) or not isinstance(v2, np.ndarray):
raise ValueError("Invalid argument data format")
if not v1.shape[1] == 2 or not v2.shape[1] == 2:
raise ValueError("Invalid shape for input arrays")
if not v1.shape[0] == v2.shape[0]:
raise ValueError("Error: input arrays should have the same length")
    # Calculate distance
    if v1.shape[0] < 20000:
        # For short arrays use cdist and keep only the element-wise distances
        # (the diagonal of the pairwise distance matrix)
        dist = np.diagonal(distance.cdist(v1, v2, "euclidean"))
else:
dist = [
calc_distance_between_points_2d(p1, p2) for p1, p2 in zip(v1, v2)
]
return dist
| 22,223
|
def get_nearest_list_index(node_list, guide_node):
"""
Finds nearest nodes among node_list, using the metric given by weighted_norm
and chooses one of them at random.
Parameters
----------
node_list : list
list of nodes corresponding to one of the two search trees growing towards each other.
guide_node : dict
node that has been randomly chosen to expand towards
Returns
-------
min_ind : int
index of the chosen node
min_dist_choice : float
distance between the chosen node and the guide_node
"""
k_nearest = int(len(node_list) / 100) + 1
dlist = [weighted_norm(node, guide_node) for node in node_list]
k_min_dist_list = heapq.nsmallest(k_nearest, dlist)
min_dist_choice = random.choice(k_min_dist_list)
min_ind = dlist.index(min_dist_choice)
return min_ind, min_dist_choice
| 22,224
|
def get_extensions_from_dir(path: str) -> list[str]:
"""Gets all files that end with ``.py`` in a directory and returns a python dotpath."""
dirdotpath = ".".join(path.split(sep)[1:]) # we ignore the first part because we don't want to add the ``./``.
return [f"{dirdotpath}.{file}" for file in listdir(path) if file.endswith(".py")]
| 22,225
|
def to_normalized_exacta_dividends(x,scr=-1):
""" Convert 2-d representation of probabilities to dividends
:param x:
:param scr:
:return:
"""
fx = to_normalized_dividends( to_flat_exacta(x), scr=scr )
return from_flat_exacta(fx, diag_value=scr)
| 22,226
|
def CSVcreation():
"""This functions allows to access to page for the creation of csv"""
if "logged_in" in session and session["logged_in"] == True:
print("User login", session["username"])
try:
count1 = managedb.getCountLoginDB(session["username"])
if count1 == 0:
return redirect(url_for('index'))
return render_template('CSVcreation.html',data=data)
except Exception as e:
print("Error DB:",str(e))
return redirect(url_for('index'))
return redirect(url_for('index'))
| 22,227
|
def aws_services():
"""List or search for services by attribute."""
pass
| 22,228
|
def filter_perm(user, queryset, role):
"""Filter a queryset.
Main authorization business logic goes here.
"""
# Called outside of view
if user is None:
# TODO: I think this is used if a user isn't logged in and hits our endpoints which is a problem
return queryset
# Must be logged in
if not user.is_active or user.is_anonymous:
return queryset.none()
# Superusers can see all (not staff users)
if user.is_active and user.is_superuser:
return queryset
# Check permissions
conditions = []
model = queryset.model
paths_to_checksumfile = [*get_paths(model, models.ChecksumFile)]
if model == models.Collection:
# Add custom reverse relationships
field = model._meta.get_field('checksumfiles')
path = Path(field)
paths_to_checksumfile.append(path)
for path in paths_to_checksumfile:
# A user can read/write a file if they are the creator
is_creator = path.q(created_by=user)
conditions.append(is_creator)
if (
getattr(settings, 'RGD_GLOBAL_READ_ACCESS', False)
and role == models.CollectionPermission.READER
):
# A user can read any file by default
has_no_owner = path.q(created_by__isnull=True)
conditions.append(has_no_owner)
for path in get_paths(model, models.Collection):
# Check collection permissions
has_permission = path.q(collection_permissions__user=user)
has_role_level = path.q(collection_permissions__role__gte=role)
conditions.append(has_permission & has_role_level)
whitelist = (
queryset.none()
.union(*(queryset.filter(condition) for condition in conditions), all=True)
.values('pk')
)
return queryset.filter(pk__in=whitelist)
| 22,229
|
def generate_copies(func, phis):
"""
Emit stores to stack variables in predecessor blocks.
"""
builder = Builder(func)
vars = {}
loads = {}
# Allocate a stack variable for each phi
builder.position_at_beginning(func.startblock)
for block in phis:
for phi in phis[block]:
vars[phi] = builder.alloca(types.Pointer(phi.type))
# Generate loads in blocks containing the phis
for block in phis:
leaders = list(block.leaders)
last_leader = leaders[-1] if leaders else block.head
builder.position_after(last_leader)
for phi in phis[block]:
loads[phi] = builder.load(vars[phi])
# Generate copies (store to stack variables)
for block in phis:
for phi in phis[block]:
preds, args = phi.args
var = vars[phi]
phi_args = [loads.get(arg, arg) for arg in args]
for pred, arg in zip(preds, phi_args):
builder.position_before(pred.terminator)
builder.store(arg, var)
# Replace phis
for block in phis:
for phi in phis[block]:
phi.replace_uses(loads[phi])
phi.delete()
return vars, loads
| 22,230
|
def poll_and_notify():
"""
Check for updated results, and send a notification if a change is detected.
"""
# query the API for the latest result
new_results = get_status(MYSEJAHTERA_USERNAME, MYSEJAHTERA_PASSWORD)
# load the previous results from file
try:
with open(RESULTS_FILENAME) as resultsfile:
old_results = json.load(resultsfile)
    except (OSError, ValueError):
        # on the first run there probably won't be such a file,
        # so assume a default
old_results = {
'Health Facility:': '',
'Vaccination Location:': '',
'Date:': '',
'Time:': ''}
if new_results == "DONE":
print("Congratulations, you have received all your vaccinations.")
return
# compare the results:
elif new_results and old_results != new_results:
# send notification
print("Found updated results for your vaccination.")
NOTIFIER.notify(*messages.update_message(new_results))
# update the results file for next time
with open(RESULTS_FILENAME, "w") as resultsfile:
json.dump(new_results, resultsfile, indent=2)
else:
# no change to results to vaccination status. ignore it!
print("No changes to your vaccination status.")
for key, value in old_results.items():
print(key, value)
| 22,231
|
def calculate_dv(wave: Sequence):
"""
Given a wavelength array, calculate the minimum ``dv`` of the array.
Parameters
----------
wave : array-like
The wavelength array
Returns
-------
float
delta-v in units of km/s
"""
return C.c_kms * np.min(np.diff(wave) / wave[:-1])
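# Hedged usage sketch, assuming C.c_kms is the speed of light in km/s
# (about 299792.458) as the formula implies.
import numpy as np

wave = np.linspace(5000.0, 5010.0, 11)  # evenly spaced wavelengths, 1 unit apart
print(calculate_dv(wave))  # smallest pixel spacing in velocity, roughly 60 km/s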
| 22,232
|
def check_link_errors(*args, visit=(), user="user", **kwargs):
"""
    Crawl the site starting from the given base URL and raise an error if the
    resulting error dictionary is not empty.
    Notes:
        Accepts the same arguments as the :func:`crawl` function.
"""
errors, visited = crawl(*args, **kwargs)
for url in visit:
if url not in visited:
errors[url] = f"URL was not visited by {user}"
if errors:
for url, code in errors.items():
if isinstance(code, int):
print(f"URL {url} returned invalid status code: {code}")
else:
print(f"Invalid URL {url} encountered at {code}")
raise AssertionError(errors, visited)
return visited
| 22,233
|
def cart3_to_polar2(xyz_array):
"""
Convert 3D cartesian coordinates into 2D polar coordinates.
This is a simple routine for converting a set of 3D cartesian vectors into
spherical coordinates, where the position (0, 0) lies along the x-direction.
Parameters
----------
xyz_array : ndarray of float
Cartesian coordinates, need not be of unit vector length. Shape is
(3, coord_shape).
Returns
-------
lon_array : ndarray of float
Longitude coordinates, which increases in the counter-clockwise direction.
Units of radians, shape is (coord_shape,).
lat_array : ndarray of float
Latitude coordinates, where 0 falls on the equator of the sphere. Units of
radians, shape is (coord_shape,).
"""
if not isinstance(xyz_array, np.ndarray):
raise ValueError("xyz_array must be an ndarray.")
if xyz_array.ndim == 0:
raise ValueError("xyz_array must have ndim > 0")
if xyz_array.shape[0] != 3:
raise ValueError("xyz_array must be length 3 across the zeroth axis.")
    # The longitude coord is relatively easy to calculate, just take the X and Y
    # components and find the arctan of the pair.
lon_array = np.mod(np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float)
# If we _knew_ that xyz_array was always of length 1, then this call could be a much
# simpler one to arcsin. But to make this generic, we'll use the length of the XY
# component along with arctan2.
lat_array = np.arctan2(
xyz_array[2], np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0)), dtype=float
)
# Return the two arrays
return lon_array, lat_array
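# Small usage sketch (not from the original source): two unit vectors, shape (3, 2).
# The +x axis maps to (lon, lat) = (0, 0) and the +z axis to (0, pi/2).
import numpy as np

xyz = np.array([[1.0, 0.0],
                [0.0, 0.0],
                [0.0, 1.0]])
lon, lat = cart3_to_polar2(xyz)
print(lon, lat)  # approximately [0. 0.] [0. 1.5708]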
| 22,234
|
def deploy_release(rel_id, env_id):
"""deploy_release will start deploying a release to a given environment"""
uri = config.OCTOPUS_URI + "/api/deployments"
r = requests.post(uri, headers=config.OCTOPUS_HEADERS, verify=False,
json={'ReleaseId': rel_id, 'EnvironmentId': env_id})
return r.json()
| 22,235
|
def create_static_route(dut, next_hop=None, static_ip=None, shell="vtysh", family='ipv4', interface = None, vrf = None):
"""
To create static route
Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)
:param dut:
:param next_hop:
:param static_ip:
:param shell: sonic|vtysh
    :param family: ipv4|ipv6
    :param interface: optional egress interface for the route
    :param vrf: optional VRF name (applied in the vtysh shell only)
    :return:
"""
if not static_ip:
st.log("Provide static_ip")
return False
if shell == "vtysh":
if family.lower() == "ipv4" or family.lower() == "":
if next_hop:
command = "ip route {} {}".format(static_ip, next_hop)
else:
command = "ip route {}".format(static_ip)
elif family.lower() == "ipv6":
command = "ipv6 route {} {}".format(static_ip, next_hop)
if interface:
command +=" {}".format(interface)
if vrf:
command +=" vrf {}".format(vrf)
st.config(dut, command, type='vtysh')
else:
if family.lower() == "ipv4" or family.lower() == "":
if next_hop:
command = "ip route add {} via {}".format(static_ip, next_hop)
else:
command = "ip route add {}".format(static_ip)
elif family.lower() == "ipv6":
if next_hop:
command = "ip -6 route add {} via {}".format(static_ip, next_hop)
else:
command = "ip -6 route add {}".format(static_ip)
if interface:
command +=" dev {}".format(interface)
st.config(dut, command)
| 22,236
|
def _markfoundfiles(arg, initargs, foundflags):
"""Mark file flags as found."""
try:
pos = initargs.index(arg) - 1
except ValueError:
pos = initargs.index("../" + arg) - 1
# In cases where there is a single input file as the first parameter. This
# should cover cases such as:
# exec input.file
# exec input.file > output.file
if arg == initargs[0]:
foundflags.append("<")
# Other cases should pretty much be formats like:
# exec -flag file -flag file -flag file
elif (len(initargs) > 1 and initargs[pos][0] == "-"
and initargs[pos] not in foundflags):
foundflags.append(initargs[pos])
# Or cases like exec -flag file -flag file inputfile > outputfile
elif (len(initargs) > 1 and initargs[pos][0] != "-"
and initargs[pos] not in foundflags):
foundflags.append("<")
return foundflags
| 22,237
|
def part_a(puzzle_input):
"""
Calculate the answer for part_a.
Args:
puzzle_input (list): Formatted as the provided input from the website.
Returns:
string: The answer for part_a.
"""
recipes_to_make = int(''.join(puzzle_input))
elf_index_1 = 0
elf_index_2 = 1
recipies = [3, 7]
while len(recipies) < recipes_to_make + 10:
new_recipes = recipies[elf_index_1] + recipies[elf_index_2]
if new_recipes >= 10:
recipies.append(1)
recipies.append(new_recipes - 10)
else:
recipies.append(new_recipes)
elf_index_1 = (elf_index_1 + (recipies[elf_index_1] + 1)) % len(recipies)
elf_index_2 = (elf_index_2 + (recipies[elf_index_2] + 1)) % len(recipies)
return ''.join(map(str, recipies[recipes_to_make:recipes_to_make + 10]))
| 22,238
|
def _CompareFields(field, other_field):
"""Checks if two ProtoRPC fields are "equal".
Compares the arguments, rather than the id of the elements (which is
the default __eq__ behavior) as well as the class of the fields.
Args:
field: A ProtoRPC message field to be compared.
other_field: A ProtoRPC message field to be compared.
Returns:
Boolean indicating whether the fields are equal.
"""
field_attrs = _GetFieldAttributes(field)
other_field_attrs = _GetFieldAttributes(other_field)
if field_attrs != other_field_attrs:
return False
return field.__class__ == other_field.__class__
| 22,239
|
def classpath_dest_filename(coord: str, src_filename: str) -> str:
"""Calculates the destination filename on the classpath for the given source filename and coord.
TODO: This is duplicated in `COURSIER_POST_PROCESSING_SCRIPT`.
"""
dest_name = coord.replace(":", "_")
_, ext = os.path.splitext(src_filename)
return f"{dest_name}{ext}"
| 22,240
|
def loadtxt_rows(filename, rows, single_precision=False):
"""
Load only certain rows
"""
# Open the file
f = open(filename, "r")
# Storage
results = {}
# Row number
i = 0
# Number of columns
ncol = None
while(True):
# Read the line and split by commas
line = f.readline()
cells = line.split(",")
# Quit when you see a different number of columns
if ncol is not None and len(cells) != ncol:
break
# Non-comment lines
if cells[0] != "#":
# If it's the first one, get the number of columns
if ncol is None:
ncol = len(cells)
# Otherwise, include in results
if i in rows:
if single_precision:
results[i] = np.array([float(cell) for cell in cells],\
dtype="float32")
else:
results[i] = np.array([float(cell) for cell in cells])
i += 1
results["ncol"] = ncol
return results
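# Hedged usage sketch; the CSV path is hypothetical. Only the requested row
# indices are parsed into numpy arrays, keyed by their row number.
results = loadtxt_rows("posterior_sample.csv", rows={0, 10, 20})
print(results["ncol"])      # number of columns detected
print(results.get(10))      # parsed row 10 as a numpy array, if the file has one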
| 22,241
|
def create_signal(frequencies, amplitudes, number_of_samples, sample_rate):
"""Create a signal of given frequencies and their amplitudes.
"""
timesamples = arange(number_of_samples) / sample_rate
signal = zeros(len(timesamples))
for frequency, amplitude in zip(frequencies, amplitudes):
signal += amplitude * sin(2*pi*frequency*timesamples)
return signal, timesamples
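# Hedged usage sketch, assuming the module does `from numpy import arange, zeros, sin, pi`
# (or equivalent) as the function body implies.
signal, timesamples = create_signal(frequencies=[5, 50],
                                    amplitudes=[1.0, 0.5],
                                    number_of_samples=1000,
                                    sample_rate=1000)
# `signal` is one second of a 5 Hz tone mixed with a quieter 50 Hz tone.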
| 22,242
|
def serp_goog(q, cx, key, c2coff=None, cr=None,
dateRestrict=None, exactTerms=None, excludeTerms=None,
fileType=None, filter=None, gl=None, highRange=None,
hl=None, hq=None, imgColorType=None, imgDominantColor=None,
imgSize=None, imgType=None, linkSite=None, lowRange=None,
lr=None, num=None, orTerms=None, relatedSite=None,
rights=None, safe=None, searchType=None, siteSearch=None,
siteSearchFilter=None, sort=None, start=None):
"""Query Google and get search results in a DataFrame.
For each parameter, you can supply single or multiple values / arguments.
If you pass multiple arguments, all the possible combinations of
arguments (the product) will be requested, and you will get one
DataFrame combining all queries. See examples below.
:param q: The search expression.
:param cx: The custom search engine ID to use for this
request.
:param key: The API key of your custom search engine.
    :param c2coff: Enables or disables Simplified and
        Traditional Chinese Search. The default value for this
        parameter is 0 (zero), meaning that the feature is enabled.
        Supported values are: 1: Disabled, 0: Enabled (default)
    :param cr: Restricts search results to documents
        originating in a particular country. You may use Boolean
        operators in the cr parameter's value. Google Search
        determines the country of a document by analyzing: the
        top-level domain (TLD) of the document's URL, and the
        geographic location of the Web server's IP address. See the
        Country Parameter Values page for a list of valid values
        for this parameter.
:param dateRestrict: Restricts results to URLs based on
date. Supported values include:d[number]: requests results
from the specified number of past days.
- d[number]: requests results from the specified number of past days.
- w[number]: requests results from the specified number of past weeks.
- m[number]: requests results from the specified number of past months.
- y[number]: requests results from the specified number of past years.
:param exactTerms: Identifies a phrase that all
documents in the search results must contain.
:param excludeTerms: Identifies a word or phrase that
should not appear in any documents in the search results.
:param fileType: Restricts results to files of a
specified extension. A list of file types indexable by
Google can be found in Search Console Help Center.
:param filter: Controls turning on or off the duplicate
content filter.See Automatic Filtering for more information
about Google's search results filters. Note that host
crowding filtering applies only to multi-site searches.By
default, Google applies filtering to all search results to
improve the quality of those results. Acceptable values
are: "0": Turns off duplicate content filter. "1": Turns
on duplicate content filter.
:param gl: Geolocation of end user. The gl parameter
value is a two-letter country code. The gl parameter boosts
search results whose country of origin matches the parameter
value. See the Country Codes page for a list of valid
values.Specifying a gl parameter value should lead to more
relevant results. This is particularly true for
international customers and, even more specifically, for
customers in English- speaking countries other than the
United States.
:param highRange: Specifies the ending value for a
search range.Use lowRange and highRange to append an
inclusive search range of lowRange...highRange to the query.
:param hl: Sets the user interface language. Explicitly
setting this parameter improves the performance and the
quality of your search results.See the Interface
Languages section of Internationalizing Queries and Results
Presentation for more information, and Supported Interface
Languages for a list of supported languages.
:param hq: Appends the specified query terms to the
query, as if they were combined with a logical AND operator.
:param imgColorType: Returns black and white, grayscale,
or color images: mono, gray, and color. Acceptable values
are: "color": color "gray": gray "mono": mono
:param imgDominantColor: Returns images of a specific
dominant color. Acceptable values are: "black": black
"blue": blue "brown": brown "gray": gray "green": green
"orange": orange "pink": pink "purple": purple "red": red
"teal": teal "white": white "yellow": yellow
:param imgSize: Returns images of a specified size.
Acceptable values are: "huge": huge "icon": icon "large":
large "medium": medium "small": small "xlarge": xlarge
"xxlarge": xxlarge
:param imgType: Returns images of a type. Acceptable
values are: "clipart": clipart "face": face "lineart":
lineart "news": news "photo": photo
:param linkSite: Specifies that all search results
should contain a link to a particular URL
:param lowRange: Specifies the starting value for a
search range. Use lowRange and highRange to append an
inclusive search range of lowRange...highRange to the query.
:param lr: Restricts the search to documents written in
a particular language (e.g., lr=lang_ja). Acceptable values
are: "lang_ar": Arabic "lang_bg": Bulgarian "lang_ca":
Catalan "lang_cs": Czech "lang_da": Danish "lang_de":
German "lang_el": Greek "lang_en": English "lang_es":
Spanish "lang_et": Estonian "lang_fi": Finnish "lang_fr":
French "lang_hr": Croatian "lang_hu": Hungarian
"lang_id": Indonesian "lang_is": Icelandic "lang_it":
Italian "lang_iw": Hebrew "lang_ja": Japanese "lang_ko":
Korean "lang_lt": Lithuanian "lang_lv": Latvian
"lang_nl": Dutch "lang_no": Norwegian "lang_pl": Polish
"lang_pt": Portuguese "lang_ro": Romanian "lang_ru":
Russian "lang_sk": Slovak "lang_sl": Slovenian "lang_sr":
Serbian "lang_sv": Swedish "lang_tr": Turkish "lang_zh-
CN": Chinese (Simplified) "lang_zh-TW": Chinese
(Traditional)
:param num: Number of search results to return.Valid
values are integers between 1 and 10, inclusive.
:param orTerms: Provides additional search terms to
check for in a document, where each document in the search
results must contain at least one of the additional search
terms.
:param relatedSite: Specifies that all search results
should be pages that are related to the specified URL.
:param rights: Filters based on licensing. Supported
values include: cc_publicdomain, cc_attribute,
cc_sharealike, cc_noncommercial, cc_nonderived, and
combinations of these.
:param safe: Search safety level. Acceptable values
are: "active": Enables SafeSearch filtering. "off":
Disables SafeSearch filtering. (default)
:param searchType: Specifies the search type: image. If
unspecified, results are limited to webpages. Acceptable
values are: "image": custom image search.
:param siteSearch: Specifies all search results should
be pages from a given site.
:param siteSearchFilter: Controls whether to include or
exclude results from the site named in the siteSearch
parameter. Acceptable values are: "e": exclude "i":
include
:param sort: The sort expression to apply to the
results.
:param start: The index of the first result to
return.Valid value are integers starting 1 (default) and the
second result is 2 and so forth. For example &start=11 gives
the second page of results with the default "num" value of
10 results per page.Note: No more than 100 results will ever
be returned for any query with JSON API, even if more than
100 documents match the query, so setting (start + num) to
more than 100 will produce an error. Note that the maximum
value for num is 10.
The following function call will produce two queries:
"hotel" in the USA, and "hotel" in France
>>> serp_goog(q='hotel', gl=['us', 'fr'], cx='YOUR_CX', key='YOUR_KEY')
    The below function call will produce four queries and make four requests:
    "flights" in UK
    "flights" in Australia
"tickets" in UK
"tickets" in Australia
'cr' here refers to 'country restrict', which focuses on content
originating from the specified country.
>>> serp_goog(q=['flights', 'tickets'], cr=['countryUK', 'countryAU'],
cx='YOUR_CX', key='YOUR_KEY')
"""
params = locals()
supplied_params = {k: v for k, v in params.items() if params[k] is not None}
for p in supplied_params:
if isinstance(supplied_params[p], (str, int)):
supplied_params[p] = [supplied_params[p]]
for p in supplied_params:
if p in SERP_GOOG_VALID_VALS:
if not set(supplied_params[p]).issubset(SERP_GOOG_VALID_VALS[p]):
raise ValueError('Please make sure you provide a'
' valid value for "{}", valid values:\n'
'{}'.format(p,
sorted(SERP_GOOG_VALID_VALS[p])))
params_list = _dict_product(supplied_params)
base_url = 'https://www.googleapis.com/customsearch/v1?'
specified_cols = ['searchTerms', 'rank', 'title', 'snippet',
'displayLink', 'link', 'queryTime', 'totalResults']
responses = []
for param in params_list:
param_log = ', '.join([k + '=' + str(v) for k, v in param.items()])
logging.info(msg='Requesting: ' + param_log)
resp = requests.get(base_url, params=param)
if resp.status_code >= 400:
raise Exception(resp.json())
responses.append(resp)
result_df = pd.DataFrame()
for i, resp in enumerate(responses):
request_metadata = resp.json()['queries']['request'][0]
del request_metadata['title']
search_info = resp.json()['searchInformation']
if int(search_info['totalResults']) == 0:
df = pd.DataFrame(columns=specified_cols, index=range(1))
df['searchTerms'] = request_metadata['searchTerms']
# These keys don't appear in the response so they have to be
# added manually
for missing in ['lr', 'num', 'start', 'c2coff']:
if missing in params_list[i]:
df[missing] = params_list[i][missing]
else:
df = pd.DataFrame(resp.json()['items'])
df['cseName'] = resp.json()['context']['title']
start_idx = request_metadata['startIndex']
df['rank'] = range(start_idx, start_idx + len(df))
for missing in ['lr', 'num', 'start', 'c2coff']:
if missing in params_list[i]:
df[missing] = params_list[i][missing]
meta_columns = {**request_metadata, **search_info}
df = df.assign(**meta_columns)
df['queryTime'] = datetime.datetime.now(tz=datetime.timezone.utc)
df['queryTime'] = pd.to_datetime(df['queryTime'])
if 'image' in df:
img_df = json_normalize(df['image'])
img_df.columns = ['image.' + c for c in img_df.columns]
df = pd.concat([df, img_df], axis=1)
result_df = result_df.append(df, sort=False, ignore_index=True)
ordered_cols = (list(set(params_list[i]).difference({'q', 'key', 'cx'})) +
specified_cols)
non_ordered = result_df.columns.difference(set(ordered_cols))
final_df = result_df[ordered_cols + list(non_ordered)]
if 'pagemap' in final_df:
pagemap_df = pd.DataFrame()
for p in final_df['pagemap']:
try:
temp_pagemap_df = json_normalize(p)
pagemap_df = pagemap_df.append(temp_pagemap_df, sort=False)
except Exception as e:
temp_pagemap_df = pd.DataFrame({'delete_me': None},
index=range(1))
pagemap_df = pagemap_df.append(temp_pagemap_df, sort=False)
pagemap_df = pagemap_df.reset_index(drop=True)
if 'delete_me' in pagemap_df:
del pagemap_df['delete_me']
for col in pagemap_df:
if col in final_df:
pagemap_df = pagemap_df.rename(columns={col: 'pagemap_' + col})
final_df = pd.concat([final_df, pagemap_df], axis=1)
if 'metatags' in pagemap_df:
metatag_df = pd.DataFrame()
for m in pagemap_df['metatags']:
try:
temp_metatags_df = json_normalize(m)
metatag_df = metatag_df.append(temp_metatags_df,
sort=False)
except Exception as e:
temp_metatags_df = pd.DataFrame({'delete_me': None},
index=range(1))
metatag_df = metatag_df.append(temp_metatags_df,
sort=False)
metatag_df = metatag_df.reset_index(drop=True)
if 'delete_me' in metatag_df:
del metatag_df['delete_me']
for col in metatag_df:
if col in final_df:
metatag_df = metatag_df.rename(columns={col: 'metatag_' + col})
final_df = pd.concat([final_df, metatag_df], axis=1)
return final_df
| 22,243
|
def get_git_revision():
"""
Get the number of revisions since the beginning.
"""
revision = "0"
if os.path.isdir(os.path.join(basedir, '.git')):
try:
proc = subprocess.Popen(
['git', '-C', basedir, 'rev-list', '--count', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
rev, err = proc.communicate()
if proc.returncode == 0:
revision = rev.strip().decode('ascii')
except OSError:
pass
return revision
| 22,244
|
def get_wiki_modal_data(term):
"""
    Runs the Wikipedia helper functions and creates the
    Wikipedia data ready for the modal.
"""
return_data = False
summary_data = get_wiki_summary(term=term)
related_terms = get_similar_search(term=term)
if summary_data:
return_data = {
'wiki_term': term,
'summary_data': summary_data,
'related_terms': related_terms
}
return return_data
| 22,245
|
def format_taxa_to_js(otu_coords, lineages, prevalence, min_taxon_radius=0.5,
max_taxon_radius=5, radius=1.0):
"""Write a string representing the taxa in a PCoA plot as javascript
Parameters
----------
otu_coords : array_like
Numpy array where the taxa is positioned
lineages : array_like
Label for each of these lineages
prevalence : array_like
Score of prevalence for each of the taxa that is drawn
min_taxon_radius : float, optional
Smallest radius for a sphere.
max_taxon_radius : float, optional
        Largest radius for a sphere.
radius : float, optional
Base radius for a sphere.
Outputs
-------
str
JavaScript string where the taxa information is written to create the
spheres representing each of these, will return only the variable
declaration if the inputs are empty.
Notes
-----
These parameters should work more as constants and once we find out that
there's a value that is too big to be presented, the proper checks should
be put into place. Currently we haven't found such cases in any study*
min_taxon_radius: minimum value for the radius of the spheres on the plot
    max_taxon_radius: maximum value for the radius of the spheres on the plot
radius: default value size
"""
js_biplots_string = []
js_biplots_string.append('\nvar g_taxaPositions = new Array();\n')
# if we have prevalence scores, calculate the taxa radii values
if len(prevalence):
taxa_radii = radius * (min_taxon_radius + (max_taxon_radius -
min_taxon_radius) * prevalence)
else:
taxa_radii = []
index = 0
# write the data in the form of a dictionary
for taxa_label, taxa_coord, t_radius in zip(lineages,
otu_coords, taxa_radii):
js_biplots_string.append("g_taxaPositions['%d'] = { 'lineage': '%s', "
"'x': %f, 'y': %f, 'z': %f, 'radius': %f};\n"
% (index, taxa_label, taxa_coord[0],
taxa_coord[1], taxa_coord[2], t_radius))
index += 1
js_biplots_string.append('\n')
# join the array of strings as a single string
return ''.join(js_biplots_string)
| 22,246
|
def _get_symbols_from_args(args: argparse.Namespace) -> List[icmsym.Symbol]:
"""
Get list of symbols to extract.
"""
# If all args are specified to extract only one symbol, return this symbol.
if args.symbol and args.exchange and args.asset_class and args.currency:
return [
icmsym.Symbol(
ticker=args_symbol,
exchange=args.exchange,
asset_class=args.asset_class,
contract_type=args.contract_type,
currency=args.currency,
)
for args_symbol in args.symbol
]
# Find all matched symbols otherwise.
file_path_generator = iasfil.FilePathGeneratorFactory.get_file_path_generator(
args.provider
)
latest_symbols_file = file_path_generator.get_latest_symbols_file()
symbol_universe = iassym.SymbolUniverseFactory.get_symbol_universe(
args.provider, symbols_file=latest_symbols_file
)
if args.symbol is None:
args_symbols = [args.symbol]
else:
args_symbols = args.symbol
symbols: List[icmsym.Symbol] = []
for symbol in args_symbols:
symbols.extend(
symbol_universe.get(
ticker=symbol,
exchange=args.exchange,
asset_class=args.asset_class,
contract_type=args.contract_type,
currency=args.currency,
is_downloaded=True,
frequency=args.frequency,
path_generator=file_path_generator,
)
)
return symbols
| 22,247
|
def compute_heading(mag_read):
""" Computes the compass heading from the magnetometer X and Y.
Returns a float in degrees between 0 and 360.
"""
return ((atan2(mag_read[1], mag_read[0]) * 180) / pi) + 180
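# Quick check of the convention, assuming atan2 and pi come from math as the
# body implies: the result lies in [0, 360), with mag_read given as (x, y, ...).
print(compute_heading((1.0, 0.0)))  # 180.0
print(compute_heading((0.0, 1.0)))  # 270.0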
| 22,248
|
def data_loader(filename, input_directory, input_directory_processed, fs_resampled, p_and_t_waves=False):
"""Convert data and header_data to .npy and dict format."""
# Dataset lookup
lookup = {'A': 'A', 'Q': 'B', 'I': 'C', 'S': 'D', 'H': 'E', 'E': 'F'}
# Get datset
dataset = lookup[filename[0]]
# Import header file
header = _load_header_file(filename=filename, input_directory=input_directory)
# Get labels
labels = Labels(labels_SNOMEDCT=header['labels_SNOMEDCT'])
# Import matlab file
waveforms = _load_mat_file(filename=filename, input_directory=input_directory)
# Resample waveforms
samples = int(waveforms.shape[0] * fs_resampled / header['fs'])
waveforms = signal.resample(x=waveforms, num=samples, axis=0)
# Compute heart rate
hr = _compute_heart_rate(waveforms=waveforms, fs=fs_resampled)
# Get rpeaks
rpeaks = _get_rpeaks(waveforms=waveforms, fs=fs_resampled)
rpeak_array = _get_peak_array(waveforms=waveforms, peaks=rpeaks)
rpeak_times = _get_peak_times(waveforms=waveforms, peak_array=rpeak_array, fs=fs_resampled)
# Get P-waves and T-waves
if p_and_t_waves:
p_waves, t_waves = _get_p_and_t_waves(waveforms=waveforms, rpeaks=rpeaks)
else:
p_waves = None
t_waves = None
p_wave_array = _get_peak_array(waveforms=waveforms, peaks=p_waves)
p_wave_times = _get_peak_times(waveforms=waveforms, peak_array=p_wave_array, fs=fs_resampled)
t_wave_array = _get_peak_array(waveforms=waveforms, peaks=t_waves)
t_wave_times = _get_peak_times(waveforms=waveforms, peak_array=t_wave_array, fs=fs_resampled)
os.makedirs(os.path.join(input_directory_processed, dataset, 'formatted'), exist_ok=True)
# Save waveform data npy file
np.save(os.path.join(input_directory_processed, dataset, 'formatted', '{}.npy'.format(filename)), waveforms)
# Save meta data JSON
with open(os.path.join(input_directory_processed, dataset, 'formatted', '{}.json'.format(filename)), 'w') as file:
json.dump({'filename': filename,
'dataset': dataset,
'datetime': header['datetime'],
'channel_order': header['channel_order'],
'age': header['age'],
'sex': header['sex'],
'amp_conversion': header['amp_conversion'],
'fs': header['fs'],
'fs_resampled': fs_resampled,
'length': header['length'],
'num_leads': header['num_leads'],
'labels_SNOMEDCT': labels.labels_SNOMEDCT,
'labels_short': labels.labels_short,
'labels_full': labels.labels_full,
'labels_int': labels.labels_int,
'labels_training': labels.labels_training,
'labels_training_merged': labels.labels_training_merged,
'shape': waveforms.shape,
'hr': hr,
'rpeaks': rpeaks,
'rpeak_array': rpeak_array.tolist(),
'rpeak_times': rpeak_times,
'p_waves': p_waves,
'p_wave_array': p_wave_array.tolist(),
'p_wave_times': p_wave_times,
't_waves': t_waves,
't_wave_array': t_wave_array.tolist(),
't_wave_times': t_wave_times,
'labels_unscored_SNOMEDCT': labels.labels_unscored_SNOMEDCT,
'labels_unscored_short': labels.labels_unscored_short,
'labels_unscored_full': labels.labels_unscored_full,
'p_and_t_waves': p_and_t_waves
},
file, sort_keys=False, indent=4)
| 22,249
|
def generate_all_commands(
configs_path, commands_file, n_repeats, row_wise=True, optimize_access=True
):
"""
Parameters:
row_wise: if True, commands are generated s.t. all configs (columns) are evaluated for one
dataset (rows) first before moving on to the next dataset. If False, all datasets are run for
one config file before moving on to the next config file.
    optimize_access: shuffle the commands at random for a window of 20 configs x n datasets
    in the table to reduce the number of simultaneous dataset accesses. n=20 per default
"""
with Path(configs_path.parent, "default.yaml").open() as in_stream:
config = yaml.safe_load(in_stream)
print("using {} as default config file".format(str(configs_path.parent) + "/default.yaml"))
base_datasets_dir = config["cluster_datasets_dir"]
all_configs = [config_path.name for config_path in configs_path.glob("*")]
if row_wise:
def _chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
all_datasets_chunks = list(_chunks(all_datasets, 20))
commands = []
for dataset_chunk in all_datasets_chunks:
sub_list_commands = []
for dataset in dataset_chunk:
for config in all_configs:
sub_list_commands.append((config, dataset))
if optimize_access:
shuffle(sub_list_commands)
for sub_cmd in sub_list_commands:
for repeat in range(n_repeats):
commands.append(
construct_command(
sub_cmd[0], sub_cmd[1], base_datasets_dir, repeat, configs_path
)
)
else:
commands = [
construct_command(config, dataset, base_datasets_dir, repeat, configs_path)
for config, dataset, repeat in it.product(all_configs, all_datasets, range(n_repeats))
]
commands_file.write_text("\n".join(commands))
| 22,250
|
def ConvertTrieToFlatPaths(trie, prefix=None):
"""Flattens the trie of paths, prepending a prefix to each."""
result = {}
for name, data in trie.items():
if prefix:
name = prefix + '/' + name
if len(data) != 0 and not 'results' in data:
result.update(ConvertTrieToFlatPaths(data, name))
else:
result[name] = data
return result
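# Small example of the flattening (hypothetical trie of test results):
trie = {"fast": {"canvas": {"results": [1]}, "css": {"results": [2]}}}
print(ConvertTrieToFlatPaths(trie))
# {'fast/canvas': {'results': [1]}, 'fast/css': {'results': [2]}}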
| 22,251
|
def get_root_version_for_subset_version(root_dataset_path: str,
sub_dataset_version: str,
sub_dataset_path: MetadataPath
) -> List[str]:
"""
Get the versions of the root that contains the
given sub_dataset_version at the given sub_dataset_path,
if any exists. If the configuration does not exist
return an empty iterable.
"""
root_path = Path(root_dataset_path).resolve()
current_path = (root_path / sub_dataset_path).resolve()
# Ensure that the sub-dataset path is under the root-dataset path
current_path.relative_to(root_path)
current_version = sub_dataset_version
current_path = current_path.parent
while len(current_path.parts) >= len(root_path.parts):
# Skip intermediate directories, i.e. check only on git
# repository roots.
if len(tuple(current_path.glob(".git"))) == 0:
current_path = current_path.parent
continue
current_version = find_version_containing(current_path, current_version)
if current_version == "":
return []
current_path = current_path.parent
return [current_version]
| 22,252
|
def tojson(input, pretty, echo, strip_whitespace, strip_namespace, strip_attribute, **kwargs):
"""
Converts the XML input to JSON output. Requires valid input.
"""
# output = xml2json.json2xml(input)
if not input:
input = '-'
with click.open_file(input, mode='rb') as f:
xmlstring = f.read()
if echo:
click.echo('\nXML:')
click.echo(xmlstring)
click.echo('\nJSON:')
output = xml_utils.xml_to_json(xmlstring, strip_whitespace=strip_whitespace, strip_namespace=strip_namespace,
strip_attribute=strip_attribute, pretty=pretty)
# output = xml2json.elem2json(dom, options=options, strip_ns=None, strip=None)
# click.echo('\nJSON:\n{}\n'.format(output))
click.echo(output)
| 22,253
|
def subject() -> JsonCommandTranslator:
"""Get a JsonCommandTranslator test subject."""
return JsonCommandTranslator()
| 22,254
|
def acquire(arn: Optional[str]):
"""Aquire SUDO privileges.
This requires that the AWS_SUDO_RULE_ARN environment variable be set. If the
system was using environment variables they will be lost. It is recommended that
the credentials file be used for standard credentials.
"""
if not arn:
arn = os.environ.get("AWS_SUDO_ROLE_ARN")
assert arn, "AWS_SUDO_ROLE_ARN environment variable must be set or --arn used."
sudo_params = boto3.client("sts").assume_role(RoleArn=arn, RoleSessionName="SUDO")
for cred_name, cred_value in sudo_params["Credentials"].items():
cred_var = f"AWS_{inflection.underscore(cred_name).upper()}"
if cred_var not in CREDENTIAL_VARS:
continue
print(f"export {cred_var}={cred_value}")
| 22,255
|
def accuracy(output, target, cuda_enabled=True):
"""
Compute accuracy.
Args:
output: [batch_size, 10, 16, 1] The output from DigitCaps layer.
target: [batch_size] Labels for dataset.
Returns:
accuracy (float): The accuracy for a batch.
"""
batch_size = target.size(0)
v_length = torch.sqrt((output**2).sum(dim=2, keepdim=True))
softmax_v = F.softmax(v_length, dim=1)
assert softmax_v.size() == torch.Size([batch_size, 10, 1])
_, max_index = softmax_v.max(dim=1)
assert max_index.size() == torch.Size([batch_size, 1])
pred = max_index.view(batch_size) # max_index.squeeze() #
assert pred.size() == torch.Size([batch_size])
if cuda_enabled:
target = target.cuda()
pred = pred.cuda()
correct_pred = torch.eq(target, pred.data) # tensor
# correct_pred_sum = correct_pred.sum() # scalar. e.g: 6 correct out of 128 images.
acc = correct_pred.float().mean() # e.g: 6 / 128 = 0.046875
return acc
| 22,256
|
def save_message(my_dict):
"""
Saves a message if it is not a duplicate.
"""
conn = sqlite3.connect(DB_STRING)
# Create a query cursor on the db connection
queryCurs = conn.cursor()
if my_dict.get('message_status') is None:
my_dict['message_status'] = "Unconfirmed"
queryCurs.execute("SELECT rowid FROM Messages WHERE sender = ? and destination = ? and stamp = ? and hash = ?", (my_dict.get('sender'), my_dict.get('destination'), my_dict.get('stamp'), my_dict.get('hash'),))
data = queryCurs.fetchone()
if data is None:
queryCurs.execute('''INSERT INTO Messages (sender, destination, message, stamp, markdown, encoding, encryption, hashing, hash, decryption_key, file, filename, content_type, message_status) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', (my_dict.get('sender'), my_dict.get('destination'), my_dict.get('message'), my_dict.get('stamp'), my_dict.get('markdown'), my_dict.get('encoding'), my_dict.get('encryption'), my_dict.get('hashing'), my_dict.get('hash'), my_dict.get('decryptionKey'), my_dict.get('file'), my_dict.get('filename'), my_dict.get('content_type'), my_dict.get('message_status')))
conn.commit()
conn.close()
return True
| 22,257
|
def write_feats_space(fpath):
"""
Writes the features configuration in *fpath*.
Args:
fpath (str): Path to write
Returns:
None
"""
with open(fpath, 'w') as file:
writer = csv.writer(file)
writer.writerow(['feature', 'normalized'])
for f in Config.included_features:
writer.writerow([f, f in Config.normalized_features])
| 22,258
|
def getKFolds(train_df: pd.DataFrame,
seeds: List[str]) -> List[List[List[int]]]:
"""Generates len(seeds) folds for train_df
Usage:
# 5 folds
folds = getKFolds(train_df, [42, 99, 420, 120, 222])
for fold, (train_idx, valid_idx, test_idx) in enumerate(folds):
train_fold = train.iloc[train_idx]
valid_fold = train.iloc[valid_idx]
...
Returns:
folds: list of [train, val, test] indices for each
"""
folds = []
for seed in seeds:
train, val, test = split_into_train_val_test(train_df, seed=seed)
folds.append([list(train), list(val), list(test)])
return folds
| 22,259
|
def redownload_window() -> str:
"""The number of days for which the performance data will be redownloaded"""
return '30'
| 22,260
|
def p_addtotals_opt(p):
"""wc_string : ADDTOTALS_OPT EQ value"""
p[0] = ParseTreeNode('EQ', raw='assign')
p[1] = ParseTreeNode('OPTION', raw=p[1])
if p[1].raw in ['row', 'col']:
p[3].role = 'VALUE'
p[3].nodetype = 'BOOLEAN'
if p[1].raw in ['fieldname', 'labelfield']:
p[3].role = 'FIELD'
p[1].values.append(p[3])
p[0].add_children([p[1],p[3]])
| 22,261
|
def _write_snapshot(timewsync_data_dir: str, monthly_data: Dict[str, str]) -> None:
"""Creates a backup of the written files as a tar archive in gz compression.
Takes the file name specified in the timewsync config, defaults to 'snapshot.tgz'.
Args:
timewsync_data_dir: The timewsync data directory.
monthly_data: A dictionary containing the file names and corresponding data for every month.
"""
# Find timewsync data directory, create if not present
os.makedirs(timewsync_data_dir, exist_ok=True)
snapshot_path = os.path.join(timewsync_data_dir, "snapshot.tgz")
# Write data to files in snapshot
with tarfile.open(snapshot_path, mode="w:gz") as snapshot:
for file_name in monthly_data.keys():
snapshot.add(os.path.join(DATA_FOLDER, file_name), arcname=file_name)
| 22,262
|
def clean_flight_probs(flight_probs: np.ndarray, rng: np.random.Generator) -> np.ndarray:
"""
Round off probabilities in flight_probs to 0 or 1 with random bias of the current probability
:param flight_probs: a vector of inclusion probabilities after the landing phase
:param rng: a random number generator
:returns: a vector of inclusion probabilities that have been rounded off
"""
for i in range(len(flight_probs)):
if tol < flight_probs[i] < 1 - tol:
flight_probs[i] = 1 if rng.random() < flight_probs[i] else 0
return flight_probs
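A small usage sketch; `tol` is assumed to be a module-level tolerance (e.g. 1e-9), as the comparison above implies:

import numpy as np

tol = 1e-9  # assumed module-level tolerance used by clean_flight_probs
rng = np.random.default_rng(seed=42)
flight_probs = np.array([0.0, 0.3, 0.75, 1.0])
rounded = clean_flight_probs(flight_probs.copy(), rng)
# Only entries strictly between tol and 1 - tol (0.3 and 0.75 here) are rounded;
# each becomes 1 with probability equal to its current value, otherwise 0.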
| 22,263
|
def file_scan_validation(file):
"""
This validator sends the file to ClamAV for scanning and returns the result to the form. By default, if the
antivirus service is not available or there are errors, the validation will fail.
Usage:
class UploadForm(forms.Form):
file = forms.FileField(validators=[file_scan_validation])
:param file:
:return:
"""
logger.debug("starting file scanning with clamav")
if not settings.CLAMAV_ENABLED:
logger.warning('File scanning has been disabled.')
return
# make sure we're at the beginning of the file stream
file.seek(0)
# we're just going to assume a network connection to clamav here .. no local unix socket support
scanner = clamd.ClamdNetworkSocket(settings.CLAMAV_HOST, settings.CLAMAV_PORT)
try:
result = scanner.instream(file)
except Exception:
# it doesn't really matter what the actual error is .. log it and raise validation error
logger.error('Error occurred while trying to scan file. %s', sys.exc_info()[0])
raise ValidationError('Unable to scan file.', code='scanerror')
finally:
# reset file stream
file.seek(0)
if result and result['stream'][0] == 'FOUND':
logger.warning('Virus found: %s', file.name)
raise ValidationError('Infected file found.', code='infected')
| 22,264
|
async def question(session: AskSession):
"""
Ask the user which LeetCode problem
they want to anticipate.
"""
return await session.prompt(
message="Enter the problem URL from LeetCode site: ",
validator=LeetCodeUrlValidator(session)
)
| 22,265
|
def mongo_instance(instance_dict, ts_dt):
"""An instance as a model."""
dict_copy = copy.deepcopy(instance_dict)
dict_copy["status_info"]["heartbeat"] = ts_dt
return Instance(**dict_copy)
| 22,266
|
def remove(path, force=False):
"""
Remove the named file or directory
Args:
path (str): The path to the file or directory to remove.
force (bool): Remove even if marked Read-Only. Default is False
Returns:
bool: True if successful, False if unsuccessful
CLI Example:
.. code-block:: bash
salt '*' file.remove C:\\Temp
"""
# This must be a recursive function in windows to properly deal with
# Symlinks. The shutil.rmtree function will remove the contents of
# the Symlink source in windows.
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise HubbleInvocationError("File path must be absolute: {0}".format(path))
# Does the file/folder exist?
if not os.path.exists(path) and not is_link(path):
raise CommandExecutionError("Path not found: {0}".format(path))
# Remove ReadOnly Attribute
if force:
# Get current file attributes
file_attributes = win32api.GetFileAttributes(path)
win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL)
try:
if os.path.isfile(path):
# A file and a symlinked file are removed the same way
os.remove(path)
elif is_link(path):
# If it's a symlink directory, use the rmdir command
os.rmdir(path)
else:
for name in os.listdir(path):
item = "{0}\\{1}".format(path, name)
# If it's a normal directory, recurse to remove its contents
remove(item, force)
# rmdir will work now because the directory is empty
os.rmdir(path)
except (OSError, IOError) as exc:
if force:
# Reset attributes to the original if delete fails.
win32api.SetFileAttributes(path, file_attributes)
raise CommandExecutionError("Could not remove '{0}': {1}".format(path, exc))
return True
| 22,267
|
def get_conversion_option(shape_records):
"""Prompts user for conversion options"""
print("1 - Convert to a single zone")
print("2 - Convert to one zone per shape (%d zones) (this can take a while)" % (len(shape_records)))
import_option = int(input("Enter your conversion selection: "))
return import_option
| 22,268
|
def main(flags=None):
""" Script main function """
if not flags:
flags = sys.argv
module_name = flags[0]
module_args = flags[1:]
log(logging.INFO, DataCategory.PUBLIC, "Read parameters...")
# construct the argument parser
parser = get_arg_parser()
args = parser.parse_args(module_args)
log(logging.INFO, DataCategory.PUBLIC, "Finished reading parameters.")
execute(args, module_name)
| 22,269
|
def base_conditional(Kmn, Lm, Knn, f, *, full_cov=False, q_sqrt=None, white=False):
"""
Given a g1 and g2, and distribution p and q such that
p(g2) = N(g2;0,Kmm)
p(g1) = N(g1;0,Knn)
p(g1|g2) = N(g1;0,Knm)
And
q(g2) = N(g2;f,q_sqrt*q_sqrt^T)
This method computes the mean and (co)variance of
q(g1) = \int q(g2) p(g1|g2)
:param Kmn: M x N
:param Lm: M x M, lower-triangular Cholesky factor of Kmm
:param Knn: N x N or N
:param f: M x R
:param full_cov: bool
:param q_sqrt: None or R x M x M (lower triangular)
:param white: bool
:return: N x R or R x N x N
"""
# compute kernel stuff
num_func = tf.shape(f)[1] # R
# Compute the projection matrix A
A = tf.matrix_triangular_solve(Lm, Kmn, lower=True)
# compute the covariance due to the conditioning
if full_cov:
fvar = Knn - tf.matmul(A, A, transpose_a=True)
fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N
else:
fvar = Knn - tf.reduce_sum(tf.square(A), 0)
fvar = tf.tile(fvar[None, :], [num_func, 1]) # R x N
# another backsubstitution in the unwhitened case
if not white:
A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False)
# construct the conditional mean
fmean = tf.matmul(A, f, transpose_a=True)
if q_sqrt is not None:
if q_sqrt.get_shape().ndims == 2:
LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # R x M x N
elif q_sqrt.get_shape().ndims == 3:
L = q_sqrt
A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1]))
LTA = tf.matmul(L, A_tiled, transpose_a=True) # R x M x N
else: # pragma: no cover
raise ValueError("Bad dimension for q_sqrt: %s" %
str(q_sqrt.get_shape().ndims))
if full_cov:
fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True) # R x N x N
else:
fvar = fvar + tf.reduce_sum(tf.square(LTA), 1) # R x N
if not full_cov:
fvar = tf.transpose(fvar) # N x R
return fmean, fvar
| 22,270
|
def find_shortest_dijkstra_route(graph, journey):
"""
all_pairs_dijkstra_path() and all_pairs_dijkstra_path_length() both return
a generator, hence the use of dict().
"""
all_paths = dict(nx.all_pairs_dijkstra_path(graph))
all_lengths = dict(nx.all_pairs_dijkstra_path_length(graph))
if len(all_paths) != len(all_lengths):
print("Path count is not equal to path length count, "
"maybe some links are missing a weight?")
return False
shortest_path = []
for destination, path in all_paths[journey[0]].items():
# If all nodes in our journey are in the current path being checked
if all(node in path for node in journey):
if (len(shortest_path) == 0) or (len(path) < len(shortest_path)):
shortest_path = path
total = 0
for section in shortest_path:
total += len(section) - 1
print("\nShortest dijkstra journey: {} connection(s)".format(total))
if len(shortest_path) < 1:
print("No shortest dijkstra path found!\n")
return False
else:
print("{} hop(s) {}\n".format(len(shortest_path) - 1, shortest_path))
return shortest_path
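A usage sketch on a toy weighted graph (node names and weights are invented for illustration):

import networkx as nx

g = nx.Graph()
g.add_edge("A", "B", weight=1)
g.add_edge("B", "C", weight=1)
g.add_edge("A", "C", weight=5)
# Shortest weighted path starting at "A" that visits every node in the journey.
path = find_shortest_dijkstra_route(g, ["A", "C"])
# Expected return value: ["A", "B", "C"], since the direct A-C edge is heavier.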
| 22,271
|
def product_consec_digits(number, consecutive):
"""
Returns the products of every window of `consecutive`
consecutive digits from number; take max() of the result for the largest product (see the example below)
"""
digits = [int(dig) for dig in str(number)]
max_start = len(digits) - consecutive
return [reduce(operator.mul, digits[i:i + consecutive])
for i in range(max_start + 1)]
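For example, scanning 123456 with windows of three digits:

windows = product_consec_digits(123456, 3)
# windows == [6, 24, 60, 120]
largest = max(windows)  # 120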
| 22,272
|
def alpha_161(code, end_date=None, fq="pre"):
"""
Formula:
MEAN(MAX(MAX((HIGH-LOW),ABS(DELAY(CLOSE,1)-HIGH)),ABS(DELAY(CLOSE,1)-LOW)),12)
Inputs:
code: stock pool
end_date: query date
Outputs:
value of the factor
"""
end_date = to_date_str(end_date)
func_name = sys._getframe().f_code.co_name
return JQDataClient.instance().get_alpha_191(**locals())
| 22,273
|
def log_info(who, text):
"""Helper logging abstract to be redone later."""
print datetime.datetime.utcnow(),\
'{} {}'.format(who, current_process().pid),\
text
| 22,274
|
def WeightedCrossEntropyLoss(alpha=0.5):
"""
Calculates the Weighted Cross-Entropy Loss, which applies a factor alpha, allowing one to
trade off recall and precision by up- or down-weighting the cost of a positive error relative
to a negative error.
A value alpha > 1 decreases the false negative count, hence increasing the recall.
Conversely, setting alpha < 1 decreases the false positive count and increases the precision.
"""
def _gradient(yhat, dtrain, alpha):
"""Compute the weighted cross-entropy gradient.
Args:
yhat (np.array): Margin predictions
dtrain: The XGBoost / LightGBM dataset
alpha (float): Scale applied
Returns:
grad: Weighted cross-entropy gradient
"""
y = dtrain.get_label()
yhat = clip_sigmoid(yhat)
grad = (y * yhat * (alpha - 1)) + yhat - (alpha * y)
return grad
def _hessian(yhat, dtrain, alpha):
"""Compute the weighted cross-entropy hessian.
Args:
yhat (np.array): Margin predictions
dtrain: The XGBoost / LightGBM dataset
alpha (float): Scale applied
Returns:
hess: Weighted cross-entropy Hessian
"""
y = dtrain.get_label()
yhat = clip_sigmoid(yhat)
hess = (y * (alpha - 1) + 1) * yhat * (1 - yhat)
return hess
def weighted_cross_entropy(
yhat,
dtrain,
alpha=alpha
):
"""
Calculate the gradient and Hessian for weighted cross-entropy.
Args:
yhat (np.array): Predictions
dtrain: The XGBoost / LightGBM dataset
alpha (float): Scale applied
Returns:
grad: Weighted cross-entropy gradient
hess: Weighted cross-entropy Hessian
"""
grad = _gradient(yhat, dtrain, alpha=alpha)
hess = _hessian(yhat, dtrain, alpha=alpha)
return grad, hess
return weighted_cross_entropy
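A usage sketch with XGBoost as the booster (the data is synthetic, and `clip_sigmoid` is assumed to be defined elsewhere in this module, as the helpers above require):

import numpy as np
import xgboost as xgb

X = np.random.rand(256, 4)
y = (np.random.rand(256) > 0.7).astype(int)
dtrain = xgb.DMatrix(X, label=y)

# alpha > 1 up-weights false negatives, trading precision for recall.
objective = WeightedCrossEntropyLoss(alpha=2.0)
booster = xgb.train({"max_depth": 3, "eta": 0.1}, dtrain,
                    num_boost_round=50, obj=objective)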
| 22,275
|
def expand_stylesheet(abbr: str, config: Config):
"""
Expands given *stylesheet* abbreviation (a special Emmet abbreviation designed for
stylesheet languages like CSS, SASS etc.) and outputs it according to options
provided in config
"""
return stringify_stylesheet(stylesheet_abbreviation(abbr, config), config)
| 22,276
|
def generate_legacy_dir(ctx, config, manifest, layers):
"""Generate a intermediate legacy directory from the image represented by the given layers and config to /image_runfiles.
Args:
ctx: the execution context
config: the image config file
manifest: the image manifest file
layers: the list of layer tarballs
Returns:
The filepaths generated and runfiles to be made available.
config: the generated config file.
layers: the generated layer tarball files.
temp_files: all the files generated to be made available at runtime.
"""
# Construct image runfiles for input to pusher.
image_files = [] + layers
if config:
image_files += [config]
if manifest:
image_files += [manifest]
path = "image_runfiles/"
layer_files = []
# Symlink layers to ./image_runfiles/<i>.tar.gz
for i in range(len(layers)):
layer_symlink = ctx.actions.declare_file(path + str(i) + ".tar.gz")
layer_files.append(layer_symlink)
ctx.actions.run_shell(
outputs = [layer_symlink],
inputs = [layers[i]],
command = "ln {src} {dst}".format(
src = layers[i].path,
dst = layer_symlink.path,
),
)
# Symlink config to ./image_runfiles/config.json
config_symlink = ctx.actions.declare_file(path + "config.json")
ctx.actions.run_shell(
outputs = [config_symlink],
inputs = [config],
command = "ln {src} {dst}".format(
src = config.path,
dst = config_symlink.path,
),
)
return {
"config": config_symlink,
"layers": layer_files,
"temp_files": [config_symlink] + layer_files,
}
| 22,277
|
def workflow_key(workflow):
"""Return text search key for workflow"""
# I wish tags were in the manifest :(
elements = [workflow['name']]
elements.extend(workflow['tags'])
elements.extend(workflow['categories'])
elements.append(workflow['author'])
return ' '.join(elements)
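For example, given a workflow record shaped like the dict assumed below, the key is a space-joined blob suitable for substring search:

# Hypothetical workflow record; field names follow the function above.
workflow = {
    "name": "Backup to S3",
    "tags": ["backup", "aws"],
    "categories": ["storage"],
    "author": "jane",
}
assert workflow_key(workflow) == "Backup to S3 backup aws storage jane"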
| 22,278
|
def get_logits(input_ids,mems,input_mask,target_mask):
"""Builds the graph for calculating the final logits"""
is_training = False
cutoffs = []
train_bin_sizes = []
eval_bin_sizes = []
proj_share_all_but_first = True
n_token = FLAGS.n_token
batch_size = FLAGS.batch_size
features = {"input": input_ids}
inp = tf.transpose(features["input"], [1, 0])
input_mask = tf.transpose(input_mask, [1, 0])
target_mask = tf.transpose(target_mask, [1, 0])
tgt = None
inp_perms, tgt_perms, head_tgt = None, None, None
if FLAGS.init == "uniform":
initializer = tf.initializers.random_uniform(
minval=-FLAGS.init_range,
maxval=FLAGS.init_range,
seed=None)
elif FLAGS.init == "normal":
initializer = tf.initializers.random_normal(
stddev=FLAGS.init_std,
seed=None)
proj_initializer = tf.initializers.random_normal(
stddev=FLAGS.proj_init_std,
seed=None)
tie_projs = [False for _ in range(len(cutoffs) + 1)]
if proj_share_all_but_first:
for i in range(1, len(tie_projs)):
tie_projs[i] = True
tf.logging.info("Vocab size : {}".format(n_token))
tf.logging.info("Batch size : {}".format(batch_size))
logits, new_mems = model.transformer(
dec_inp=inp,
target=tgt,
mems=mems,
n_token=n_token,
n_layer=FLAGS.n_layer,
d_model=FLAGS.d_model,
d_embed=FLAGS.d_embed,
n_head=FLAGS.n_head,
d_head=FLAGS.d_head,
d_inner=FLAGS.d_inner,
dropout=0,
dropatt=0,
initializer=initializer,
is_training=is_training,
mem_len=FLAGS.seq_len+FLAGS.max_decode_length,
cutoffs=cutoffs,
div_val=1,
tie_projs=tie_projs,
input_perms=inp_perms,
target_perms=tgt_perms,
head_target=head_tgt,
same_length=FLAGS.same_length,
clamp_len=FLAGS.clamp_len,
use_tpu=FLAGS.use_tpu,
untie_r=FLAGS.untie_r,
proj_same_dim=True,
bidirectional_mask=FLAGS.bi_mask,
infer=True,
target_mask=target_mask,
input_mask=input_mask,
tgt_len=1)
return logits,new_mems
| 22,279
|
def serve_protocols(environ, start_response):
"""Serve a list of all protocols.
"""
status = '200 OK'
response_headers = [('Content-type', 'text/html')]
start_response(status, response_headers)
repo = os.path.join(APP_ROOT, 'storage')
protocols = [f_name for f_name in os.listdir(repo) if f_name.endswith('.protocol')]
if not check_test_index():
add_ids_test_index()
doc = [PROTOCOL_LIST % write_protocols(protocols)]
return doc
| 22,280
|
def download_tar(local_path, dropbox_paths, download_q):
"""
Downloads files (tar.gz.aes) from Dropbox, decrypts them, extracts the chunk files inside,
merges them into single files, then decrypts those files and builds a Bozorth list file.
This method is used for charge operations.
:param local_path: str, directory where the files will be downloaded
:param dropbox_paths: str, Dropbox file paths
:param download_q: queue, communication queue (pipe)
:return: None
"""
try:
# get the files from the cloud (dropbox accounts)
__get_folders_processes(local_path, dropbox_paths)
# decrypts tar.gz and extract files in local_path
enc.decrypt_chunks_parallel(local_path, key_path_2, enc_format)
files = os.listdir(local_path)
for file_name in files:
if file_name.rfind(suffix_chunk_tar):
__extract_tarfile(file_name, local_path)
# merges and decrypts files
enc.decrypt_to_file_parallel(local_path, key_path_1, enc_format)
# build the file .lis
__buid_bozorth_lis_file(local_path, name_lis)
except (OSError, IOError):
logging.error('Error while downloading or decrypting files')
download_q.put(1)
else:
download_q.put('done')
| 22,281
|
def _api_get_scripts(name, output, kwargs):
""" API: accepts output """
return report(output, keyword="scripts", data=list_scripts())
| 22,282
|
def dump_iowawfo(fn):
"""A region with the Iowa WFOs"""
pgconn = get_dbconn("postgis", user="nobody")
df = read_postgis(
""" SELECT ST_Simplify(ST_Union(the_geom), 0.01) as geom
from cwa
WHERE wfo in ('DMX', 'ARX', 'DVN', 'OAX', 'FSD')""",
pgconn,
geom_col="geom",
)
df.to_parquet(fn)
| 22,283
|
def WriteNewIRC(FilePath):
"""Writes correctly the irc in a .xyz file"""
fwrite = open(FilePath.split(".")[0] + "NewIRC.xyz","w")
# If a reversed IRC is requested, Reversed will be True
for key in sorted(DictLines,reverse=Reversed):
fwrite.writelines(DictLines[key])
fwrite.close()
| 22,284
|
def get_children(key):
"""
Lists all direct child usages for a name usage
:return: list of species
"""
api_url = 'http://api.gbif.org/v1/species/{key}/children'.format(
key=key
)
try:
response = requests.get(api_url)
json_response = response.json()
if json_response['results']:
return json_response['results']
return None
except (HTTPError, KeyError) as e:
print(e)
return None
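A usage sketch against the live GBIF API (212 is assumed to be the backbone taxon key for the class Aves; any valid key works):

children = get_children(212)
if children:
    # Each result is a name-usage dict returned by the GBIF species API.
    print(children[0].get("scientificName"))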
| 22,285
|
def test_alias():
"""Test alias functionality in help commands."""
from commands import help
commands = help.alias()
assert 'help' in commands
| 22,286
|
def _extend(obj, *args):
"""
adapted from underscore-py
Extend a given object with all the properties in
passed-in object(s).
"""
args = list(args)
for src in args:
obj.update(src)
for k, v in src.items():
if v is None:
del obj[k]
return obj
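A small usage sketch; note that keys whose incoming value is None are removed from the target dict:

base = {"a": 1, "b": 2}
_extend(base, {"b": 3, "c": 4}, {"a": None})
# base == {"b": 3, "c": 4}; "a" was dropped because its new value was None.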
| 22,287
|
async def test_offers_no_records(http_client):
"""Request offers for a product is registered, but has no offer records."""
prod_id = await create_product(http_client)
await set_reg_token_hash(prod_id)
resp = await http_client.get(f"/products/{prod_id}/offers")
assert resp.status_code == 200
assert resp.json() == []
| 22,288
|
def async_test(func):
"""
Wrap async_to_sync with another function because Pytest complains about
collecting the resulting callable object as a test because it's not a true
function:
PytestCollectionWarning: cannot collect 'test_foo' because it is not a
function.
"""
# inner import because for Python 3.6+ tests only
from asgiref.sync import async_to_sync
sync_func = async_to_sync(func)
@functools.wraps(func)
def wrapper(*args, **kwargs):
return sync_func(*args, **kwargs)
return wrapper
| 22,289
|
def get_logger(lname, logfile):
"""logging setup
logging config - to be moved to file at some point
"""
logger = logging.getLogger(lname)
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(levelname)s:\t%(message)s'
},
'verbose': {
'format': '%(levelname)s:\t%(message)s\tFROM: %(name)s'
}
},
'handlers': {
'stdout': {
'level': 'INFO',
'formatter': 'verbose',
'class': 'logging.StreamHandler'
},
'logfile': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.FileHandler',
'filename': logfile
}
},
'loggers': {
'': {
'handlers': ['stdout', 'logfile'],
'level': 'INFO',
'propagate': True
}
}
})
return logger
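Typical usage (the logger name and log file below are placeholders):

logger = get_logger("ingest", "/tmp/ingest.log")
logger.info("pipeline started")   # written to stdout and to /tmp/ingest.log
logger.debug("not shown")         # below the INFO threshold configured above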
| 22,290
|
def get_percentiles(data, percentiles, integer_valued=True):
"""Returns a dict of percentiles of the data.
Args:
data: An unsorted list of datapoints.
percentiles: A list of ints or floats in the range [0, 100] representing the
percentiles to compute.
integer_valued: Whether or not the values are all integers. If so,
interpolate to the nearest datapoint (instead of computing a fractional
value between the two nearest datapoints).
Returns:
A dict mapping each element of percentiles to the computed result.
"""
# Ensure integer datapoints for cleaner binning if necessary.
interpolation = 'nearest' if integer_valued else 'linear'
results = np.percentile(data, percentiles, interpolation=interpolation)
return {percentiles[i]: results[i] for i in range(len(percentiles))}
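For example, summarising integer latencies (values snap to actual datapoints because integer_valued=True selects 'nearest' interpolation):

import numpy as np

latencies_ms = [12, 15, 11, 90, 14, 13, 200, 16, 150]
summary = get_percentiles(latencies_ms, [50, 95, 99])
# summary == {50: 15, 95: 200, 99: 200}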
| 22,291
|
def estimate_poster_dedpul(diff, alpha=None, quantile=0.05, alpha_as_mean_poster=False, max_it=100, **kwargs):
"""
Estimates posteriors and priors alpha (if not provided) of N in U with dedpul method
:param diff: difference of densities f_p / f_u for the sample U, np.array (n,), output of estimate_diff()
:param alpha: priors, share of N in U (estimated if None)
:param quantile: if alpha is None, relaxation of the estimate of alpha;
here alpha is estimated as the infimum, and a low quantile is its relaxed version;
share of posterior probabilities that we allow to be negative (they are subsequently zeroed out)
:param kwargs: dummy
:return: tuple (alpha, poster), e.g. (priors, posteriors) of N in U for the U sample, represented by diff
"""
if alpha_as_mean_poster and (alpha is not None):
poster = 1 - diff * (1 - alpha)
poster[poster < 0] = 0
cur_alpha = np.mean(poster)
if cur_alpha < alpha:
left_border = alpha
right_border = 1
else:
left_border = 0
right_border = alpha
poster_zero = 1 - diff
poster_zero[poster_zero < 0] = 0
if np.mean(poster_zero) > alpha:
left_border = -50
right_border = 0
# return 0, poster_zero
it = 0
try_alpha = cur_alpha
while (abs(cur_alpha - alpha) > kwargs.get('tol', 10 ** -5)) and (it < max_it):
try_alpha = (left_border + (right_border - left_border) / 2)
poster = 1 - diff * (1 - try_alpha)
poster[poster < 0] = 0
cur_alpha = np.mean(poster)
if cur_alpha > alpha:
right_border = try_alpha
else:
left_border = try_alpha
it += 1
alpha = try_alpha
if it >= max_it:
print('Exceeded maximal number of iterations in finding mean_poster=alpha')
else:
if alpha is None:
alpha = 1 - 1 / max(np.quantile(diff, 1 - quantile, interpolation='higher'), 1)
poster = 1 - diff * (1 - alpha)
poster[poster < 0] = 0
return alpha, poster
| 22,292
|
def test():
"""Run the unit tests."""
raise SystemExit(helper.test())
| 22,293
|
def get_short_topic_name(test_run_name):
"""Returns the collection name for the DLQ.
Keyword arguments:
test_run_name -- the unique id for this test run
"""
return test_run_name[3:] if test_run_name.startswith("db.") else test_run_name
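For example (the topic names below are placeholders):

get_short_topic_name("db.core.contract")  # -> "core.contract"
get_short_topic_name("core.contract")     # -> "core.contract"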
| 22,294
|
def cut_in_two(line):
"""
Cuts input line into two lines of equal length
Parameters
----------
line : shapely.LineString
input line
Returns
----------
list (LineString, LineString, Point)
two lines and the middle point cutting input line
"""
from shapely.geometry import Point, LineString
# Get final distance value
distance = line.length / 2
# Cuts a line in two at a distance from its starting point
if distance <= 0.0 or distance >= line.length:
return [LineString(line)]
coords = list(line.coords)
for i, p in enumerate(coords):
pd = line.project(Point(p))
if pd == distance:
return [LineString(coords[: i + 1]), LineString(coords[i:]), Point(p)]
if pd > distance:
cp = line.interpolate(distance)
return [
LineString(coords[:i] + [(cp.x, cp.y)]),
LineString([(cp.x, cp.y)] + coords[i:]),
cp,
]
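For example, halving a straight 10-unit segment:

from shapely.geometry import LineString

line = LineString([(0, 0), (10, 0)])
first_half, second_half, midpoint = cut_in_two(line)
# first_half  -> LineString([(0, 0), (5, 0)])
# second_half -> LineString([(5, 0), (10, 0)])
# midpoint    -> Point(5, 0)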
| 22,295
|
def local_input_loop():
"""
Initialize the local input channels (fifo and socket).
Then poll on those and forward messages to Telegram.
"""
# Cleanup socket
# Use a separate lockfile to avoid locking the socket
# If lockfile can be locked, redo the socket
# If lockfile can't be locked, exit with error.
lock = open(lock_file, 'w')
try:
fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
print('Server is already running')
die()
thread.start_new_thread(read_socket, ())
thread.start_new_thread(read_fifo, ())
| 22,296
|
def get_processor(aid):
"""
Return the processor module for a given achievement.
Args:
aid: the achievement id
Returns:
The processor module
"""
try:
path = get_achievement(aid)["processor"]
base_path = api.config.get_settings()["achievements"]["processor_base_path"]
return SourceFileLoader(path[:-3], join(base_path, path)).load_module()
except FileNotFoundError:
raise PicoException("Achievement processor is offline.")
| 22,297
|
def parse_price(price):
"""
Convert a price string to an integer, stripping currency symbols and separators
"""
if not price:
return 0
price = price.replace(',', '')
return locale.atoi(re.sub('[^0-9,]', "", price))
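For example (note that digits after a decimal point are simply concatenated, so prices are best passed without a fractional part):

parse_price("$1,299")        # -> 1299
parse_price("Rs. 2,50,000")  # -> 250000
parse_price("")              # -> 0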
| 22,298
|
def _validate_keys(connection_string_parts):
"""Raise ValueError if incorrect combination of keys
"""
host_name = connection_string_parts.get(HOST_NAME)
shared_access_key_name = connection_string_parts.get(SHARED_ACCESS_KEY_NAME)
shared_access_key = connection_string_parts.get(SHARED_ACCESS_KEY)
device_id = connection_string_parts.get(DEVICE_ID)
if host_name and device_id and shared_access_key:
pass
elif host_name and shared_access_key and shared_access_key_name:
pass
else:
raise ValueError("Invalid Connection String - Incomplete")
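A usage sketch; the key-name constants are assumed to mirror the usual IoT Hub connection-string fields referenced above:

# Assumed module-level constants matching the names used in _validate_keys.
HOST_NAME = "HostName"
SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"
SHARED_ACCESS_KEY = "SharedAccessKey"
DEVICE_ID = "DeviceId"

parts = {
    "HostName": "myhub.azure-devices.net",
    "DeviceId": "device-01",
    "SharedAccessKey": "c2FtcGxlLWtleQ==",
}
_validate_keys(parts)  # passes: host + device id + shared access key is valid

_validate_keys({"HostName": "myhub.azure-devices.net"})  # raises ValueError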
| 22,299
|