| content | sha1 | id |
|---|---|---|
def addactual():
    """Add actual spending amounts for a month"""
    if request.method == "POST":
        # ID of the currently logged-in user
        userId = session["user_id"]
        month = request.form.get("month")
        housing = float(request.form.get("housing"))
        pensionIns = float(request.form.get("pensionIns"))
        food = float(request.form.get("food"))
        health = float(request.form.get("health"))
        transport = float(request.form.get("transport"))
        debt = float(request.form.get("debt"))
        utilities = float(request.form.get("utilities"))
        clothing = float(request.form.get("clothing"))
        vacation = float(request.form.get("vacation"))
        unexpected = float(request.form.get("unexpected"))
        total = housing + pensionIns + food + health + transport + debt + utilities + clothing + vacation + unexpected
        # Reject a duplicate month before inserting, not after
        allMonths = db.execute("SELECT month FROM payments WHERE userId = :userId", userId=userId)
        enteredMonths = [row["month"] for row in allMonths]
        if month in enteredMonths:
            return apology("Month already entered!")
        db.execute("INSERT INTO payments(userId, month, housing, pensionIns, food, health, transport, debt, utilities, clothing, vacation, unexpected, total)\
            VALUES(:userId, :month, :housing, :pensionIns, :food, :health, :transport, :debt, :utilities, :clothing, :vacation, :unexpected, :total)", userId=userId, month=month, housing=housing, pensionIns=pensionIns, food=food, health=health, transport=transport, debt=debt, utilities=utilities, clothing=clothing, vacation=vacation, unexpected=unexpected, total=total)
        # Flash message to confirm that the payments were saved
        flash("Payments added")
        return redirect("/actual")
    else:
        return render_template("addactual.html") | ccf7d9c362aa1f250959dce51032e43b00ffe412 | 22,900 |
def parse_move(line):
""" Parse steps from a move string """
text = line.split()
if len(text) == 0:
raise ValueError("No steps in move given to parse. %s" % (repr(line)))
steps = []
for step in text:
from_ix = alg_to_index(step[1:3])
if len(step) > 3:
if step[3] == 'x':
continue
elif step[3] == 'n':
to_ix = from_ix + 8
elif step[3] == 's':
to_ix = from_ix - 8
elif step[3] == 'e':
to_ix = from_ix + 1
elif step[3] == 'w':
to_ix = from_ix - 1
else:
raise ValueError("Invalid step direction.")
steps.append((from_ix, to_ix))
else:
raise ValueError("Can't represent placing step")
return steps | 660374a82c19da61df3e0f8468f09c5df7d3be5e | 22,901 |
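A minimal usage sketch for parse_move, assuming Arimaa-style step notation and an `alg_to_index` that maps algebraic squares to 0-63 row-major from 'a1'; the stub below is illustrative, not the original helper.

def alg_to_index(square):
    # hypothetical stand-in: 'a1' -> 0, 'b1' -> 1, ..., 'h8' -> 63
    return (ord(square[0]) - ord('a')) + (int(square[1]) - 1) * 8

print(parse_move("Ea2n Db2e"))  # [(8, 16), (9, 10)]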
import os
def get_conf_path(run_id):
"""
Generate path for storing/loading configuration file
:param run_id (str): run ID to be used
:return: full file path for storing/loading config file
"""
return os.path.join('conf', run_id + '.ini') | 6d701be83e52b8294bf45366944543be8b205e9d | 22,902 |
def get_tensor_name(node_name, output_slot):
"""Get tensor name given node name and output slot index.
Args:
node_name: Name of the node that outputs the tensor, as a string.
output_slot: Output slot index of the tensor, as an integer.
Returns:
Name of the tensor, as a string.
"""
return "%s:%d" % (node_name, output_slot) | d563a3e4be696fc1109aa7a60fb4dd140ec65431 | 22,903 |
def EstimateMarriageSurvival(resp):
"""Estimates the survival curve.
resp: DataFrame of respondents
returns: pair of HazardFunction, SurvivalFunction
"""
# NOTE: Filling missing values would be better than dropping them.
complete = resp[resp.evrmarry == 1].agemarry.dropna()
ongoing = resp[resp.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return hf, sf | 06f7d307662a70ef4c77073e4202f69ec68ee9e4 | 22,904 |
import numpy as np
def get_cowell_data():
"""
Gets Cowell data.
:return: Data and headers.
"""
n = 10000
Y = np.random.normal(0, 1, n)
X = np.random.normal(Y, 1, n)
Z = np.random.normal(X, 1, n)
D = np.vstack([Y, X, Z]).T
return D, ['Y', 'X', 'Z'] | bd2084b889e8e9068d11b0f49c1d00226bfc6a1f | 22,905 |
def is_str_str_tuple(t):
    """Is this object a tuple of two strings?"""
    # `basestring` in the original is Python 2 only; `str` is the
    # Python 3 equivalent.
    return (isinstance(t, tuple) and len(t) == 2
            and isinstance(t[0], str)
            and isinstance(t[1], str)) | e568821ee2d7a3926744b93eaf11356744ca4538 | 22,906 |
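Quick behavioural check (using `str`, the Python 3 counterpart of `basestring`):

print(is_str_str_tuple(("host", "port")))  # True
print(is_str_str_tuple(("host", 8080)))    # False: second element is an int
print(is_str_str_tuple(["host", "port"]))  # False: a list, not a tuple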
from math import log
def g(dist, aq):
    """
    Compute function g (Lemma 5) for a given full parent instantiation.
    Parameters
    ----------
    dist: list of ints
        Counts of the child variable for a given full parent instantiation.
    aq: float
        Equivalent sample size divided by the product of parent arities.
    """
    res = log(2*min(dist)/aq + 1)
    for d in dist:
        res += -log(2*d/aq + 1)
    return res | 505f5c0857f97579bcb1be9e812a90c31ecf4e5e | 22,907 |
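A worked numerical check of g: with dist=[4, 2] and aq=1.0, the formula gives log(2*2+1) - log(2*4+1) - log(2*2+1) = -log(9).

from math import isclose, log
assert isclose(g([4, 2], 1.0), -log(9))  # -log(9) ≈ -2.197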
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.interpolate import griddata
def save_2D_animation(embeddings, target_optimizers, emb_space_sizes,
total_train_losses, total_test_losses,
n_bins=100, cmap_name='jet', **plotting_kwargs):
"""Utility function for visualizing the changes in weights over time in
UMAP space. The visualization is in 2D for better appreciating the global
loss surface.
Args:
- embeddings: list of embeddings, result of alligned UMAP
- target_optimizers: list of strings, name of the optimizers
considered.
- emb_space_sizes: list of arrays, define the limits of the
embedding space for the three layers of the MLP.
- total_train_losses: list, training losses history.
- total_test_losses: list, test losses.
- n_bins: int, number of bins for discretizing the training loss.
- cmap_name: string, name of the colormap used for representing
the change in train losses.
- **plotting_kwargs: keyword arguments, keyword arguments for the
plotting function.
Returns:
- None
"""
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
axs = axs.flatten()
Z = np.array(total_train_losses).flatten()
for layer, emb in enumerate(embeddings):
x = emb[:, 0]
y = emb[:, 1]
xi = np.linspace(
x.min(),
x.max(),
1000
)
yi = np.linspace(
y.min(),
y.max(),
1000
)
x_grid, Y_grid = np.meshgrid(xi, yi)
zi = griddata(
(x, y),
Z,
(xi[None, :], yi[:, None]),
method='linear'
)
zi = np.nan_to_num(zi, nan=Z.mean())
cont = axs[layer].contourf(
x_grid,
Y_grid,
zi,
cmap=cmap_name,
levels=n_bins,
vmin=Z.min(),
vmax=Z.max()
)
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.88, 0.15, 0.04, 0.7])
fig.colorbar(
cont,
cax=cbar_ax,
label='Training Loss'
)
for index, opt_name in enumerate(target_optimizers):
print(f'Saving Optimizer {opt_name}')
emb_size = len(total_test_losses[index])
start = emb_size * index
stop = start + emb_size
embs = [emb[start:stop] for emb in embeddings]
for ax_idx, ax in enumerate(axs):
ax.set_title(
f'Layer {ax_idx + 1} \
\nOptimizer: {opt_name}'
)
if ax_idx == 0:
ax.set_ylabel('Weights Space \n UMAP 2')
ax.set_xlabel('Weights Space \n UMAP 1')
else:
ax.set_xlabel('Weights Space \n UMAP 1')
for i in tqdm(range(embs[0].shape[0])):
point_1 = axs[0].scatter(
embs[0][i, 0],
embs[0][i, 1],
marker="*",
c='white',
edgecolor='k',
s=60
)
point_2 = axs[1].scatter(
embs[1][i, 0],
embs[1][i, 1],
c='white',
marker="*",
edgecolor='k',
s=60
)
point_3 = axs[2].scatter(
embs[2][i, 0],
embs[2][i, 1],
c='white',
marker="*",
edgecolor='k',
s=60
)
            out_dir = os.path.join('results', f'2D_{opt_name}')
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            plt.savefig(
                os.path.join(out_dir, f'{i}.png'),
                bbox_inches='tight'
            )
point_1.remove()
point_2.remove()
point_3.remove()
return None | 8f76501419b6699a07e76dc5679d7ab2e836b9f9 | 22,908 |
def order_tweets_by_polarity(tweets, positive_highest=True):
"""Sort the tweets by polarity, receives positive_highest which determines
the order. Returns a list of ordered tweets."""
    reverse = bool(positive_highest)
return sorted(tweets, key=lambda tweet: tweet.polarity, reverse=reverse) | 996c0aa6c374716f10d4d7a890162fe1bb87eef1 | 22,909 |
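Usage sketch: any object exposing a `polarity` attribute works, so `SimpleNamespace` stands in for the original tweet class here.

from types import SimpleNamespace

tweets = [SimpleNamespace(polarity=p) for p in (0.1, -0.4, 0.9)]
print([t.polarity for t in order_tweets_by_polarity(tweets)])                          # [0.9, 0.1, -0.4]
print([t.polarity for t in order_tweets_by_polarity(tweets, positive_highest=False)])  # [-0.4, 0.1, 0.9]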
import hashlib
import re
import sys
import os
def text_file_md5(filename, exclude_lines=None, exclude_re=None,
prepend_lines=None, append_lines=None):
"""Get a MD5 (check) sum of a text file.
Works in the same way as `file_md5()` function but ignores newlines
characters and excludes lines from the file as well as prepend or
append them if requested.
:param exclude_lines: list of strings to be excluded
(newline characters should not be part of the strings)
:param exclude_re: regular expression string;
lines matching this regular expression will not be considered
:param prepend_lines: list of lines to be prepended to the file
before computing the sum
:param append_lines: list of lines to be appended to the file
before computing the sum
"""
hasher = hashlib.md5()
if exclude_re:
regexp = re.compile(exclude_re)
if prepend_lines:
for line in prepend_lines:
            hasher.update(line if sys.version_info[0] == 2 else line.encode('utf-8'))
with open(filename, 'r') as f:
for line in f:
# replace platform newlines by standard newline
if os.linesep != '\n':
line = line.rstrip(os.linesep) + '\n'
if exclude_lines and line in exclude_lines:
continue
if exclude_re and regexp.match(line):
continue
            hasher.update(line if sys.version_info[0] == 2 else line.encode('utf-8'))
if append_lines:
for line in append_lines:
            hasher.update(line if sys.version_info[0] == 2 else line.encode('utf-8'))
return hasher.hexdigest() | aadfab0ae4a76ecace4c1736c0f4b81bc3c83bbb | 22,910 |
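A small self-contained demonstration with a temporary file; note that entries in `exclude_lines` must include the trailing newline, since lines are compared verbatim.

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("keep\nskip me\nkeep too\n")
digest_all = text_file_md5(tmp.name)
digest_filtered = text_file_md5(tmp.name, exclude_lines=["skip me\n"])
print(digest_all != digest_filtered)  # True: the excluded line changes the sum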
import pandas as pd
from scipy.stats import beta
from params import VoC_start_date, use_vaccine_effect
def read_in_Reff_file(file_date, VoC_flag=None, scenario=''):
"""
Read in Reff h5 file produced by generate_RL_forecast.
Args:
    Args:
        file_date: (date as string) date of data file
        VoC_flag: (string) variant name ('Alpha' or 'Delta'); Reff is increased from VoC_start_date onwards
        scenario: (string) optional scenario suffix appended to the results file name
    """
if file_date is None:
raise Exception('Need to provide file date to Reff read.')
file_date = pd.to_datetime(file_date).strftime("%Y-%m-%d")
df_forecast = pd.read_hdf('results/soc_mob_R'+file_date+scenario+'.h5', key='Reff')
    if (VoC_flag != '') and (VoC_flag is not None):
        # Use a local name so the module-level VoC_start_date import is not shadowed.
        voc_start_date = pd.to_datetime(VoC_start_date)
        if VoC_flag == 'Alpha':
            print('This VoC will be deprecated in future.')
            # Here we apply the beta(6,14)+1 scaling from VoC to the Reff.
            # We do so by editing a slice of the data frame. Forgive me for my sins.
            row_bool_to_apply_VoC = (df_forecast.type == 'R_L') & (pd.to_datetime(df_forecast.date, format='%Y-%m-%d') >= voc_start_date)
            index_map = df_forecast.index[row_bool_to_apply_VoC]
            # Columns 8 onwards are the 2000 Reff samples.
            df_slice_after_VoC = df_forecast.iloc[index_map, 8:]
            multiplier = beta.rvs(6, 14, size=df_slice_after_VoC.shape) + 1
            df_forecast.iloc[index_map, 8:] = df_slice_after_VoC * multiplier
        if VoC_flag == 'Delta':  # Increase from Delta
            # Here we apply the beta(3,3)+1.6 scaling (mean 2.1) from VoC to the Reff based on CDC results.
            # We do so by editing a slice of the data frame. Forgive me for my sins.
            row_bool_to_apply_VoC = (df_forecast.type == 'R_L') & (pd.to_datetime(df_forecast.date, format='%Y-%m-%d') >= voc_start_date)
            index_map = df_forecast.index[row_bool_to_apply_VoC]
            # Columns 8 onwards are the 2000 Reff samples.
            df_slice_after_VoC = df_forecast.iloc[index_map, 8:]
            multiplier = beta.rvs(3, 3, size=df_slice_after_VoC.shape) + 2.1 - 0.5  # Mean 2.1 Delta
            df_forecast.iloc[index_map, 8:] = df_slice_after_VoC * multiplier
if use_vaccine_effect:
# Load in vaccination effect data
vaccination_by_state = pd.read_csv('data/vaccination_by_state.csv', parse_dates=['date'])
vaccination_by_state = vaccination_by_state[['state', 'date','overall_transmission_effect']]
# Make datetime objs into strings
vaccination_by_state['date_str'] = pd.to_datetime(vaccination_by_state['date'], format='%Y-%m-%d').dt.strftime('%Y-%m-%d')
df_forecast['date_str'] = pd.to_datetime(df_forecast['date'], format='%Y-%m-%d').dt.strftime('%Y-%m-%d')
# Filling in future days will the same vaccination level as current.
for state, forecast_df_state in df_forecast.groupby('state'):
latest_Reff_data_date = max(forecast_df_state.date_str)
latest_vaccination_data_date = max(vaccination_by_state.groupby('state').get_group(state)['date'])
latest_vaccination_date_effect = vaccination_by_state.groupby(['state', 'date']).get_group((state, latest_vaccination_data_date))['overall_transmission_effect'].iloc[0]
# Fill in the future dates with the same level of vaccination.
vaccination_by_state = vaccination_by_state.append(pd.DataFrame([(state, pd.to_datetime(date), latest_vaccination_date_effect, date.strftime('%Y-%m-%d')) for date in pd.date_range(latest_vaccination_data_date, latest_Reff_data_date)], columns = ['state', 'date', 'overall_transmission_effect', 'date_str']))
# Create a (state,date) indexed map of transmission effect
overall_transmission_effect = vaccination_by_state.set_index(['state', 'date_str'])['overall_transmission_effect'].to_dict()
# Apply this effect to the forecast
vaccination_multiplier = df_forecast.apply(lambda row: 1 if row['type']!='R_L' else overall_transmission_effect.get((row['state'], row['date_str']),1), axis=1)
df_forecast = df_forecast.drop('date_str', axis='columns')
# Apply the vaccine effect to the forecast. The 8:onwards columns are all the Reff paths.
df_forecast.iloc[: , 8:] = df_forecast.iloc[: , 8:].multiply(vaccination_multiplier.to_numpy(), axis='rows')
return df_forecast | 219118c333f14ba9f7a44416bd30e38c66b3d5d9 | 22,911 |
import sys
from pprint import pprint
def main(argv):
"""Main Compute Demo
When invoked from the command line, it will connect using secrets.py
(see secrets.py-dist for instructions and examples), and perform the
following tasks:
- List current nodes
- List available images (up to 10)
- List available sizes (up to 10)
"""
try:
driver = get_demo_driver()
except InvalidCredsError:
e = sys.exc_info()[1]
print("Invalid Credentials: " + e.value)
return 1
try:
print(">> Loading nodes...")
pprint(driver.list_nodes())
print(">> Loading images... (showing up to 10)")
pprint(driver.list_images()[:10])
print(">> Loading sizes... (showing up to 10)")
pprint(driver.list_sizes()[:10])
except Exception:
e = sys.exc_info()[1]
print("A fatal error occurred: " + e)
return 1 | 7d68d447591d9ca53db2a9bdc1108725f92c764a | 22,912 |
def string_with_fixed_length(s="", l=30):
    """
    Return a string with the contents of s plus white spaces until length l.
    :param s: input string
    :param l: total length of the string (will crop original string if longer than l)
    :return: string of exactly length l
    """
    # Crop to at most l characters, then pad with spaces to length l.
    return s[:l].ljust(l) | 2230a2893913eadb2c42a03c85728a5fe79e1e0f | 22,913 |
import urllib2
def fetch_ref_proteomes():
"""
This method returns a list of all reference proteome accessions available
from Uniprot
"""
ref_prot_list = []
response = urllib2.urlopen(REF_PROT_LIST_URL)
for ref_prot in response:
ref_prot_list.append(ref_prot.strip())
return ref_prot_list | f42c879f78a0e7281df369b40145c5d60aedb32b | 22,914 |
from typing import BinaryIO
def load(fp: BinaryIO, *, fmt=None, **kwargs) -> TextPlistTypes:
"""Read a .plist file (forwarding all arguments)."""
if fmt is None:
header = fp.read(32)
fp.seek(0)
if FMT_TEXT_HANDLER["detect"](header):
fmt = PF.FMT_TEXT
if fmt == PF.FMT_TEXT:
return FMT_TEXT_HANDLER["parser"](**kwargs).parse(fp)
else:
# This one can fail a bit more violently like the original
return pl.load(fp, fmt=translation[fmt], **kwargs) | d8445e388b33f69555c1270cebecfd552a34196a | 22,915 |
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper | d1e4f766827c13fa312ce3485daf43be2fc0eda1 | 22,916 |
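Usage sketch: the decorator rebuilds the class through the given metaclass, so the resulting class reports the metaclass as its type.

class Meta(type):
    pass

@_swig_add_metaclass(Meta)
class Wrapped(object):
    x = 1

print(type(Wrapped) is Meta, Wrapped.x)  # True 1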
from typing import Set
def create_affected_entities_description(security_data: SecurityData, limit: int = 5) -> str:
"""Create a description of the entities which are affected by a security problem.
:param security_data: the security details for which to create the description
:param limit: the maximum number of entities to list in the description
:return: the description
"""
def _stringify(entity_list: Set[str], label: str, the_limit: int):
if len(entity_list) > the_limit:
return f"{len(entity_list)} {label} affected ([details|{security_data.url}])\n"
return f"Affected {label}: {', '.join(entity_list)}\n"
desc = _stringify(security_data.affected_entity_names, 'entities', limit)
desc += _stringify(security_data.related_hostnames, 'hostnames', limit)
return desc | 95adc5a6a1fe88e0ec80273deee95e39ee196a55 | 22,917 |
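A hedged usage sketch; this `SecurityData` is a minimal stand-in carrying only the three attributes the function reads (in the real module the class is imported from elsewhere).

from dataclasses import dataclass

@dataclass
class SecurityData:
    url: str
    affected_entity_names: set
    related_hostnames: set

sec = SecurityData("https://example.com/problem/1",
                   {"svc-a", "svc-b"},
                   {"host1", "host2", "host3"})
# 2 entities fit under limit=2 and are listed; 3 hostnames exceed it
# and are summarised with a link instead.
print(create_affected_entities_description(sec, limit=2))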
def amac_person_org_list_ext():
    """
    Asset Management Association of China (AMAC) - information disclosure - practitioner information - public registry of fund practitioner qualification registrations at external organisations
    http://gs.amac.org.cn/amac-infodisc/res/pof/extperson/extPersonOrgList.html
    :return: organisation-level practitioner counts
    :rtype: pandas.DataFrame
    """
data = get_data(url=amac_person_org_list_ext_url, payload=amac_person_org_list_ext_payload)
need_data = data["content"]
keys_list = [
"orgName",
"orgType",
"extWorkerTotalNum",
"extOperNum",
"extSalesmanNum",
"extInvestmentManagerNum",
"extFundManagerNum",
    ]  # keys of the values to extract
manager_data_out = pd.DataFrame(need_data)
manager_data_out = manager_data_out[keys_list]
manager_data_out.columns = [
"机构名称",
"机构性质",
"员工人数",
"基金从业资格",
"基金销售业务资格",
"基金经理",
"投资经理",
]
return manager_data_out | f84d40a79ae49ebdf5a8ecb6612b37515f5ea676 | 22,918 |
import numpy as np
from dimarray import DimArray, Dataset
def concatenate(arrays, axis=0, _no_check=False, align=False, **kwargs):
""" concatenate several DimArrays
Parameters
-----------
arrays : list of DimArrays
arrays to concatenate
axis : int or str
axis along which to concatenate (must exist)
align : bool, optional
align secondary axes before joining on the primary
axis `axis`. Default to False.
**kwargs : optional key-word arguments passed to align, if align is True
Returns
-------
concatenated DimArray
See Also
--------
stack: join arrays along a new dimension
align: align arrays
Examples
--------
1-D
>>> from dimarray import DimArray
>>> a = DimArray([1,2,3], axes=[['a','b','c']])
>>> b = DimArray([4,5,6], axes=[['d','e','f']])
>>> concatenate((a, b))
dimarray: 6 non-null elements (0 null)
0 / x0 (6): 'a' to 'f'
array([1, 2, 3, 4, 5, 6])
2-D
>>> a = DimArray([[1,2,3],[11,22,33]])
>>> b = DimArray([[4,5,6],[44,55,66]])
>>> concatenate((a, b), axis=0)
dimarray: 12 non-null elements (0 null)
0 / x0 (4): 0 to 1
1 / x1 (3): 0 to 2
array([[ 1, 2, 3],
[11, 22, 33],
[ 4, 5, 6],
[44, 55, 66]])
>>> concatenate((a, b), axis='x1')
dimarray: 12 non-null elements (0 null)
0 / x0 (2): 0 to 1
1 / x1 (6): 0 to 2
array([[ 1, 2, 3, 4, 5, 6],
[11, 22, 33, 44, 55, 66]])
"""
# input argument check
if not type(arrays) in (list, tuple):
raise ValueError("arrays must be list or tuple, got {}:{}".format(type(arrays), arrays))
arrays = [a for a in arrays]
for i, a in enumerate(arrays):
if isinstance(a, Dataset):
msg = "\n==>Note: you may use `concatenate_ds` for Datasets"
raise ValueError("concatenate: expected DimArray. Got {}".format(type(a))+msg)
elif np.isscalar(a):
arrays[i] = DimArray(a)
if not isinstance(a, DimArray):
raise ValueError("concatenate: expected DimArray. Got {}".format(type(a)))
if type(axis) is not int:
axis = arrays[0].dims.index(axis)
dim = arrays[0].dims[axis]
# align secondary axes prior to concatenate
# TODO: just encourage user to use align outside this function
# and remove argument passing
if align:
kwargs['strict'] = True
for ax in arrays[0].axes:
if ax.name != dim:
arrays = align_(arrays, axis=ax.name, **kwargs)
values = np.concatenate([a.values for a in arrays], axis=axis)
_get_subaxes = lambda x: [ax for i, ax in enumerate(arrays[0].axes) if i != axis]
subaxes = _get_subaxes(arrays[0])
# concatenate axis values
newaxis = _concatenate_axes([a.axes[axis] for a in arrays])
if not align and not _no_check:
# check that other axes match
for ax in subaxes:
for a in arrays:
if not np.all(a.axes[ax.name].values == ax.values):
raise ValueError("contatenate: secondary axes do not match. Align first? (`align=True`)")
newaxes = subaxes[:axis] + [newaxis] + subaxes[axis:]
return arrays[0]._constructor(values, newaxes) | 3204fb32fb2ea3192b469cd37c76f7018f3a8190 | 22,919 |
def ele_types(eles):
"""
Returns a list of unique types in eles
"""
    return list({e['type'] for e in eles}) | e87ea4c6256c2520f9f714dd065a9e8642f77555 | 22,920 |
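For example, with dicts carrying a `'type'` key (Overpass/OSM-style elements are one plausible source):

elements = [{"type": "node"}, {"type": "way"}, {"type": "node"}]
print(sorted(ele_types(elements)))  # ['node', 'way']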
import numpy as np
def colorful_subgraph(G, colors, k, s, subgraph_type, get_detail=True, verbose=False):
    """Detect if a colorful path exists from s to any node by dynamic programming.
    Args:
        G (nx.Graph): with n nodes and m edges
        colors (list): list of integers representing node colors
        k (int): number of colors
        s (int): starting node
        subgraph_type (str): 'path' or 'cycle'
    Return:
        List: nodes connected to s by at least one colorful path
    """
n = G.number_of_nodes()
col = 2**k
dp_mat = np.zeros((n, col))
dp_mat[s][power2(colors[s])] = 1
targets = dp_helper(dp_mat, 2, G, colors, k, set([s]), s, subgraph_type)
if not get_detail:
return targets
else:
empty_color = 0
total_count = 0
for target in targets:
total_count += backtrack(dp_mat, G, colors, target, s, [str(target)], set_bit(empty_color, colors[target]), verbose)
if verbose:
print('from node', s, 'find in total', total_count, 'colorful paths of length', k)
return total_count | 644a1091bbee9bf79f236a8f815ae4c07fb1f538 | 22,921 |
import itertools
import numpy as np
def find_all_combos(
conformer,
delta=float(120),
cistrans=True,
chiral_centers=True):
"""
A function to find all possible conformer combinations for a given conformer
Params:
- conformer (`Conformer`) an AutoTST `Conformer` object of interest
- delta (int or float): a number between 0 and 180 or how many conformers to generate per dihedral
- cistrans (bool): indication of if one wants to consider cistrans bonds
- chiral_centers (bool): indication of if one wants to consider chiral centers bonds
Returns:
- all_combos (list): a list corresponding to the number of unique conformers created.
"""
conformer.get_geometries()
_, torsions = find_terminal_torsions(conformer)
torsion_angles = np.arange(0, 360, delta)
torsion_combos = list(itertools.product(
torsion_angles, repeat=len(torsions)))
if cistrans:
cistranss = []
cistrans_options = ["E", "Z"]
try:
ring_info = conformer._pseudo_geometry.GetRingInfo()
except AttributeError:
ring_info = conformer.rdkit_molecule.GetRingInfo()
        for ct in conformer.cistrans:
            i, j, k, l = ct.atom_indices
            if (ring_info.NumAtomRings(i) != 0) or (ring_info.NumAtomRings(k) != 0):
                continue
            cistranss.append(ct)
cistrans_combos = list(itertools.product(
cistrans_options, repeat=len(cistranss)))
else:
cistrans_combos = [()]
if chiral_centers:
chiral_centerss = []
chiral_options = ["R", "S"]
try:
ring_info = conformer._pseudo_geometry.GetRingInfo()
except AttributeError:
ring_info = conformer.rdkit_molecule.GetRingInfo()
for center in conformer.chiral_centers:
if ring_info.NumAtomRings(center.atom_index) != 0:
continue
chiral_centerss.append(center)
chiral_combos = list(itertools.product(
chiral_options, repeat=len(chiral_centerss)))
else:
chiral_combos = [()]
all_combos = list(
itertools.product(
torsion_combos,
cistrans_combos,
chiral_combos))
return all_combos | 1b5c5f44de23524a9392f51e76f46ef0f234648c | 22,922 |
import os
import numpy as np
import pyximport
from hmc2c import hmc_main_loop as c_hmc_main_loop
def hmc2(f, x, options, gradf, *args, **kargs):
"""
SAMPLES = HMC2(F, X, OPTIONS, GRADF)
Description
SAMPLES = HMC2(F, X, OPTIONS, GRADF) uses a hybrid Monte Carlo
algorithm to sample from the distribution P ~ EXP(-F), where F is the
first argument to HMC2. The Markov chain starts at the point X, and
the function GRADF is the gradient of the `energy' function F.
HMC2(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional arguments to
be passed to F() and GRADF().
[SAMPLES, ENERGIES, DIAGN] = HMC2(F, X, OPTIONS, GRADF) also returns a
log of the energy values (i.e. negative log probabilities) for the
samples in ENERGIES and DIAGN, a structure containing diagnostic
information (position, momentum and acceptance threshold) for each
step of the chain in DIAGN.POS, DIAGN.MOM and DIAGN.ACC respectively.
All candidate states (including rejected ones) are stored in
DIAGN.POS. The DIAGN structure contains fields:
pos
the position vectors of the dynamic process
mom
the momentum vectors of the dynamic process
acc
the acceptance thresholds
rej
the number of rejections
stp
the step size vectors
"""
global HMC_MOM
# Reference to structures is much slower, so...
opt_nsamples = options.nsamples
opt_nomit = options.nomit
opt_window = options.window
opt_steps = options.steps
opt_display = options.display
opt_persistence = options.persistence
if opt_persistence:
alpha = options.decay
        salpha = np.sqrt(1 - alpha**2)
else:
alpha = salpha = 0.
# TODO: not implemented yet. Haven't figured out how this is supposed to work...
if options.stepsf is not None:
# Stepsizes, varargin gives the opt.stepsf arguments (net, x ,y)
# where x is input data and y is a target data.
# epsilon = feval(opt.stepsf,varargin{:}).*opt.stepadj;
raise NotImplementedError
else:
epsilon = options.stepadj
nparams = len(x)
# Check the gradient evaluation.
if options.checkgrad:
# Check gradients
error = check_grad(f, gradf, x, *args)
print "Energy gradient error: %f"%error
# Initialize matrix of returned samples
samples = np.zeros((opt_nsamples, nparams))
# Check all keyword arguments
known_keyargs = ['return_energies','return_diagnostics']
for key in kargs.keys():
assert key in known_keyargs, 'unknown option %s'%key
# Return energies?
return_energies = kargs.get('return_energies',False)
if return_energies:
energies = np.zeros(opt_nsamples)
else:
energies = np.zeros(0)
# Return diagnostics?
return_diagnostics = kargs.get('return_diagnostics',False)
if return_diagnostics:
diagn_pos = np.zeros(opt_nsamples, nparams)
diagn_mom = np.zeros(opt_nsamples, nparams)
diagn_acc = np.zeros(opt_nsamples)
else:
diagn_pos = np.zeros((0,0))
diagn_mom = np.zeros((0,0))
diagn_acc = np.zeros(0)
if not opt_persistence or HMC_MOM is None or nparams != len(HMC_MOM):
# Initialise momenta at random
p = np.random.randn(nparams)
else:
# Initialise momenta from stored state
p = HMC_MOM
# Main loop.
all_args = [f,
x,
gradf,
args,
p,
samples,
energies,
diagn_pos,
diagn_mom,
diagn_acc,
opt_nsamples,
opt_nomit,
opt_window,
opt_steps,
opt_display,
opt_persistence,
return_energies,
return_diagnostics,
alpha,
salpha,
epsilon]
try:
os.environ['C_INCLUDE_PATH']=np.get_include()
pyximport.install()
print "Using compiled code"
c_hmc_main_loop(*all_args)
    except Exception:
        print("Using pure python code")
hmc_main_loop(*all_args)
    if opt_display:
        # `nreject` is assumed to be reported back by the sampling loop.
        print('\nFraction of samples rejected: %g\n' % (nreject / float(opt_nsamples)))
# Store diagnostics
if return_diagnostics:
diagn = dict()
diagn['pos'] = diagn_pos # positions matrix
diagn['mom'] = diagn_mom # momentum matrix
diagn['acc'] = diagn_acc # acceptance treshold matrix
diagn['rej'] = nreject/float(opt_nsamples) # rejection rate
diagn['stps'] = epsilon # stepsize vector
# Store final momentum value in global so that it can be retrieved later
if opt_persistence:
HMC_MOM = p
else:
HMC_MOM = None
if return_energies or return_diagnostics:
out = (samples,)
else:
return samples
if return_energies: out += (energies,)
if return_diagnostics: out += (diagn,)
return out | 3e885f1bc0ec5a6d4c40d5fd7cc3db6ce37c8dd0 | 22,923 |
def generate_abbreviations(
labels: tp.Iterable[str],
max_abbreviation_len: int = 3,
dictionary: tp.Union[tp.Tuple[str], str] = "cdfghjklmnpqrstvxz"):
"""
Returns unique abbreviations for the given labels. Generates the abbreviations with
:func:`beatsearch.utils.generate_unique_abbreviation`.
:param labels: labels to abbreviate
:param max_abbreviation_len: maximum length of the abbreviations
:param dictionary: characteristic characters (defaults to consonants)
:return: abbreviations of the given labels
"""
abbreviations = list()
for label in labels:
abbreviations.append(generate_unique_abbreviation(
label,
max_len=max_abbreviation_len,
taken_abbreviations=abbreviations,
dictionary=dictionary
))
return abbreviations | a22e68990147bd973c4a7af8e9e1a8f28fa7b4ac | 22,924 |
from itertools import combinations
def best_hand(hand):
    """From a 7-card "hand", return the best 5-card "hand"."""
    i = iter(combinations(hand, 5))
best_rank = 0, 0, 0
best_combination = None
for combination in i:
current_rank = hand_rank(combination)
if compare(current_rank, best_rank):
best_rank = current_rank
best_combination = combination
return best_combination | c885625c2b5f60453b6dca59e25003b9f977e9d4 | 22,925 |
def calculate_label_counts(examples):
"""Assumes that the examples each have ONE label, and not a distribution over labels"""
label_counts = {}
for example in examples:
label = example.label
label_counts[label] = label_counts.get(label, 0) + 1
return label_counts | 4c45378c6e29ce3d1b40b4d02a112e1fbd23d8b6 | 22,926 |
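Usage sketch: any object with a `label` attribute qualifies as an example here.

from types import SimpleNamespace

examples = [SimpleNamespace(label=l) for l in ("pos", "neg", "pos")]
print(calculate_label_counts(examples))  # {'pos': 2, 'neg': 1}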
def printer(arg1):
"""
Even though 'times' is destroyed when printer() has been called,
the 'inner' function created remembers what times is. Same goes
for the argument arg1.
"""
times = 3
def inner():
for i in range(times): print(arg1)
return inner | 7e3d2033602eaef9ef570c97a058208066073427 | 22,927 |
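Demonstration of the closure: both `arg1` and `times` remain available after `printer` returns.

say_hi = printer("hi")
say_hi()  # prints "hi" three times
print(say_hi.__closure__ is not None)  # True: captured variables live on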
from time import sleep
from bs4 import BeautifulSoup
def from_get_proxy():
"""
From "http://www.getproxy.jp"
:return:
"""
base = 'http://www.getproxy.jp/proxyapi?' \
'ApiKey=659eb61dd7a5fc509bef01f2e8b15669dfdb0f54' \
'&area={:s}&sort=requesttime&orderby=asc&page={:d}'
urls = [base.format('CN', i) for i in range(1, 25)]
urls += [base.format('US', i) for i in range(1, 25)]
urls += [base.format('CN', i) for i in range(25, 50)]
urls += [base.format('US', i) for i in range(25, 50)]
proxies = []
i = 0
retry = 0
length = len(urls)
while i < length:
res = _safe_http(urls[i])
try:
soup = BeautifulSoup(res, 'lxml')
        except Exception:
i += 1
continue
data = soup.find_all('ip')
if len(data) == 0:
retry += 1
if retry == 4:
break
else:
sleep(62)
else:
retry = 0
proxies += [pro.text for pro in data]
i += 1
return proxies | b3302b0092eb973022e2d322cc00e1391fe68c8b | 22,928 |
def KORL(a, kappa=None):
""" log rounds k-ary OR """
k = len(a)
if k == 1:
return a[0]
else:
t1 = KORL(a[:k//2], kappa)
t2 = KORL(a[k//2:], kappa)
return t1 + t2 - t1.bit_and(t2) | 2c85f7131dcfe0b35d3bfd8b04b876fad320572f | 22,929 |
import json
import jose.jws
import requests
def verify(token):
"""Verifies a JWS token, returning the parsed token if the token has a
valid signature by the key provided by the key of the OpenID
Connect server stated in the ISS claim of the token. If the
signature does not match that key, None is returned.
"""
unverified_token_data = json.loads(jose.jws.get_unverified_claims(token))
jwks_uri = requests.get("%s/.well-known/openid-configuration" % unverified_token_data["iss"]).json()["jwks_uri"]
keys = requests.get(jwks_uri).json()["keys"]
for key in keys:
try:
verified_token_data = json.loads(
jose.jws.verify(token, key, [key["alg"]]))
        except Exception:
pass
else:
return verified_token_data
return None | 8d1dac4d1c87de3d2d619f58bdf077f82b54dfda | 22,930 |
def get_listing_panel(tool, ghidra):
""" Get the code listing UI element, so we can get up-to-date location/highlight/selection """
cvs = tool.getService(ghidra.app.services.CodeViewerService)
return cvs.getListingPanel() | f14477cf13cb7eb4e7ede82b0c2068ca53a30723 | 22,931 |
def template_data(environment, template_name="report_html.tpl", **kwds):
    """Build an arbitrary templated page.
    """
    # `env` is assumed to be a module-level jinja2 Environment;
    # `environment` carries the template variables.
    template = env.get_template(template_name)
    return template.render(**environment) | 6b3c1ea5c280931280b5d6c69f380b9349ac0627 | 22,932 |
def resnet152_ibn_a(**kwargs):
"""
Constructs a ResNet-152-IBN-a model.
"""
model = ResNet_IBN(block=Bottleneck_IBN,
layers=[3, 8, 36, 3],
ibn_cfg=('a', 'a', 'a', None),
**kwargs)
return model | 5cb059910c5442b0df7c08471f75b96fe3fb4c80 | 22,933 |
import scipy
def calibratePose(pts3, pts2, cam, params_init):
    """
    Calibrates the camera to match the calibrated view by updating R,t so that pts3 projects
    as close as possible to pts2
    :param pts3: Coordinates of N points stored in an array of shape (3,N)
    :param pts2: Coordinates of N points stored in an array of shape (2,N)
    :param cam: Initial estimate of the camera
    :param params_init: Initial estimate of the R,t parameters
    :return: Refined estimate of camera with updated R,t parameters
    """
func = lambda rt: residuals(pts3,pts2,cam,rt)
least = scipy.optimize.leastsq(func,params_init)[0]
cam.update_extrinsics(least)
return cam | 5f1fcf55ec934596fd46f129d12e8457173239eb | 22,934 |
import base64
from inspect import stack
def image_to_base64(file_image):
    """
    THIS FUNCTION CONVERTS AN INPUT FILE (PNG) TO BASE64.
    THE SOURCE FILE (PNG) IS STORED ON THE MACHINE RUNNING THE MODEL.
    # Arguments
        file_image              - Required : Path to the image file (String)
    # Returns
        built_base64            - Required : Value in Base64 format (BASE64)
    """
    # Initialise the variable that will receive the Base64 value
    built_base64 = None
    try:
        # Read the image file and encode its bytes as Base64
        built_base64 = base64.b64encode(open(file_image, 'rb').read())
    except Exception as ex:
        execute_log.error("ERROR IN FUNCTION {} - {}".format(stack()[0][3], ex))
    return built_base64 | 74e9c46ce48e23fdb5453cb9ce5223dfb8e6004b | 22,935 |
from pathlib import Path
from typing import Set
def get_files_recurse(path: Path) -> Set:
"""Get all files recursively from given :param:`path`."""
res = set()
for p in path.rglob("*"):
if p.is_dir():
continue
res.add(p)
return res | c129ce43130da09962264f6e7935410685815943 | 22,936 |
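Quick check against a throwaway directory tree:

import tempfile

with tempfile.TemporaryDirectory() as d:
    root = Path(d)
    (root / "sub").mkdir()
    (root / "a.txt").write_text("x")
    (root / "sub" / "b.txt").write_text("y")
    print(sorted(p.name for p in get_files_recurse(root)))  # ['a.txt', 'b.txt']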
from typing import List
def img_after_ops(img: List[str], ops: List[int]) -> List[str]:
"""Apply rotation and flip *ops* to image *img* returning the result"""
new_img = img[:]
for op in ops:
if op == Tile.ROTATE:
new_img = [cat(l)[::-1] for l in zip(*new_img)]
elif op == Tile.FLIP:
new_img = [l[::-1] for l in new_img]
return new_img | a28f1dbdf7f756c9b8b313d889596797466ab729 | 22,937 |
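A sketch with stand-ins: `Tile.ROTATE`, `Tile.FLIP` and `cat` come from the surrounding module; here `cat` is assumed to be `"".join`.

class Tile:
    ROTATE, FLIP = 0, 1

cat = "".join

print(img_after_ops(["ab", "cd"], [Tile.ROTATE]))  # ['ca', 'db']
print(img_after_ops(["ab", "cd"], [Tile.FLIP]))    # ['ba', 'dc']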
import functools
import urllib
def authenticated(method):
"""Decorate methods with this to require that the user be logged in.
Fix the redirect url with full_url.
Tornado use uri by default.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
user = self.current_user
if not user:
if self.request.method == "GET":
url = self.get_login_url()
if "?" not in url:
url += "?" + urllib.urlencode(dict(next=self.request.full_url()))
self.redirect(url)
return
raise HTTPError(403)
#self._current_user = user
return method(self, *args, **kwargs)
return wrapper | c4dc18af60b9270d644ed807ea1c74b821ea7bca | 22,938 |
import math
def ring_samp_ranges(zma, rng_atoms):
""" Set sampling range for ring dihedrals.
:param zma: Z-Matrix
:type zma: automol.zmat object
:param rng_atoms: idxs for atoms inside rings
:type rng_atoms: list
"""
samp_range_dct = {}
ring_value_dct = ring_dihedrals(zma, rng_atoms)
for key, value in ring_value_dct.items():
samp_range_dct[key] = [value - math.pi/4, value + math.pi/4]
return samp_range_dct | 6e1958f66f596d9b1230864e3b9a2b73cd01cb35 | 22,939 |
def users_key(group='default'):
""" Returns the user key """
return db.Key.from_path('users', group) | 1912165ff75c39c9fbcb1432f46ef80f9b08c096 | 22,940 |
def VolumetricFlow(self):
"""Volumetric flow (m^3/hr)."""
stream, mol = self.data
m = mol[0]
if m:
c = self.name # c = compound
c.T = stream.T
c.P = stream.P
c.phase = stream._phase
return c.Vm * m * 1000
else:
return 0. | c799c27079494561e30975a6e03b5c1cefe9a907 | 22,941 |
def build_queue_adapter(workflow_client, logger=None, **kwargs):
"""Constructs a queue manager based off the incoming queue socket type.
Parameters
----------
workflow_client : object ("distributed.Client", "fireworks.LaunchPad")
A object wrapper for different distributed workflow types
logger : logging.Logger, Optional. Default: None
Logger to report to
**kwargs
Additional kwargs for the Adapter
Returns
-------
ret : Adapter
Returns a valid Adapter for the selected computational queue
"""
adapter_type = type(workflow_client).__module__ + "." + type(workflow_client).__name__
if adapter_type == "parsl.dataflow.dflow.DataFlowKernel":
adapter = parsl_adapter.ParslAdapter(workflow_client, logger=logger)
elif adapter_type == "distributed.client.Client":
adapter = dask_adapter.DaskAdapter(workflow_client, logger=logger)
elif adapter_type == "fireworks.core.launchpad.LaunchPad":
adapter = fireworks_adapter.FireworksAdapter(workflow_client, logger=logger)
else:
raise KeyError("QueueAdapter type '{}' not understood".format(adapter_type))
return adapter | bbd013fef1095dd4881a8b51561ed4080682141e | 22,942 |
import torch
def pad_sents(sents, pad_token, return_tensor = False):
""" Pad list of sentences according to the longest sentence in the batch.
The paddings should be at the end of each sentence.
@param sents (list[list[str]]): list of sentences, where each sentence
is represented as a list of words
@param pad_token (str): padding token
@returns sents_padded (list[list[str]]): list of sentences where sentences shorter
than the max length sentence are padded out with the pad_token, such that
each sentences in the batch now has equal length.
"""
sents_padded = []
maxLen = 0
for i in sents:
maxLen = max(len(i),maxLen)
for i in range(len(sents)):
sen = sents[i].cpu().numpy().tolist()
for j in range(maxLen - len(sen)):
sen.append(pad_token)
sen = torch.tensor(sen, dtype=torch.long).cuda()
sents_padded.append(sen)
if return_tensor:
t = torch.zeros(len(sents), maxLen).long()
for i in range(len(sents)):
t[i] = sents_padded[i]
sents_padded = t.cuda()
return sents_padded | 3100ef6f1924685f7a46b22753830cdf203e565d | 22,943 |
import jax
import jax.numpy as jnp
def _take_along_axis(array, indices,
axis):
"""Takes values from the input array by matching 1D index and data slices.
This function serves the same purpose as jax.numpy.take_along_axis, except
that it uses one-hot matrix multiplications under the hood on TPUs:
(1) On TPUs, we use one-hot matrix multiplications to select elements from the
array.
(2) Otherwise, we fall back to jax.numpy.take_along_axis.
Notes:
- To simplify matters in case (1), we only support slices along the second
or last dimensions.
- We may wish to revisit (1) for very large arrays.
Args:
array: Source array.
indices: Indices to take along each 1D slice of array.
axis: Axis along which to take 1D slices.
Returns:
The indexed result.
"""
if array.ndim != indices.ndim:
raise ValueError(
"indices and array must have the same number of dimensions; "
f"{indices.ndim} vs. {array.ndim}.")
if (axis != -1 and axis != array.ndim - 1 and # Not last dimension
axis != 1 and axis != -array.ndim + 1): # Not second dimension
raise ValueError(
"Only slices along the second or last dimension are supported; "
f"array.ndim = {array.ndim}, while axis = {axis}.")
if _favor_one_hot_slices():
one_hot_length = array.shape[axis]
one_hot_indices = jax.nn.one_hot(indices, one_hot_length, axis=axis)
if axis == -1 or array.ndim == 1:
# Take i elements from last dimension (s).
# We must use HIGHEST precision to accurately reproduce indexing
# operations with matrix multiplications.
result = jnp.einsum(
"...s,...is->...i",
array,
one_hot_indices,
precision=jax.lax.Precision.HIGHEST)
else:
# Take i elements from second dimension (s). We assume here that we always
# want to slice along the second dimension.
# We must use HIGHEST precision to accurately reproduce indexing
# operations with matrix multiplications.
result = jnp.einsum(
"ns...,nis...->ni...",
array,
one_hot_indices,
precision=jax.lax.Precision.HIGHEST)
return jax.lax.convert_element_type(result, array.dtype)
else:
return jnp.take_along_axis(array, indices, axis=axis) | 9a926a53341e0fc964fc568474ca29db286ed14e | 22,944 |
import requests
import logging
def send_envelope(
adfs_host: str,
envelope: str,
) -> requests.Response:
"""Send an envelope to the target ADFS server.
Arguments:
adfs_host: target ADFS server
envelope: envelope to send
Returns:
ADFS server response
"""
url = f"http://{adfs_host}/adfs/services/policystoretransfer"
headers = {"Content-Type": "application/soap+xml"}
response = None
try:
response = requests.post(url, data=envelope, headers=headers)
except Exception as e:
logging.error(e)
return response | bc59fa99fa28432dd969f1a72bbae00af716b443 | 22,945 |
def display_main(choice):
"""
    Link the chosen option to the main board
"""
return main(choice) | 66b0b0d36d47b4107b5b57dce9ea94787f3fa83b | 22,946 |
import random
def generate_network_table(seed=None):
"""
Generates a table associating MAC and IP addressed to be distributed by our virtual network adapter via DHCP.
"""
# we use the seed in case we want to generate the same table twice
if seed is not None:
random.seed(seed)
# number of IPs per network is 253 (2-254)
# generate random MACs, set ensures they are unique
macs: set[str] = set()
while len(macs) < 253:
macs.add(
"48:d2:24:bf:"
+ to_byte(random.randint(0, 255))
+ ":"
+ to_byte(random.randint(0, 255))
)
# associate each MAC with a sequential IP
table = {}
ip_counter = 2
for mac in macs:
table[mac] = "192.168.150." + str(ip_counter)
ip_counter += 1
return table | d39915c129b2d5a99fc41c90b718fcca17d20cd5 | 22,947 |
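Usage sketch; `to_byte` is not shown above, so a plausible stand-in that renders a 0-255 value as two lowercase hex digits is assumed:

def to_byte(value):
    # assumed helper: 0-255 -> two lowercase hex digits
    return format(value, "02x")

table = generate_network_table(seed=42)
print(len(table))  # 253 unique MAC -> IP pairs
mac, ip = next(iter(table.items()))
print(mac, "->", ip)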
import torch
def loss_mGLAD(theta, S):
"""The objective function of the graphical lasso which is
the loss function for the meta learning of glad
loss-meta = 1/B(-log|theta| + <S, theta>)
Args:
theta (tensor 3D): precision matrix BxDxD
S (tensor 3D): covariance matrix BxDxD (dim=D)
Returns:
loss (tensor 1D): the loss value of the obj function
"""
B, D, _ = S.shape
t1 = -1*torch.logdet(theta)
# Batch Matrix multiplication: torch.bmm
t21 = torch.einsum("bij, bjk -> bik", S, theta)
# getting the trace (batch mode)
t2 = torch.einsum('jii->j', t21)
# print(t1, torch.det(theta), t2)
# regularization term
# tr = 1e-02 * torch.sum(torch.abs(theta))
meta_loss = torch.sum(t1+t2)/B # sum over the batch
return meta_loss | b056a5c57e681cca40c6a7a0d030dee25049e6de | 22,948 |
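Sanity check on random symmetric positive-definite precision matrices (a sketch, not part of the original module):

B, D = 4, 5
A = torch.randn(B, D, D)
theta = torch.matmul(A, A.transpose(1, 2)) + 0.1 * torch.eye(D)  # SPD, so logdet is finite
S = torch.eye(D).expand(B, D, D)
print(loss_mGLAD(theta, S).item())  # a finite scalar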
def parse_remote_path(remote_path):
""" Wrapper around the utils function - checks for the right protocol """
protocol, bucket, key = utils.parse_remote_path(remote_path)
assert protocol == "s3:", "Mismatched protocol (expected AWS S3)"
return bucket, key | 65c26139d0e28f64ae966a75bf730d1a6b8b2248 | 22,949 |
from typing import Callable
def operations(func: Callable) -> Callable:
"""Allows developers to specify operations which
should not be called in the fuzzing process.
Examples:
Ignoring operations specified by operation ids in lists
>>> @fuzz_lightyear.exclude.operations
... def b():
... return ['get_pets', 'get_store_inventory']
Ignoring operations specified by "tag.operation_id" in lists
>>> @fuzz_lightyear.exclude.operations
... def c():
... return ['pets.get_pets', 'store.get_store_inventory']
"""
get_operations_fn = _get_formatted_operations(func)
get_excluded_operations().update(get_operations_fn())
return func | ce6d9596ff307f15c86d4823d3ebcfdafa5f4e33 | 22,950 |
def _gen_dfa_table(t: UxsdComplex) -> str:
"""Generate a 2D C++ array representing DFA table from an UxsdComplex's DFA.
The array is indexed by the state and input token value, such that table[state][input]
gives the next state.
"""
assert isinstance(t.content, UxsdDfa)
dfa = t.content.dfa
out = ""
out += "constexpr int NUM_%s_STATES = %d;\n" % (t.cpp.upper(), len(dfa.states))
out += "constexpr const int NUM_%s_INPUTS = %d;\n" % (t.cpp.upper(), len(dfa.alphabet))
out += "constexpr int gstate_%s[NUM_%s_STATES][NUM_%s_INPUTS] = {\n" % (t.cpp, t.cpp.upper(), t.cpp.upper())
for i in range(0, max(dfa.states)+1):
state = dfa.transitions[i]
row = [str(state[x]) if state.get(x) is not None else "-1" for x in dfa.alphabet]
out += "\t{%s},\n" % ", ".join(row)
out += "};\n"
return out | f0bae5dd8f897786a62016b7b807e2c7730f1e89 | 22,951 |
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from simtk import unit  # openmm units module, used for the Quantity checks below
def get_heat_capacity_derivative(Cv, temperature_list, plotfile='dCv_dT.pdf'):
"""
Fit a heat capacity vs T dataset to cubic spline, and compute derivatives
:param Cv: heat capacity data series
:type Cv: Quantity or numpy 1D array
:param temperature_list: List of temperatures used in replica exchange simulations
:type temperature: Quantity or numpy 1D array
:param plotfile: path to filename to output plot
:type plotfile: str
:returns:
- dC_v_out ( 1D numpy array (float) ) - 1st derivative of heat capacity, from a cubic spline evaluated at each point in Cv)
- d2C_v_out ( 1D numpy array (float) ) - 2nd derivative of heat capacity, from a cubic spline evaluated at each point in Cv)
- spline_tck ( scipy spline object (tuple) ) - knot points (t), coefficients (c), and order of the spline (k) fit to Cv data
"""
xdata = temperature_list
ydata = Cv
# Strip units off quantities:
if type(xdata[0]) == unit.quantity.Quantity:
xdata_val = np.zeros((len(xdata)))
xunit = xdata[0].unit
for i in range(len(xdata)):
xdata_val[i] = xdata[i].value_in_unit(xunit)
xdata = xdata_val
if type(ydata[0]) == unit.quantity.Quantity:
ydata_val = np.zeros((len(ydata)))
yunit = ydata[0].unit
for i in range(len(ydata)):
ydata_val[i] = ydata[i].value_in_unit(yunit)
ydata = ydata_val
# Fit cubic spline to data, no smoothing
spline_tck = interpolate.splrep(xdata, ydata, s=0)
xfine = np.linspace(xdata[0],xdata[-1],1000)
yfine = interpolate.splev(xfine, spline_tck, der=0)
dCv = interpolate.splev(xfine, spline_tck, der=1)
d2Cv = interpolate.splev(xfine, spline_tck, der=2)
dCv_out = interpolate.splev(xdata, spline_tck, der=1)
d2Cv_out = interpolate.splev(xdata, spline_tck, der=2)
figure, axs = plt.subplots(
nrows=3,
ncols=1,
sharex=True,
)
axs[0].plot(
xdata,
ydata,
'ok',
markersize=4,
fillstyle='none',
label='simulation data',
)
axs[0].plot(
xfine,
yfine,
'-b',
label='cubic spline',
)
axs[0].set_ylabel(r'$C_{V} (kJ/mol/K)$')
axs[0].legend()
axs[1].plot(
xfine,
dCv,
'-r',
label=r'$\frac{dC_{V}}{dT}$',
)
axs[1].legend()
axs[1].set_ylabel(r'$\frac{dC_{V}}{dT}$')
axs[2].plot(
xfine,
d2Cv,
'-g',
label=r'$\frac{d^{2}C_{V}}{dT^{2}}$',
)
axs[2].legend()
axs[2].set_ylabel(r'$\frac{d^{2}C_{V}}{dT^{2}}$')
axs[2].set_xlabel(r'$T (K)$')
plt.tight_layout()
plt.savefig(plotfile)
plt.close()
return dCv_out, d2Cv_out, spline_tck | 49f27209c9f6387fc25936481d5d35cebdc6523f | 22,952 |
def get_gradients_of_activations(model, x, y, layer_names=None, output_format='simple', nested=False):
"""
Get gradients of the outputs of the activation functions, regarding the loss.
Intuitively, it shows how your activation maps change over a tiny modification of the loss.
:param model: keras compiled model or one of ['vgg16', 'vgg19', 'inception_v3', 'inception_resnet_v2',
'mobilenet_v2', 'mobilenetv2'].
:param x: Model input (Numpy array). In the case of multi-inputs, x should be of type List.
:param y: Model target (Numpy array). In the case of multi-inputs, y should be of type List.
:param layer_names: (optional) Single name of a layer or list of layer names for which activations should be
returned. It is useful in very big networks when it is computationally expensive to evaluate all the layers/nodes.
:param output_format: Change the output dictionary key of the function.
- 'simple': output key will match the names of the Keras layers. For example Dense(1, name='d1') will
return {'d1': ...}.
- 'full': output key will match the full name of the output layer name. In the example above, it will
return {'d1/BiasAdd:0': ...}.
- 'numbered': output key will be an index range, based on the order of definition of each layer within the model.
:param nested: (optional) If set, will move recursively through the model definition to retrieve nested layers.
Recursion ends at leaf layers of the model tree or at layers with their name specified in layer_names.
E.g., a model with the following structure
-layer1
-conv1
...
-fc1
-layer2
-fc2
... yields a dictionary with keys 'layer1/conv1', ..., 'layer1/fc1', 'layer2/fc2'.
If layer_names = ['layer2/fc2'] is specified, the dictionary will only hold one key 'layer2/fc2'.
The layer names are generated by joining all layers from top level to leaf level with the separator '/'.
:return: Dict {layer_names (specified by output_format) -> activation of the layer output/node (Numpy array)}.
"""
nodes = _get_nodes(model, output_format, nested=nested, layer_names=layer_names)
return _get_gradients(model, x, y, nodes) | 5cb9234594b867383f92f4e2e7e91e39eb79d120 | 22,953 |
def string_to_screens_and_lines(source, allowed_width, allowed_height, f, pixels_between_lines = None, end_screens_with = (), do_not_include = ()):
"""
Convert a string to screens and lines.
Pygame does not allow line breaks ("\n") when rendering text. The purpose
of this function is to break a string into lines and screens given a font
and screen dimensions.
The following two assumptions are made:
1. Line breaks ("\n") in source denote the start of a new paragraph.
Therefore, to have an actual blank line (i.e., an empty string)
appear in the returned array, add another "\n" immediately
following the first.
2. Spaces denote the end of a word.
Parameters:
source: the string to divide into screens and lines.
allowed_width: the width, in pixels, permitted for lines; can be a
number of pixels or a proportion of the active screen's width.
allowed_height: same as allowed_width but for the height of a single
screen.
f: the font with which source is measured.
Keyword Parameters:
pixels_between_lines: blank pixel rows between lines of text; defaults
to None, in which case it is obtained from f.
end_screens_with: a restricted set of characters that may end a
screen; defaults to an empty tuple, in which case any character
ending a word can end a screen.
do_not_include: words that are exceptions to the end_screens_with
words (e.g., "Mrs." ends in a period but should not end a screen)
Returns:
screens: a multidimensional list of screens and lines.
"""
# Check if allowed_height and allowed_width need to be set:
if 0 < allowed_width <= 1 and 0 < allowed_height <= 1:
allowed_width, allowed_height = screen_dimensions()
    elif 0 < allowed_width <= 1 or 0 < allowed_height <= 1:
        raise ValueError("Both or neither of allowed_width and "
                         "allowed_height can be between 0 and 1.")
# Check if pixels_between_lines needs to be set:
if not pixels_between_lines:
pixels_between_lines = f.get_linesize()
else:
        assert pixels_between_lines > 0, \
            "pixels_between_lines must be positive."
# Make sure that allowed_height can accommodate the tallest word in
# source:
    assert f.size(source)[1] <= allowed_height, \
        "allowed_height cannot accommodate source."
screens = []
# Break source into paragraphs and paragraphs into single words:
paragraphs = source.split("\n")
single_words = []
for paragraph in paragraphs:
individual_words = paragraph.split(" ")
# While here, verify that the longest word fits:
widest_word, pixels = longest_string_to_render(individual_words, f)
        assert pixels < allowed_width, \
            "{:s} in source is too long for allowed_width.".format(widest_word)
single_words.append(individual_words)
# The function branches next, depending on whether restrictions have been
# placed on where screen breaks can occur.
if not end_screens_with:
# Screen breaks can occur following any word.
# Break single_words into lines without regard to screens:
lines_of_text, total_height = wrap_text(
single_words,
allowed_width,
f,
return_height = True,
line_height = pixels_between_lines
)
if total_height <= allowed_height:
# Everything fits on one screen.
screens.append(lines_of_text)
else:
# There will be at least two screens.
# Initialize the first screen and a height counter:
screen = []
screen_height = 0
for line in lines_of_text:
line_height = f.size(line)[1]
screen_height = screen_height+line_height+pixels_between_lines
if screen_height < allowed_height:
# line fits on the current screen.
screen.append(line)
elif screen_height == allowed_height or screen_height-pixels_between_lines < allowed_height:
# line fits, but no more will.
screen.append(line)
screens.append(screen)
screen = []
screen_height = 0
else:
# line doesn't fit.
screens.append(screen)
screen = [line]
screen_height = line_height+pixels_between_lines
# Check for a remaining screen:
if screen:
            screens.append(screen)
    else:
# Screens can only end following specific strings.
# These strings do not need to be end-of-sentence characters, but it
# is difficult to imagine what else they would be. Therefore, I refer
# to the resulting strings as sentences, acknowledging that this may
# be incorrect terminology.
# Break paragraphs into sentences:
sentences = []
for paragraph in paragraphs:
if sentences:
# This is not the first line, so start the paragraph on a new
# line:
sentences.append("")
if paragraph:
# paragraph is not a blank line.
# Break it into sentences:
paragraph_as_sentences = text_to_sentences(
paragraph,
terminators = end_screens_with,
exclude = do_not_include
)
sentences = sentences+paragraph_as_sentences
else:
# paragraph is a blank line.
sentences.append("")
# Initialize the first screen:
screen = []
for sentence in sentences:
# Determine whether sentence starts on a new line or continues
# from the current line:
if screen:
# If the last line in screen is blank, then sentence starts on
# a new line.
last_line = screen[-1]
if last_line:
next_line = False
else:
next_line = True
else:
# This screen is blank.
# Arbitrarily set next_line to False:
next_line = False
# Try adding sentence to the current screen:
possible_screen, screen_height = wrap_text(
sentence,
allowed_width,
f,
old_text = screen,
start_new_line = next_line,
return_height = True,
line_height = pixels_between_lines
)
if screen_height <= allowed_height:
# Update the current screen:
screen = possible_screen
else:
# This sentence does not fit.
# If screen is currently blank, it means that sentence needs
# to be broken across screens (i.e., it will not fit on a
# single screen).
if screen:
# This is not an issue.
# Save screen:
screens.append(screen)
# Initialize the next screen with sentence:
screen, current_height = wrap_text(
sentence,
allowed_width,
f,
return_height = True,
line_height = pixels_between_lines
)
if current_height > allowed_height:
# sentence needs to be broken across screens.
# This can be accomplished by calling the present
# function without restrictions on screen endings.
# However, the text currently on screen is needed too.
text_to_add = ""
for line in screen:
                    text_to_add = text_to_add + line + " "
text_to_add = text_to_add+sentence
multiple_screens = string_to_screens_and_lines(
text_to_add,
allowed_width,
allowed_height,
f,
pixels_between_lines = pixels_between_lines
)
for s in multiple_screens:
screens.append(s)
else:
# screen is empty, but sentence will not fit.
# Call the present function to get this sentence's
# screens:
multiple_screens = string_to_screens_and_lines(
sentence,
allowed_width,
allowed_height,
f,
pixels_between_lines = pixels_between_lines
)
for s in multiple_screens:
screens.append(s)
# Check if a final screen needs to be added:
if screen:
screens.append(screen)
return screens | f43907fdf47b342b1eac3100c07e9d452b0d865f | 22,954 |
def trim_spectrum(self, scouse, flux):
"""
Trims a spectrum according to the user inputs
"""
return flux[scouse.trimids] | 3f18259986e677f8e8a9718408cdb56352d956e5 | 22,955 |
def test_bitwise_and(a, b):
"""
>>> test_bitwise_and(0b01, 0b10)
0L
>>> test_bitwise_and(0b01, 0b11)
1L
>>> test_bitwise_and(0b01, 2.0)
Traceback (most recent call last):
...
NumbaError: 27:15: Expected an int, or object, or bool
>>> test_bitwise_and(2.0, 0b01)
Traceback (most recent call last):
...
NumbaError: 27:11: Expected an int, or object, or bool
"""
return a & b | 0855921300751368eb0ad3f3cba37b6ddac759fd | 22,956 |
import inspect
def flagFunction(method, name=None):
"""
Determine whether a function is an optional handler for a I{flag} or an
I{option}.
A I{flag} handler takes no additional arguments. It is used to handle
command-line arguments like I{--nodaemon}.
An I{option} handler takes one argument. It is used to handle command-line
arguments like I{--path=/foo/bar}.
@param method: The bound method object to inspect.
@param name: The name of the option for which the function is a handle.
@type name: L{str}
@raise UsageError: If the method takes more than one argument.
@return: If the method is a flag handler, return C{True}. Otherwise return
C{False}.
"""
if _PY3:
reqArgs = len(inspect.signature(method).parameters)
if reqArgs > 1:
raise UsageError('Invalid Option function for %s' %
(name or method.__name__))
if reqArgs == 1:
return False
else:
reqArgs = len(inspect.getargspec(method).args)
if reqArgs > 2:
raise UsageError('Invalid Option function for %s' %
(name or method.__name__))
if reqArgs == 2:
return False
return True | 265f92ca52ec4b3c1b6b3955a7ac719f64099bad | 22,957 |
from pathlib import Path
def envnotfound(env):
"""`'Env "my-venv" not found. Did you mean "./my-venv"?'`"""
msg = f'Env "{env}" not found.'
if arg_is_name(env) and Path(env).exists():
msg += f'\nDid you mean "./{env}"?'
return msg | e2437bbf141a841650f33ede5d7fb6489a954f00 | 22,958 |
def random_translation_along_x(gt_boxes, points, offset_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
        offset_range: [min, max]
Returns:
"""
offset = np.random.uniform(offset_range[0], offset_range[1])
points[:, 0] += offset
gt_boxes[:, 0] += offset
# if gt_boxes.shape[1] > 7:
# gt_boxes[:, 7] += offset
return gt_boxes, points | 6998e463313faeaaf75e96b0374c0bdc5415c8f1 | 22,959 |
import numpy
import scipy.stats
def predband(xd,yd,a,b,conf=0.95,x=None):
"""
Calculates the prediction band of the linear regression model at the desired confidence
level, using analytical methods.
Clarification of the difference between confidence and prediction bands:
"The 2sigma confidence interval is 95% sure to contain the best-fit regression line.
This is not the same as saying it will contain 95% of the data points. The prediction bands are
further from the best-fit line than the confidence bands, a lot further if you have many data
points. The 95% prediction interval is the area in which you expect 95% of all data points to fall."
(from http://graphpad.com/curvefit/linear_regression.htm)
Arguments:
- conf: desired confidence level, by default 0.95 (2 sigma)
- xd,yd: data arrays
- a,b: linear fit parameters as in y=ax+b
- x: (optional) array with x values to calculate the confidence band. If none is provided, will
by default generate 100 points in the original x-range of the data.
Usage:
>>> lpb,upb,x=nemmen.predband(all.kp,all.lg,a,b,conf=0.95)
calculates the prediction bands for the given input arrays
>>> pylab.fill_between(x, lpb, upb, alpha=0.3, facecolor='gray')
plots a shaded area containing the prediction band
:returns: Sequence (lpb,upb,x) with the arrays holding the lower and upper confidence bands
corresponding to the [input] x array.
References:
1. `Introduction to Simple Linear Regression, Gerard
E. Dallal, Ph.D. <http://www.JerryDallal.com/LHSP/slr.htm>`_
"""
alpha=1.-conf # significance
n=xd.size # data sample size
if x is None: x=numpy.linspace(xd.min(),xd.max(),100)
# Predicted values (best-fit model)
y=a*x+b
# Auxiliary definitions
sd=scatterfit(xd,yd,a,b) # Scatter of data about the model
sxd=numpy.sum((xd-xd.mean())**2)
sx=(x-xd.mean())**2 # array
# Quantile of Student's t distribution for p=1-alpha/2
q=scipy.stats.t.ppf(1.-alpha/2.,n-2)
# Prediction band
dy=q*sd*numpy.sqrt( 1.+1./n + sx/sxd )
upb=y+dy # Upper prediction band
lpb=y-dy # Lower prediction band
return lpb,upb,x | a235548f4593cfc105bba9d7268dba2e14374df7 | 22,960 |
def orient_edges(G):
"""Orient remaining edges after colliders have been oriented.
:param G: partially oriented graph (colliders oriented)
:returns: maximally oriented DAG
"""
undir_list = [edge for edge in G.edges() if G.is_undir_edge(edge)]
undir_len = len(undir_list)
idx = 0
while idx < undir_len:
success = False
for edge in undir_list:
if can_orient(G,edge):
G.remove_edge(*edge[::-1])
success = True
if success:
undir_list = [edge for edge in G.edges() if G.is_undir_edge(edge)]
idx += 1
else:
break
return G | 958c22b7c7906219bfc52d2cc59945a22e4e1060 | 22,961 |
def _create_deserialize_fn(attributes: dict, globals: dict, bases: tuple[type]) -> str:
"""
Create a deserialize function for binary struct from a buffer
The function will first deserialize parent classes, then the class attributes
"""
lines = []
# For this class bases
for parent in bases:
if not _is_parent_fn_callable(parent, 'deserialize'):
continue
lines.append(f'{parent.__name__}.deserialize(self, buf)')
lines.append(f'buf = buf[{parent.__name__}._bs_size(self):]')
# For this class attributes
for name, annotation in attributes.items():
annotation_type = _get_annotation_type(annotation)
if annotation_type == AnnotationType.TYPED_BUFFER:
lines.append(f'self.{name}.deserialize(buf)')
else:
lines.append(f'self.{name}.deserialize(buf[:self.{name}.size_in_bytes])')
lines.append(f'buf = buf[self.{name}.size_in_bytes:]')
return _create_fn('deserialize', ['self, buf'], lines + ['return self'], globals) | e5058b73d47a034323a4ebe65edbf888bdf98321 | 22,962 |
def blck_repeat(preprocessor: Preprocessor, args: str, contents: str) -> str:
"""The repeat block.
usage: repeat <number>
    renders its contents once, then repeats the rendered result number times"""
args = args.strip()
if not args.isnumeric():
preprocessor.send_error("invalid-argument", "invalid argument. Usage: repeat [uint > 0]")
number = int(args)
if number <= 0:
preprocessor.send_error("invalid-argument", "invalid argument. Usage: repeat [uint > 0]")
preprocessor.context.update(preprocessor.current_position.end, "in block repeat")
contents = preprocessor.parse(contents)
preprocessor.context.pop()
return contents * number | 8dfedd854a68b2fcc33ea4b9714744302fe5934d | 22,963 |
import math
def is_prime(n: int) -> bool:
"""Determines if the natural number n is prime."""
# simple test for small n: 2 and 3 are prime, but 1 is not
if n <= 3:
return n > 1
# check if multiple of 2 or 3
if n % 2 == 0 or n % 3 == 0:
return False
# search for subsequent prime factors around multiples of 6
max_factor = int(math.sqrt(n))
for i in range(5, max_factor + 1, 6):
if n % i == 0 or n % (i + 2) == 0:
return False
return True | e7bd02271681906f9ee4e63305c5a6f630578171 | 22,964 |
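# Editor's note: the 6k +/- 1 stride above works because every prime > 3 sits
# next to a multiple of 6 (6k, 6k+2, 6k+3 and 6k+4 are all divisible by 2 or
# 3). A quick sanity check against a brute-force reference:
def _is_prime_naive(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, n))

assert all(is_prime(n) == _is_prime_naive(n) for n in range(200))
print([n for n in range(30) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]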
import constants as c
import cPickle
def get_word_prob():
"""Returns the probabilities of all the words in the mechanical turk video labels.
"""
data = cPickle.load(open(c.datafile)) # Read in the words from the labels
wordcount = dict()
totalcount = 0
for label in data:
for word in label:
totalcount += 1
if word in wordcount:
wordcount[word] += 1
else:
wordcount[word] = 1
wordprob = dict([(word, float(wc)/totalcount) for word, wc in wordcount.items()])
return wordprob | c9f137ad4e844ff3cea3c6f9b1d64e9422359b79 | 22,965 |
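# Editor's sketch: the dict-based counting loop above is the classic idiom;
# the same probabilities can be computed with collections.Counter (assuming
# `data` is an iterable of token lists, as in get_word_prob):
from collections import Counter

def get_word_prob_counter(data):
    counts = Counter(word for label in data for word in label)
    total = sum(counts.values())
    return {word: count / total for word, count in counts.items()}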
def angDistance(ra, dec, df, raCol='fieldRA', decCol='fieldDec'):
"""
"""
df['dist'] = angSep(ra, dec, df[raCol], df[decCol])
idx = df.dist.idxmin()
rval = df.loc[idx]
df.drop('dist', axis=1, inplace=True)
return rval | 9e88711ff33a7ac1a223608ea5441e1cfdbb7a01 | 22,966 |
def offer_in_influencing_offers(offerId, influencing_offers):
"""
Find if a passed offerId is in the influencing_offers list
Parameters
----------
offerId: Offer Id from portfolio dataframe.
influencing_offers : List of offers found for a customer
Returns
-------
1 if offer is found 0 if not found
"""
if (offerId in influencing_offers):
return 1
else:
return 0 | 81c4a8bcb7432222a1fc5175449192681002539c | 22,967 |
from vortex.test import TestTuple
from vortex.test import VortexJSTupleLoaderTestHandler
def setupVortexServer(portNum=8345, wsPortNum=8344):
""" Setup Site
Sets up the web site to listen for connections and serve the site.
Supports customisation of resources based on user details
@return: Port object
"""
defer.setDebugging(True)
# Register the test tuple
TestTuple.__unused = False # Crap code
VortexJSTupleLoaderTestHandler.__unused = False # Crap code
rootResource = TestRootResource()
VortexFactory.createServer("default", rootResource)
VortexFactory.createWebsocketServer("default", wsPortNum)
# rootResource.putChild(b"vortex", VortexResource())
site = server.Site(rootResource)
site.protocol = HTTPChannel
port = reactor.listenTCP(portNum, site).port
# ip = subprocess.getoutput("/sbin/ifconfig").split("\n")[1].split()[1][5:]
logger.info('VortexServer test is alive and listening on port %s', port)
logger.info('VortexServerWebsocket test is alive and listening on port %s', wsPortNum)
logger.debug("Logging debug messages enabled")
NotifyTestTimer.startTupleUpdateNotifyer()
return port | 1cfa947cea049afeee6132b57cd0e918664e03ef | 22,968 |
def identity_block(filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
        filters: integer, number of filters for the first and second conv layers; the third conv layer uses double this value
stage: integer, current stage label, used for generating layer names
block: integer, current block label, used for generating layer names
# Returns
Output layer for the block.
"""
def layer(input_tensor):
conv_params = get_conv_params()
bn_params = get_bn_params()
conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
x = Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(input_tensor)
x = BatchNormalization(name=bn_name + '1', **bn_params)(x)
x = Activation('relu', name=relu_name + '1')(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = GroupConv2D(filters, (3, 3), conv_params, conv_name + '2')(x)
x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
x = Activation('relu', name=relu_name + '2')(x)
x = Conv2D(filters * 2, (1, 1), name=conv_name + '3', **conv_params)(x)
x = BatchNormalization(name=bn_name + '3', **bn_params)(x)
x = Add()([x, input_tensor])
x = Activation('relu', name=relu_name)(x)
return x
return layer | 43eb5d56a83d24db9bd60aabf1e7dbd601a093cb | 22,969 |
import psutil
def filebeat_service_running():
"""
Checks if the filebeat service is currently running on the OS.
:return: True if filebeat service detected and running, False otherwise.
"""
result = False
try:
filebeat_service = psutil.win_service_get('filebeat')
filebeat_service_info = filebeat_service.as_dict()
if filebeat_service_info['status'] == 'running':
result = True
except psutil.NoSuchProcess:
return result
return result | 51f9bc865b4f7d2de760fcc6952755b5c7c9106a | 22,970 |
def unhandled_request_message(request, cassette):
"""Generate exception for unhandled requests."""
return UNHANDLED_REQUEST_EXCEPTION.format(
url=request.url, cassette_file_path=cassette.cassette_name,
cassette_record_mode=cassette.record_mode,
cassette_match_options=cassette.match_options
) | dcbfec51a88d3ad62395f48c7c046400177c07fd | 22,971 |
from django.contrib.auth import logout as auth_logout
def logout(request):
"""
Logs out the user and displays 'You are logged out' message.
"""
if request.method == 'GET':
return _new_api_403()
auth_logout(request) | 7e975fdd68295e893d8f14321f722e941833b872 | 22,972 |
def compara_dv(cpf, primeiro_dv, segundo_dv):
"""Valida se dígitos verificadores calculados são iguais aos inseridos."""
return "válido" if primeiro_dv == int(cpf[9]) and segundo_dv == int(cpf[10]) else "inválido" | 4b1794f466ce8c00e91c8c5f281996ea262591f8 | 22,973 |
def write_file(file_name, data, line_length):
    """ Writes the results to a text file using a name based on file_name
        input: file_name (string), data (list of (header, sequence) tuples), line_length (int)
        returns: int -- number of records written
    """
    pos = file_name.rfind('.')
    fn_o = file_name[:pos] + '.OUT' + file_name[pos:]
    with open(fn_o, "w") as f:
        for fsn, sequence in data:
            f.write(fsn + '\n')
            # A line_length of 0 means "write the whole sequence on one line"
            l_length = len(sequence) if line_length == 0 else line_length
            for p in range(0, len(sequence), l_length):
                f.write(sequence[p:p+l_length] + '\n')
    return len(data)
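# Example usage: write two FASTA-style records, wrapping sequences at 10
# characters per line. The output name gains an '.OUT' infix: genes.OUT.fa.
records = [(">seq1", "ACGTACGTACGTACGT"), (">seq2", "TTTTCCCC")]
n = write_file("genes.fa", records, 10)
print(n)  # 2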
def handle_duplicates(df, cutoff=5, agg_source_col='multiple'):
"""Aggregates duplicate measurements in a DataFrame.
Parameters
----------
df : pandas DataFrame
DataFrame with required columns: 'smiles', 'solvent', 'peakwavs_max'
cutoff : int
Wavelength cutoff in nm. Duplicate measurements of the same smiles-solvent
pair with standard deviation less than cutoff are averaged. Those with
standard deviation greater than cutoff are dropped.
Returns
-------
df : pandas DataFrame
An updated DataFrame with duplicates aggregated or removed
"""
col_names = ['smiles', 'solvent'] + target_names + ['source']
cols = [x for x in df.columns if x not in col_names]
agg_dict = {}
for property in target_names:
agg_dict[property] = ['mean','std']
if agg_source_col=='multiple':
        agg_dict['source'] = lambda x: 'multiple' if len(x) > 1 else x  # no trailing comma: a tuple here would break .agg
elif agg_source_col=='random':
np.random.seed(0)
agg_dict['source'] = np.random.choice
for col in cols:
agg_dict[col] = 'mean'
# For all smiles+solvent pairs, find mean and std of target property/properties
# If std > cutoff, drop; elif std <= cutoff, take mean
df = df.groupby(['smiles','solvent']).agg(agg_dict).reset_index()
for property in target_names:
high_std_idx = df[df[property]['std']>cutoff].index
df.drop(index=high_std_idx, inplace=True)
df.drop(columns='std', level=1, inplace=True)
df.columns = df.columns.get_level_values(0)
return df | 0f37ba0256d3a73ebc86d628b65d054c049a7456 | 22,975 |
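# Editor's mini-demo of the aggregation idea above on made-up data: duplicate
# (smiles, solvent) measurements are averaged when their spread is within the
# cutoff and dropped otherwise.
import pandas as pd

demo = pd.DataFrame({
    "smiles":  ["C1=CC=CC=C1", "C1=CC=CC=C1", "CCO", "CCO"],
    "solvent": ["water", "water", "water", "water"],
    "peakwavs_max": [500.0, 502.0, 400.0, 450.0],  # the second pair disagrees
})
g = demo.groupby(["smiles", "solvent"])["peakwavs_max"].agg(["mean", "std"]).reset_index()
print(g[g["std"] <= 5].drop(columns="std"))  # only benzene/water survives, at 501.0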
from sklearn.model_selection import train_test_split
def splitData(features, target, trainFraction=0.25):
"""
Split the data into test and train data
Inputs:
> features: the model feature data (DataFrame)
> target: the target data (Series)
> trainFraction (0.25 by default): fraction of events to use for training
Outputs:
> Training feature data (DataFrame), Testing feature data (DataFrame), Training target data (Series), Testing target data (Series)
"""
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=1-trainFraction, random_state=42)
return X_train, X_test, y_train, y_test | b3dba6e5b1082062995c4272c7e42fe24c7c8712 | 22,976 |
def poisson_moment( k, n):
"""
    returns the raw moment E[X**n] for X ~ Poisson with mean k
CURRENTLY A SET OF HARD CODED EXPRESSIONS! VERY FRAGILE!
--> would be *much* better if we could do this algorithmically
"""
if n==0:
return 1
elif n==1:
return k
elif n==2:
return k**2 + k
elif n==3:
return k**3 + 3*k**2 + k
elif n==4:
return k**4 + 6*k**3 + 7*k**2 + k
elif n==5:
return k**5 + 10*k**4 + 25*k**3 + 15*k**2 + k
elif n==6:
return k**6 + 15*k**5 + 65*k**4 + 90*k**3 + 31*k**2 + k
elif n==7:
return k**7 + 21*k**6 + 140*k**5 + 350*k**4 + 301*k**3 + 63*k**2 + k
elif n==8:
        return k**8 + 28*k**7 + 266*k**6 + 1050*k**5 + 1701*k**4 + 966*k**3 + 127*k**2 + k
else:
raise NotImplementedError('currently only support n<=8') | d2af07d550b0cf6ac9a410296b4ec12c78cc1505 | 22,977 |
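# Editor's sketch of the algorithmic version the docstring asks for: the raw
# Poisson moments are Touchard polynomials, E[X**n] = sum_j S(n, j) * k**j,
# with S(n, j) the Stirling numbers of the second kind.
def poisson_moment_general(k, n):
    # Stirling numbers via the recurrence S(n, j) = j*S(n-1, j) + S(n-1, j-1)
    stirling = [[0] * (n + 1) for _ in range(n + 1)]
    stirling[0][0] = 1
    for row in range(1, n + 1):
        for j in range(1, row + 1):
            stirling[row][j] = j * stirling[row - 1][j] + stirling[row - 1][j - 1]
    return sum(stirling[n][j] * k**j for j in range(n + 1))

# Agrees with the hard-coded expressions, e.g. the n == 4 case:
assert poisson_moment_general(2, 4) == 2**4 + 6*2**3 + 7*2**2 + 2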
import numpy as np
def drug_encoder(input_smiles):
"""
Drug Encoder
Args:
input_smiles: input drug sequence.
Returns:
v_d: padded drug sequence.
temp_mask_d: masked drug sequence.
"""
temp_d = drug_bpe.process_line(input_smiles).split()
try:
idx_d = np.asarray([drug_idx[i] for i in temp_d])
    except KeyError:
        # Unknown sub-word token: fall back to index 0 (padding)
        idx_d = np.array([0])
flag = len(idx_d)
if flag < D_MAX:
v_d = np.pad(idx_d, (0, D_MAX - flag), 'constant', constant_values=0)
temp_mask_d = [1] * flag + [0] * (D_MAX - flag)
else:
v_d = idx_d[:D_MAX]
temp_mask_d = [1] * D_MAX
return v_d, np.asarray(temp_mask_d) | 79f0e391e5cd72f981b9580d105ac41cc53d5f63 | 22,978 |
import os
def load_mapping_files():
"""Load local and remote mapping files."""
mappings = {}
local = ["properties", "countries", "professions",
"latin_countries", "latin_languages"]
remote = ["selibr"]
for title in local:
f = os.path.join(MAPPINGS, '{}.json'.format(title))
mappings[title] = utils.load_json(f)
for title in remote:
mappings[title] = utils.get_wd_items_using_prop(
mappings["properties"][title])
print("Loaded local mappings: {}.".format(", ".join(local)))
print("Loaded remote mappings: {}.".format(", ".join(remote)))
return mappings | 443938c33c725723d6ce047630706171c11c2cbe | 22,979 |
def optimise_acqu_func_mledr(acqu_func, bounds, X_ob, func_gradient=True, gridSize=10000, n_start=5):
"""
Optimise acquisition function built on GP- model with learning dr
:param acqu_func: acquisition function
:param bounds: input space bounds
:param X_ob: observed input data
:param func_gradient: whether to use the acquisition function gradient in optimisation
:param gridSize: random grid size
:param n_start: the top n_start points in the random grid search from which we do gradient-based local optimisation
:return np.array([opt_location]): global optimum input
:return f_opt: global optimum
"""
# Turn the acquisition function to be - acqu_func for minimisation
target_func = lambda x: - acqu_func._compute_acq(x)
    # Define a new function combining the acquisition function and its derivative
def target_func_with_gradient(x):
acqu_f, dacqu_f = acqu_func._compute_acq_withGradients(x)
return -acqu_f, -dacqu_f
# Define bounds for the local optimisers based on the optimal dr
nchannel = acqu_func.model.nchannel
d = acqu_func.model.opt_dr
d_vector = int(acqu_func.model.opt_dr ** 2 * nchannel)
bounds = np.vstack([[-1, 1]] * d_vector)
# Project X_ob to optimal dr learnt
h_d = int(X_ob.shape[1] / acqu_func.model.nchannel)
X_ob_d_r = downsample_projection(acqu_func.model.dim_reduction, X_ob, int(d ** 2), h_d, nchannel=nchannel,
align_corners=True)
# Create grid for random search but split the grid into n_batches to avoid memory overflow
good_results_list = []
random_starts_candidates_list = []
n_batch = 5
gridSize_sub = int(gridSize / n_batch)
for x_grid_idx in range(n_batch):
Xgrid_sub = np.tile(bounds[:, 0], (gridSize_sub, 1)) + np.tile((bounds[:, 1] - bounds[:, 0]),
(gridSize_sub, 1)) * np.random.rand(gridSize_sub,
d_vector)
if x_grid_idx == 0:
Xgrid_sub = np.vstack((Xgrid_sub, X_ob_d_r))
results = target_func(Xgrid_sub)
        top_candidates_sub = results.flatten().argsort()[:5]  # the 5 smallest values in this sub-grid, in ascending order
random_starts_candidates = Xgrid_sub[top_candidates_sub]
good_results = results[top_candidates_sub]
random_starts_candidates_list.append(random_starts_candidates)
good_results_list.append(good_results)
# Find the top n_start candidates from random grid search to perform local optimisation
results = np.vstack(good_results_list)
X_random_starts = np.vstack(random_starts_candidates_list)
top_candidates_idx = results.flatten().argsort()[
:n_start] # give the smallest n_start values in the ascending order
random_starts = X_random_starts[top_candidates_idx]
f_min = results[top_candidates_idx[0]]
opt_location = random_starts[0]
# Perform multi-start gradient-based optimisation
for random_start in random_starts:
if func_gradient:
x, f_at_x, info = fmin_l_bfgs_b(target_func_with_gradient, random_start, bounds=bounds,
approx_grad=False, maxiter=5000)
else:
x, f_at_x, info = fmin_l_bfgs_b(target_func, random_start, bounds=bounds,
approx_grad=True, maxiter=5000)
if f_at_x < f_min:
f_min = f_at_x
opt_location = x
f_opt = -f_min
return np.array([opt_location]), f_opt | 8a59f9f3c4b7b55a4ae56da93eed1c9820363ef6 | 22,980 |
def get_path_from_pc_name(pc_name):
"""Find out path of a template
Parameters
----------
pc_name : string
Name of template.
Returns
-------
tplPath : string
Path of template
"""
tplPath = pc_name + '.json'
# change path to template if in subdir
for i in pcTplEnv.list_templates(filter_func=filter_func):
if i.split('/')[-1] == tplPath:
tplPath = i
return tplPath | f2ee20f9f8728d672bb0658e80f9f04f2c9f0c11 | 22,981 |
def eq(*, alpha=None, omega):
"""Define dyadic comparison function equal to.
Dyadic case:
3 = 2 3 4
0 1 0
"""
return int(alpha == omega) | 1f8d826711e9d24a3b05de5f42a99b36744f4f38 | 22,982 |
import calendar
def generate_days(year):
"""Generates all tuples (YYYY, MM, DD) of days in a year
"""
cal = calendar.Calendar()
days = []
for m in range(1,13):
days.extend(list(cal.itermonthdays3(year, m)))
days = [d for d in set(days) if d[0] == year]
days.sort()
return days | 6d87910572957d21c9d5df668dfb5f2d02627817 | 22,983 |
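# Example usage: the returned list covers every day of the year, in order.
days_2024 = generate_days(2024)
print(len(days_2024))  # 366 (2024 is a leap year)
print(days_2024[0])    # (2024, 1, 1)
print(days_2024[-1])   # (2024, 12, 31)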
import requests
import json
def nounClassifier(word):
"""Classifies noun as actor o object
Parameters
----------
word : str
Lematized noun to be classified (case-insensitive).
"""
word = word.lower()
response_raw = requests.get(
f"{API_URL}senses/search?lemma={word}&&&partOfSpeech=noun&&&&&&"
)
response = json.loads(response_raw.content)
response = [
item for item in response["content"] if item["lemma"]["word"].lower() == word
]
if len(response) == 0:
return None
if any(
item["domain"]["name"][item["domain"]["name"].rfind("_") + 1 :] in ACTOR_DOMAINS
for item in response
):
return IGElement.ACTOR
else:
return IGElement.OBJECT | aef33226b956a0d7b9fcb8b1b751d5a11e9136c4 | 22,984 |
def svn_repos_post_lock_hook(*args):
"""svn_repos_post_lock_hook(svn_repos_t repos, apr_pool_t pool) -> char"""
return _repos.svn_repos_post_lock_hook(*args) | 56bdafc41fa76d2a4d2f5e6b213fa86e8ca9416b | 22,985 |
def libdmtx_function(fname, restype, *args):
"""Returns a foreign function exported by `libdmtx`.
Args:
fname (:obj:`str`): Name of the exported function as string.
restype (:obj:): Return type - one of the `ctypes` primitive C data
types.
*args: Arguments - a sequence of `ctypes` primitive C data types.
Returns:
        ctypes function pointer: A wrapper around the function.
"""
prototype = CFUNCTYPE(restype, *args)
return prototype((fname, load_libdmtx())) | ed5f39d435aae453a0aeb8855fc0a21e1db334b8 | 22,986 |
from typing import Any
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
control_unit: ControlUnit = hass.data[DOMAIN][config_entry.entry_id]
diag: dict[str, Any] = {
"config": async_redact_data(config_entry.as_dict(), REDACT_CONFIG)
}
platform_stats, device_types = control_unit.async_get_entity_stats()
diag["platform_stats"] = platform_stats
diag["devices"] = device_types
return diag | ac4495e49745f9211a32cfaf3a15c03203282e50 | 22,987 |
async def instance_set_name_inurl(cluster_id: str, vm_uuid: str, new_name: str):
""" Set Instance (VM/Template) Name """
try:
try:
session = create_session(
_id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
except KeyError as key_error:
raise HTTPException(
status_code=400, detail=f"{key_error} is not a valid path"
)
_vm: VM = VM.get_by_uuid(session=session, uuid=vm_uuid)
if _vm is not None:
ret = dict(success=_vm.set_name(new_name))
else:
ret = dict(success=False)
session.xenapi.session.logout()
return ret
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
except RemoteDisconnected as rd_error:
raise HTTPException(status_code=500, detail=rd_error.strerror) | 3c942af4a57dad0beaef6f629d4b7900421eadbd | 22,988 |
def key_released(key):
"""
Takes a key, that's either a keycode or a character,
and says if it was released this frame.
"""
keycode = _to_keycode(key)
return (keycode not in current_frame_held_buttons) and \
(keycode in last_frame_held_buttons) | 4cb66ba924aae909f20db59eb2aef0ccc77d2860 | 22,989 |
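# Editor's sketch: the released-this-frame test relies on two snapshots of
# the held keys. The per-frame bookkeeping implied by those globals might
# look like this (hypothetical names follow the record above):
last_frame_held_buttons = set()
current_frame_held_buttons = set()

def end_frame(now_held_keycodes):
    """Rotate the snapshots once per frame, after input is polled."""
    global last_frame_held_buttons, current_frame_held_buttons
    last_frame_held_buttons = current_frame_held_buttons
    current_frame_held_buttons = set(now_held_keycodes)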
def trim_snakes_old(lcs,ref,Nb,Ne,dat,Mb,Me):
"""Previously found matches can cause problems if they are not optimal.
In such a case sticking with the matches as found prevents subsequent
more advanced diff routines from recovering from an early sub-optimal
choice. To counter this all snakes and pseudo-snakes are trimmed down
such that they involve whole lines only.
The process is:
1. Merge subsequent snakes to build a list in which each pair of
snakes is separated by a non-empty section of mismatching tokens.
2. Trim each snake by increasing the starting point to the first token
on the next line, and decreasing the end point to the last token on
the previous line. If as a result the begin token exceeds the end
token then eliminate the snake.
The routine returns the revised snake list.
"""
#
# Collapse the snake list by merging adjacent snakes.
#
nsnake = len(lcs)
isnake = 0
if nsnake > 0:
lcs_tmp = []
(xi1,yj1,xi2,yj2,itype) = lcs[isnake]
isnake = isnake + 1
while (isnake < nsnake):
(xi3,yj3,xi4,yj4,itype) = lcs[isnake]
isnake = isnake + 1
if (xi2+1 == xi3 and yj2+1 == yj3):
#
# This snake continues from the previous one so merge the two.
#
xi2 = xi4
yj2 = yj4
#
else:
#
# This snake is separated from the previous one so store the
# previous one and restart the merge procedure.
#
lcs_tmp.append((xi1,yj1,xi2,yj2,itype))
xi1 = xi3
yj1 = yj3
xi2 = xi4
yj2 = yj4
#
# Store the last snake.
#
lcs_tmp.append((xi1,yj1,xi2,yj2,itype))
lcs = lcs_tmp
#
# Trim the snakes to precisely matching lines.
#
nsnake = len(lcs)
isnake = 0
lcs_tmp = []
txi = 0
tyj = 0
while (isnake < nsnake):
(xi1,yj1,xi2,yj2,itype) = lcs[isnake]
isnake = isnake + 1
#
# Move the starting point to the first token on the next line unless
# the token is the first token on the current line.
#
lxi1 = toldiff_tokens.tokenno2lineno(dat,xi1)
txi1 = toldiff_tokens.lineno2tokenno(dat,lxi1)
lyj1 = toldiff_tokens.tokenno2lineno(ref,yj1)
tyj1 = toldiff_tokens.lineno2tokenno(ref,lyj1)
if txi1 != xi1 or tyj1 != yj1:
xi1 = toldiff_tokens.lineno2tokenno(dat,lxi1+1)
yj1 = toldiff_tokens.lineno2tokenno(ref,lyj1+1)
#
# Move the end point to the last token on the previous line unless
# the token is the last token on the current line.
#
lxi2 = toldiff_tokens.tokenno2lineno(dat,xi2)
txi2 = toldiff_tokens.lineno2tokenno(dat,lxi2+1)-1
lyj2 = toldiff_tokens.tokenno2lineno(ref,yj2)
tyj2 = toldiff_tokens.lineno2tokenno(ref,lyj2+1)-1
if txi2 != xi2 or tyj2 != yj2:
xi2 = toldiff_tokens.lineno2tokenno(dat,lxi2)-1
yj2 = toldiff_tokens.lineno2tokenno(ref,lyj2)-1
if xi1-1 <= xi2 and yj1-1 <= yj2 and (xi1 > txi or yj1 > tyj):
#
# There is a non-empty snake remaining so store it.
#
lcs_tmp.append((xi1,yj1,xi2,yj2,itype))
txi = max(xi1,xi2)
tyj = max(yj1,yj2)
#
lcs = lcs_tmp
return lcs | 0bf078ca2198bcf0c25e8a925bd0c096cafa4797 | 22,990 |
import asyncio
async def start(actual_coroutine):
"""
Start the testing coroutine and wait 1 second for it to complete.
:raises asyncio.CancelledError when the coroutine fails to finish its work
in 1 second.
:returns: the return value of the actual_coroutine.
:rtype: Any
"""
try:
return await asyncio.wait_for(actual_coroutine, 2)
except asyncio.CancelledError:
pass | 26e3737091ca798dbf8c0f6f2a18a1de4b0ec42b | 22,991 |
def get_node(path):
"""Returns a :class:`Node` instance at ``path`` (relative to the current site) or ``None``."""
try:
current_site = Site.objects.get_current()
except Site.DoesNotExist:
current_site = None
trailing_slash = False
if path[-1] == '/':
trailing_slash = True
try:
node, subpath = Node.objects.get_with_path(path, root=getattr(current_site, 'root_node', None), absolute_result=False)
except Node.DoesNotExist:
return None
if subpath is None:
subpath = ""
subpath = "/" + subpath
if trailing_slash and subpath[-1] != "/":
subpath += "/"
node._path = path
node._subpath = subpath
return node | 516460d05df4139ce5354f2c3ef5cf948d4b8213 | 22,992 |
from datetime import datetime
def new_post(blog_id, username, password, post, publish):
"""
metaWeblog.newPost(blog_id, username, password, post, publish)
=> post_id
"""
user = authenticate(username, password, 'zinnia.add_entry')
if post.get('dateCreated'):
creation_date = datetime.strptime(
post['dateCreated'].value[:18], '%Y-%m-%dT%H:%M:%S')
if settings.USE_TZ:
creation_date = timezone.make_aware(
creation_date, timezone.utc)
else:
creation_date = timezone.now()
entry_dict = {'title': post['title'],
'content': post['description'],
'excerpt': post.get('mt_excerpt', ''),
'publication_date': creation_date,
'creation_date': creation_date,
'last_update': creation_date,
'comment_enabled': post.get('mt_allow_comments', 1) == 1,
'pingback_enabled': post.get('mt_allow_pings', 1) == 1,
'trackback_enabled': post.get('mt_allow_pings', 1) == 1,
'featured': post.get('sticky', 0) == 1,
'tags': 'mt_keywords' in post and post['mt_keywords'] or '',
'slug': 'wp_slug' in post and post['wp_slug'] or slugify(
post['title']),
'password': post.get('wp_password', '')}
if user.has_perm('zinnia.can_change_status'):
entry_dict['status'] = publish and PUBLISHED or DRAFT
entry = Entry.objects.create(**entry_dict)
author = user
if 'wp_author_id' in post and user.has_perm('zinnia.can_change_author'):
if int(post['wp_author_id']) != user.pk:
author = Author.objects.get(pk=post['wp_author_id'])
entry.authors.add(author)
entry.sites.add(Site.objects.get_current())
if 'categories' in post:
entry.categories.add(*[
Category.objects.get_or_create(
title=cat, slug=slugify(cat))[0]
for cat in post['categories']])
return entry.pk | 4bdd8464458bef5797854776222a178e891d6346 | 22,993 |
def upload(userid, filedata):
"""
Creates a preview-size copy of an uploaded image file for a new avatar
selection file.
"""
if filedata:
media_item = media.make_resized_media_item(filedata, (600, 500), 'FileType')
orm.UserMediaLink.make_or_replace_link(userid, 'avatar-source', media_item)
else:
orm.UserMediaLink.clear_link(userid, 'avatar')
return bool(filedata) | 3ffd3d5a26c35f20e5a3885ca597d8c0182ebc8a | 22,994 |
import os
def get_account_2_json(usr, pwd):
"""
将从环境变量获取的账号密码拼接成json
:return: 字典
"""
username = os.popen("env | grep {}".format(usr))
password = os.popen("env | grep {}".format(pwd))
username_list = username.read().split()
password_list = password.read().split()
username_dict = str2dict(";".join(username_list))
password_dict = str2dict(";".join(password_list))
account_dict = {}
for usr_key, pwd_key in zip(username_dict,password_dict):
account_dict[usr_key] = {"email": username_dict[usr_key], "password": password_dict[pwd_key]}
return account_dict | 9eabc688a5da441cb6701eb62b93133ce9f73c56 | 22,995 |
def time_dconv_bn_nolinear(nb_filter, nb_row, nb_col,
stride=(2, 2), activation="relu"):
"""
    Create a time-distributed deconvolution + batch-norm block for decoders.
    Parameters:
    ---------
    nb_filter : int
        number of filters to use in the convolution layer.
    nb_row : int
        number of kernel rows
    nb_col : int
        number of kernel columns
    stride : tuple of int
        upsampling stride
Returns:
---------
dconv_bn
"""
def _dconv_bn(x):
x = TimeDistributed(UnPooling2D(size=stride))(x)
x = TimeDistributed(ReflectionPadding2D(padding=(int(nb_row/2),
int(nb_col/2))))(x)
x = TimeDistributed(Conv2D(nb_filter, (nb_row, nb_col),
padding='valid',
kernel_regularizer=regularizers.
l2(reg_weights)))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Activation(activation))(x)
return x
return _dconv_bn | 016082d3c09388a4ff9f8add5cb84a63a65775e8 | 22,996 |
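# Editor's sketch of the "layer factory" pattern used above: hyper-parameters
# are captured in a closure and the returned callable is applied to a 5-D
# (batch, time, H, W, C) tensor. Self-contained variant with standard Keras
# layers only, since the custom UnPooling2D/ReflectionPadding2D are not shown:
from tensorflow.keras import layers, Input, Model

def conv_bn_relu(nb_filter, nb_row, nb_col):
    def _block(x):
        x = layers.TimeDistributed(layers.Conv2D(nb_filter, (nb_row, nb_col), padding='same'))(x)
        x = layers.TimeDistributed(layers.BatchNormalization())(x)
        x = layers.TimeDistributed(layers.Activation('relu'))(x)
        return x
    return _block

inp = Input(shape=(8, 32, 32, 3))   # 8 timesteps of 32x32 RGB frames
out = conv_bn_relu(16, 3, 3)(inp)
model = Model(inp, out)             # output shape: (None, 8, 32, 32, 16)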
import random
def ___generate_random_row_major_GM___(i, j, s=None):
"""Make a random row major sparse matrix of shape (i,j) at sparsity=s.
:param i:
:param j:
:param s:
:return:
"""
if s is None:
s = random.uniform(0, 0.1)
if s < 0.02: s = 0
if rAnk == mAster_rank:
random_list = random.sample(range(0, i), i)
distribution = [i // sIze + (1 if x < i % sIze else 0) for x in range(sIze)]
no_empty_rows = list()
_ = 0
for r in range(sIze):
no_empty_rows.append(random_list[_:_+distribution[r]])
_ += distribution[r]
else:
no_empty_rows = None
no_empty_rows = cOmm.scatter(no_empty_rows, root=mAster_rank)
_ = spspa.random(i, j, s, format='csr')
A = spspa.lil_matrix((i,j))
A[no_empty_rows,:] = _[no_empty_rows,:]
A = A.tocsr()
A = GlobalMatrix(A)
A.IS.regularly_distributed = 'row'
A.___PRIVATE_self_regularity_checker___()
return A | 59eca064d240dc03fdbdc8d1807dbbbb996239d4 | 22,997 |
from typing import Tuple
from typing import Any
def parse_tuple(value: Tuple[Any, ...]) -> RGBA:
"""
Parse a tuple or list as a color.
"""
if len(value) == 3:
r, g, b = [parse_color_value(v) for v in value]
return RGBA(r, g, b, None)
elif len(value) == 4:
r, g, b = [parse_color_value(v) for v in value[:3]]
return RGBA(r, g, b, parse_float_alpha(value[3]))
else:
raise ColorError(reason='tuples must have length 3 or 4') | 0766bd7189c5e0cd383d94944dacecd5fbef1320 | 22,998 |
import pathlib
from typing import Optional
def find_path(
start_path: pathlib.Path = pathlib.Path("."),
) -> Optional[pathlib.Path]:
"""Traverse the file system looking for the config file .craftier.ini.
It will stop earlier at the user's home directory, if it encounters a Git or
Mercurial directory, or if it traversed too deep.
"""
home = pathlib.Path.home()
path = start_path.resolve()
for path in [path, *path.parents][:_MAX_SEARCH_DEPTH]:
config_file = path / CONFIG_FILENAME
if config_file.is_file():
return config_file
for stop_dir in _STOP_SEARCH_ON_DIRS:
if (path / stop_dir).is_dir():
return None
if path == home:
return None
return None | 00fbfabc8e0c6dd3c23b190e6278f70af566b25f | 22,999 |
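# Hedged usage sketch: starting from a nested project directory (layout is
# hypothetical), find_path walks upward looking for .craftier.ini and stops
# at the home directory, at a VCS root, or after _MAX_SEARCH_DEPTH levels.
config = find_path(pathlib.Path("src/mypackage"))
if config is None:
    print("no .craftier.ini found; falling back to defaults")
else:
    print(f"using config at {config}")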