| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def test_empty_lines():
"""Empty lines are skipped."""
TEST = """
FOO=BAR
BAR=BAZ
"""
with tempfile.NamedTemporaryFile('w', delete=False) as f:
f.write(TEST)
env = core.read_dotenv(f.name)
assert len(env) == 2
assert env['FOO'] == 'BAR'
assert env['BAR'] == 'BAZ'
| 10,700
|
def magerr2Ivar(flux, magErr):
"""
Estimate the inverse variance given flux and magnitude error.
The reason for this is that we need to correct the magnitude or
flux for Galactic extinction.
Parameters
----------
flux : scalar or array of float
        Flux of the object.
magErr : scalar or array of float
Error of magnitude measurements.
"""
fluxErr = flux * ((10.0 ** (magErr/2.5)) - 1.0)
return 1.0 / (fluxErr ** 2.0)
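# A minimal usage sketch, assuming numpy is imported as np: a 0.01 mag error is a
# fractional flux error of 10**(0.01/2.5) - 1 ~= 0.9%, so ivar ~ 1 / (0.009 * flux)**2.
flux = np.array([100.0, 200.0])
mag_err = np.array([0.01, 0.05])
ivar = magerr2Ivar(flux, mag_err)  # element-wise inverse variance of the flux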
| 10,701
|
def create_policy_work_item_linking(repository_id, branch,
blocking, enabled,
branch_match_type='exact',
organization=None, project=None, detect=None):
"""Create work item linking policy.
"""
organization, project = resolve_instance_and_project(
detect=detect, organization=organization, project=project)
policy_client = get_policy_client(organization)
configuration = create_configuration_object(repository_id, branch, blocking, enabled,
'40e92b44-2fe1-4dd6-b3d8-74a9c21d0c6e', [], [], branch_match_type)
return policy_client.create_policy_configuration(configuration=configuration, project=project)
| 10,702
|
def center_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None):
"""
Centers data to have mean zero along axis 0. This is here because
nearly all linear models will want their data to be centered.
If sample_weight is not None, then the weighted mean of X and y
is zero, and not the mean itself
"""
X = as_float_array(X, copy)
if fit_intercept:
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sp.issparse(X):
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
# XXX: currently scaled to variance=n_samples
if normalize:
X_std = np.sqrt(np.sum(X ** 2, axis=0))
X_std[X_std == 0] = 1
X /= X_std
else:
X_std = np.ones(X.shape[1])
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std
| 10,703
|
async def get_ipv4_internet_reachability(host, port, timeout):
"""
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        sock.connect((host, port))
        return True
    except socket.error:
        return False
    finally:
        sock.close()
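# A minimal usage sketch, assuming asyncio is imported; this opens a real TCP
# connection to the public DNS host named in the docstring.
reachable = asyncio.run(get_ipv4_internet_reachability("8.8.8.8", 53, 3))
print("IPv4 internet reachable:", reachable)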
| 10,704
|
def cov_pen(h_j, h_no_j):
"""
Personal implementation of covariance matrix with penalization.
:param h_j:
:param h_no_j:
:return:
"""
final_dim = h_j.shape[1]
cov_matrix = np.empty((final_dim, final_dim))
for row in range(final_dim):
for column in range(final_dim):
h_d = h_j[:, row]
h_d_no_j = h_no_j[:, row]
a = h_d - np.mean(h_d)
if row == column: # Diag
value = np.dot(a.T, a) + np.dot(h_d_no_j.T, h_d_no_j)
else:
h_i = h_j[:, column]
h_i_no_j = h_no_j[:, column]
b = h_i - np.mean(h_i)
value = np.dot(a.T, b) + np.dot(h_d_no_j.T, h_i_no_j)
cov_matrix[row, column] = value
return cov_matrix
| 10,705
|
def get_last_timestamp():
"""
    Get the timestamp of 23:59:59 of the current day.
    :return:
    """
    # timestamp of tomorrow at 00:00
    future_timestamp = get_timestamp(-1)
    # subtract 1 from tomorrow's 00:00 timestamp
last_timestamp = future_timestamp - 1
return last_timestamp
| 10,706
|
def crawl(folder: str, search: str, maxnum: int, num_threads: int, crawlers: List[str] = ['GOOGLE', 'BING', 'BAIDU']) -> Dict[str, str]:
"""Crawl web sites for images"""
print('(1) Crawling ...')
# prepare folders
os.makedirs(folder, exist_ok=True)
sources = {}
for c in crawlers:
print(f' -> {c}', end='', flush=True)
run_command = lambda : crawl_run(c, folder, search, maxnum, num_threads)
runtime = timeit.timeit(run_command, 'gc.enable()', number=1)# / float((10**6))
print(f' ({runtime:.2f} sec)')
return {k: v for k, v in CustomDownloader.registry.items() if k is not None}
| 10,707
|
def _get_all_subclasses(typ, # type: Type[T]
recursive=True, # type: bool
_memo=None # type: Set[Type[Any]]
):
# type: (...) -> Iterable[Type[T]]
"""
    Returns all subclasses of `typ`.
    Warning: this does not support generic types.
    See parsyfiles.get_all_subclasses() if one day generic types are needed (commented lines below).
:param typ:
:param recursive: a boolean indicating whether recursion is needed
:param _memo: internal variable used in recursion to avoid exploring subclasses that were already explored
:return:
"""
_memo = _memo or set()
# if we have collected the subclasses for this already, return
if typ in _memo:
return []
# else remember that we have collected them, and collect them
_memo.add(typ)
# if is_generic_type(typ):
# # We now use get_origin() to also find all the concrete subclasses in case the desired type is a generic
# sub_list = get_origin(typ).__subclasses__()
# else:
sub_list = typ.__subclasses__()
# recurse
result = [] # type: List[Type[T]]
for t in sub_list:
# only keep the origins in the list
# to = get_origin(t) or t
to = t
# noinspection PyBroadException
try:
if to is not typ and to not in result and issubclass(to, typ): # is_subtype(to, typ, bound_typevars={}):
result.append(to)
except Exception:
# catching an error with is_subtype(Dict, Dict[str, int], bound_typevars={})
pass
# recurse
if recursive:
for typpp in sub_list:
for t in _get_all_subclasses(typpp, recursive=True, _memo=_memo):
# unfortunately we have to check 't not in sub_list' because with generics strange things happen
# also is_subtype returns false when the parent is a generic
if t not in sub_list and issubclass(t, typ): # is_subtype(t, typ, bound_typevars={}):
result.append(t)
return result
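# A minimal usage sketch on a small, non-generic class hierarchy (the only case
# the docstring claims to support):
class Base: ...
class Child(Base): ...
class GrandChild(Child): ...
assert set(_get_all_subclasses(Base)) == {Child, GrandChild}
assert set(_get_all_subclasses(Base, recursive=False)) == {Child}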
| 10,708
|
def hdf5_load_frequencies(model, group, encoding):
"""loads the frequencies"""
keys = list(group.keys())
keys.remove('keys')
#spc_ids = _cast(group['keys'])
for freq_id in keys:
ifreq_id = int(freq_id)
cards_group = group[freq_id]
for card_type in cards_group.keys():
sub_group = cards_group[card_type]
#if card_type == 'FREQ':
#mid = _cast(sub_group['mid'])
#else:
fkeys, values = load_cards_from_keys_values(
'frequencies/%s/%s' % (freq_id, card_type),
sub_group, encoding, model.log)
_put_keys_values_into_dict_list(model, 'frequencies', ifreq_id, fkeys, values)
model.card_count[card_type] = len(fkeys)
| 10,709
|
def conditions_summary(conditions):
"""
Return a dict of consumer-level observations, say, for display on a
smart mirror or tablet.
"""
keys = ['timestamp', 'dewpoint', 'barometricPressure', 'windDirection',
'windSpeed', 'windGust', 'precipitationLastHour', 'temperature',
'relativeHumidity', 'heatIndex']
summary = dict()
for key in keys:
try:
summary[key] = conditions['properties'][key]
except Exception as exc:
summary[key] = 'none'
            logging.error('Error trying to read summary for key %s: %s', key, exc)
return summary
| 10,710
|
def InsertOrganisation(cur, con, entity_name: str = "Organisation") -> int:
""" Inserts a new Organisation into the database """
    # Get information about the organisation
print(f"Enter new {entity_name}'s details:")
row = {}
row["Name"] = input(f"Enter the name of the {entity_name}: ") or None
row["Headquarters"] = input(
f"Enter the headquarters of {entity_name} (Optional): ") or None
row["Founded"] = input(
f"Enter the date when the {entity_name} was founded in YYYY-MM-DD format: ") or None
row["Earnings"] = input(
f"Enter earnings of {entity_name} in USD (Optional): ") or 0
# Query to be executed
query = """INSERT INTO Organisations (Name, Headquarters,
Founded, Earnings)
VALUES (%(Name)s, %(Headquarters)s,
%(Founded)s, %(Earnings)s)
"""
print("\nExecuting")
print(query)
# Execute query
cur.execute(query, row)
# Get ID of last inserted organisation
cur.execute("SELECT LAST_INSERT_ID() AS OrganisationID")
return cur.fetchone()["OrganisationID"]
| 10,711
|
def main():
"""Entry point for qchess CLI"""
print('Hello, world!')
| 10,712
|
def _rgb_to_hsv(rgbs):
"""Convert Nx3 or Nx4 rgb to hsv"""
rgbs, n_dim = _check_color_dim(rgbs)
hsvs = list()
for rgb in rgbs:
rgb = rgb[:3] # don't use alpha here
idx = np.argmax(rgb)
val = rgb[idx]
c = val - np.min(rgb)
if c == 0:
hue = 0
sat = 0
else:
if idx == 0: # R == max
hue = ((rgb[1] - rgb[2]) / c) % 6
elif idx == 1: # G == max
hue = (rgb[2] - rgb[0]) / c + 2
else: # B == max
hue = (rgb[0] - rgb[1]) / c + 4
hue *= 60
sat = c / val
hsv = [hue, sat, val]
hsvs.append(hsv)
hsvs = np.array(hsvs, dtype=np.float32)
if n_dim == 4:
        hsvs = np.concatenate((hsvs, rgbs[:, 3:]), axis=1)
return hsvs
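# A minimal sanity check, assuming numpy is imported as np and _check_color_dim is
# available from the same module: pure red maps to hue 0, full saturation, value 1.
hsv = _rgb_to_hsv(np.array([[1.0, 0.0, 0.0]]))
assert np.allclose(hsv, [[0.0, 1.0, 1.0]])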
| 10,713
|
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug("Starting crazy calculations...")
#print("The {}-th Fibonacci number is {}".format(args.n, trade_for_(args.n)))
_logger.info("Script ends here")
| 10,714
|
def main():
"""Call script functions, measure script run-time, query user"""
start_time = time.time()
InformUser()
SecDegEqn()
end_time = time.time()
print("\nSpeed_check: script run time is {} seconds".format(end_time - start_time))
query = input("\nWould you like to run the script again? \n")
if query == "yes":
print("\nRunning the script again \n")
main()
else:
print("\nOk, the script has ended \n")
| 10,715
|
def find_correlation(convergence_data, lens_data, plot_correlation=False, plot_radii=False, impact=False, key=None):
"""Finds the value of the slope for plotting residuals against convergence. Magnitude of slope and error
quantify correlation between the two.
Inputs:
conv -- convergence.
mu_diff -- residuals.
"""
correlations = []
correlation_errs = []
for cone_radius in RADII[29::2]:
SNe_data = find_mu_diff(lens_data, cone_radius=cone_radius, impact=impact, key=key)
redshift_cut = np.logical_or(SNe_data['z'] > 0.2, SNe_data['z'] > 0.4)
mu_diff = SNe_data["mu_diff"][redshift_cut]
if impact:
if key is None:
conv = np.array(convergence_data[f"Radius{str(cone_radius)}"]["SNkappa"])[redshift_cut]
else:
conv = np.array(convergence_data[key][f"Radius{str(cone_radius)}"]["SNkappa"])[redshift_cut]
else:
conv = np.array(convergence_data[f"Radius{str(cone_radius)}"]["SNkappa"])[redshift_cut]
conv_rank = rankdata(conv)
mu_rank = rankdata(mu_diff)
diff = np.abs(conv_rank - mu_rank)
rho = 1 - 6 / (len(conv) * (len(conv) ** 2 - 1)) * np.sum(diff ** 2)
rho_err = np.sqrt((1 - rho ** 2) / (len(conv) - 1))
correlations.append(rho)
correlation_errs.append(rho_err)
if plot_correlation:
edges = np.linspace(-0.0065, 0.011, 6)
bins = (edges[1:] + edges[:-1]) / 2
mean_dmu = []
standard_error = []
for bin in bins:
dmus = []
for kappa, dmu in zip(conv, mu_diff):
                    if bin - 0.007 / 4 < kappa <= bin + 0.007 / 4:
dmus.append(dmu)
mean_dmu.append(np.mean(dmus))
standard_error.append(np.std(dmus) / np.sqrt(len(dmus)))
plt.plot([min(conv), max(conv)], [0, 0], color=grey, linestyle='--')
plt.plot(conv, mu_diff, linestyle='', marker='o', markersize=2, color=colours[0])
plt.errorbar(bins, mean_dmu, standard_error, marker='s', color='r', markersize=3, capsize=3, linestyle='')
plt.xlabel('$\kappa$')
plt.ylabel('$\Delta\mu$')
# plt.xlim([-0.008, 0.011])
# plt.legend(frameon=0, loc='lower right')
# plt.ylim([-0.3, 0.3])
plt.text(0.0038, -0.19, f'$\\rho$ = {round(rho, 3)} $\pm$ {round(rho_err, 3)}', fontsize=16)
# print([convergence_cut[cuts2][i] for i in range(len(convergence_cut[cuts2]))])
# print([mu_diff_cut[cuts2][i] for i in range(len(convergence_cut[cuts2]))])
# print([SNmu_err_cut[cuts2][i] for i in range(len(convergence_cut[cuts2]))])
plt.show()
u_err = [correlations[i] + correlation_errs[i] for i in range(len(correlations))]
d_err = [correlations[i] - correlation_errs[i] for i in range(len(correlations))]
smooth_corr = savgol_filter([correlations[i] for i in range(len(correlations))], 11, 4)
smooth_u_err = savgol_filter(u_err, 11, 4)
smooth_d_err = savgol_filter(d_err, 11, 4)
if plot_radii:
plt.plot([6, 30], [0, 0], color=grey, linestyle='--')
plt.plot(RADII[29::2], smooth_corr, color=colours[0])
plt.plot(RADII[29::2], [correlations[i] for i in range(len(correlations))], marker='x', color=colours[1],
linestyle='')
plt.fill_between(RADII[29::2], smooth_u_err, smooth_d_err, color=colours[0], alpha=0.4)
plt.xlabel('Cone Radius (arcmin)')
plt.ylabel("Spearman's Rank Coefficient")
# plt.xlim([5, 30.1])
# plt.ylim([-0.18, 0.02])
plt.gca().invert_yaxis()
plt.show()
return [correlations, smooth_corr, smooth_u_err, smooth_d_err, np.array(u_err) - np.array(correlations)]
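# A minimal sketch of the rank-correlation formula used above, assuming numpy as np
# and scipy.stats' rankdata/spearmanr are imported: without ties, the closed form
# 1 - 6*sum(d**2)/(n*(n**2 - 1)) matches scipy's Spearman coefficient.
x = np.array([0.1, 0.4, 0.2, 0.9, 0.7])
y = np.array([2.0, 1.0, 3.0, 5.0, 4.0])
d = rankdata(x) - rankdata(y)
rho = 1 - 6 * np.sum(d ** 2) / (len(x) * (len(x) ** 2 - 1))
assert np.isclose(rho, spearmanr(x, y).correlation)  # both give 0.7 here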
| 10,716
|
def ELCE2_null_estimator(p_err, K, rng):
"""
Compute the ELCE^2_u for one bootstrap realization.
Parameters
----------
p_err: numpy-array
one-dimensional probability error vector.
K: numpy-array
evaluated kernel function.
rng: type(np.random.RandomState())
a numpy random function
return
------
float: an unbiased estimate of ELCE^2_u
"""
idx = rng.permutation(len(p_err))
return ELCE2_estimator(K, p_err[idx])
| 10,717
|
def test_check_generic_constraint_rhs_calculation(casefile):
"""Check NEMDE input constraint RHS matches NEMDE solution RHS"""
constraints = (casefile.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('GenericConstraintCollection')
.get('GenericConstraint'))
for i in constraints:
comparison = calculations.check_generic_constraint_rhs_calculation(
data=casefile, constraint_id=i['@ConstraintID'], intervention='0')
assert comparison['abs_difference'] < 0.1
| 10,718
|
def select_from(stdscr, x, y, value, slist, redraw):
"""
Allows user to select from a list of valid options
:param stdscr: The current screen
:param x: The start x position to begin printing
    :param y: The start y position to begin printing
:param value: The current value chosen
:param slist: A list of values to choose from
    :param redraw: Callback used to redraw the screen after a resize
    :return: The selected value from slist
"""
k = 0
padwidth = 100
pad = curses.newpad(1, padwidth)
height, width = stdscr.getmaxyx()
try:
idx = slist.index(value)
except ValueError:
stdscr.clear()
stdscr.refresh()
curses_safe_addstr(stdscr, 0, 0, str(value))
curses_safe_addstr(stdscr, 1, 0, str(type(value)))
curses_safe_addstr(stdscr, 2, 0, ','.join(map(str, slist)))
curses_safe_addstr(stdscr, 3, 0, ','.join(
list(map(str, map(type, slist)))))
stdscr.getch()
stdscr.clear()
stdscr.refresh()
draw_status_bar(stdscr,
"Press 'q' to exit and 'UP' or 'DOWN' to select a value")
while k != KEY_ENTER and k != ord('q'):
pad.clear()
value = str(slist[idx])
if len(value) + x >= width:
value = value[:width - x - 1]
if len(value) > padwidth:
padwidth = len(value) * 2
pad = curses.newpad(1, padwidth)
pad.addstr(0, 0, str(value))
stdscr.move(y, x + len(str(value)))
pad.refresh(0, 0, y, x, y, width - x)
k = stdscr.getch()
if k == curses.KEY_UP and idx > 0:
idx -= 1
elif k == curses.KEY_DOWN and idx < len(slist) - 1:
idx += 1
elif k == curses.KEY_RESIZE:
stdscr.erase()
height, width = stdscr.getmaxyx()
redraw(stdscr)
draw_status_bar(
stdscr,
"Press 'q' to exit and 'UP' or 'DOWN' to select a value")
return slist[idx]
| 10,719
|
def show_errorbox_exception(msg):
"""Show both an error box and raise an Exception"""
show_errorbox(msg)
raise Exception(msg)
| 10,720
|
def describe(r):
"""Return a dictionary with various statistics computed on r:
    mean, variance, skew, kurtosis, entropy, median, mode.
"""
stats = {}
stats['mean'] = r.mean()
stats['variance'] = r.var()
stats['skew'] = skew(r)
stats['kurtosis'] = kurtosis(r)
stats['median'] = np.median(r)
stats['entropy'] = entropy(r)
stats['mode'] = mode(r)
return stats
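# A minimal usage sketch, assuming numpy as np and scipy.stats' skew, kurtosis,
# entropy and mode are imported as used above (entropy treats r as an unnormalized
# distribution, so non-negative data is used here):
r = np.random.default_rng(0).exponential(size=1000)
print(describe(r))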
| 10,721
|
def test_s3_write_output_data(mp_s3_tmpdir):
"""Write and read output."""
output_params = dict(
grid="geodetic", format="PNG", path=mp_s3_tmpdir, pixelbuffer=0, metatiling=1
)
output = png.OutputDataWriter(output_params)
assert output.path == mp_s3_tmpdir
assert output.file_extension == ".png"
tp = BufferedTilePyramid("geodetic")
tile = tp.tile(5, 5, 5)
# get_path
assert output.get_path(tile) == os.path.join(
*[mp_s3_tmpdir, "5", "5", "5" + ".png"]
)
# profile
assert isinstance(output.profile(tile), dict)
# write
data = np.ones((1,) + tile.shape) * 128
output.write(tile, data)
# tiles_exist
assert output.tiles_exist(tile)
# read
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert data.shape[0] == 4
assert not data[0].mask.any()
# empty
empty = output.empty(tile)
assert isinstance(empty, ma.MaskedArray)
assert not empty.any()
| 10,722
|
def value_iteration(env,maxiter):
"""
Just like policy_iteration, this employs a similar approach.
Steps (to iterate over):
1) Find your optimum state_value_function, V(s).
2) Keep iterating until convergence
3) Calculate your optimized policy
Outputs:
- Your final state_value_function, V(s)
- Optimal policy 'pi'
- Average reward vector (see note below)
- List of all value functions for all iterations
NOTE: In order to produce the graph showing average reward over each
iteration, the policy was calculated at each iteration. This is not
normally done for Value Iteration. This will slow down the computation
time for Value iteration. To return to traditional value iteration,
comment out the respective lines and remove the appropriate output
"""
    # initialize the state-value function
V = np.zeros(nS)
V_hm = np.copy(V)
V_hm.resize((1,V_hm.size))
V_hm = V_hm.tolist()
    # initialize a random policy. Comment out for traditional Value_Iteration
policy = np.random.randint(0, 4, nS)
avg_r_VI_mat = []
n_episode = 100
# Iterate over your optimized function, breaking if not changing or difference < tolerance.
for i in range(maxiter):
prev_V = np.copy(V)
# evaluate given policy
difference, V = Optimum_V(env, prev_V, maxiter, gamma)
# improve policy. Comment out to return to traditional Value Iteration
policy = policy_update(env, policy, V, gamma)
#Play episodes based on the current policy. Comment out to return to traditional Value Iteration
wins_VI, total_reward_VI, avg_reward_VI = play_episodes(env, n_episode, policy, random = False)
avg_r_VI_mat.append(avg_reward_VI)
# save value function to list for animation
V_tmp = np.copy(V)
V_tmp = V_tmp.tolist()
V_hm.append(V_tmp)
# if State Value function has not changed over 10 iterations, it has converged.
if i % 10 == 0:
# if values of 'V' not changing after one iteration
if (np.all(np.isclose(V, prev_V))):
print("")
print('No Changes for 10 iterations. Value converged at iteration %d' %(i+1))
break
elif difference < tol:
print('Tolerance reached. Value converged at iteration %d' %(i+1))
break
# Initialize Optimal Policy
optimal_policy = np.zeros(nS, dtype = 'int8')
# Update your optimal policy based on optimal value function 'V'
optimal_policy = policy_update(env, optimal_policy, V, gamma)
return V, optimal_policy, avg_r_VI_mat, V_hm
| 10,723
|
def is_excluded(branch_name):
"""
We may want to explicitly exclude some BRANCHES from the list
of BRANCHES to be merged, check if the branch name supplied
is excluded if yes then do not perform merging into it.
Args:
branch_name: The branch to check if to be incorporated
in branching or not.
    Returns:
True if branch should be excluded, in this case no merges
will be performed into this branch, otherwise False.
"""
return branch_name in BRANCHES_TO_EXCLUDE
| 10,724
|
def configure_logging(enable_py_logger, level=logging.ERROR):
"""
Configure libyang logging behaviour.
:arg bool enable_py_logger:
If False, configure libyang to store the errors in the context until
they are consumed when Context.error() is called. This is the default
behaviour.
If True, libyang log messages will be sent to the python 'libyang'
logger and will be processed according to the python logging
configuration. Note that by default, the 'libyang' python logger is
created with a NullHandler() which means that all messages are lost
until another handler is configured for that logger.
:arg int level:
Python logging level. By default only ERROR messages are stored/logged.
"""
for ly_lvl, py_lvl in LOG_LEVELS.items():
if py_lvl == level:
lib.ly_verb(ly_lvl)
break
if enable_py_logger:
        lib.ly_log_options(lib.LY_LOLOG | lib.LY_LOSTORE)
lib.ly_set_log_clb(lib.lypy_log_cb, True)
else:
lib.ly_log_options(lib.LY_LOSTORE)
lib.ly_set_log_clb(ffi.NULL, False)
| 10,725
|
def test_RNVPVelocityEGCL():
"""
    Test `RNVPVelocityEGCL` in the aperiodic case: check that log_s scales are invariant to
    rotations/translations while translation elements are equivariant.
"""
render_RNVPVelocityEGCL()
| 10,726
|
def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target):
"""Schedule definition of conv2d_winograd_without_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_without_weight_transform(outs)
| 10,727
|
def preprocess_set_ica_comp_fif_to_ts(fif_file, subject_id, n_comp_exclude,
is_sensor_space):
"""Preprocess ICA fif to ts."""
import os
import sys
import mne
from mne.preprocessing import read_ica
from nipype.utils.filemanip import split_filename as split_f
from ephypype.preproc import create_ts
subj_path, basename, ext = split_f(fif_file)
(data_path, sbj_name) = os.path.split(subj_path)
print(('*** SBJ %s' % subject_id + '***'))
# Read raw
current_dir = os.getcwd()
if os.path.exists(os.path.join(current_dir, '../ica',
basename + '_ica' + ext)):
raw_ica_file = os.path.join(
current_dir, '../ica', basename + '_ica' + ext)
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + '_filt_ica' + ext)):
raw_ica_file = os.path.join(
current_dir, '../ica', basename + '_filt_ica' + ext)
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + '_filt_dsamp_ica' + ext)):
raw_ica_file = os.path.join(
current_dir, '../ica', basename + '_filt_dsamp_ica' + ext)
print(('*** raw_ica_file %s' % raw_ica_file + '***'))
raw = mne.io.read_raw_fif(raw_ica_file, preload=True)
# load ICA
if os.path.exists(os.path.join(current_dir, '../ica',
basename + '_ica_solution.fif')):
ica_sol_file = os.path.join(
current_dir, '../ica', basename + '_ica_solution.fif')
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + '_filt_ica_solution.fif')):
ica_sol_file = os.path.join(
current_dir, '../ica', basename + '_filt_ica_solution.fif')
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + "_filt_dsamp_ica_solution."
"fif")):
ica_sol_file = os.path.join(
current_dir, '../ica', basename + '_filt_dsamp_ica_solution.fif')
if os.path.exists(ica_sol_file) is False:
print(('$$$ Warning, no %s found' % ica_sol_file))
sys.exit()
else:
ica = read_ica(ica_sol_file)
print(('\n *** ica.exclude before set components= ', ica.exclude))
if subject_id in n_comp_exclude:
print(('*** ICA to be excluded for sbj %s ' % subject_id))
print((' ' + str(n_comp_exclude[subject_id]) + '***'))
session_dict = n_comp_exclude[subject_id]
session_names = list(session_dict.keys())
componentes = []
for s in session_names:
componentes = session_dict[s]
if len(componentes) == 0:
print('\n no ICA to be excluded \n')
else:
print(('\n *** ICA to be excluded for session %s ' % s +
' ' + str(componentes) + ' *** \n'))
ica.exclude = componentes
print(('\n *** ica.exclude after set components = ', ica.exclude))
# apply ICA to raw data
new_raw_ica_file = os.path.join(subj_path, basename + '_ica-raw.fif')
raw_ica = ica.apply(raw)
raw_ica.save(new_raw_ica_file, overwrite=True)
# save ICA solution
print(ica_sol_file)
ica.save(ica_sol_file)
(ts_file, channel_coords_file, channel_names_file,
raw.info['sfreq']) = create_ts(new_raw_ica_file)
if is_sensor_space:
return (ts_file, channel_coords_file, channel_names_file,
raw.info['sfreq'])
else:
return (raw_ica, channel_coords_file, channel_names_file,
raw.info['sfreq'])
| 10,728
|
def plot(df, columns, df_clean, df_outliers, plot_cols=4):
"""Plots the dataframe and marks the outliers by a red cross.
Parameters:
----------
columns : str
A string of columns which will be plotted.
df_clean : dataframe
Dataframe without outliers.
df_outliers : dataframe
Dataframe of outliers.
    plot_cols : int, default=4
Determines how many columns the plots will form.
"""
plt.style.use('seaborn-white')
if plot_cols > len(columns) - 2:
u.log(u.yellow('ERROR: '), f"Can't use more than {len(columns) - 2} columns in one row.")
plot_cols = len(columns) - 2
# figure size = (width,height)
f1 = plt.figure(figsize=(30, len(columns) * 3))
total_plots = len(columns)
rows = total_plots - plot_cols
for idx, y in enumerate(columns):
idx += 1
ax1 = f1.add_subplot(rows, plot_cols, idx)
sns.regplot(x=df_clean.index,
y=y,
data=df_clean,
scatter=True,
fit_reg=False,
color='lightblue',
)
sns.regplot(x=df_outliers.index,
y=y,
data=df_outliers,
scatter=True,
fit_reg=False,
marker='x',
color='red',
)
| 10,729
|
def _phi(r: FloatTensorLike, order: int) -> FloatTensorLike:
"""Coordinate-wise nonlinearity used to define the order of the
interpolation.
See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.
Args:
r: input op.
order: interpolation order.
Returns:
`phi_k` evaluated coordinate-wise on `r`, for `k = r`.
"""
    # using EPSILON prevents log(0), sqrt(0), etc.
# sqrt(0) is well-defined, but its gradient is not
with tf.name_scope("phi"):
if order == 1:
r = tf.maximum(r, EPSILON)
r = tf.sqrt(r)
return r
elif order == 2:
return 0.5 * r * tf.math.log(tf.maximum(r, EPSILON))
elif order == 4:
return 0.5 * tf.square(r) * tf.math.log(tf.maximum(r, EPSILON))
elif order % 2 == 0:
r = tf.maximum(r, EPSILON)
return 0.5 * tf.pow(r, 0.5 * order) * tf.math.log(r)
else:
r = tf.maximum(r, EPSILON)
return tf.pow(r, 0.5 * order)
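# A minimal usage sketch, assuming tensorflow is imported as tf and EPSILON is defined
# in the surrounding module: order 1 is sqrt(r), order 2 is the thin-plate term 0.5*r*log(r).
r = tf.constant([0.25, 1.0, 4.0], dtype=tf.float64)
print(_phi(r, order=1).numpy())  # ~[0.5, 1.0, 2.0]
print(_phi(r, order=2).numpy())  # ~[-0.173, 0.0, 2.773]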
| 10,730
|
def mmodel():
"""Commands for the MONGOOSE meta study."""
pass
| 10,731
|
def linbin(n, nbin=None, nmin=None):
"""Given a number of points to bin and the number of approximately
equal-sized bins to generate, returns [nbin_out,{from,to}].
nbin_out may be smaller than nbin. The nmin argument specifies
the minimum number of points per bin, but it is not implemented yet.
nbin defaults to the square root of n if not specified."""
if not nbin: nbin = int(np.round(n**0.5))
tmp = np.arange(nbin+1)*n//nbin
return np.vstack((tmp[:-1],tmp[1:])).T
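# A minimal usage sketch, assuming numpy is imported as np: 10 points split into
# 3 approximately equal bins, returned as [from, to) index pairs.
print(linbin(10, nbin=3))  # [[0 3] [3 6] [6 10]]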
| 10,732
|
def get_soup(url):
"""Gets the soup of the given URL.
:param url: (str) URL the get the soup from.
:return: Soup of given URL.
"""
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'}
return BeautifulSoup(urllib_req.urlopen(urllib_req.Request(url, headers=header)), 'html.parser')
| 10,733
|
def msd_id_to_dirs(msd_id):
"""Given an MSD ID, generate the path prefix.
e.g. TRABCD12345678 -> A/B/C/TRABCD12345678"""
return op.join(msd_id[2], msd_id[3], msd_id[4], msd_id)
| 10,734
|
def get_data_path(sub_path):
"""Returns path to file in data folder."""
return join(_data_folder_path, sub_path)
| 10,735
|
def read_avro_bytes(URL, open_with, start_byte, length, header, nrows=None):
"""Pass a specific file/bytechunk and convert to dataframe with cyavro
Both a python dict version of the header, and the original bytes that
define it, are required. The bytes are prepended to the data, so that the
C avro reader can interpret them.
"""
with open_with(URL, 'rb') as f:
f.seek(start_byte)
if start_byte == 0:
header = read_header(f)
f.seek(header['header_size'])
data = header['head_bytes'] + f.read(length)
if nrows is None:
b = io.BytesIO(data)
header['blocks'] = []
scan_blocks(b, header, len(data))
nrows = sum(b['nrows'] for b in header['blocks'])
f = cyavro.AvroReader()
f.init_bytes(data)
df, arrs = empty(header['dtypes'].values(), nrows, cols=header['dtypes'])
f.init_reader()
f.init_buffers(10000)
for i in range(0, nrows, 10000):
d = f.read_chunk()
for c in d:
s = [f for f in header['schema']['fields'] if f['name'] == c][0]
if 'logicalType' in s:
df[c].values[i:i + 10000] = time_convert(d[c], s)
else:
df[c].values[i:i + 10000] = d[c]
return df
| 10,736
|
def get_launches(method="", **query):
"""Gets launches based on query strings
Gets launches based on query strings from
the API
Parameters
----------
method : str (optional)
the method used for the request
query : keyword args
keyword args based on the API query strings
Returns
-------
list
a list of the launches
"""
return _get("launches", method, query)
| 10,737
|
def save_lang_to_idx(lang_to_idx: dict, ex: Experiment):
"""Saves the lang_to_idx dict as an artifact
Arguments:
lang_to_idx {dict} -- The dict to save in a file
"""
tmpf = tempfile.NamedTemporaryFile(dir="", delete=False, suffix=".pkl")
pickle.dump(lang_to_idx, tmpf)
tmpf.flush()
ex.add_artifact(tmpf.name, "lang_to_idx.pkl")
tmpf.close()
os.unlink(tmpf.name)
| 10,738
|
def test_fileinrewriterstep_bare_encoding_substitutions():
"""Encoding works with substitutions for bare encoding."""
context = Context({
'enc': 'arbenc',
'root': {'in': 'inpathhere',
'encoding': '{enc}'}})
with patch('pypyr.config.config.default_encoding', 'arb'):
obj = FileInRewriterStep('blah.name', 'root', context)
assert obj.path_in == 'inpathhere'
assert not obj.path_out
assert obj.context == context
assert obj.logger.name == 'blah.name'
assert obj.encoding_in == 'arbenc'
assert obj.encoding_out == 'arbenc'
| 10,739
|
def test_main_load_config_not_present_but_needed(capsys):
"""Command ends indicating the return code to be used."""
cmd = create_command("cmdname", needs_config_=True)
with patch("charmcraft.main.COMMAND_GROUPS", [CommandGroup("title", [cmd])]):
retcode = main(["charmcraft", "cmdname", "--project-dir=/whatever"])
assert retcode == 1
out, err = capsys.readouterr()
assert not out
assert err == (
"The specified command needs a valid 'charmcraft.yaml' configuration file (in "
"the current directory or where specified with --project-dir option); see "
"the reference: https://discourse.charmhub.io/t/charmcraft-configuration/4138\n"
)
| 10,740
|
def xrandr_query():
"""
Returns all current available screen resolutions and refresh rate modes as a dictionary.
    This method only works on systems with X11 installed.
"""
pattern_screens = r'([\w-]+)\s+connected\s+(primary|)?.+\n(\s+[x*+.\d\s]+\n)'
pattern_mode = r'^\s+(\d+)x(\d+)\s+([\d.]+)([ *+]{0,2})'
# xrandr query command
command = "xrandr --current"
output, error, exc = shell_execute(command)
# find screens
screens = re.findall(pattern_screens, output, re.MULTILINE)
# iter screens, find resolutions
for screen in screens:
for modeline in screen[2].split('\n'):
match = re.match(pattern_mode, modeline)
if match:
yield {'width': match.group(1),
'height': match.group(2),
'port': screen[0],
'rate': match.group(3),
'active': '*' in match.group(4),
'preferred': '+' in match.group(4)}
| 10,741
|
def state(obj):
"""Gets the UnitOfWork state of a mapped object"""
return obj.__ming__.state
| 10,742
|
def save_file_in_path(file_path, content):
"""Write the content in a file
"""
try:
with open(file_path, 'w', encoding="utf-8") as f:
f.write(content)
except Exception as err:
print(err)
return None
return file_path
| 10,743
|
def export_entity_for_model_and_options(request):
"""
Export entity list in a list of 'format' type.
@note EntityModelClass.export_list() must return a list of results.
User of the request is used to check for permissions.
"""
limit = int_arg(request.GET.get('limit', 100000))
app_label = request.GET['app_label']
validictory.validate(app_label, Entity.NAME_VALIDATOR)
model = request.GET['model']
validictory.validate(model, Entity.NAME_VALIDATOR)
columns = request.GET.getlist('columns[]', ['id'])
    validictory.validate(columns, COLUMNS_VALIDATOR)
file_format = request.GET['format']
validictory.validate(model, {"type": "string"})
content_type = ContentType.objects.get_by_natural_key(app_label, model)
entity_model = content_type.model_class()
sort_by = json.loads(request.GET.get('sort_by', '[]'))
if not len(sort_by) or sort_by[-1] not in ('id', '+id', '-id'):
order_by = sort_by + ['id']
else:
order_by = sort_by
if request.GET.get('search'):
search = json.loads(request.GET['search'])
else:
search = None
if request.GET.get('filters'):
filters = json.loads(request.GET['filters'])
else:
filters = None
export_list = getattr(entity_model, 'export_list')
if export_list and callable(export_list):
cursor = None
columns, items = export_list(columns, cursor, search, filters, order_by, limit, request.user)
else:
# nothing to export
columns, items = [], []
exporter = DataExporter(columns, items)
if file_format == 'csv':
data = exporter.export_data_as_csv()
elif file_format == 'xlsx':
data = exporter.export_data_as_xslx()
else:
raise SuspiciousOperation("Invalid format")
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S")
file_name = "%s-%s-%s" % (app_label, model, timestamp) + exporter.file_ext
response = StreamingHttpResponse(data, content_type=exporter.mime_type)
response['Content-Disposition'] = 'attachment; filename="' + file_name + '"'
response['Content-Length'] = exporter.size
return response
| 10,744
|
def table(self, name, cluster=None, node=None):
"""Create a table with given name and on specified cluster, if specified.
"""
if node is None:
node = self.context.node
try:
if cluster:
with Given(f"I create table {name}"):
node.query(f"DROP TABLE IF EXISTS {name}")
node.query(f"CREATE TABLE {name} ON CLUSTER {cluster} (a UInt64) ENGINE = Memory")
else:
with Given(f"I create table {name}"):
node.query(f"DROP TABLE IF EXISTS {name}")
node.query(f"CREATE TABLE {name} (a UInt64) ENGINE = Memory")
yield
finally:
if cluster:
with Finally(f"I delete table {name}"):
node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {cluster}")
else:
with Finally(f"I delete role {name}"):
node.query(f"DROP ROLE IF EXISTS {name}")
| 10,745
|
def gen_instance_hv_map(ann, crop_shape):
"""Input annotation must be of original shape.
The map is calculated only for instances within the crop portion
but based on the original shape in original image.
Perform following operation:
Obtain the horizontal and vertical distance maps for each
nuclear instance.
"""
orig_ann = ann.copy() # instance ID map
fixed_ann = fix_mirror_padding(orig_ann)
# re-cropping with fixed instance id map
crop_ann = cropping_center(fixed_ann, crop_shape)
# TODO: deal with 1 label warning
crop_ann = morph.remove_small_objects(crop_ann, min_size=30)
x_map = np.zeros(orig_ann.shape[:2], dtype=np.float32)
y_map = np.zeros(orig_ann.shape[:2], dtype=np.float32)
inst_list = list(np.unique(crop_ann))
inst_list.remove(0) # 0 is background
for inst_id in inst_list:
inst_map = np.array(fixed_ann == inst_id, np.uint8)
inst_box = get_bounding_box(inst_map)
# expand the box by 2px
# Because we first pad the ann at line 207, the bboxes
# will remain valid after expansion
inst_box[0] -= 2
inst_box[2] -= 2
inst_box[1] += 2
inst_box[3] += 2
inst_map = inst_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]]
if inst_map.shape[0] < 2 or inst_map.shape[1] < 2:
continue
# instance center of mass, rounded to nearest pixel
inst_com = list(measurements.center_of_mass(inst_map))
inst_com[0] = int(inst_com[0] + 0.5)
inst_com[1] = int(inst_com[1] + 0.5)
inst_x_range = np.arange(1, inst_map.shape[1] + 1)
inst_y_range = np.arange(1, inst_map.shape[0] + 1)
# shifting center of pixels grid to instance center of mass
inst_x_range -= inst_com[1]
inst_y_range -= inst_com[0]
inst_x, inst_y = np.meshgrid(inst_x_range, inst_y_range)
# remove coord outside of instance
inst_x[inst_map == 0] = 0
inst_y[inst_map == 0] = 0
inst_x = inst_x.astype("float32")
inst_y = inst_y.astype("float32")
# normalize min into -1 scale
if np.min(inst_x) < 0:
inst_x[inst_x < 0] /= -np.amin(inst_x[inst_x < 0])
if np.min(inst_y) < 0:
inst_y[inst_y < 0] /= -np.amin(inst_y[inst_y < 0])
# normalize max into +1 scale
if np.max(inst_x) > 0:
inst_x[inst_x > 0] /= np.amax(inst_x[inst_x > 0])
if np.max(inst_y) > 0:
inst_y[inst_y > 0] /= np.amax(inst_y[inst_y > 0])
####
x_map_box = x_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]]
x_map_box[inst_map > 0] = inst_x[inst_map > 0]
y_map_box = y_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]]
y_map_box[inst_map > 0] = inst_y[inst_map > 0]
hv_map = np.dstack([x_map, y_map])
return x_map, y_map, hv_map
| 10,746
|
def jaccard_loss(true, logits, eps=1e-7):
"""Computes the Jaccard loss, a.k.a the IoU loss.
Note that PyTorch optimizers minimize a loss. In this
    case, we would like to maximize the Jaccard index, so we
    return one minus the Jaccard index.
Args:
true: a tensor of shape [B, H, W] or [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
jacc_loss: the Jaccard loss.
"""
num_classes = logits.shape[1]
if num_classes == 1:
true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
true_1_hot_f = true_1_hot[:, 0:1, :, :]
true_1_hot_s = true_1_hot[:, 1:2, :, :]
true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
pos_prob = torch.sigmoid(logits)
neg_prob = 1 - pos_prob
probas = torch.cat([pos_prob, neg_prob], dim=1)
else:
true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
probas = F.softmax(logits, dim=1)
true_1_hot = true_1_hot.type(logits.type())
dims = (0,) + tuple(range(2, true.ndimension()))
intersection = torch.sum(probas * true_1_hot, dims)
cardinality = torch.sum(probas + true_1_hot, dims)
union = cardinality - intersection
jacc_loss = (intersection / (union + eps)).mean()
return (1 - jacc_loss)
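# A minimal usage sketch, assuming torch and torch.nn.functional as F are imported:
# 4-class segmentation logits for a batch of two 8x8 label maps.
logits = torch.randn(2, 4, 8, 8)
true = torch.randint(0, 4, (2, 1, 8, 8))
loss = jaccard_loss(true, logits)
assert 0.0 <= loss.item() <= 1.0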
| 10,747
|
def open_inbox():
"""Open currently selected email address at malinator.com."""
try:
selection = list_box.curselection()
slctd_adrs = list_box.get(selection[0])
except IndexError:
messagebox.showerror('Error:', 'Nothing selected')
return
    # Remove @mailinator.com from value.
value2 = slctd_adrs.replace('@mailinator.com', '')
eml_adrs = BASE_URL+value2
web.open(eml_adrs)
entry_box.focus()
| 10,748
|
def test_dedup_signatures() -> None:
"""Test signature deduplication."""
kp1, kp2 = Keypair(), Keypair()
transfer1 = system_program.transfer(
{"from_pubkey": kp1.pubkey(), "to_pubkey": kp2.pubkey(), "lamports": 123}
)
transfer2 = system_program.transfer(
{"from_pubkey": kp1.pubkey(), "to_pubkey": kp2.pubkey(), "lamports": 123}
)
instructions = [transfer1, transfer2]
message = Message(instructions)
txn = Transaction.new_unsigned(message)
txn.sign([kp1], BLOCKHASH)
| 10,749
|
def _get_distribution_schema():
""" get the schema for distribution type """
return schemas.load(_DISTRIBUTION_KEY)
| 10,750
|
def compute_transforms(rmf_coordinates, mir_coordinates, node=None):
"""Get transforms between RMF and MIR coordinates."""
transforms = {
'rmf_to_mir': nudged.estimate(rmf_coordinates, mir_coordinates),
'mir_to_rmf': nudged.estimate(mir_coordinates, rmf_coordinates)
}
if node:
mse = nudged.estimate_error(transforms['rmf_to_mir'],
rmf_coordinates,
mir_coordinates)
node.get_logger().info(f"Transformation estimate error: {mse}")
return transforms
| 10,751
|
def merge_dicts(*list_of_dicts):
"""Merge a list of dictionaries and combine common keys into a list of values.
args:
list_of_dicts: a list of dictionaries. values within the dicts must be lists
dict = {key: [values]}
"""
output = {}
for dikt in list_of_dicts:
for k, v in dikt.items():
if not output.get(k):
                output[k] = list(v)  # copy so the later extend() does not mutate the input dict
else:
output[k].extend(v)
output[k] = list(set(output[k]))
return output
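# A minimal usage sketch: values must be lists; shared keys are combined and
# de-duplicated (order inside a merged list is not guaranteed).
merged = merge_dicts({'a': [1, 2]}, {'a': [2, 3], 'b': [4]})
assert sorted(merged['a']) == [1, 2, 3] and merged['b'] == [4]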
| 10,752
|
def highpass_filter(src, size):
"""
highpass_filter(src, size)
    High-pass filter.
    Parameters
    ----------
    src : image in AfmImg format
    size : int
        Size of the filter
    Returns
    -------
    dst : image in AfmImg format
        The filtered image
"""
def highpass(dft_img_src, *args):
dft_img = dft_img_src.copy()
        #create the mask
        mask = __make_filter(dft_img.shape, args[0], True)
        #apply the mask
dft_img = dft_img.real*mask + dft_img.imag*mask * 1j
return dft_img
dst = __dft_filter(src, highpass, size)
return dst
| 10,753
|
def parse_target(target):
"""
    Parse the target into IP form.
    :param str target: the target to parse
    :return tuple scan_ip: the parsed IP and domain
"""
scan_ip = ''
domain_result = ''
main_domain = ''
try:
url_result = re.findall('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', target)
if url_result == []:
ip_result = re.findall(r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b", target)
if ip_result == []:
result = tldextract.extract(target)
main_domain = result.domain + '.' + result.suffix
domain_regex = re.compile(r'(?:[A-Z0-9_](?:[A-Z0-9-_]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))\Z', re.IGNORECASE)
domain_result = domain_regex.findall(target)
if domain_result:
scan_ip = socket.gethostbyname(domain_result[0])
else:
net = IP(target)
#print(net.len())
scan_ip = net
else:
scan_ip = ip_result[0]
else:
url_parse = urlparse(target)
result = tldextract.extract(target)
main_domain = result.domain + '.' + result.suffix
domain_regex = re.compile(r'(?:[A-Z0-9_](?:[A-Z0-9-_]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))\Z', re.IGNORECASE)
domain_result = domain_regex.findall(url_parse.netloc)
scan_ip = socket.gethostbyname(url_parse.hostname)
except Exception as e:
print(e)
finally:
pass
if domain_result:
domain_result = domain_result[0]
return scan_ip, main_domain, domain_result
| 10,754
|
def isWrappedScalarType(typ: Type) -> bool:
"""
Given a type, determine if it is a c10::scalar which we will wrap in a lazy Value.
Since we literally change the type from scalarT to valueT, information is lost.
This function helps build a list of wrapped scalars to save that information
"""
if isinstance(typ, BaseType):
# I am regretting my naming conventions, but now we are wrapping at::scalar in
# lazy value, while preserving other 'scalar' types as scalars in the IR
return typ.name == BaseTy.Scalar
elif isinstance(typ, (OptionalType, ListType)):
return isWrappedScalarType(typ.elem)
return False
| 10,755
|
def manual_decision():
""" Display the controls for allowing user to make manual decision """
col1, col2, col3 = st.columns(3)
successfully_posted_manual_dec = None
with col1:
if st.button("Manual Decision: Version A"):
successfully_posted_manual_dec = utils.post_manual_decision('a')
with col2:
if st.button("Manual Decision: Version B"):
successfully_posted_manual_dec = utils.post_manual_decision('b')
with col3:
if successfully_posted_manual_dec:
st.success("✅")
elif successfully_posted_manual_dec is False:
st.exception("🚨 Something went wrong")
| 10,756
|
def test_hex_binary002_2068_hex_binary002_2068_v(mode, save_output, output_format):
"""
TEST :Facet Schemas for string : test for simpleType List of hexBinary
"""
assert_bindings(
schema="msData/datatypes/hexBinary002.xsd",
instance="msData/datatypes/hexBinary002.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 10,757
|
def assign_topic(data, doc_topic_distr):
""" Assigns dominant topic to documents of corpus.
:param data: DF of preprocessed and filtered text data
:type data: pd.DataFrame
:param doc_topic_distr: Array of topic distribution per doc of corpus
:type doc_topic_distr: np.array
:return: DF incl assigned topics
:rtype: pd.DataFrame
"""
data["topic_distribution"] = doc_topic_distr.tolist()
data["topic"] = np.argmax(doc_topic_distr, axis=1) + 1
return data
| 10,758
|
def _enable_scan_single_bytecode(code, name):
"""
Part of the ``_enable_scan`` that applies the scan behavior on a single
given list/set comprehension or generator expression code.
"""
bc = bytecode.Bytecode.from_code(code)
Instr = bytecode.Instr
# Updates LOAD_GLOBAL to LOAD_FAST when arg is name
for instr in bc:
if isinstance(instr, Instr) \
and instr.name == "LOAD_GLOBAL" and instr.arg == name:
instr.set("LOAD_FAST", name)
# Some needed information from the first/main FOR_ITER and the heading
# "filter" part of the generator expression or list/set comprehension
for_idx = next(idx for idx, instr in enumerate(bc)
if getattr(instr, "name", None) == "FOR_ITER")
for_instr = bc[for_idx]
begin_label_idx = for_idx - 1
try:
filter_last_idx = last(idx for idx, instr in enumerate(bc)
if isinstance(instr, Instr)
and instr.is_cond_jump()
and instr.arg == begin_label_idx)
except StopIteration:
filter_last_idx = for_idx
# Adds the block before the loop (i.e., first label) to append/add/yield
# the first input directly from FOR_ITER and save the first "prev"
# accordingly
heading_instructions = [("DUP_TOP",),
("STORE_FAST", name)] + {
"<listcomp>": [("LIST_APPEND", 2)],
"<setcomp>": [("SET_ADD", 2)],
"<genexpr>": [("YIELD_VALUE",),
("POP_TOP",)]
}[bc.name]
bc[begin_label_idx:begin_label_idx] = (
[instr.copy() for instr in bc[for_idx:filter_last_idx + 1]] +
[Instr(*args) for args in heading_instructions]
)
# Adds ending block that stores the result to prev before a new iteration
loop_instructions = ["SET_ADD", "LIST_APPEND", "YIELD_VALUE"]
ending_idx = next(-idx for idx, instr in enumerate(reversed(bc), 1)
if isinstance(instr, Instr)
and instr.name in loop_instructions)
ending_instructions = [("DUP_TOP",),
("STORE_FAST", name)]
bc[ending_idx:ending_idx] = \
[Instr(*args) for args in ending_instructions]
return bc.to_code()
| 10,759
|
def composite_rotation(r, p1=qt.QH([1, 0, 0, 0]), p2=qt.QH([1, 0, 0, 0])):
"""A composite function of next_rotation."""
return next_rotation(next_rotation(r, p1), p2)
| 10,760
|
def extract_vectors_ped_feature(residues, conformations, key=None, peds=None, features=None, indexes=False, index_slices=False):
"""
This function allows you to extract information of the ped features from the data structure. In particular allows:
- all rows or a specific subset of them, containing a certain feature (i.e., RD, EN, MED_ASA, etc ...)
- the interval extremes for a certain features (i.e., RD, EN, MED_ASA, etc ...)
- all the feature intervals as slices
:param residues: number of residues in the model
:param conformations: maximum number of conformations available
    :param key: the key of the feature or None if considering all of them, default: None
    :param peds: the ped id or None if considering all of them, default: None
    :param features: matrix of features or None if extracting only the indexes, default: None
:param indexes: return (begin, end) indexes of a feature if it's True, default: False
:param index_slices: return all the intervals of the features if it's True, default: False
:return: begin/end, slices or features
"""
begin = end = -1
residues = int(residues)
conformations = int(conformations)
slices = []
if key == 'PED_ID' or index_slices:
begin = 0
end = 1
slices.append(slice(begin, end))
if key == 'RD' or index_slices:
begin = 1
end = conformations + 1
slices.append(slice(begin, end))
if key == 'EN' or index_slices:
begin = conformations + 1
end = conformations + residues + 1
slices.append(slice(begin, end))
if key == 'MED_ASA' or index_slices:
begin = conformations + residues + 1
end = conformations + 2 * residues + 1
slices.append(slice(begin, end))
if key == 'MED_RMSD' or index_slices:
begin = conformations + 2 * residues + 1
end = conformations + 3 * residues + 1
slices.append(slice(begin, end))
if key == 'MED_DIST' or index_slices:
begin = conformations + 3 * residues + 1
end = int(conformations + 3 * residues + 1 + residues * (residues - 1) / 2)
slices.append(slice(begin, end))
if key == 'STD_DIST' or index_slices:
begin = int(conformations + 3 * residues + 1 + residues * (residues - 1) / 2)
end = None
slices.append(slice(begin, end))
begin = int(begin)
if end is not None:
end = int(end)
if begin == -1:
return None
if index_slices:
return slices
if indexes is True or features is None:
return begin, end
if peds is None:
return features[:, begin:end]
else:
if isinstance(peds, int):
return np.array(features[peds][begin:end])
else:
return features[peds, begin:end]
| 10,761
|
def plot_separate_info_plane_layer_view(MI_object, name, color_l, show_flag, save_flag):
"""
plots information plane separate into different layers
MI_object: mutual information object
name: name of the network
color_l: list of colours for different layers
    show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
"""
print("creating separate info plane layer view plot")
activations = MI_object.act_func
fig, axes = plt.subplots(len(activations),2,sharex=True,sharey=True)
fig.set_figheight(30)
fig.set_figwidth(15)
plt.subplots_adjust(
top = 0.97,
wspace = 0.05,
)
#fig.suptitle(("Information Plane (" + name + ", score: " + str(MI_object.model_score) + ")"))
fig.suptitle(("Information Plane (score: " + str(MI_object.model_score) + ")"), fontsize=15)
color_list = color_l
cmap = plt.get_cmap('gnuplot')
last_it = np.amax(list(MI_object.mi_x.keys()))
colors = [cmap(i) for i in np.linspace(0, 1, last_it + 1)]
# controls start and stop sign
label_count = 0
sp_label_count = 0
for key in MI_object.mi_x.keys():
# epochview
axes[key[1],1].plot(MI_object.mi_x[key], MI_object.mi_y[key],marker="o",
markersize=9, linewidth=0.2, color=colors[key[0]])
# layerview
if key[0] == 0:
if sp_label_count == 0:
axes[key[1],0].scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, label='start')
sp_label_count += 1
else:
axes[key[1],0].scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5)
elif key[0] == list(MI_object.mi_x.keys())[-1][0]:
if sp_label_count == 1:
axes[key[1],0].scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, marker="v", label='end')
sp_label_count += 1
else:
axes[key[1],0].scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, marker="v")
else:
if label_count < len(activations):
axes[key[1],0].scatter(MI_object.mi_x[key],
MI_object.mi_y[key], color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activations[key[1]])
label_count += 1
else:
axes[key[1],0].scatter(MI_object.mi_x[key],
MI_object.mi_y[key], color=color_list[key[1]])
# unify axes to start from 0
for i in range(len(activations)):
for j in range(2):
axes[i,j].set_xlabel("I(X;T)")
axes[i,j].set_ylabel("I(T;Y)")
axes[i,j].set_xlim(left=0, right=None)
axes[i,j].set_ylim(bottom=0, top=None)
#axes[i,j].set_xbound(lower=-0.05)
#axes[i,j].set_ybound(lower=-0.05)
remove_neg_ticks(axes[i,j], "x")
remove_neg_ticks(axes[i,j], "y")
#fig.tight_layout()
plt.tight_layout()
if save_flag == True:
if not os.path.exists("Results/Plots/LayerviewSplit/"):
try:
os.makedirs("Results/Plots/LayerviewSplit/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/LayerviewSplit/" + name + "_layerviewsplit.png")
if show_flag == True:
plt.show()
else:
plt.close()
| 10,762
|
def Load_File(filename):
"""
Loads a data file
"""
with open(filename) as file:
data = file.readlines()
return data
| 10,763
|
def dadd_ru(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_dadd_ru.html
:param x: Argument.
:type x: float64
:param y: Argument.
:type y: float64
:rtype: float64
"""
| 10,764
|
def massage_isig_and_dim(isig, im, flag, band, nm, nu, fac=None):
"""Construct a WISE inverse sigma image and add saturation to flag.
unWISE provides nice inverse variance maps. These however have no
contribution from Poisson noise from sources, and so underestimate
the uncertainties dramatically in bright regions. This can pull the
whole fit awry in bright areas, since the sky model means that every
pixel feels every other pixel.
It's not clear what the best solution is. We make a goofy inverse
sigma image from the original image and the inverse variance image. It
is intended to be sqrt(ivar) for the low count regime and grow like
sqrt(1/im) for the high count regime. The constant of proportionality
should in principle be worked out; here I set it to 0.15, which worked
once, and it doesn't seem like this should depend much on which
WISE exposure the image came from? It's ultimately something like the gain
or zero point...
"""
if fac is None:
bandfacs = {1: 0.15, 2: 0.3}
bandfloors = {1: 0.5, 2: 2}
fac = bandfacs[band]
floor = bandfloors[band]
satbit = 16 if band == 1 else 32
satlimit = 85000 # if band == 1 else 130000
msat = ((flag & satbit) != 0) | (im > satlimit) | ((nm == 0) & (nu > 1))
from scipy.ndimage import morphology
# dilate = morphology.iterate_structure(
# morphology.generate_binary_structure(2, 1), 3)
xx, yy = numpy.mgrid[-3:3+1, -3:3+1]
dilate = xx**2+yy**2 <= 3**2
msat = morphology.binary_dilation(msat, dilate)
isig[msat] = 0
flag = flag.astype('i8')
# zero out these bits; we claim them for our own purposes.
massagebits = (extrabits['crowdsat'] | crowdsource.nodeblend_maskbit |
crowdsource.sharp_maskbit | extrabits['nebulosity'])
flag &= ~massagebits
flag[msat] |= extrabits['crowdsat']
flag[(flag & nodeblend_bits) != 0] |= crowdsource.nodeblend_maskbit
flag[(flag & sharp_bits) != 0] |= crowdsource.sharp_maskbit
sigma = numpy.sqrt(1./(isig + (isig == 0))**2 + floor**2 +
fac**2*numpy.clip(im, 0, numpy.inf))
sigma[msat] = numpy.inf
sigma[isig == 0] = numpy.inf
return (1./sigma).astype('f4'), flag
| 10,765
|
def sh(arg):
"""
Execute command in a background shell.
Args:
arg (str or list): shell command, or a list of shell commands.
"""
if isinstance(arg, list):
return [sh(a) for a in arg]
else:
return subprocess.check_output(arg, shell=True).decode("utf-8").strip()
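# A minimal usage sketch (POSIX shell assumed): a single command returns its
# stripped stdout, a list of commands returns a list of outputs.
assert sh("echo hello") == "hello"
assert sh(["echo a", "echo b"]) == ["a", "b"]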
| 10,766
|
def uncapitalize(string: str):
"""De-capitalize first character of string
E.g. 'How is Michael doing?' -> 'how is Michael doing?'
"""
if len(string):
return string[0].lower() + string[1:]
return ""
| 10,767
|
def _plot_observations_one_time(
valid_time_string, title_string, annotation_string, output_file_name):
"""Plots observations (NARR predictors and WPC fronts) for one valid time.
:param valid_time_string: Valid time (format "yyyy-mm-dd-HH").
:param title_string: Title (will be placed above figure).
:param annotation_string: Text annotation (will be placed in top left of
figure).
:param output_file_name: Path to output file (figure will be saved here).
"""
(narr_row_limits, narr_column_limits
) = nwp_plotting.latlng_limits_to_rowcol_limits(
min_latitude_deg=MIN_LATITUDE_DEG, max_latitude_deg=MAX_LATITUDE_DEG,
min_longitude_deg=MIN_LONGITUDE_DEG,
max_longitude_deg=MAX_LONGITUDE_DEG,
model_name=nwp_model_utils.NARR_MODEL_NAME)
valid_time_unix_sec = time_conversion.string_to_unix_sec(
valid_time_string, INPUT_TIME_FORMAT)
front_file_name = fronts_io.find_file_for_one_time(
top_directory_name=TOP_FRONT_DIR_NAME,
file_type=fronts_io.POLYLINE_FILE_TYPE,
valid_time_unix_sec=valid_time_unix_sec)
print 'Reading data from: "{0:s}"...'.format(front_file_name)
front_line_table = fronts_io.read_polylines_from_file(front_file_name)
num_narr_fields = len(NARR_FIELD_NAMES)
narr_matrix_by_field = [numpy.array([])] * num_narr_fields
for j in range(num_narr_fields):
if NARR_FIELD_NAMES[j] in WIND_FIELD_NAMES:
this_directory_name = TOP_NARR_WIND_DIR_NAME + ''
else:
this_directory_name = TOP_NARR_DIR_NAME + ''
this_file_name = processed_narr_io.find_file_for_one_time(
top_directory_name=this_directory_name,
field_name=NARR_FIELD_NAMES[j], pressure_level_mb=PRESSURE_LEVEL_MB,
valid_time_unix_sec=valid_time_unix_sec)
print 'Reading data from: "{0:s}"...'.format(this_file_name)
narr_matrix_by_field[j] = processed_narr_io.read_fields_from_file(
this_file_name)[0][0, ...]
narr_matrix_by_field[j] = utils.fill_nans(narr_matrix_by_field[j])
narr_matrix_by_field[j] = narr_matrix_by_field[j][
narr_row_limits[0]:(narr_row_limits[1] + 1),
narr_column_limits[0]:(narr_column_limits[1] + 1)
]
if NARR_FIELD_NAMES[j] == processed_narr_io.WET_BULB_THETA_NAME:
narr_matrix_by_field[j] = (
narr_matrix_by_field[j] - ZERO_CELSIUS_IN_KELVINS
)
_, axes_object, basemap_object = nwp_plotting.init_basemap(
model_name=nwp_model_utils.NARR_MODEL_NAME,
first_row_in_full_grid=narr_row_limits[0],
last_row_in_full_grid=narr_row_limits[1],
first_column_in_full_grid=narr_column_limits[0],
last_column_in_full_grid=narr_column_limits[1])
plotting_utils.plot_coastlines(
basemap_object=basemap_object, axes_object=axes_object,
line_colour=BORDER_COLOUR)
plotting_utils.plot_countries(
basemap_object=basemap_object, axes_object=axes_object,
line_colour=BORDER_COLOUR)
plotting_utils.plot_states_and_provinces(
basemap_object=basemap_object, axes_object=axes_object,
line_colour=BORDER_COLOUR)
plotting_utils.plot_parallels(
basemap_object=basemap_object, axes_object=axes_object,
bottom_left_lat_deg=-90., upper_right_lat_deg=90.,
parallel_spacing_deg=PARALLEL_SPACING_DEG)
plotting_utils.plot_meridians(
basemap_object=basemap_object, axes_object=axes_object,
bottom_left_lng_deg=0., upper_right_lng_deg=360.,
meridian_spacing_deg=MERIDIAN_SPACING_DEG)
for j in range(num_narr_fields):
if NARR_FIELD_NAMES[j] in WIND_FIELD_NAMES:
continue
min_colour_value = numpy.percentile(
narr_matrix_by_field[j], MIN_COLOUR_PERCENTILE)
max_colour_value = numpy.percentile(
narr_matrix_by_field[j], MAX_COLOUR_PERCENTILE)
nwp_plotting.plot_subgrid(
field_matrix=narr_matrix_by_field[j],
model_name=nwp_model_utils.NARR_MODEL_NAME, axes_object=axes_object,
basemap_object=basemap_object, colour_map=THERMAL_COLOUR_MAP_OBJECT,
min_value_in_colour_map=min_colour_value,
max_value_in_colour_map=max_colour_value,
first_row_in_full_grid=narr_row_limits[0],
first_column_in_full_grid=narr_column_limits[0])
plotting_utils.add_linear_colour_bar(
axes_object_or_list=axes_object,
values_to_colour=narr_matrix_by_field[j],
colour_map=THERMAL_COLOUR_MAP_OBJECT, colour_min=min_colour_value,
colour_max=max_colour_value, orientation='vertical',
extend_min=True, extend_max=True,
fraction_of_axis_length=LENGTH_FRACTION_FOR_THETA_COLOUR_BAR)
u_wind_index = NARR_FIELD_NAMES.index(
processed_narr_io.U_WIND_EARTH_RELATIVE_NAME)
v_wind_index = NARR_FIELD_NAMES.index(
processed_narr_io.V_WIND_EARTH_RELATIVE_NAME)
nwp_plotting.plot_wind_barbs_on_subgrid(
u_wind_matrix_m_s01=narr_matrix_by_field[u_wind_index],
v_wind_matrix_m_s01=narr_matrix_by_field[v_wind_index],
model_name=nwp_model_utils.NARR_MODEL_NAME, axes_object=axes_object,
basemap_object=basemap_object,
first_row_in_full_grid=narr_row_limits[0],
first_column_in_full_grid=narr_column_limits[0],
plot_every_k_rows=PLOT_EVERY_KTH_WIND_BARB,
plot_every_k_columns=PLOT_EVERY_KTH_WIND_BARB,
barb_length=WIND_BARB_LENGTH, empty_barb_radius=EMPTY_WIND_BARB_RADIUS,
colour_map=WIND_COLOUR_MAP_OBJECT,
colour_minimum_kt=MIN_COLOUR_WIND_SPEED_KT,
colour_maximum_kt=MAX_COLOUR_WIND_SPEED_KT)
num_fronts = len(front_line_table.index)
for i in range(num_fronts):
this_front_type_string = front_line_table[
front_utils.FRONT_TYPE_COLUMN].values[i]
if this_front_type_string == front_utils.WARM_FRONT_STRING_ID:
this_colour = WARM_FRONT_COLOUR
else:
this_colour = COLD_FRONT_COLOUR
front_plotting.plot_polyline(
latitudes_deg=front_line_table[
front_utils.LATITUDES_COLUMN].values[i],
longitudes_deg=front_line_table[
front_utils.LONGITUDES_COLUMN].values[i],
basemap_object=basemap_object, axes_object=axes_object,
front_type=front_line_table[
front_utils.FRONT_TYPE_COLUMN].values[i],
line_width=FRONT_LINE_WIDTH, line_colour=this_colour)
pyplot.title(title_string)
plotting_utils.annotate_axes(
axes_object=axes_object, annotation_string=annotation_string)
    print('Saving figure to: "{0:s}"...'.format(output_file_name))
file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
pyplot.savefig(output_file_name, dpi=FIGURE_RESOLUTION_DPI)
pyplot.close()
imagemagick_utils.trim_whitespace(input_file_name=output_file_name,
output_file_name=output_file_name)
| 10,768
|
def connect_db():
"""Connects to the specific database."""
    rv = sqlite3.connect(config['database'])
rv.row_factory = sqlite3.Row
return rv
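# Hedged usage sketch (not part of the original source): it assumes the module-level
# `config` dict used above maps 'database' to an SQLite file path. Because the
# row_factory is sqlite3.Row, columns can be read by name.
def _example_connect_db():
    """Illustrative only: list the tables in the configured database."""
    db = connect_db()
    try:
        rows = db.execute(
            "SELECT name FROM sqlite_master WHERE type = 'table'"
        ).fetchall()
        return [row["name"] for row in rows]
    finally:
        db.close()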
| 10,769
|
def _sc_weights_trad(M, M_c, V, N, N0, custom_donor_pool, best_w_pen, verbose=0):
""" Traditional matrix solving. Requires making NxN0 matrices.
"""
    # Could potentially be decomposed so the full NxN0 matrix is never built, but the ridge solution works fine at this size.
sc_weights = np.full((N,N0), 0.)
weight_log_inc = max(int(N/100), 1)
for i in range(N):
if ((i % weight_log_inc) == 0 and verbose>0):
print_progress(i+1, N)
if verbose > 1:
print_memory_snapshot(extra_str="Loop " + str(i))
allowed = custom_donor_pool[i,:]
sc_weights[i,allowed] = _weights(V, M[i,:], M_c[allowed,:], best_w_pen)
if ((N-1) % weight_log_inc) != 0 and verbose > 0:
print_progress(N, N)
return sc_weights
| 10,770
|
def getargs():
"""Parse command line arguments"""
desc = (
"Analyze and query strace log given the strace log in CSV format "
"(STRACE_CSV). See 'strace2csv.py' for converting strace "
"log to the csv format expected by this tool."
)
epil = "Example: ./%s strace.csv summary" % os.path.basename(__file__)
parser = argparse.ArgumentParser(
description=desc, epilog=epil, formatter_class=_SmartFormatter
)
helpstr = "path to strace log in csv format (output from strace2csv.py)"
parser.add_argument("STRACE_CSV", nargs=1, help=helpstr)
helpstr = "R|specify output details, one of the following strings:"
parser.add_argument("COMMAND", nargs=1, help=helpstr + _command_help())
helpstr = "set the verbose level between 0-3 (defaults to --verbose=1)"
parser.add_argument("--verbose", help=helpstr, type=int, default=1)
return parser.parse_args()
| 10,771
|
def generate_on_message(
test_client: "DiscordTestClient", broker_id: int
) -> Callable[[discord.Message], Awaitable[None]]:
"""
Whenever a message comes in, we want our test client to:
1. Filter the message so we are only getting the ones we want.
2. Store received messages so we can inspect them during tests.
3. Wait to receive a certain number of messages, then set an event communicating
that the expected number of messages has been received and we can continue.
"""
async def on_message(message: discord.Message) -> None:
# Toss out any messages not on our expected channels, otherwise we may receive
# messages from other devs running tests concurrently
if message.channel.id not in test_client.channel_id_whitelist:
return
# Print the message for our test logs. We're only going to use the primary
# client to print so we don't double-print each message.
if test_client.is_primary:
print(
f"message received"
f"\nfrom: {test_client.user.display_name}"
f"\nby: {message.author.display_name}"
f"\nchannel: {message.channel.name}"
f"\n{message.content}\n\n"
)
if message.author.id != broker_id:
return
test_client.messages_received.append(message)
if test_client.test_expected_count_received == 0:
raise IOError("Received an unexpected message")
if (
len(test_client.messages_received)
>= test_client.test_expected_count_received
and not test_client.event_messages_received.is_set()
):
test_client.event_messages_received.set()
return on_message
| 10,772
|
def test_suite():
"""
Construct a TestSuite instance for all test cases.
"""
suite = unittest.TestSuite()
for dt, format, expectation in TEST_CASES:
suite.addTest(create_testcase(dt, format, expectation))
return suite
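# A minimal sketch (not from the original source) of how the suite above could be
# driven; it assumes this module's TEST_CASES and create_testcase are importable.
def _example_run_suite():
    """Illustrative only: run the suite with a text runner and report success."""
    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(test_suite())
    return result.wasSuccessful()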
| 10,773
|
def calcScipionScore(modes):
"""Calculate the score from hybrid electron microscopy normal mode analysis (HEMNMA)
[CS14]_ as implemented in the Scipion continuousflex plugin [MH20]_. This score
prioritises modes as a function of mode number and collectivity order.
.. [CS14] Sorzano COS, de la Rosa-Trevín JM, Tama F, Jonić S.
Hybrid Electron Microscopy Normal Mode Analysis graphical interface and protocol.
*J Struct Biol* **2014** 188:134-41.
.. [MH20] Harastani M, Sorzano COS, Jonić S.
Hybrid Electron Microscopy Normal Mode Analysis with Scipion.
*Protein Sci* **2020** 29:223-236.
:arg modes: mode(s) or vector(s)
:type modes: :class:`.Mode`, :class:`.Vector`, :class:`.ModeSet`, :class:`.NMA`
"""
n_modes = modes.numModes()
if n_modes > 1:
collectivityList = list(calcCollectivity(modes))
else:
collectivityList = [calcCollectivity(modes)]
idxSorted = [i[0] for i in sorted(enumerate(collectivityList),
key=lambda x: x[1],
reverse=True)]
score = np.zeros(n_modes)
modeNum = list(range(n_modes))
for i in range(n_modes):
score[idxSorted[i]] = idxSorted[i] + modeNum[i] + 2
score = score / (2.0 * n_modes)
return score
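# Hedged usage sketch (not from the original source): scoring the first ten ANM modes
# of ubiquitin with ProDy. parsePDB fetches 1ubi over the network, and the mode count
# is an arbitrary choice made for illustration.
def _example_scipion_score():
    """Illustrative only: compute HEMNMA-style scores for ANM modes of 1ubi."""
    from prody import ANM, parsePDB
    calphas = parsePDB('1ubi', subset='ca')     # C-alpha atoms only
    anm = ANM('1ubi')
    anm.buildHessian(calphas)
    anm.calcModes(n_modes=10)
    return calcScipionScore(anm)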
| 10,774
|
def google_base(request):
""" view for Google Base Product feed template; returns XML response """
products = Product.active.all()
template = get_template("marketing/google_base.xml")
xml = template.render(Context(locals()))
return HttpResponse(xml, mimetype="text/xml")
| 10,775
|
def get_agivenn_df(run_list, run_list_sep, **kwargs):
"""DF of mean amplitudes conditiontioned on differnet n values."""
n_simulate = kwargs.pop('n_simulate')
adfam_t = kwargs.pop('adfam_t', None)
adaptive = kwargs.pop('adaptive')
n_list = kwargs.pop('n_list', [1, 2, 3])
comb_vals, comb_val_resamps, sep_vals, sep_val_resamps = (
comb_sep_eval_resamp(
run_list, run_list_sep, get_a_n_mean_given_n, n_simulate,
adaptive=adaptive, n_list=n_list, adfam_t=adfam_t))
col_names = [r'$\mathrm{{E}}[a_{}|N={}]$'.format(n, n) for n in n_list]
return get_sep_comb_df(
comb_vals, comb_val_resamps, sep_vals, sep_val_resamps,
col_names)
| 10,776
|
def friendship_request_list_rejected(request, template_name='friendship/friend/requests_list.html'):
""" View rejected friendship requests """
# friendship_requests = Friend.objects.rejected_requests(request.user)
    friendship_requests = FriendshipRequest.objects.filter(rejected__isnull=False)
return render(request, template_name, {'requests': friendship_requests})
| 10,777
|
def send_email_to_managers(subject=None, body=None,
template_prefix=None, vars=None,
cc=None, bcc=None, frm=None,
attachments=None, headers=None,
as_html=True):
"""
Send an email template to the site managers.
"""
send_email(settings.MANAGERS, subject, body, template_prefix, vars, cc, bcc, frm, attachments, headers, as_html)
| 10,778
|
def test_override_video_view_lists_collection_view_lists(
mock_user_moira_lists, request_data, video_permission, video
):
"""
    A video with view lists should be viewable by users in those lists, but not by
    users who are only in the collection's view lists, if video permissions are enabled.
"""
video_list = MoiraListFactory()
collection_list = MoiraListFactory()
video.view_lists.set([video_list])
video.collection.view_lists.set([collection_list])
mock_user_moira_lists.return_value = {video_list.name, collection_list.name}
assert (
video_permission.has_object_permission(
request_data.request, request_data.view, video
)
is True
)
mock_user_moira_lists.return_value = {collection_list.name}
assert (
video_permission.has_object_permission(
request_data.request, request_data.view, video
)
is False
)
| 10,779
|
def test_ep1():
"""
Test against known values.
"""
d = n_mod_m(3, 2)
ep = ExtropyPartition(d)
string = """\
+----------+--------+
| measure | exits |
+----------+--------+
| X[0|1,2] | 0.000 |
| X[1|0,2] | 0.000 |
| X[2|0,1] | 0.000 |
| X[0:1|2] | 0.245 |
| X[0:2|1] | 0.245 |
| X[1:2|0] | 0.245 |
| X[0:1:2] | 0.510 |
+----------+--------+"""
assert str(ep) == string
| 10,780
|
def test_basic() -> None:
"""Test rendering a basic schema with title"""
soup = _generate_case("basic")
_assert_basic_case(soup)
| 10,781
|
def cycle_ctgo(object_type, related_type, related_ids):
""" indirect relationships between Cycles and Objects mapped to CycleTask """
if object_type == "Cycle":
join_by_source_id = db.session.query(CycleTask.cycle_id) \
.join(Relationship, CycleTask.id == Relationship.source_id) \
.filter(
Relationship.source_type == "CycleTaskGroupObjectTask",
Relationship.destination_type == related_type,
Relationship.destination_id.in_(related_ids))
join_by_destination_id = db.session.query(CycleTask.cycle_id) \
.join(Relationship, CycleTask.id == Relationship.destination_id) \
.filter(
Relationship.destination_type == "CycleTaskGroupObjectTask",
Relationship.source_type == related_type,
Relationship.source_id.in_(related_ids))
return join_by_source_id.union(join_by_destination_id)
else:
join_by_source_id = db.session.query(Relationship.destination_id) \
.join(CycleTask, CycleTask.id == Relationship.source_id) \
.filter(
CycleTask.cycle_id.in_(related_ids),
Relationship.source_type == "CycleTaskGroupObjectTask",
Relationship.destination_type == object_type)
join_by_destination_id = db.session.query(Relationship.source_id) \
.join(CycleTask, CycleTask.id == Relationship.destination_id) \
.filter(
CycleTask.cycle_id.in_(related_ids),
Relationship.destination_type == "CycleTaskGroupObjectTask",
Relationship.source_type == object_type)
return join_by_source_id.union(join_by_destination_id)
| 10,782
|
def Min(axis=-1, keepdims=False):
"""Returns a layer that applies min along one tensor axis.
Args:
axis: Axis along which values are grouped for computing minimum.
keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
axis; else, remove that axis.
"""
return Fn('Min', lambda x: jnp.min(x, axis, keepdims=keepdims))
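# Hedged usage sketch (not part of the original source), assuming the Fn-built layer
# can be called directly on an array as trax layers generally allow; the jnp.min call
# shows the equivalent computation the layer wraps.
def _example_min_layer():
    """Illustrative only: take the per-row minimum of a small matrix."""
    x = jnp.array([[3.0, 1.0, 2.0],
                   [0.5, 4.0, -1.0]])
    layer_out = Min(axis=-1)(x)       # expected: [1.0, -1.0]
    direct_out = jnp.min(x, axis=-1)  # same values without the layer wrapper
    return layer_out, direct_out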
| 10,783
|
def sampling(args):
"""Reparameterization trick by sampling fr an isotropic unit Gaussian.
# Arguments
args (tensor): mean and log of variance of Q(z|X)
# Returns
z (tensor): sampled latent vector
"""
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean=0 and std=1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
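# A NumPy restatement of the reparameterization trick above (illustrative only; the
# Keras backend version is what the model actually uses). Drawing z as
# mean + exp(0.5 * log_var) * eps keeps the sample differentiable w.r.t. mean/log_var.
def _example_reparameterize():
    """Illustrative only: draw latent samples for a batch of 4 with latent dim 2."""
    import numpy as np
    z_mean = np.zeros((4, 2))
    z_log_var = np.log(np.full((4, 2), 0.25))  # variance 0.25 -> std 0.5
    epsilon = np.random.normal(size=z_mean.shape)
    return z_mean + np.exp(0.5 * z_log_var) * epsilon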
| 10,784
|
def test_file_sdf_gz(input_sdf):
"""Read a compressed sdf file."""
# without properties
df = load.file(f"{input_sdf}.gz", keep_props=False, in_id='_Name', out_id='idm')
assert len(df.index) == 5
assert list(df.columns.values) == ['idm', 'mol']
assert isinstance(df.iloc[0]['mol'], Mol)
| 10,785
|
def get_expression_arg_names(expression, strip_dots=True):
"""
Parse expression and return set of all argument names. For arguments
with attribute-like syntax (e.g. materials), if `strip_dots` is
True, only base argument names are returned.
"""
args = ','.join(aux.args for aux in parse_definition(expression))
args = [arg.strip() for arg in args.split(',')]
if strip_dots:
for ii, arg in enumerate(args[:]):
aux = arg.split('.')
if len(aux) == 2:
args[ii] = aux[0]
return set(args)
| 10,786
|
def ORDER_CTIME(path: Path) -> int:
"""パスのソート用関数です。作成日時でソートを行います。
"""
return path.stat().st_ctime_ns
| 10,787
|
def format_location(data, year):
""" Format any spatial data. Does nothing yet.
Parameters
----------
data : pd.DataFrame
Data before location formatting.
Returns
-------
data : pd.DataFrame
Data with location formatting.
"""
    # No finer-grained spatial data yet; only the region column is mapped below.
# data["MSOA"] = "no_location"
# data["location"] = "no_location"
data["region"] = data["region"].astype(str).map(region_dict)
return data
| 10,788
|
def deploy_ingestion_service(
featureset: Union[FeatureSet, str],
source: DataSource = None,
targets: List[DataTargetBase] = None,
name: str = None,
run_config: RunConfig = None,
):
"""Start real-time ingestion service using nuclio function
Deploy a real-time function implementing feature ingestion pipeline
the source maps to Nuclio event triggers (http, kafka, v3io stream, etc.)
example::
source = HTTPSource()
func = mlrun.code_to_function("ingest", kind="serving").apply(mount_v3io())
config = RunConfig(function=func)
fs.deploy_ingestion_service(my_set, source, run_config=config)
:param featureset: feature set object or uri
:param source: data source object describing the online or offline source
:param targets: list of data target objects
    :param name: name for the job/function
:param run_config: service runtime configuration (function object/uri, resources, etc..)
"""
if isinstance(featureset, str):
featureset = get_feature_set_by_uri(featureset)
run_config = run_config.copy() if run_config else RunConfig()
source, run_config.parameters = set_task_params(
featureset, source, targets, run_config.parameters
)
name = name or f"{featureset.metadata.name}_ingest"
if not run_config.function:
function_ref = featureset.spec.function.copy()
if function_ref.is_empty():
function_ref = FunctionReference(name=name, kind=RuntimeKinds.serving)
function_ref.kind = function_ref.kind or RuntimeKinds.serving
if not function_ref.url:
function_ref.code = function_ref.code or ""
run_config.function = function_ref
function = run_config.to_function(
RuntimeKinds.serving, mlrun.mlconf.feature_store.default_job_image
)
function.metadata.project = featureset.metadata.project
function.metadata.name = function.metadata.name or name
# todo: add trigger (from source object)
function.spec.graph = featureset.spec.graph
function.spec.parameters = run_config.parameters
function.spec.graph_initializer = (
"mlrun.feature_store.ingestion.featureset_initializer"
)
function.verbose = True
if run_config.local:
return function.to_mock_server(namespace=get_caller_globals())
return function.deploy()
| 10,789
|
def add_user(email, passwd, admin):
"""adiciona novo usuario"""
# TODO(RichardOkubo): Tratar 'exception' caso 'user' já exista
user = create_user(email=email, password=passwd, admin=admin)
click.echo(f"Usuário {email} criado com sucesso!")
| 10,790
|
def create_app(service: Service):
"""Start a small webserver with the Service."""
app = FastAPI()
@app.post("/query")
def query(params: Params):
"""The main query endpoint."""
return service.query(**params.query, n_neighbors=params.n_neighbors)
return app
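# Hedged usage sketch (not part of the original source): exercising the endpoint with
# FastAPI's TestClient. The payload keys follow the Params model referenced above
# (a `query` mapping plus `n_neighbors`); the query contents themselves are made up.
def _example_query_endpoint(service: Service):
    """Illustrative only: POST one query and return the JSON response."""
    from fastapi.testclient import TestClient
    client = TestClient(create_app(service))
    response = client.post(
        "/query",
        json={"query": {"text": "example request"}, "n_neighbors": 5},
    )
    return response.json()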
| 10,791
|
def test_user_relationship_api():
""" Test to check that user relationship API is not throwing
an index out of range error if user dont have any manager.
Creates a test user without manager and calls the user
relationship api for testing whether it is causing any
index out of range error or not."""
user1_payload = {
"name": "kiran kumar",
"username": "kkumar36",
"password": "12345689",
"email": "kiran36@gmail.com",
}
with requests.Session() as session:
user1_response = create_test_user(session, user1_payload)
user1_result = assert_api_success(user1_response)
user1_id = user1_result["data"]["user"]["id"]
response = session.get(
"http://rbac-server:8000/api/users/" + user1_id + "/relationships"
)
assert response.json()["data"]["managers"] == []
delete_user_by_username("kkumar36")
| 10,792
|
def cnn_prediction(index, prefix, output_prefix, model, dataset):
""" CNN predictions.
Run the CNN on a file and generate the output file but do not
process the file with the template matching code.
"""
logger = logging.getLogger(__name__)
logger.info("making predictions.")
start_time = time.time()
indexstr = "_{:05d}".format(index)
if model is None:
if dataset == 'IR':
#model = os.path.join(cfg.root_dir, './ResUNET/models/Thu Jun 27 14:04:17 2019/78-0.65.hdf5')
#model = os.path.join(cfg.root_dir, '/disks/work/james/deepmars2/ResUNET/models/Fri Jul 12 09:59:22 2019/64-0.68.hdf5')
model = cfg.moon_IR_model
elif dataset == 'DEM':
#model = os.path.join(cfg.root_dir, './ResUNET/models/Tue Jun 18 17:26:59 2019/139-0.59.hdf5')
model = cfg.moon_DEM_model
else:
raise ValueError('dataset must be one of DEM or IR')
if output_prefix is None:
output_prefix = prefix
# Crater Parameters
CP = dict(
dim=256,
datatype=prefix,
n_imgs=-1,
dir_model=model,
dataset=dataset,
dir_data=os.path.join(
cfg.root_dir,
"data/processed/%s_images%s.hdf5" % (prefix, indexstr),
),
dir_preds=os.path.join(
cfg.root_dir,
"data/predictions3/%s/%s_preds%s.hdf5" % (dataset, output_prefix, indexstr),
),
)
get_model_preds(CP)
elapsed_time = time.time() - start_time
logger.info("Time elapsed: {0:.1f} min".format(elapsed_time / 60.0))
| 10,793
|
def establecer_dominio(func_dist: Expr) -> dict:
"""Establece el dominio a partir de una FD.
Parameters
----------
func_dist
Distribución de probabilidad
Returns
-------
dict
Dominio
"""
equations = func_dist.atoms(Eq)
orders = func_dist.atoms(Rel) - equations
dom = {var: EmptySet for var in func_dist.atoms(Symbol)}
for order in orders:
if len(order.atoms(Symbol)) > 1:
continue
var, = order.atoms(Symbol)
val = solveset(order, var, Integers)
dom[var] = dom[var] & val if dom[var] else val
for equation in equations:
var, = equation.atoms(Symbol)
val = solveset(equation, var)
dom[var] = dom[var] | val
return dom
| 10,794
|
def test_check_paths(var_file, input_file, output_file):
"""Test that check_paths works as expected."""
check_paths(variables=var_file, input=input_file, output=output_file)
| 10,795
|
def random_init(n, max_norm):
"""Computes a random initial configuration of n 2D-vectors such that they all
are inside of a circle of radius max_norm
Parameters
----------
n : int
Number of vectors
max_norm : float or int
Radius of the circle or maximum possible distance from the origin of
coordinates that the vectors can have.
Returns
-------
numpy.ndarray
(n, 2) matrix of vectors
"""
X = np.zeros((n, 2))
angles = np.random.rand(n) * 2 * np.pi
norms = np.random.rand(n) * max_norm
for i, angle, norm in zip(range(n), angles, norms):
X[i] = np.array([np.cos(angle), np.sin(angle)]) * norm
return X
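# Quick illustrative check (not from the original source): every generated vector
# should land inside the requested radius.
def _example_random_init():
    """Illustrative only: sample 100 vectors inside a circle of radius 5."""
    X = random_init(100, 5.0)
    assert X.shape == (100, 2)
    assert np.all(np.linalg.norm(X, axis=1) <= 5.0)
    return X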
| 10,796
|
def calc_cells(serial: int) -> Dict[Tuple[int, int], int]:
"""Calculate the power for all cells
and store them in a dict to retrieve them faster later
"""
r = {}
for i in range(300):
for j in range(300):
r.update({(i, j): calc_power((i, j), serial)})
return r
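# calc_power is defined elsewhere in this module; the sketch below is an assumed
# reference implementation based on the Advent of Code 2018 day 11 power-level rule,
# included purely for illustration (the real helper may index cells differently).
def _reference_calc_power(cell, serial):
    """Illustrative only: AoC 2018 day 11 rule; cell (3, 5) with serial 8 gives 4."""
    x, y = cell
    rack_id = x + 10
    power = (rack_id * y + serial) * rack_id
    return (power // 100) % 10 - 5  # hundreds digit minus 5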
| 10,797
|
def check_img(img, input_dir):
""" Checks whether the img complies with API`s restrictions.
Parameters
----------
img : str
Image name.
input_dir : str
Path to the dir with the image to check.
Returns
-------
        Error message if the image does not comply with the API's
        restrictions. Otherwise, returns "correct".
"""
    img_format = img[img.rfind("."):].lower()  # use the last dot so multi-dot names resolve to the true extension
if img_format not in ALLOWED_FORMATS:
return f"{img},[Error] Unsupported format {img_format}\n"
if os.path.getsize(os.path.join(input_dir, img)) >= IMG_SIZE_LIMIT:
return f"{img},[Error] Size is larger than {IMG_SIZE_LIMIT}B\n"
img_cv2 = cv2.imread(os.path.join(input_dir, img))
img_height, img_width, _ = img_cv2.shape
if (not MAX_IMG_DIM > img_height > MIN_IMG_DIM or
not MAX_IMG_DIM > img_width > MIN_IMG_DIM):
return f"{img},[Error] Img dim must be in between " \
f"{MIN_IMG_DIM}-{MAX_IMG_DIM}\n"
return "correct"
| 10,798
|
def child_files_recursive(root: Union[str, pathlib.Path], ext: str) -> List[str]:
"""
Get all files with a specific extension nested under a root directory.
Parameters
----------
root : pathlib.Path or str
root directory
ext : str
file extension
Returns
-------
List[str]
"""
if not is_string_like(root) and not isinstance(root, pathlib.Path):
        raise TypeError(f'root is not string-like or a Path: {type(root)}')
return list(glob.iglob(str(Path(root).joinpath('**/*' + ext)), recursive=True))
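# Hedged usage sketch (not part of the original source): the root directory here is
# hypothetical; the call simply gathers every .csv nested beneath it.
def _example_collect_csvs(root="./data"):
    """Illustrative only: list all CSV files below a root directory."""
    return child_files_recursive(root, ".csv")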
| 10,799