content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def load_appdata():
    """Load application data from the JSON file named by ``FNAME``.

    Returns the parsed data, or ``None`` when the file does not exist.
    """
    try:
        datafile = open(FNAME)
    except FileNotFoundError:
        # Missing file is not an error: caller gets None.
        return None
    with datafile:
        return json.load(datafile)
def compute_couplings(models_a, models_b):
    """
    Given logistic models for two multiple sequence alignments, calculate all
    intermolecular coupling strengths between residues.
    The coupling strength between positions i and j is calculated as the 2-norm
    of the concatenation of the coefficient submatrices that describe the
    relationships between the two positions.
    ----------------------------------------------------------------------------
    Reference:
    Ovchinnikov, Sergey, Hetunandan Kamisetty, and David Baker.
    "Robust and accurate prediction of residue–residue interactions across
    protein interfaces using evolutionary information." Elife 3 (2014): e02030
    ----------------------------------------------------------------------------
    Arguments
    ---------
    models_a:   list of SGDClassifier objects, one for each analyzed column in
                MSA A
    models_b:   list of SGDClassifier objects, one for each analyzed column in
                MSA B
    Returns
    -------
    couplings:  dict, contains intermolecular coupling strengths in the format
                {"Ai:Bj":float,...}
    contact_mtx: array, 2D matrix of dimensions (models_a, models_b); contains
                the value of the coupling strength for each pair of positions
    """
    # Dictionary to store couplings between residues
    couplings = {}
    # Iterate over models / columns of MSA A
    # Variable to keep track of the submatrix we need to take from the matrix
    # of coefficients of models of B
    offset_a = 0
    contact_mtx = np.zeros((len(models_a), len(models_b)))
    for i, model_a in enumerate(models_a):
        # Variable to keep track of the submatrix we need to take from the
        # matrix of coefficients from protein A
        end_point_a = 0
        for j, model_b in enumerate(models_b):
            # Select the relevant submatrices of coefficients, this is,
            # the columns in A that indicate coupling to B and vice versa
            # Taking the 2-norm of a vector and a matrix is equivalent. In case
            # of mismatching dimensions, flatten the matrices into vectors and
            # concatenate them
            sel_coefs_a, end_point_a = select_coefs(model_a.coef_, end_point_a)
            sel_coefs_a = sel_coefs_a.flatten()
            sel_coefs_b, _ = select_coefs(model_b.coef_, offset_a)
            sel_coefs_b = sel_coefs_b.flatten()
            coef_vector = np.concatenate((sel_coefs_a, sel_coefs_b))
            # Calculate coupling strength (as the 2-norm of the vector of
            # coefficients) and store the value in the output
            coupling = np.linalg.norm(coef_vector)
            coupling_name = ''.join(['A', str(i), ':', 'B', str(j)])
            couplings[coupling_name] = coupling
            contact_mtx[i][j] = coupling
        # Advance by 20 coefficient columns per position of A — presumably one
        # per amino-acid type; TODO confirm against select_coefs's contract.
        offset_a += 20
    return couplings, contact_mtx | 5,331,601 |
def update_checkout_line(request, checkout, variant_id):
    """Update the line quantities.

    AJAX-only view: non-AJAX requests are redirected to the checkout index.
    On a valid form it saves the new quantity and returns a JSON payload with
    the recomputed line subtotal and checkout totals (HTTP 200); on form
    errors it returns the error dict (HTTP 400).
    """
    if not request.is_ajax():
        return redirect("checkout:index")
    # 404 if the checkout no longer contains this variant.
    checkout_line = get_object_or_404(checkout.lines, variant_id=variant_id)
    discounts = request.discounts
    status = None
    form = ReplaceCheckoutLineForm(
        request.POST,
        checkout=checkout,
        variant=checkout_line.variant,
        discounts=discounts,
    )
    manager = request.extensions
    if form.is_valid():
        form.save()
        checkout.refresh_from_db()
        # Refresh obj from db and confirm that checkout still has this line
        # (saving quantity 0 may have deleted it).
        checkout_line = checkout.lines.filter(variant_id=variant_id).first()
        line_total = zero_taxed_money(currency=settings.DEFAULT_CURRENCY)
        if checkout_line:
            line_total = manager.calculate_checkout_line_total(checkout_line, discounts)
        subtotal = get_display_price(line_total)
        response = {
            "variantId": variant_id,
            "subtotal": format_money(subtotal),
            "total": 0,
            "checkout": {"numItems": checkout.quantity, "numLines": len(checkout)},
        }
        checkout_total = manager.calculate_checkout_subtotal(checkout, discounts)
        checkout_total = get_display_price(checkout_total)
        response["total"] = format_money(checkout_total)
        # Include a converted total only when a local-currency rate exists.
        local_checkout_total = to_local_currency(checkout_total, request.currency)
        if local_checkout_total is not None:
            response["localTotal"] = format_money(local_checkout_total)
        status = 200
    elif request.POST is not None:
        response = {"error": form.errors}
        status = 400
    return JsonResponse(response, status=status) | 5,331,602 |
def merge_df(
    df: Optional[pd.DataFrame], new_df: Optional[pd.DataFrame], how="left"
):
    """
    Join two dataframes. Assumes the dataframes are indexed on datetime.

    Args:
        df: optional dataframe
        new_df: optional dataframe
        how: merge method forwarded to ``pd.merge_ordered`` (the parameter
            was previously accepted but ignored, so the merge silently used
            pandas' default outer join regardless of the caller's request)

    Returns:
        The merged dataframe, indexed and sorted on "datetime". When one
        input is None, the other is returned unchanged.

    Raises:
        ValueError: if the merge produces duplicate datetime index values.
    """
    # Trivial cases: nothing to merge.
    if df is None:
        return new_df
    if new_df is None:
        return df
    try:
        result_df = pd.merge_ordered(
            df.reset_index(),
            new_df.reset_index().drop_duplicates(),
            how=how,
        )
        result_df.set_index("datetime", inplace=True)
        result_df.sort_index(inplace=True)
        # A non-unique index would silently drop/duplicate data downstream,
        # so fail loudly instead.
        if len(result_df.index.unique()) != len(result_df.index):
            LOG.error("Merging did not result in unique indexes. Killing"
                      " to avoid missing data")
            raise ValueError("Issue merging")
    except Exception:
        LOG.error("failed joining dataframes.")
        # Bare raise preserves the original exception and traceback.
        raise
    return result_df
async def test_heartbeat_unload(opp):
    """Test that the heartbeat is deactivated when the last config entry is removed."""
    device = get_device("Office")
    _, mock_entry = await device.setup_entry(opp)
    await opp.async_block_till_done()
    # Remove the only config entry; the heartbeat should be torn down with it.
    await opp.config_entries.async_remove(mock_entry.entry_id)
    await opp.async_block_till_done()
    with patch(DEVICE_PING) as mock_ping:
        # Advance time past one heartbeat interval; a live heartbeat would
        # have pinged the device at least once.
        async_fire_time_changed(
            opp, dt.utcnow() + BroadlinkHeartbeat.HEARTBEAT_INTERVAL
        )
    assert mock_ping.call_count == 0 | 5,331,604 |
def render(a: Optional[str], b: Optional[str], writer: IO[str]) -> None:
    """
    Write the differences between `a` and `b` to `writer`.

    The inputs are treated as marked-up data when they can be deserialized.
    When neither side yields a diff type there is nothing to render and the
    function returns without writing.
    """
    type_a, data_a = deserialize_value(a)
    type_b, data_b = deserialize_value(b)
    diff_type = type_a or type_b
    if diff_type:
        writer.write(str(diff_type(data_a, data_b)))
def load_settings(settings_path: str = CHAOSTOOLKIT_CONFIG_PATH) -> Settings:
    """
    Load chaostoolkit settings as a mapping of key/values, or return `None`
    when the file could not be found or could not be parsed as YAML.
    """
    # Missing file is a debug-level event, not an error.
    if not os.path.exists(settings_path):
        logger.debug(
            "The Chaos Toolkit settings file could not be found at "
            "'{c}'.".format(c=settings_path)
        )
        return None
    with open(settings_path) as f:
        content = f.read()
    try:
        settings = yaml.safe_load(content)
    except yaml.YAMLError as ye:
        logger.error("Failed parsing YAML settings: {}".format(str(ye)))
        return None
    # Cache the parsed settings for later lookups.
    loaded_settings.set(settings)
    return settings
def _is_leaf(tree: DecisionTreeClassifier, node_id: int) -> bool:
    """
    Check whether a node of a decision tree is a leaf.

    A node is considered a leaf exactly when its left-child and right-child
    references coincide.

    :param tree: an `sklearn` decision tree classifier object
    :param node_id: an integer identifying a node in the above tree
    :return: `True` if the node is a leaf, `False` otherwise
    """
    inner = tree.tree_
    return inner.children_left[node_id] == inner.children_right[node_id]
async def commission_reset(bot, context):
    """Resets a given user's post cooldown manually.

    Removes the user (first command argument) from both the in-memory
    advertisement data and the persisted recently-deleted map for this guild,
    then replies with a confirmation.
    """
    advertisement_data = await _get_advertisement_data(bot, context.guild)
    deleted_persistence = data.get(
        bot, __name__, 'recently_deleted', guild_id=context.guild.id, default={})
    user_id = context.arguments[0].id
    if user_id in advertisement_data:
        del advertisement_data[user_id]
    # Persistence keys are stored as strings, hence the str() conversion.
    if str(user_id) in deleted_persistence:
        del deleted_persistence[str(user_id)]
    return Response(
        "Reset that user's advertisement cooldown. Their last advertisement post "
        "will need to be removed manually if necessary.") | 5,331,608 |
def prog_start(prog):
    """
    This function starts a `prog` by sending a command to the slave.
    The program can only be started if the program is currently not running.
    If the slave is offline an error will be raised.
    Parameters
    ----------
        prog: ProgramModel
            A valid `ProgramModel`.
    Raises
    ------
        SlaveOfflineError
        ProgramRunningError
        TypeError:
            If `prog` is not an `ProgramModel`
    """
    ensure_type("prog", prog, ProgramModel)
    if prog.slave.is_online:
        if prog.is_running:
            raise ProgramRunningError(str(prog.name), str(prog.slave.name))
        uuid = uuid4().hex
        # The same uuid is used both to identify the command and to tag the
        # function execution on the slave side.
        cmd = Command(
            uuid=uuid,  # for the command
            pid=prog.id,
            own_uuid=uuid,  # for the function that gets executed
            method="execute",
            path=prog.path,
            arguments=[prog.arguments],
        )
        LOGGER.info(
            "Starting program %s on slave %s",
            prog.name,
            prog.slave.name,
        )
        # send command to the client
        notify_slave(cmd, prog.slave.id)
        # tell webinterface that the program has started
        notify({
            'program_status': 'started',
            'pid': prog.id,
        })
        # create status entry
        ProgramStatusModel(
            program=prog, command_uuid=cmd.uuid, start_time=now()).save()
        # A positive start_time schedules a delayed timeout; zero times out
        # immediately; negative values skip the timeout entirely.
        if prog.start_time > 0:
            LOGGER.debug(
                'started timeout on %s, for %d seconds',
                prog.name,
                prog.start_time,
            )
            LOGGER.debug(type(prog.start_time))
            FSIM_CURRENT_SCHEDULER.spawn(
                prog.start_time,
                timer_timeout_program,
                prog.id,
            )
        elif prog.start_time == 0:
            timer_timeout_program(prog.id)
    else:
        raise SlaveOfflineError(
            str(prog.name),
            "program",
            str(prog.slave.name),
            "start",
        ) | 5,331,609 |
def set_out_string():
    """Set the output string.

    Checks whether an output string has been specified and, if not, derives
    one from the input string and the mode:
    ``<input-without-extension>_<mode>``.
    """
    # `x is None` is the idiomatic identity check; the original
    # `isinstance(x, type(None))` is equivalent but needlessly indirect.
    if opts.output is None:
        opts.output = splitext(opts.input)[0] + '_' + opts.mode
def test_sample_problems_auto_1d_maximization(max_iter, max_response, error_lim, model_type, capsys):
    """
    solve a sample problem in two different conditions.
    test that auto method works for a particular single-covariate (univariate)
    function, then verify that current_best() reports the same optimum.
    """
    # define data
    x_input = [(0.5, 0,
                1)]  # covariates come as a list of tuples (one per covariate: (<initial_guess>, <min>, <max>))
    # define response function (negated Forrester-style test function; its
    # maximum over [0, 1] is near x = 0.75725)
    def f(x):
        return -(6 * x["covar0"].iloc[0] - 2) ** 2 * np.sin(12 * x["covar0"].iloc[0] - 4)
    # initialize class instance
    cc = TuneSession(covars=x_input, model=model_type)
    # run the auto-method
    cc.auto(response_samp_func=f, max_iter=max_iter)
    # assert that exactly max_iter sampling iterations ran
    assert cc.model["covars_sampled_iter"] == max_iter
    # assert that max value found
    THEORETICAL_MAX_COVAR = 0.75725
    assert abs(cc.covars_best_response_value[-1].item() - THEORETICAL_MAX_COVAR) < error_lim
    # run current_best method
    cc.current_best()
    captured = capsys.readouterr()
    assert abs(cc.best["covars"].values[0][0] - THEORETICAL_MAX_COVAR) < error_lim
    assert abs(cc.best["response"].values[0][0] - max_response) < error_lim
    assert cc.best["iteration_when_recorded"] == max_iter | 5,331,611 |
def partition_round(elms, percent, exact=-1, total=100, *args, **kwargs):
    """
    Partitions dataset in a predictable way.
    :param elms: Total Number of elements
    :type elms: Integer
    :param percent: Percentage of problem space to be processed on one device
    :param type: Integer
    :param exact: Flag that states whether percentage of problem space is greater than 50 or not (0 for percent < 50, 1 for percent >= 50); -1 means "derive it from percent"
    :param type: Integer
    :param total: Percentage of total problem space (Default value: 100)
    :type total: Integer
    :return: Number of elements of partitioned dataset
    :rtype: Integer
    """
    # Work in units of 10% for small inputs so the arithmetic below stays
    # meaningful; otherwise in units of 1%.
    # NOTE(review): `elms / 10` is float division under Python 3 — presumably
    # this code was written for Python 2's integer division; confirm.
    if elms < 100:
        factor = 10
        x = elms / 10
    else:
        factor = 1
        x = elms / 100
    if exact == -1:
        exact = 0 if percent > 50 else 1
    if elms % 2 == 0:
        if percent == 50:
            logging.debug(
                "PARTITION: get_slice_values -> multiple_round -> partition_round (if percent=50) returns: %d",
                elms / 2)
            return elms / 2
        elif exact == 0:
            # For the larger share, compute the complement and subtract it.
            b = x * (total - percent) / factor
            return partition_round(elms, total) - b if total != 100 else elms - b
        elif exact == 1:
            logging.debug("PARTITION: get_slice_values -> multiple_round -> partition_round (if exact=1) returns: %d",
                          x * percent / factor)
            return x * percent / factor
    else:
        # Odd element count: recurse on the even count below and give the
        # extra element to the >50% side.
        if percent > 50:
            return partition_round(elms - 1, percent, exact, total)
        else:
            return partition_round(elms - 1, percent, exact, total) + 1 | 5,331,612 |
def heatmap_numeric_w_dependent_variable(df, dependent_variable):
    """
    Takes a dataframe and the name of a dependent variable (str).

    Returns a heatmap showing each independent variable's correlation with
    the dependent variable, sorted by correlation value.
    """
    plt.figure(figsize=(10, 5.5))
    correlations = df.corr()[[dependent_variable]]
    return sns.heatmap(
        correlations.sort_values(by=dependent_variable),
        annot=True,
        cmap="coolwarm",
        vmin=-1,
        vmax=1,
    )
def compute_stats(
    cfg: Dict[str, dict],
    dem: xr.Dataset,
    ref: xr.Dataset,
    final_dh: xr.Dataset,
    display: bool = False,
    final_json_file: str = None,
):
    """
    Compute Stats on final_dh.

    Mutates ``cfg`` in place (fills ``cfg["stats_results"]``) and writes the
    final configuration to ``final_json_file``.

    :param cfg: configuration dictionary
    :type cfg: dict
    :param dem: dem raster
    :type dem: xr.Dataset
    :param ref: reference dem raster to be coregistered to dem raster
    :type ref: xr.Dataset
    :param final_dh: initial alti diff
    :type final_dh: xr.Dataset
    :param display: choose between plot show and plot save
    :type display: boolean
    :param final_json_file: filename of final_cfg
    :type final_json_file: str
    :return: None
    """
    print("\n[Stats]")
    # Reset the stats section of the configuration before recomputing.
    cfg["stats_results"] = {}
    cfg["stats_results"]["images"] = {}
    cfg["stats_results"]["images"]["list"] = []
    print("# DEM diff wave detection")
    stats.wave_detection(cfg, final_dh)
    print("# Altimetric error stats generation")
    stats.alti_diff_stats(
        cfg,
        dem,
        ref,
        final_dh,
        display=display,
        remove_outliers=cfg["stats_opts"]["remove_outliers"],
    )
    # save results
    print("Save final results stats information file:")
    print(final_json_file)
    initialization.save_config_file(final_json_file, cfg) | 5,331,614 |
def _handle_rpm(
    rpm: Rpm,
    universe: str,
    repo_url: str,
    rpm_table: RpmTable,
    all_snapshot_universes: Set[str],
    cfg: DownloadConfig,
) -> Tuple[Rpm, MaybeStorageID, float]:
    """Fetches the specified RPM from the repo DB and downloads it if needed.

    Returns a 3-tuple of the hydrated RPM, storage ID or exception if one was
    caught, and bytes downloaded, if a download occurred (used for reporting).
    Download errors are returned (not raised) so the caller can record them.
    """
    # Read-after-write consitency is not needed here as this is the first read
    # in the execution model. It's possible another concurrent snapshot is
    # running that could race with this read, but that's not critical as this
    # section should be idempotent, and at worst we'll duplicate some work by
    # re-downloading the RPM.
    with cfg.new_db_ctx(readonly=True, force_master=False) as ro_repo_db:
        # If we get no `storage_id` back, there are 3 possibilities:
        #   - `rpm.nevra()` was never seen before.
        #   - `rpm.nevra()` was seen before, but it was hashed with
        #      different algorithm(s), so we MUST download and
        #      compute the canonical checksum to know if its contents
        #      are the same.
        #   - `rpm.nevra()` was seen before, **AND** one of the
        #      prior checksums used `rpm.checksum.algorithms`, but
        #      produced a different hash value. In other words, this
        #      is a `MutableRpmError`, because the same NEVRA must
        #      have had two different contents. We COULD explicitly
        #      detect this error here, and avoid the download.
        #      However, this severe error should be infrequent, and we
        #      actually get valuable information from the download --
        #      this lets us know whether the file is wrong or the
        #      repodata is wrong.
        with timeit(
            partial(log_sample, LogOp.RPM_QUERY, rpm=rpm, universe=universe)
        ):
            (
                storage_id,
                canonical_chk,
            ) = ro_repo_db.get_rpm_storage_id_and_checksum(rpm_table, rpm)
    # If the RPM is already stored with a matching checksum, just update its
    # `.canonical_checksum`. Note that `rpm` was parsed from repodata, and thus
    # it's guaranteed to not yet have a `canonical_checksum`.
    if storage_id:
        rpm = rpm._replace(canonical_checksum=canonical_chk)
        # This is a very common case and thus noisy log, so we write to debug
        log.debug(f"Already stored under {storage_id}: {rpm}")
        # 0 bytes downloaded: the RPM was served from storage.
        return rpm, storage_id, 0
    # We have to download the RPM.
    try:
        with timeit(
            partial(log_sample, LogOp.RPM_DOWNLOAD, rpm=rpm, universe=universe)
        ):
            rpm, storage_id = _download_rpm(rpm, repo_url, rpm_table, cfg)
            return rpm, storage_id, rpm.size
    # RPM checksum validation errors, HTTP errors, etc
    except ReportableError as ex:
        # This "fake" storage_id is stored in `storage_id_to_rpm`, so the
        # error is propagated to sqlite db through the snapshot. It isn't
        # written to repo_db however as that happens in the *_impl function
        return rpm, ex, 0 | 5,331,615 |
def main(options,args):
    """Extract mutations from a multiple sequence alignment.

    NOTE: this is Python 2 code (print statements, dict.has_key,
    string.zfill). Reads an alignment file (clustal or fasta), writes
    'mutations.txt' (per-position mutations relative to the wild-type
    sequence) and 'frequencies.csv' (per-position residue frequencies),
    and optionally plots mutation counts vs. distance from a given atom
    when a PDB file is supplied.
    """
    import PEATDB.sequence_alignment as SA
    if not options.fasta:
        alignment=SA.read_clustal_aln_file(args[0])
    else:
        alignment=SA.read_fasta_aln_file(args[0])
    print sorted(alignment.keys())
    HEWL_seq=alignment[options.wt]
    fd=open('mutations.txt','w')
    fd2=open('frequencies.csv','w')
    aas=convert.keys()
    aas.sort()
    import string
    fd2.write('WT Residue number, %s\n' %(string.join(aas,',')))
    #
    # real_pos tracks the ungapped (sequence) position of the wild type
    real_pos=0
    lines = []
    PDB_mutations={}
    #
    for position in range(0,len(HEWL_seq)):
        res_observed={}
        # Skip alignment columns where the wild type has a gap
        if HEWL_seq[position]=='-':
            continue
        real_pos=real_pos+1
        #print 'Now looking at position',real_pos
        # Count how often each residue type is observed in this column
        for seq in alignment.keys():
            res_pos=alignment[seq][position]
            if not res_observed.has_key(res_pos):
                res_observed[res_pos]=0
            res_observed[res_pos]=res_observed[res_pos]+1
        #
        # Calculate frequencies of observation
        #
        total=sum(res_observed.values())
        text='%3d' %real_pos
        #print res_observed.keys()
        for aa in aas:
            if res_observed.has_key(aa):
                text=text+',%5.1f' %(float(res_observed[aa])/total*100.0)
            else:
                text=text+',%5.1f' %(0)
        fd2.write(text+'\n')
        #
        # -----
        #
        lines += ['%3d  %d\n' %(real_pos,len(res_observed.keys()))]
        for mut in res_observed.keys():
            # Skip gaps, the wild-type residue itself, and unknown residues
            if mut=='-':
                continue
            if mut==HEWL_seq[position]:
                continue
            #
            org_res=HEWL_seq[position]
            new_res=mut
            import string
            if org_res=='X' or new_res=='X':
                continue
            #
            # Within the PDB file?
            #
            if real_pos<options.start_aa or real_pos>options.end_aa:
                pass
            else:
                PDB_residue='%s:%s' %(options.CID,string.zfill(real_pos+options.offset-options.noffset,4))
                if not PDB_mutations.has_key(PDB_residue):
                    PDB_mutations[PDB_residue]=[]
                PDB_mutations[PDB_residue].append(convert[new_res])
            muttext='%s:%s:%s:%s' %(options.CID,string.zfill(real_pos+options.offset-options.noffset,4),convert[org_res],convert[new_res])
            fd.write('%s,%s\n' %(muttext,muttext))
            #print muttext
    fd.close()
    fd2.close()
    #
    # Read PDB file?
    #
    if options.pdbfile:
        import Protool
        PI=Protool.structureIO()
        PI.readpdb(options.pdbfile)
    #
    # Plot the figure?
    #
    if options.plotfigure:
        # Scatter of mutation count vs. CA distance from the reference atom
        xs=[]
        ys=[]
        zs=[]
        for residue in sorted(PDB_mutations.keys()):
            resnum=int(residue.split(':')[1])
            xs.append(resnum)
            ys.append(len(PDB_mutations[residue]))
            zs.append(PI.dist(options.atom,residue+':CA'))
        import pylab
        pylab.plot(zs,ys,'ro')
        pylab.xlabel('Distance from %s' %(options.atom))
        pylab.ylabel('Number of mutations')
        pylab.show()
    return | 5,331,616 |
def test_add_list_s3_repos() -> None:
    """
    Tests adding and listing repos from S3.

    Adding a repo whose config file does not exist must raise
    FileNotFoundError; adding one that exists must show up in `repo list`.
    """
    client = RedunClient()
    s3_client = boto3.client("s3", region_name="us-east-1")
    s3_client.create_bucket(Bucket="example-repo")
    # Only the "potato" repo gets a redun.ini; "carrots" stays missing.
    file = File("s3://example-repo/potato/redun.ini")
    file.write(DEFAULT_REDUN_INI.format(db_uri="sqlite:///other.db"))
    with pytest.raises(FileNotFoundError):
        client.execute(["redun", "repo", "add", "carrots", "s3://example-repo/carrots"])
    client.execute(["redun", "repo", "add", "potato", "s3://example-repo/potato"])
    output = run_command(client, ["redun", "repo", "list"]).split("\n")
    assert "  potato: s3://example-repo/potato" in output | 5,331,617 |
def numpy2seq(Z, val=-1):
    """Convert a padded 2-D array back into a list of variable-length lists.

    Each row of `Z` is truncated at the first occurrence of the padding
    value `val`; rows containing no padding are kept whole.
    """
    sequences = []
    for row in t2n(Z).astype(int):
        pad_positions = np.where(row == val)[0]
        if pad_positions.size == 0:
            sequences.append(row.tolist())
        else:
            sequences.append(row[:min(pad_positions)].tolist())
    return sequences
def winner(board):
    """Determine the game's winner.

    Returns the mark occupying a completed line, TIE when the board is full
    with no winning line, or None while the game is still in progress.
    """
    WAYS_TO_WIN = ((0, 1, 2),
                   (3, 4, 5),
                   (6, 7, 8),
                   (0, 3, 6),
                   (1, 4, 7),
                   (2, 5, 8),
                   (0, 4, 8),
                   (2, 4, 6))
    for a, b, c in WAYS_TO_WIN:
        if board[a] == board[b] == board[c] != EMPTY:
            return board[a]
    if EMPTY not in board:
        return TIE
    return None
def compute_inv_propensity(train_file, A, B):
    """
    Compute inverse propensity values from a sparse training-label file.

    Suggested values for A/B:
      Wikpedia-500K: 0.5/0.4
      Amazon-670K, Amazon-3M: 0.6/2.6
      Others: 0.55/1.5
    """
    labels = data_utils.read_sparse_file(train_file)
    return xc_metrics.compute_inv_propesity(labels, A, B)
def test_ap_wpa2_eap_gpsk(dev, apdev):
    """WPA2-Enterprise connection using EAP-GPSK.

    Covers: initial connection and reauth, forced cipher selection
    (cipher=1 and cipher=2), rejection of an unsupported cipher (cipher=9),
    and authentication failure with an incorrect password.
    """
    params = hostapd.wpa2_eap_params(ssid="test-wpa2-eap")
    hostapd.add_ap(apdev[0]['ifname'], params)
    id = eap_connect(dev[0], apdev[0], "GPSK", "gpsk user",
                     password="abcdefghijklmnop0123456789abcdef")
    eap_reauth(dev[0], "GPSK")
    logger.info("Test forced algorithm selection")
    for phase1 in [ "cipher=1", "cipher=2" ]:
        dev[0].set_network_quoted(id, "phase1", phase1)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=10)
        if ev is None:
            raise Exception("EAP success timed out")
        ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=10)
        if ev is None:
            raise Exception("Association with the AP timed out")
    logger.info("Test failed algorithm negotiation")
    # cipher=9 is not offered by the AP, so EAP must fail.
    dev[0].set_network_quoted(id, "phase1", "cipher=9")
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10)
    if ev is None:
        raise Exception("EAP failure timed out")
    logger.info("Negative test with incorrect password")
    dev[0].request("REMOVE_NETWORK all")
    eap_connect(dev[0], apdev[0], "GPSK", "gpsk user",
                password="ffcdefghijklmnop0123456789abcdef",
                expect_failure=True) | 5,331,621 |
def tanh(x):
    """
    Returns the tanh of x.

    Args:
        x (TensorOp): A tensor.

    Returns:
        TensorOp: The tanh of x.
    """
    return TanhOp(x) | 5,331,622 |
def importBodyCSVDataset(testSplit: float, local_import: bool):
    """Import body dataset as numpy arrays from GitHub if available, or local dataset otherwise.

    Categories are balanced: each label contributes the same number of
    samples (the size of the smallest category).

    Args:
        testSplit (float): Fraction of the dataset reserved for testing.
            Must be between 0.0 and 1.0.
        local_import (bool): When True read the CSV from disk, otherwise
            fetch it from the GitHub raw URL.

    Returns:
        Tuple (x_train, x_test, y_train, y_test, labels) where the x arrays
        hold per-sample 2D keypoint coordinates and the y arrays hold labels.
    """
    assert 0.0 <= testSplit <= 1.0
    datasetPath = DATASETS_PATH / "BodyPose_Dataset.csv"
    datasetURL = "https://raw.githubusercontent.com/ArthurFDLR/pose-classification-kit/master/pose_classification_kit/datasets/BodyPose_Dataset.csv"
    if local_import:
        dataset_df = pd.read_csv(datasetPath)
    else:
        dataset_df = pd.read_csv(datasetURL)
    bodyLabels_df = dataset_df.groupby("label")
    labels = list(dataset_df.label.unique())
    # Find the minimum number of samples accross categories to uniformly distributed sample sets
    total_size_cat = bodyLabels_df.size().min()
    test_size_cat = int(total_size_cat * testSplit)
    train_size_cat = total_size_cat - test_size_cat
    x_train = []
    x_test = []
    y_train = []
    y_test = []
    # Iterate over each labeled group
    for label, group in bodyLabels_df:
        # remove irrelevant columns
        group_array = group.drop(["label", "accuracy"], axis=1).to_numpy()
        np.random.shuffle(group_array)
        # Interleaved (x, y) flat coordinates -> array of (num_points, 2)
        group_array_2D = [np.array((x[::2], x[1::2])).T for x in group_array]
        x_train.append(group_array_2D[:train_size_cat])
        y_train.append([label] * train_size_cat)
        x_test.append(group_array_2D[train_size_cat : train_size_cat + test_size_cat])
        y_test.append([label] * test_size_cat)
    # Concatenate sample sets as numpy arrays
    x_train = np.concatenate(x_train, axis=0)
    x_test = np.concatenate(x_test, axis=0)
    y_train = np.concatenate(y_train, axis=0)
    y_test = np.concatenate(y_test, axis=0)
    return x_train, x_test, y_train, y_test, labels | 5,331,623 |
def download_extract(database_name, data_path):
    """
    Download and extract a database archive.

    Downloads the archive (with a progress bar) unless already present,
    verifies its MD5 checksum, and extracts it under ``data_path``.

    :param database_name: Database name (currently only 'ml-1m' is supported)
    :param data_path: Directory in which to store and extract the archive
    :raises ValueError: if ``database_name`` is not recognized
    """
    DATASET_ML1M = 'ml-1m'
    if database_name == DATASET_ML1M:
        url = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
        hash_code = 'c4d9eecfca2ab87c1945afe126590906'
        extract_path = os.path.join(data_path, 'ml-1m')
        save_path = os.path.join(data_path, 'ml-1m.zip')
        extract_fn = _unzip
    else:
        # Previously an unknown name fell through to a NameError on
        # `extract_path`; fail with a clear message instead.
        raise ValueError('Unknown database name: {}'.format(database_name))
    if os.path.exists(extract_path):
        print('Found {} Data'.format(database_name))
        return
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    if not os.path.exists(save_path):
        with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(database_name)) as pbar:
            urlretrieve(
                url,
                save_path,
                pbar.hook)
    # Use a context manager so the archive handle is closed after hashing
    # (the original leaked the file object opened for the md5 check).
    with open(save_path, 'rb') as archive:
        assert hashlib.md5(archive.read()).hexdigest() == hash_code, \
            '{} file is corrupted.  Remove the file and try again.'.format(save_path)
    os.makedirs(extract_path)
    try:
        extract_fn(save_path, extract_path, database_name, data_path)
    except Exception as err:
        shutil.rmtree(extract_path)  # Remove extraction folder if there is an error
        raise err
    print('Done.')
    # Remove compressed data
def list_systeminsights_hardware_json():
    """print: get_systeminsights_system_info_json.

    Prints the system-insights info for every known system as formatted
    JSON; systems with no info get a stub containing only their id.
    """
    skip, limit = 0, 100
    for system_id in get_systems_id():
        info = get_systeminsights_system_info_json(system_id, limit, skip)
        if len(info) == 0:
            info = {'system_id': system_id}
        print(json.dumps(info, sort_keys=False, indent=4))
def mvstdtprob(a, b, R, df, ieps=1e-5, quadkwds=None, mvstkwds=None):
    """
    Probability of rectangular area of standard t distribution

    assumes mean is zero and R is correlation matrix

    Notes
    -----
    This function does not calculate the estimate of the combined error
    between the underlying multivariate normal probability calculations
    and the integration.

    `quadkwds`, when given, overrides the default keyword arguments passed
    to ``scipy.integrate.quad``.
    """
    kwds = dict(args=(a, b, R, df), epsabs=1e-4, epsrel=1e-2, limit=150)
    # `x is not None` is the idiomatic form of the original `not x is None`.
    if quadkwds is not None:
        kwds.update(quadkwds)
    # Integrate over the chi range carrying all but ~2*ieps of the mass.
    lower, upper = chi.ppf([ieps, 1 - ieps], df)
    res, err = integrate.quad(funbgh2, lower, upper, **kwds)
    # NOTE(review): `mvstkwds` is accepted but never used; kept in the
    # signature for backward compatibility — confirm intended use.
    prob = res * bghfactor(df)
    return prob
def build_dataloaders(
    cfg: CfgNode,
    batch_size: Union[int, Iterable[int]],
) -> Dict[str, Callable]:
    """
    Get iterators of built-in datasets.
    Args:
        cfg: CfgNode instance that requests built-in datasets.
        batch_size (int or sequence): The number of examples in one mini-batch. If batch_size is
            a sequence like (b1, b2, b3), batch sizes of train/valid/test splits will be matched
            to b1/b2/b3, respectively.
    Returns:
        A dictionary with keys 'dataloader', 'trn_loader', 'val_loader', and 'tst_loader'
        (plus the '*_steps_per_epoch' counts). 'dataloader' shuffles and augments
        for training; the other three iterate deterministically.
    Example:
        >>> dataloaders = build_dataloaders(cfg, batch_size=[128, 200, 200])
        >>> for epoch_idx in enumerate(range(10), start=1):
        >>>     rng, data_rng = jax.random.split(rng)
        >>>
        >>>     trn_loader = dataloaders['dataloader'](rng=data_rng)
        >>>     trn_loader = jax_utils.prefetch_to_device(trn_loader, size=2)
        >>>     for batch_idx, batch in enumerate(trn_loader, start=1):
        >>>         (...)
        >>>
        >>>     val_loader = dataloaders['val_loader'](rng=None)
        >>>     val_loader = jax_utils.prefetch_to_device(val_loader, size=2)
        >>>     for batch_idx, batch in enumerate(val_loader, start=1):
        >>>         (...)
    """
    # Resolve train/valid index ranges per dataset family; each branch reads
    # its own cfg section but produces the same (trn_indices, val_indices).
    name = cfg.DATASETS.NAME
    if name in ['MNIST', 'KMNIST', 'FashionMNIST',]:
        indices = list(range(60000))
        if cfg.DATASETS.MNIST.SHUFFLE_INDICES:
            random.Random(cfg.DATASETS.SEED).shuffle(indices)
        trn_indices = indices[cfg.DATASETS.MNIST.TRAIN_INDICES[0] : cfg.DATASETS.MNIST.TRAIN_INDICES[1]]
        val_indices = indices[cfg.DATASETS.MNIST.VALID_INDICES[0] : cfg.DATASETS.MNIST.VALID_INDICES[1]]
    elif name in ['CIFAR10', 'CIFAR100',]:
        indices = list(range(50000))
        if cfg.DATASETS.CIFAR.SHUFFLE_INDICES:
            random.Random(cfg.DATASETS.SEED).shuffle(indices)
        trn_indices = indices[cfg.DATASETS.CIFAR.TRAIN_INDICES[0] : cfg.DATASETS.CIFAR.TRAIN_INDICES[1]]
        val_indices = indices[cfg.DATASETS.CIFAR.VALID_INDICES[0] : cfg.DATASETS.CIFAR.VALID_INDICES[1]]
    elif name in ['TinyImageNet200',]:
        indices = list(range(100000))
        if cfg.DATASETS.TINY.SHUFFLE_INDICES:
            random.Random(cfg.DATASETS.SEED).shuffle(indices)
        trn_indices = indices[cfg.DATASETS.TINY.TRAIN_INDICES[0] : cfg.DATASETS.TINY.TRAIN_INDICES[1]]
        val_indices = indices[cfg.DATASETS.TINY.VALID_INDICES[0] : cfg.DATASETS.TINY.VALID_INDICES[1]]
    elif name in ['ImageNet1k_x32', 'ImageNet1k_x64',]:
        indices = list(range(1281167))
        if cfg.DATASETS.DOWNSAMPLED_IMAGENET.SHUFFLE_INDICES:
            random.Random(cfg.DATASETS.SEED).shuffle(indices)
        trn_indices = indices[cfg.DATASETS.DOWNSAMPLED_IMAGENET.TRAIN_INDICES[0] : cfg.DATASETS.DOWNSAMPLED_IMAGENET.TRAIN_INDICES[1]]
        val_indices = indices[cfg.DATASETS.DOWNSAMPLED_IMAGENET.VALID_INDICES[0] : cfg.DATASETS.DOWNSAMPLED_IMAGENET.VALID_INDICES[1]]
    trn_images = np.load(os.path.join(cfg.DATASETS.ROOT, f'{name}/train_images.npy'))
    trn_labels = np.load(os.path.join(cfg.DATASETS.ROOT, f'{name}/train_labels.npy'))
    tst_images = np.load(os.path.join(cfg.DATASETS.ROOT, f'{name}/test_images.npy'))
    tst_labels = np.load(os.path.join(cfg.DATASETS.ROOT, f'{name}/test_labels.npy'))
    # validation split; with an empty val range, the test split doubles as
    # the validation split
    if val_indices:
        val_images, val_labels = trn_images[val_indices], trn_labels[val_indices]
        trn_images, trn_labels = trn_images[trn_indices], trn_labels[trn_indices]
    else:
        val_images, val_labels = tst_images, tst_labels
        trn_images, trn_labels = trn_images[trn_indices], trn_labels[trn_indices]
    # specify mini-batch settings; a scalar batch_size applies to all splits
    if isinstance(batch_size, int):
        batch_size = (batch_size, batch_size, batch_size,)
    trn_batch_size, val_batch_size, tst_batch_size = batch_size
    if len(val_images) % val_batch_size != 0:
        warnings.warn(f'val_batch_size={val_batch_size} cannot utilize all {len(val_images)} examples.')
    if len(tst_images) % tst_batch_size != 0:
        warnings.warn(f'tst_batch_size={tst_batch_size} cannot utilize all {len(tst_images)} examples.')
    trn_steps_per_epoch = len(trn_images) // trn_batch_size
    val_steps_per_epoch = len(val_images) // val_batch_size
    tst_steps_per_epoch = len(tst_images) // tst_batch_size
    # build dataloaders
    dataloaders = {
        'dataloader': functools.partial(
            _build_dataloader,
            images          = trn_images,
            labels          = trn_labels,
            batch_size      = trn_batch_size,
            steps_per_epoch = trn_steps_per_epoch,
            shuffle         = True,
            transform       = jax.jit(jax.vmap(DATA_AUGMENTATION[cfg.DATASETS.DATA_AUGMENTATION][name])),
        ),
        'trn_loader': functools.partial(
            _build_dataloader,
            images          = trn_images,
            labels          = trn_labels,
            batch_size      = trn_batch_size,
            steps_per_epoch = trn_steps_per_epoch,
            shuffle         = False,
            transform       = jax.jit(jax.vmap(ToTensorTransform())),
        ),
        'val_loader': functools.partial(
            _build_dataloader,
            images          = val_images,
            labels          = val_labels,
            batch_size      = val_batch_size,
            steps_per_epoch = val_steps_per_epoch,
            shuffle         = False,
            transform       = jax.jit(jax.vmap(ToTensorTransform())),
        ),
        'tst_loader': functools.partial(
            _build_dataloader,
            images          = tst_images,
            labels          = tst_labels,
            batch_size      = tst_batch_size,
            steps_per_epoch = tst_steps_per_epoch,
            shuffle         = False,
            transform       = jax.jit(jax.vmap(ToTensorTransform())),
        ),
        'trn_steps_per_epoch': trn_steps_per_epoch,
        'val_steps_per_epoch': val_steps_per_epoch,
        'tst_steps_per_epoch': tst_steps_per_epoch,
    }
    return dataloaders | 5,331,627 |
def get_activity(
    iterator,
    *,
    perspective,
    garbage_class,
    dtype=bool,
    non_sil_alignment_fn=None,
    debug=False,
    use_ArrayIntervall=False,
):
    """
    perspective:
        Example:
            'global_worn' -- global perspective for worn ('P')
            'worn' -- return perspective for each speaker ('P01', ...)
            'array' -- return perspective for each array ('U01', ...)
    garbage_class: True, False, None
        True: garbage_class is always one
        False: garbage_class is always zero
        None: the number of classes is 4 and not 5
    dtype: numpy dtype of the activity arrays (default: bool)
    non_sil_alignment_fn: None or a function with the signature:
            value = non_sil_alignment_fn(ex, perspective_mic_array)
        where
            ex is one example in iterator
            perspective_mic_array is in ['U01', ..., 'P01', ..., 'P']
            value is a 1d array indicating if at a sample the source is active
            or not
    use_ArrayIntervall: ArrayIntervall is a special datatype to reduce
        memory usage
    returns:
        dict[session_id][mic_perspective][speaker_id] = array(dtype=bool)
        session_id e.g.: 'S02', ...
        mic_perspective e.g.: 'P', 'P05', 'U01', ...
        speaker_id e.g.: 'P05', ...

    >>> from pb_chime5.database.chime5 import Chime5
    >>> import textwrap
    >>> db = Chime5()
    >>> def display_activity(activity):
    ...     print(tuple(activity.keys()))
    ...     print(' '*2, tuple(activity['S02'].keys()))
    ...     print(' '*4, tuple(activity['S02']['P'].keys()))
    ...     print(' '*6, activity['S02']['P']['P05'])
    ...     print(' '*6, activity['S02']['P']['Noise'])
    >>> def display_activity(activity, indent=0):
    ...     indent_print = lambda x: print(textwrap.indent(str(x), ' '*indent))
    ...     if isinstance(activity, dict):
    ...         for i, (k, v) in enumerate(activity.items()):
    ...             if i == 0 or k in ['Noise']:
    ...                 indent_print(f'{k}:')
    ...                 display_activity(v, indent=indent+2)
    ...             else:
    ...                 indent_print(f'{k}: ...')
    ...     else:
    ...         indent_print(activity)
    >>> activity = get_activity(db.get_datasets('S02'), perspective='global_worn', garbage_class=True)
    >>> display_activity(activity)
    S02:
      P:
        P05:
          [False False False ... False False False]
        P06: ...
        P07: ...
        P08: ...
        Noise:
          [ True  True  True ...  True  True  True]
    >>> activity = get_activity(db.get_datasets('S02'), perspective='worn', garbage_class=False)
    >>> display_activity(activity)
    S02:
      P05:
        P05:
          [False False False ... False False False]
        P06: ...
        P07: ...
        P08: ...
        Noise:
          [False False False ... False False False]
      P06: ...
      P07: ...
      P08: ...
    >>> activity = get_activity(db.get_datasets('S02'), perspective='array', garbage_class=None)
    >>> display_activity(activity)
    S02:
      U01:
        P05:
          [False False False ... False False False]
        P06: ...
        P07: ...
        P08: ...
      U02: ...
      U03: ...
      U04: ...
      U05: ...
      U06: ...
    """
    dict_it_S = iterator.groupby(lambda ex: ex['session_id'])
    # Dispatcher is a dict with better KeyErrors
    all_activity = Dispatcher()

    for session_id, it_S in dict_it_S.items():
        # Resolve the requested perspective into a concrete list of
        # microphone/worn identifiers for this session.
        if perspective == 'worn':
            perspective_tmp = mapping.session_to_speakers[session_id]
        elif perspective == 'global_worn':
            perspective_tmp = ['P']  # Always from target speaker
        elif perspective == 'array':
            # The mapping considers missing arrays
            perspective_tmp = mapping.session_to_arrays[session_id]
        else:
            perspective_tmp = perspective
        if not isinstance(perspective_tmp, (tuple, list)):
            perspective_tmp = [perspective_tmp, ]

        speaker_ids = mapping.session_to_speakers[session_id]

        if use_ArrayIntervall:
            # ArrayIntervall only supports boolean activity masks.
            # NOTE: `np.bool` was a deprecated alias of the builtin `bool`
            # and was removed in NumPy 1.24, hence the builtin is used here.
            assert dtype == bool, dtype
            zeros = ArrayIntervall

            def ones(shape):
                arr = zeros(shape=shape)
                arr[:] = 1
                return arr
        else:
            import functools
            zeros = functools.partial(np.zeros, dtype=dtype)
            ones = functools.partial(np.ones, dtype=dtype)

        # One zero-initialized activity track per (perspective, speaker).
        all_activity[session_id] = Dispatcher({
            p: Dispatcher({
                s: zeros(shape=[mapping.session_array_to_num_samples[f'{session_id}_{p}']])
                # s: ArrayIntervall(shape=[num_samples])
                for s in speaker_ids
            })
            for p in perspective_tmp
        })

        # Optionally add one (or several) constant noise/garbage tracks.
        if garbage_class is True:
            for p in perspective_tmp:
                num_samples = mapping.session_array_to_num_samples[
                    f'{session_id}_{p}']
                all_activity[session_id][p]['Noise'] = ones(
                    shape=[num_samples],
                )
        elif garbage_class is False:
            for p in perspective_tmp:
                num_samples = mapping.session_array_to_num_samples[
                    f'{session_id}_{p}']
                all_activity[session_id][p]['Noise'] = zeros(
                    shape=[num_samples]
                )
        elif garbage_class is None:
            pass
        elif isinstance(garbage_class, int) and garbage_class > 0:
            for noise_idx in range(garbage_class):
                for p in perspective_tmp:
                    num_samples = mapping.session_array_to_num_samples[
                        f'{session_id}_{p}'
                    ]
                    all_activity[session_id][p][f'Noise{noise_idx}'] = ones(
                        shape=[num_samples]
                    )
        else:
            raise ValueError(garbage_class)

        missing_count = 0
        for ex in it_S:
            for pers in perspective_tmp:
                if ex['transcription'] == '[redacted]':
                    continue

                target_speaker = ex['speaker_id']
                # example_id = ex['example_id']
                if pers == 'P':
                    perspective_mic_array = target_speaker
                else:
                    perspective_mic_array = pers

                if perspective_mic_array.startswith('P'):
                    start = ex['start']['worn'][perspective_mic_array]
                    end = ex['end']['worn'][perspective_mic_array]
                else:
                    if not perspective_mic_array in ex['audio_path']['observation']:
                        continue
                    start = ex['start']['observation'][perspective_mic_array]
                    end = ex['end']['observation'][perspective_mic_array]

                if non_sil_alignment_fn is None:
                    value = 1
                else:
                    value = non_sil_alignment_fn(ex, perspective_mic_array)
                    # BUG FIX: this counter was previously outside the `else`
                    # branch and used the identity test `value is 1`.  That
                    # made the sanity check below fire for every example
                    # whenever no alignment function was given (the default
                    # `value = 1` was counted as "missing").  Count only the
                    # scalar fallback returned by the alignment function.
                    if isinstance(value, int) and value == 1:
                        missing_count += 1

                if debug:
                    all_activity[session_id][pers][target_speaker][start:end] += value
                else:
                    all_activity[session_id][pers][target_speaker][start:end] = value

        if missing_count > len(it_S) // 2:
            raise RuntimeError(
                f'Something went wrong.\n'
                f'Expected {len(it_S) * len(perspective_tmp)} times a '
                f'finetuned annotation for session {session_id}, but '
                f'{missing_count} times they are missing.\n'
                f'Expect that at least {len(it_S) // 2} finetuned annotations '
                f'are available, when non_sil_alignment_fn is given.\n'
                f'Otherwise assume something went wrong.'
            )
        del it_S
    return all_activity
def dropNested(text, openDelim, closeDelim):
    """
    A matching function for nested expressions, e.g. namespaces and tables.

    Removes every (possibly nested) region of *text* delimited by the regex
    pair ``openDelim``/``closeDelim`` and returns the remaining text.

    :param text: input string to strip.
    :param openDelim: regex source for the opening delimiter.
    :param closeDelim: regex source for the closing delimiter.
    :return: *text* with all matched spans removed (delegated to dropSpans).
    """
    openRE = re.compile(openDelim, re.IGNORECASE)
    closeRE = re.compile(closeDelim, re.IGNORECASE)
    # partition text in separate blocks { } { }
    spans = []  # pairs (s, e) for each partition
    nest = 0  # nesting level
    start = openRE.search(text, 0)
    if not start:
        # No opening delimiter at all: nothing to drop.
        return text
    end = closeRE.search(text, start.end())
    # NOTE: `next` shadows the builtin of the same name inside this function.
    next = start
    while end:
        next = openRE.search(text, next.end())
        if not next:  # termination
            while nest:  # close all pending
                nest -= 1
                end0 = closeRE.search(text, end.end())
                if end0:
                    end = end0
                else:
                    break
            spans.append((start.start(), end.end()))
            break
        while end.end() < next.start():
            # { } {
            if nest:
                nest -= 1
                # try closing more
                last = end.end()
                end = closeRE.search(text, end.end())
                if not end:  # unbalanced
                    # Collapse everything seen so far into a single span
                    # ending at the last successful close.
                    if spans:
                        span = (spans[0][0], last)
                    else:
                        span = (start.start(), last)
                    spans = [span]
                    break
            else:
                spans.append((start.start(), end.end()))
                # advance start, find next close
                start = next
                end = closeRE.search(text, next.end())
                break  # { }
        if next != start:
            # { { }
            nest += 1
    # collect text outside partitions
    return dropSpans(spans, text)
def is_rotational(block_device: str) -> bool:
    """
    Checks if given block device is "rotational" (spinning rust) or
    solid state block device.

    Reads the kernel's ``/sys/block/<dev>/queue/rotational`` flag; if the
    path is a partition rather than a whole disk, retries with the parent
    disk.

    :param block_device: Path to block device to check
    :return: True if block device is a rotational block device,
             false otherwise
    """
    device_name = os.path.basename(block_device)
    rotational_file = f'/sys/block/{device_name}/queue/rotational'
    if not os.path.exists(rotational_file):
        # Maybe given path is not the base block device
        # -> Get disk for given block devices and try again
        disk = base_disk_for_block_device(block_device)
        if disk != block_device:
            return is_rotational(disk)
        raise Exception('Could not find file {}!'.format(rotational_file))
    with open(rotational_file, 'r') as flag_file:
        flag = flag_file.read(1)
    if flag in ('0', '1'):
        return flag == '1'
    raise Exception('Unknown value in {}!'.format(rotational_file))
def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        # Render the full traceback text for the currently handled exception.
        return ''.join(
            traceback.format_exception(exc_type, exc_value, exc_tb, limit))
    finally:
        # Break the reference cycle created by holding the traceback object.
        exc_type = exc_value = exc_tb = None
def make_dpl_from_construct(construct, showlabels=None):
    """Create a list suitable for input into dnaplotlib for plotting constructs.

    Inputs:
        construct: a DNA_construct object
        showlabels: list of part types to show labels for.
                    For example, [AttachmentSite, Terminator]
    Returns:
        list of dnaplotlib part design dicts (one or more per part).
    """
    # TODO make showlabels more general
    if showlabels is None:
        showlabels = []
    outdesign = []
    if HAVE_MATPLOTLIB:
        # One distinct colormap entry per part (times two for secondary colors).
        cmap = cm.Set1(range(len(construct.parts_list) * 2))
    pind = 0
    for part in construct.parts_list:
        pcolor = part.color
        pcolor2 = part.color2
        # BUG FIX: c1/c2 were previously only assigned when matplotlib is
        # available, so the make_dpl_from_part call below raised NameError
        # otherwise.  Fall back to None -- presumably make_dpl_from_part
        # applies its own default colors then; TODO confirm.
        c1 = None
        c2 = None
        if HAVE_MATPLOTLIB:
            if type(pcolor) == int:
                # part.color is an explicit colormap index
                c1 = cmap[pcolor][:-1]
            else:
                c1 = cmap[pind][:-1]
            if type(pcolor2) == int:
                c2 = cmap[pcolor2][:-1]
            else:
                # no explicit secondary color: pick a random one
                c2 = cmap[random.choice(list(range(len(construct.parts_list))))][:-1]
        showlabel = type(part) in showlabels
        outdesign += make_dpl_from_part(part, direction=part.direction == "forward",
                                        color=c1, color2=c2, showlabel=showlabel)
        pind += 1
    return outdesign
def load_config(path: str, env=None):
    """
    Load a YAML config file and replace variables from the environment

    Args:
        path (str): The resource path in the form of `dir/file` or `package:dir/file`
        env: optional mapping used for variable expansion; defaults to os.environ

    Returns:
        The configuration tree with variable references replaced, or `False` if the
        file is not found
    """
    try:
        with load_resource(path) as resource:
            raw_tree = yaml.load(resource, Loader=yaml.FullLoader)
    except FileNotFoundError:
        return False
    return expand_tree_variables(raw_tree, env or os.environ)
def ls(manager: Manager):
    """List network names, versions, and optionally, descriptions."""
    for network in manager.list_networks():
        click.echo(f'{network.id}\t{network.name}\t{network.version}')
def update_service(
    *, db_session: Session = Depends(get_db), service_id: PrimaryKey, service_in: ServiceUpdate
):
    """Update an existing service."""
    # Fetch the stored record first; a missing id is a 404.
    stored_service = get(db_session=db_session, service_id=service_id)
    if not stored_service:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=[{"msg": "A service with this id does not exist."}],
        )
    try:
        return update(db_session=db_session, service=stored_service, service_in=service_in)
    except IntegrityError:
        # Unique-name constraint violation -> surface as a field-level error.
        raise ValidationError(
            [ErrorWrapper(ExistsError(msg="A service with this name already exists."), loc="name")],
            model=ServiceUpdate,
        )
def plotsetup():
    """Apply the project's default matplotlib rc settings.

    Sets a 12 pt sans-serif font for general text, axis labels, and both
    x/y tick labels.  Mutates the global ``mpl.rcParams`` in place and
    returns None.
    """
    mpl.rcParams.update({
        'font.size': 12.,
        'font.family': 'sans-serif',
        'axes.labelsize': 12.,
        'xtick.labelsize': 12.,
        'ytick.labelsize': 12.,
    })
def import_flow_by_ref(flow_strref):
    """Return flow class by flow string reference."""
    # A reference looks like "<app_label>/<dotted.path.to.Flow>".
    app_label, flow_path = flow_strref.split('/')
    dotted_path = '{}.{}'.format(get_app_package(app_label), flow_path)
    return import_string(dotted_path)
def display_cusum(
    df: pd.DataFrame,
    target: str,
    threshold: float,
    drift: float,
    external_axes: Optional[List[plt.Axes]] = None,
):
    """Cumulative sum algorithm (CUSUM) to detect abrupt changes in data

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe
    target : str
        Column of data to look at
    threshold : float
        Threshold value
    drift : float
        Drift parameter
    external_axes : Optional[List[plt.Axes]], optional
        External axes (2 axes are expected in the list), by default None
    """
    target_series = df[target].values
    # The code for this plot was adapted from detecta's sources because at the
    # time of writing this detect_cusum had a bug related to external axes support.
    # see https://github.com/demotu/detecta/pull/3
    tap, tan = 0, 0
    # ta: alarm indices, tai: change-start indices, taf: change-end indices
    ta, tai, taf, _ = detect_cusum(
        x=target_series,
        threshold=threshold,
        drift=drift,
        ending=True,
        show=False,
    )
    # Thus some variable names are left unchanged and unreadable...
    # Re-run the CUSUM recursion locally to obtain gp/gn for the second plot.
    gp, gn = np.zeros(target_series.size), np.zeros(target_series.size)
    for i in range(1, target_series.size):
        s = target_series[i] - target_series[i - 1]
        gp[i] = gp[i - 1] + s - drift  # cumulative sum for + change
        gn[i] = gn[i - 1] - s - drift  # cumulative sum for - change
        if gp[i] < 0:
            gp[i], tap = 0, i
        if gn[i] < 0:
            gn[i], tan = 0, i
        if gp[i] > threshold or gn[i] > threshold:  # change detected!
            ta = np.append(ta, i)  # alarm index
            tai = np.append(tai, tap if gp[i] > threshold else tan)  # start
            gp[i], gn[i] = 0, 0  # reset alarm
    # Either create a fresh 2-row figure or draw on caller-supplied axes.
    if external_axes is None:
        _, axes = plt.subplots(
            2,
            1,
            sharex=True,
            figsize=plot_autoscale(),
            dpi=PLOT_DPI,
        )
        (ax1, ax2) = axes
    else:
        if len(external_axes) != 2:
            logger.error("Expected list of two axis items.")
            console.print("[red]Expected list of 2 axis items./n[/red]")
            return
        (ax1, ax2) = external_axes
    target_series_indexes = range(df[target].size)
    ax1.plot(target_series_indexes, target_series)
    # Only mark start/end/alarm points when at least one change was detected.
    if len(ta):
        ax1.plot(
            tai,
            target_series[tai],
            ">",
            markerfacecolor=theme.up_color,
            markersize=5,
            label="Start",
        )
        ax1.plot(
            taf,
            target_series[taf],
            "<",
            markerfacecolor=theme.down_color,
            markersize=5,
            label="Ending",
        )
        ax1.plot(
            ta,
            target_series[ta],
            "o",
            markerfacecolor=theme.get_colors()[-1],
            markeredgecolor=theme.get_colors()[-2],
            markeredgewidth=1,
            markersize=3,
            label="Alarm",
        )
        ax1.legend()
    ax1.set_xlim(-0.01 * target_series.size, target_series.size * 1.01 - 1)
    ax1.set_ylabel("Amplitude")
    # Pad the y-limits by 10% of the finite-value range.
    ymin, ymax = (
        target_series[np.isfinite(target_series)].min(),
        target_series[np.isfinite(target_series)].max(),
    )
    y_range = ymax - ymin if ymax > ymin else 1
    ax1.set_ylim(ymin - 0.1 * y_range, ymax + 0.1 * y_range)
    ax1.set_title(
        "Time series and detected changes "
        + f"(threshold= {threshold:.3g}, drift= {drift:.3g}): N changes = {len(tai)}",
        fontsize=10,
    )
    theme.style_primary_axis(ax1)
    ax2.plot(target_series_indexes, gp, label="+")
    ax2.plot(target_series_indexes, gn, label="-")
    ax2.set_xlim(-0.01 * target_series.size, target_series.size * 1.01 - 1)
    ax2.set_xlabel("Data points")
    ax2.set_ylim(-0.01 * threshold, 1.1 * threshold)
    ax2.axhline(threshold)
    theme.style_primary_axis(ax2)
    ax2.set_title(
        "Time series of the cumulative sums of positive and negative changes",
        fontsize=10,
    )
    ax2.legend()
    # Only render/flush the figure when we own it.
    if external_axes is None:
        theme.visualize_output()
def _escape_value(value):
"""Escape a value."""
value = value.replace(b"\\", b"\\\\")
value = value.replace(b"\n", b"\\n")
value = value.replace(b"\t", b"\\t")
value = value.replace(b'"', b'\\"')
return value | 5,331,639 |
def find(*objects: Iterable[object]):
    """Sometimes you know the inputs and outputs for a procedure, but you don't remember the name.
    methodfinder.find tries to find the name.
    >>> import methodfinder
    >>> import itertools
    >>> methodfinder.find([1,2,3]) == 6
    sum([1, 2, 3])
    >>> methodfinder.find('1 + 1') == 2
    eval('1 + 1')
    >>> methodfinder.find(0.0) == 1.0
    math.cos(0.0)
    math.cosh(0.0)
    math.erfc(0.0)
    math.exp(0.0)
    >>> methodfinder.find(0) == 1
    0.denominator
    math.factorial(0)
    >>> import numpy as np
    >>> methodfinder.find(np, 3) == np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    numpy.eye(3)
    numpy.identity(3)
    """
    # Just call the wrapper function so that the == sign can be used to specify
    # the desired result.  The actual search happens inside _Foo.__eq__,
    # which compares candidate method results against the right-hand side.
    return _Foo(objects)
def join_returns(cfg, arg_names, function_ast=None):
    """Joins multiple returns in a CFG into a single block

    Given a CFG with multiple return statements, this function will replace the
    returns by gotos to a common join block.

    :param cfg: the control-flow graph to rewrite (CfgSimple-compatible).
    :param arg_names: names for the join block's arguments; every return in
        the CFG must yield exactly this many values.
    :param function_ast: AST node attached to the synthesized IR nodes,
        used for diagnostics/back-references (may be None).
    :return: tuple of (rewritten cfg, list of join-block ir.Argument nodes).
    """
    # One argument per returned value; the join block receives them via gotos.
    join_args = [ir.Argument(function_ast, info=n, name=n) for n in arg_names]
    join = ir.Block(function_ast, join_args, info="MERGE RETURNS")
    returns = list(of_type[ir.Return](cfg.graph.nodes))
    if returns:
        # Only add the join block when there is at least one return to merge.
        cfg += CfgSimple.statement(join)
        # Replace returns with gotos to joining block
        for ret in returns:
            assert len(ret.returns) == len(arg_names), (ret.returns, arg_names)
            goto = ir.Goto(ret.ast_node, join, ret.returns)
            cfg = cfg.replace(ret, goto)
            cfg = cfg + (goto, join)
    return cfg, join_args
def _cmor_reformat(config, obs_list):
    """Run the cmorization routine.

    Iterates over the requested RAWOBS datasets (grouped by tier) and runs
    the matching NCL or Python cmorizer script for each one, writing output
    under ``config['output_dir']/<tier>/<dataset>``.

    Arguments:
        config: configuration dict providing ``rootpath.RAWOBS``,
            ``output_dir``, ``run_dir`` and ``log_level``.
        obs_list: names of the observational datasets to cmorize.

    Raises:
        Exception: if no cmorizer script (``.ncl`` or ``.py``) could be
            found for one or more requested datasets.
    """
    logger.info("Running the CMORization scripts.")
    # master directory
    raw_obs = config["rootpath"]["RAWOBS"][0]
    # set the reformat scripts dir
    reformat_scripts = os.path.dirname(os.path.abspath(__file__))
    logger.info("Using cmorizer scripts repository: %s", reformat_scripts)
    # datasets dictionary of Tier keys
    datasets = _assemble_datasets(raw_obs, obs_list)
    if not datasets:
        logger.warning("Check input: could not find required %s in %s",
                       obs_list, raw_obs)
    logger.info("Processing datasets %s", datasets)
    # loop through tier/datasets to be cmorized
    failed_datasets = []
    for tier in datasets:
        for dataset in datasets[tier]:
            reformat_script_root = os.path.join(
                reformat_scripts,
                'cmorize_obs_' + dataset.lower().replace('-', '_'),
            )
            # in-data dir; build out-dir tree
            in_data_dir = os.path.join(raw_obs, tier, dataset)
            logger.info("Input data from: %s", in_data_dir)
            out_data_dir = os.path.join(config['output_dir'], tier, dataset)
            logger.info("Output will be written to: %s", out_data_dir)
            if not os.path.isdir(out_data_dir):
                os.makedirs(out_data_dir)
            # all operations are done in the working dir now
            os.chdir(out_data_dir)
            # figure out what language the script is in
            logger.info("Reformat script: %s", reformat_script_root)
            if os.path.isfile(reformat_script_root + '.ncl'):
                reformat_script = reformat_script_root + '.ncl'
                _run_ncl_script(
                    in_data_dir,
                    out_data_dir,
                    config['run_dir'],
                    dataset,
                    reformat_script,
                    config['log_level'],
                )
            elif os.path.isfile(reformat_script_root + '.py'):
                _run_pyt_script(in_data_dir, out_data_dir, dataset, config)
            else:
                logger.error('Could not find cmorizer for %s', dataset)
                failed_datasets.append(dataset)
    # BUG FIX: the raise below was unconditional, aborting the run even when
    # every dataset had a cmorizer.  Only fail when at least one is missing.
    if failed_datasets:
        raise Exception('Could not find cmorizers for %s datasets ' %
                        ' '.join(failed_datasets))
def write_block_summary_report(course_data):
    """
    Generate a CSV file containing a summary of the xBlock usage

    Arguments:
        course_data (list of dicts): a list of course_data objects

    Returns:
        Nothing
    """
    (block_summary_counts, unique_course_counts) = _get_block_summary_totals(course_data)

    # Open and start writing the data into the CSV.
    # BUG FIX: on Python 3, csv.writer writes str and requires a text-mode
    # file opened with newline='' -- opening in 'wb' raised TypeError.
    with open('xblock_summary_counts.csv', 'w', newline='') as csvfile:
        summary_writer = csv.writer(csvfile, delimiter=',',
                                    quotechar='"', quoting=csv.QUOTE_MINIMAL)
        summary_writer.writerow(['XBLOCK_NAME', 'UNIQUE_COURSES', 'NUM_TOTAL_INSTANCES'])
        for block_type in sorted(block_summary_counts):
            block_count = block_summary_counts.get(block_type)
            summary_writer.writerow([block_type, str(unique_course_counts[block_type]), str(block_count)])
    # The with-block closes the file; the previous explicit close() was redundant.
def save_json(object: Any, path: Union[str, Path]) -> None:
    """Serialize *object* as JSON and write it to *path*.

    Parent directories are created as needed; an existing file is overwritten.
    """
    import json
    target = Path(path).resolve()
    target.parent.mkdir(parents=True, exist_ok=True)
    with open(str(target), 'w') as handle:
        json.dump(object, handle)
def test_command_missing_args(run_line):
    """
    Runs get-identities without values, confirms exit_code 2
    """
    command = "globus get-identities"
    outcome = run_line(command, assert_exit_code=2)
    assert "Missing argument" in outcome.stderr
def get_all():
    """
    Returns list of all tweets from this server.
    """
    payload = [item.to_dict() for item in tweet.get_all()]
    return jsonify(payload)
def proportion_sig_beg_to_start_of_ground(ds):
    """
    The total energy from signal beginning to the start of the ground peak,
    normalized by total energy of the waveform. Ground peak assumed to be the last peak.

    :param ds: xarray Dataset with ``rec_wf_sample_dist``, ``processed_wf``
        and ``sig_begin_dist`` variables -- presumably GLAS waveform data;
        verify against caller.
    :return: per-record fraction (DataArray-like) of waveform energy between
        signal begin and the start of the ground peak.
    """
    from carbonplan_trace.v1.glas_preprocess import select_valid_area  # avoid circular import

    # Adds the 'start_of_ground_peak_dist' coordinate/variable used below.
    ds = get_dist_metric_value(ds, metric='start_of_ground_peak_dist')
    # the processed wf is from sig beg to sig end, select sig beg to ground peak
    sig_beg_to_ground = select_valid_area(
        bins=ds.rec_wf_sample_dist,
        wf=ds.processed_wf,
        signal_begin_dist=ds.sig_begin_dist,
        signal_end_dist=ds.start_of_ground_peak_dist,
    )
    # make sure dimensions matches up
    dims = ds.processed_wf.dims
    sig_beg_to_ground = sig_beg_to_ground.transpose(dims[0], dims[1])
    # total energy of the smoothed waveform
    total = ds.processed_wf.sum(dim="rec_bin")
    return sig_beg_to_ground.sum(dim="rec_bin") / total
def post_file(url, file_path, username, password):
    """Post an image file to the classifier.

    Arguments:
        url: endpoint to POST the file to.
        file_path: path of the image file to upload.
        username: optional basic-auth user; when falsy no auth is sent.
        password: basic-auth password (only used together with username).

    Returns:
        The requests.Response on HTTP 200, otherwise None.
    """
    kwargs = {}
    if username:
        kwargs['auth'] = requests.auth.HTTPBasicAuth(username, password)
    # BUG FIX: the file handle was previously opened without ever being
    # closed (resource leak); a context manager closes it after the upload.
    with open(file_path, 'rb') as file_obj:
        response = requests.post(
            url,
            files={'file': file_obj},
            **kwargs
        )
    if response.status_code == HTTP_OK:
        return response
    return None
def bias_init_with_prob(prior_prob):
    """Initialize a conv/fc bias value according to a given prior probability.

    Returns the logit b such that sigmoid(b) == prior_prob.
    """
    odds_against = (1 - prior_prob) / prior_prob
    return float(-np.log(odds_against))
def parse_number(s, start_position):
    """
    If an integer or float begins at the specified position in the
    given string, then return a tuple C{(val, end_position)}
    containing the value of the number and the position where it ends.
    Otherwise, raise a L{ParseError}.
    """
    match = _PARSE_NUMBER_VALUE.match(s, start_position)
    # Require a match that captured either the integer or the float group.
    if match is None or not (match.group(1) or match.group(2)):
        raise ParseError('number', start_position)
    if match.group(2):
        # Group 2 is the float-specific part of the pattern.
        return float(match.group()), match.end()
    return int(match.group()), match.end()
def training_data_provider(train_s, train_t):
    """
    Concatenates two lists containing adata files

    # Parameters
        train_s: `~anndata.AnnData`
            Annotated data matrix.
        train_t: `~anndata.AnnData`
            Annotated data matrix.

    # Returns
        Concatenated Annotated data matrix.

    # Example
    ```python
    import scgen
    import anndata
    train_data = anndata.read("./data/train_kang.h5ad")
    test_data = anndata.read("./data/test.h5ad")
    whole_data = training_data_provider(train_data, test_data)
    ```
    """
    def _collect(adata_list):
        # Gather dense matrices plus flattened condition/cell_type columns
        # from each AnnData in the list.
        matrices = []
        conditions = []
        cell_types = []
        for adata in adata_list:
            matrices.append(adata.X.A)
            conditions.extend(adata.obs["condition"].tolist())
            cell_types.extend(adata.obs["cell_type"].tolist())
        return np.concatenate(matrices), conditions, cell_types

    source_X, source_cond, source_types = _collect(train_s)
    target_X, target_cond, target_types = _collect(train_t)

    combined = anndata.AnnData(np.concatenate([source_X, target_X]))
    combined.obs["condition"] = source_cond + target_cond
    combined.obs["cell_type"] = source_types + target_types
    return combined
def _build_trees(base_estimator, estimator_params, params, X, y, sample_weight,
                 tree_state, n_trees, verbose=0, class_weight=None,
                 bootstrap=False):
    """ Fit a single tree in parallel

    :param base_estimator: template estimator (possibly a remote handle --
        _get_value resolves it) cloned for this tree.
    :param estimator_params: names of params copied onto the clone.
    :param params: parameter values for the clone.
    :param X, y: training data and targets.
    :param sample_weight: optional per-sample weights, or None.
    :param tree_state: random seed for this tree (also seeds bootstrap draws).
    :param n_trees: total number of trees (unused here; kept for API parity).
    :param class_weight: 'subsample'/'balanced_subsample' rescale weights per
        bootstrap draw; other values are ignored here.
    :param bootstrap: when True, train on a bootstrap resample expressed via
        per-sample weights instead of row copies.
    :return: the fitted tree.
    """
    tree = _make_estimator(
        _get_value(base_estimator), estimator_params,
        params=params, random_state=tree_state
    )
    if bootstrap:
        n_samples = X.shape[0]
        if sample_weight is None:
            curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
        else:
            curr_sample_weight = sample_weight.copy()
        # Encode the bootstrap sample as multiplicative weights: each row's
        # weight is scaled by how often it was drawn.
        indices = _generate_sample_indices(tree_state, n_samples)
        sample_counts = np.bincount(indices, minlength=n_samples)
        curr_sample_weight *= sample_counts
        if class_weight == 'subsample':
            # 'auto' is deprecated upstream; silence the warning deliberately.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', DeprecationWarning)
                curr_sample_weight *= compute_sample_weight('auto', y, indices)
        elif class_weight == 'balanced_subsample':
            curr_sample_weight *= compute_sample_weight('balanced', y, indices)
        tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
    else:
        tree.fit(X, y, sample_weight=sample_weight, check_input=False)
    return tree
def _add_to_arguments(arg_dict, argument_name, argument_value):
"""Add a variable to the argument dict that will be requested to the event generation API.
Args:
arg_dict: dictionary with the arguments.
argument_name: name of the variable in the API (key in the dict).
argument_value: value of the variable in the API (value in the dict).
"""
if argument_value is not None:
arg_dict[argument_name] = str(argument_value) | 5,331,653 |
def select_privilege():
    """Provide a select Privilege model for testing."""
    table_object = DatabaseObject(name="one_table", type=DatabaseObjectType.TABLE)
    return Privilege(database_object=table_object, action=Action.SELECT)
def plot_gaia_sources_on_survey(
    tpf,
    target_gaiaid,
    gaia_sources=None,
    fov_rad=None,
    depth=0.0,
    kmax=1.0,
    sap_mask="pipeline",
    survey="DSS2 Red",
    verbose=True,
    ax=None,
    outline_color="C6",  # pink
    figsize=None,
    pix_scale=TESS_pix_scale,
    **mask_kwargs,
):
    """Plot (superpose) Gaia sources on archival image

    Parameters
    ----------
    tpf : target pixel file object (lightkurve-style; provides .flux, .ra,
        .dec and .hdu) -- presumably TESS; verify against caller
    target_gaiaid : int
        Gaia DR2 source id of the target (required)
    gaia_sources : pd.DataFrame
        gaia sources table
    fov_rad : astropy.unit
        FOV radius; derived from the TPF footprint when None
    depth : float
        observed transit depth, used to flag possible NEB hosts
    kmax : float
        maximum eclipse depth assumed for a blended companion
    survey : str
        image survey; see from astroquery.skyview import SkyView;
        SkyView.list_surveys()
    verbose : bool
        print texts
    ax : axis
        subplot axis
    outline_color : str
        aperture outline color (default=C6)
    kwargs : dict
        keyword arguments for aper_radius, percentile

    Returns
    -------
    ax : axis
        subplot axis

    TODO: correct for proper motion difference between
    survey image and gaia DR2 positions
    """
    assert target_gaiaid is not None
    ny, nx = tpf.flux.shape[1:]
    if fov_rad is None:
        # Default FOV: 40% of the TPF diagonal, converted to arcmin.
        diag = np.sqrt(nx ** 2 + ny ** 2)
        fov_rad = (0.4 * diag * pix_scale).to(u.arcmin)
    target_coord = SkyCoord(ra=tpf.ra * u.deg, dec=tpf.dec * u.deg)
    if gaia_sources is None:
        print(
            "Querying Gaia sometimes hangs. Provide `gaia_sources` if you can."
        )
        gaia_sources = Catalogs.query_region(
            target_coord, radius=fov_rad, catalog="Gaia", version=2
        ).to_pandas()
    assert len(gaia_sources) > 1, "gaia_sources contains single entry"
    # make aperture mask
    mask = parse_aperture_mask(tpf, sap_mask=sap_mask, **mask_kwargs)
    maskhdr = tpf.hdu[2].header
    # make aperture mask outline
    contour = np.zeros((ny, nx))
    contour[np.where(mask)] = 1
    contour = np.lib.pad(contour, 1, PadWithZeros)
    # Upsample so the contour renders as a smooth outline.
    highres = zoom(contour, 100, order=0, mode="nearest")
    extent = np.array([-1, nx, -1, ny])
    if verbose:
        print(
            f"Querying {survey} ({fov_rad:.2f} x {fov_rad:.2f}) archival image"
        )
    # -----------create figure---------------#
    if ax is None:
        # get img hdu for subplot projection
        try:
            hdu = SkyView.get_images(
                position=target_coord.icrs.to_string(),
                coordinates="icrs",
                survey=survey,
                radius=fov_rad,
                grid=False,
            )[0][0]
        except Exception:
            errmsg = "survey image not available"
            raise FileNotFoundError(errmsg)
        fig = pl.figure(figsize=figsize)
        # define scaling in projection
        ax = fig.add_subplot(111, projection=WCS(hdu.header))
    # plot survey img
    if str(target_coord.distance) == "nan":
        # Drop the NaN distance so plot_finder_image does not choke on it.
        target_coord = SkyCoord(ra=target_coord.ra, dec=target_coord.dec)
    nax, hdu = plot_finder_image(
        target_coord, ax=ax, fov_radius=fov_rad, survey=survey, reticle=False
    )
    imgwcs = WCS(hdu.header)
    mx, my = hdu.data.shape
    # plot mask
    _ = ax.contour(
        highres,
        levels=[0.5],
        extent=extent,
        origin="lower",
        linewidths=[3],
        colors=outline_color,
        transform=ax.get_transform(WCS(maskhdr)),
    )
    idx = gaia_sources["source_id"].astype(int).isin([target_gaiaid])
    target_gmag = gaia_sources.loc[idx, "phot_g_mean_mag"].values[0]
    # Mark every Gaia source; color encodes whether it could host the signal.
    for index, row in gaia_sources.iterrows():
        marker, s = "o", 100
        r, d, mag, id = row[["ra", "dec", "phot_g_mean_mag", "source_id"]]
        pix = imgwcs.all_world2pix(np.c_[r, d], 1)[0]
        if int(id) != int(target_gaiaid):
            # Flux ratio bound: gamma relates companion brightness to target.
            gamma = 1 + 10 ** (0.4 * (mag - target_gmag))
            if depth > kmax / gamma:
                # too deep to have originated from secondary star
                edgecolor = "C1"
                alpha = 1  # 0.5
            else:
                # possible NEBs
                edgecolor = "C3"
                alpha = 1
        else:
            # The target itself gets a larger square marker.
            s = 200
            edgecolor = "C2"
            marker = "s"
            alpha = 1
        nax.scatter(
            pix[0],
            pix[1],
            marker=marker,
            s=s,
            edgecolor=edgecolor,
            alpha=alpha,
            facecolor="none",
        )
    # orient such that north is up; left is east
    ax.invert_yaxis()
    if hasattr(ax, "coords"):
        ax.coords[0].set_major_formatter("dd:mm")
        ax.coords[1].set_major_formatter("dd:mm")
    # set img limits
    pl.setp(
        nax,
        xlim=(0, mx),
        ylim=(0, my),
        title="{0} ({1:.2f}' x {1:.2f}')".format(survey, fov_rad.value),
    )
    return ax
def getLinkToSong(res):
    """
    getLinkToSong(res): Spotify link of the first track in the response

    :param: res: information about the playlist -> getResponse(pl_id)
    :returns: the external Spotify URL of the first track in `res`
    """
    first_item = res['items'][0]
    return first_item['track']['external_urls']['spotify']
def entropy_sampling(classifier, X, n_instances=1):
    """Entropy sampling query strategy, uses entropy of all probabilities as score.

    This strategy selects the samples with the highest entropy in their prediction
    probabilities.

    Args:
        classifier: The classifier for which the labels are to be queried.
        X: The pool of samples to query from.
        n_instances: Number of samples to be queried.

    Returns:
        The indices of the instances from X chosen to be labelled;
        the instances from X chosen to be labelled.
    """
    probabilities = _get_probability_classes(classifier, X)
    # Per-sample entropy over the class axis (entropy works column-wise).
    scores = np.transpose(entropy(np.transpose(probabilities)))
    # Highest entropy first.
    query_idx = np.argsort(scores)[::-1][:n_instances]
    return query_idx, scores[query_idx]
def browser(browserWsgiAppS):
    """Fixture for testing with zope.testbrowser."""
    connection = icemac.addressbook.testing.CURRENT_CONNECTION
    assert connection is not None, (
        "The `browser` fixture needs a database fixture like `address_book`.")
    return icemac.ab.calexport.testing.Browser(wsgi_app=browserWsgiAppS)
def parse_playing_now_message(playback):
    """Build a human-readable "now playing" message from a playback payload.

    :param playback: object (dict with an optional "item" entry)
    :returns str
    """
    item = playback.get("item", {})
    track = item.get("name", False)
    if not track:
        return "Could not get current track!"
    artist_names = [entry.get("name", "") for entry in item.get("artists", [])]
    artist = ", ".join(artist_names)
    return "Playing '%s' from '%s' now!" % (track, artist)
def test_in_execution(test_plan_uuid):
    """
    Executor->Curator
    Test in execution: executor responses with the Test ID that can be used in a future test cancellation
    { "test-id": <test_id> }(?)
    :param test_plan_uuid:
    :return: flask response: empty JSON body on success, or the exception
        args with an internal-error status on failure
    """
    # app.logger.debug(f'Callback received {request.path}, contains {request.get_data()}, '
    #                  f'Content-type: {request.headers["Content-type"]}')
    _LOG.debug(f'Callback received {request.path}, contains {request.get_data()}, '
               f'Content-type: {request.headers["Content-type"]}')
    try:
        executor_payload = request.get_json()
        # Touch the plan's timestamp before mutating its descriptors.
        context['test_preparations'][test_plan_uuid]['updated_at'] = datetime.utcnow().replace(microsecond=0)
        # Locate the descriptor whose test_uuid matches the callback payload.
        # NOTE(review): test_index is None when no descriptor matches -- the
        # assignment below then raises TypeError, which the except turns into
        # an internal-error response; confirm this is the intended handling.
        test_index = next(
            (index for (index, d) in
             enumerate(context['test_preparations'][test_plan_uuid]['augmented_descriptors'])
             if d['test_uuid'] == executor_payload['test_uuid']), None)
        # Record the executor-reported status; default to RUNNING when absent.
        (context['test_preparations'][test_plan_uuid]['augmented_descriptors']
         [test_index]['test_status']) = executor_payload['status'] if 'status' in executor_payload.keys() \
            else 'RUNNING'
        return make_response('{}', OK, {'Content-Type': 'application/json'})
    except Exception as e:
        return make_response(json.dumps({'exception': e.args}), INTERNAL_ERROR, {'Content-Type': 'application/json'})
def reset_state(environ):
    """Reset module and class level runtime state.

    To make sure that each test has the same starting conditions, we reset
    module or class level datastructures that maintain runtime state.

    This resets:

    - ``model.Property._FIND_METHODS_CACHE``
    - ``model.Model._kind_map``
    """
    # Generator-style fixture: run the test first, clean up afterwards.
    yield
    model.Property._FIND_METHODS_CACHE.clear()
    model.Model._kind_map.clear()
    # Also empty the runtime-state stack in place (keeps the list object).
    del _runstate.states.stack[:]
def load_remote_image(image_url):
    """Loads a remotely stored image into memory as an OpenCV/Numpy array

    Args:
        image_url (str): the URL of the image

    Returns:
        numpy ndarray: the image in OpenCV format (a [rows, cols, 3] BGR numpy
        array)
    """
    response = requests.get(image_url, stream=True)
    # Decode via PIL, then convert RGB -> BGR for OpenCV conventions.
    pil_image = Image.open(BytesIO(response.content))
    return cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2BGR)
def test_adjacent_vectors_not_found(vector_field, flawed_adjacency):
    """
    Adjacency is wrong.
    """
    with pytest.raises(KeyError):
        for field_key in vector_field:
            adjacent_vectors(vector_field, flawed_adjacency[field_key])
def checkNotice(bot):
    """Periodically read the notice list and push new items to users.

    Every notice published after a chat's last-seen timestamp is
    summarised and sent to that chat, after which the stored timestamp
    is advanced to the notice's publication date.
    """
    global g_notice_list
    global g_chat_id_db
    updateNoticeList()
    chat_dates = g_chat_id_db.getAllChatIdDb()
    for notice in g_notice_list:
        summary = makeNoticeSummary(g_notice_list.index(notice), notice)
        for chat_id, last_seen in chat_dates.items():
            if notice['published'] > last_seen:
                logger.info("sendMessage to %d (%s : %s)" % (chat_id, notice['published'], notice['title']))
                sendBotMessage(bot, chat_id, summary)
                g_chat_id_db.updateChatId(chat_id, notice['published'])
def evlt(inp : str) -> int:
    """Evaluate an infix arithmetic expression string and return its value.

    Classic two-stack (shunting-yard style) evaluation: one stack for
    operands, one for operators and opening parentheses.

    Raises:
        TooManyBracketsException: unbalanced parentheses.
        OverflowError: an operand exceeds the signed 32-bit range.
        TooManyOperatorsException: an operator is missing an operand.
        TooManyOperandsException: leftover operands (missing operators).
    """
    operand = []   # stack for operands
    operator = []  # stack for operators + parentheses
    i = 0  # manual index: number scanning advances it by several characters
    if inp.count('(') != inp.count(')'):
        raise TooManyBracketsException()
    while i < len(inp):  # while not EOF
        if inp[i].isdigit():
            # Consume the whole multi-digit number.
            num = ""
            while i < len(inp) and inp[i].isdigit():
                num += inp[i]
                i += 1
            # Bug fix: 2**31 - 1 itself is still representable in a signed
            # 32-bit int, so only values strictly above it overflow.
            if int(num) > 2**31 - 1:
                raise OverflowError()
            operand.append(int(num))  # push operand to stack
        elif inp[i] == '(':  # opening brace: push and move on
            operator.append(inp[i])
            i += 1
        elif inp[i] in operators:
            try:
                # Reduce all stacked operators of higher-or-equal precedence
                # before pushing the new operator.
                while len(operator) and precedence(operator[-1]) >= precedence(inp[i]):
                    b = operand.pop()
                    a = operand.pop()
                    op = operator.pop()
                    operand.append(evlexp(a, b, op))
                operator.append(inp[i])
                i += 1
            except IndexError:
                # operand.pop() on an empty stack: more operators than operands.
                raise TooManyOperatorsException
        elif inp[i] == ')':
            # Reduce until the matching '(' is on top of the operator stack.
            # Bug fix: the guard previously tested len(inp) != 0, which never
            # changes during the loop; it must watch the operator stack.
            while len(operator) != 0 and operator[-1] != '(':
                b = operand.pop()
                a = operand.pop()
                op = operator.pop()
                operand.append(evlexp(a, b, op))
            operator.pop()  # discard the '('
            i += 1
        else:
            # Skip whitespace / unrecognised characters.
            i += 1
            continue
    # Reduce whatever operators remain after the scan.
    while len(operator) != 0:
        op = operator.pop()
        b = operand.pop()
        a = operand.pop()
        operand.append(evlexp(a, b, op))
    # Exactly one operand left means a well-formed expression.
    if len(operand) == 1:
        return operand[-1]
    else:
        raise TooManyOperandsException()
def test_push_doesnt_happen_when_fetched_repo_has_zero_commits(instance, monkeypatch):
    """Push must not be attempted when the fetched repository has no commits."""
    def _fail_push(*args, **kwargs):
        raise Exception()

    # Any push attempt would blow up the run via _fail_push.
    monkeypatch.setattr(instance.git_cmd, 'create_remote', flexmock(push=_fail_push))
    # Pretend the fetched repository contains zero commits.
    monkeypatch.setattr(instance.git_cmd.git, 'rev_list', lambda x, y: 0)
    instance.run()
    assert not instance.running
def is_dir(dirname):
    """argparse type-checker: return *dirname* if it is an existing directory.

    Raises:
        argparse.ArgumentTypeError: if *dirname* is not a directory.
    """
    if os.path.isdir(dirname):
        return dirname
    raise argparse.ArgumentTypeError("{0} is not a directory".format(dirname))
def _create_preactivation_hook(activations):
    """Build a forward pre-hook that records a layer's inputs.

    The returned hook, once registered on a module, appends a CPU copy of
    the first positional input to *activations* right before every forward
    pass of that module.
    """
    def _hook(module, inputs):
        # Copy to CPU so stored tensors do not hold on to device memory.
        activations.append(inputs[0].cpu())
    return _hook
def import_by_path(path):
    """Resolve a dotted/colon path such as ``project.module:ClassName.callable``.

    The part before the colon is imported as a module; each dot-separated
    name after the colon is then looked up with ``getattr``, and the final
    object is returned.
    """
    module_name, object_ref = path.split(":", 1)
    resolved = importlib.import_module(module_name)
    for attr in object_ref.split("."):
        resolved = getattr(resolved, attr)
    return resolved
def ordered_links(d, k0, k1):
    """
    find ordered links starting from the link (k0, k1)

    Parameters
    ==========
    d : dict
        adjacency mapping for the graph (node -> list of neighbours)
    k0, k1 : hashable
        adjacent nodes of the graph; (k0, k1) is the starting link

    Returns
    =======
    list of (node, node) tuples in discovery order

    Examples
    ========
    >>> from active_nodes import ordered_links
    >>> d = {0:[1,4], 1:[0,2], 2:[1,3], 3:[2,4], 4:[0,3]}
    >>> ordered_links(d, 0, 1)
    [(0, 1), (0, 4), (1, 2), (2, 3), (3, 4)]
    """
    assert k0 in d
    assert k1 in d[k0]
    dx = defaultdict(list)
    links = []
    _append_link(dx, links, k0, k1)
    # The helpers mutate ``links``/``dx`` in place; their return values were
    # previously bound to unused locals (``r``, ``a2``) and are discarded.
    _add_links1(links, d, dx)
    while 1:
        # Partially-covered nodes: some, but not all, incident links found.
        active = [k for k in dx if 0 < len(dx[k]) < len(d[k])]
        if not active:
            break
        a1 = _short_path_active_nodes_all(d, dx, active)
        if a1 is None:
            break
        _add_paths(d, dx, links, a1)
    return links
def _exceeded_threshold(number_of_retries: int, maximum_retries: "int | None") -> bool:
    """Return True if the number of retries has been exceeded.

    Args:
        number_of_retries: The number of retry attempts made already.
        maximum_retries: The maximum number of retry attempts to make, or
            ``None`` to retry forever.

    Returns:
        True if the maximum number of retry attempts have already been
        made; always False when ``maximum_retries`` is None.
    """
    if maximum_retries is None:
        # Retry forever.
        return False
    return number_of_retries >= maximum_retries
def main(argv):
    """Entry point: run ``do`` on every .cli file, for the date in argv[1:4]."""
    target_date = datetime.date(int(argv[1]), int(argv[2]), int(argv[3]))
    os.chdir("/i/0/cli")
    # Each zone is a subdirectory containing its own .cli files.
    for zone in glob.glob("*"):
        os.chdir(zone)
        for cli_file in glob.glob("*.cli"):
            do(cli_file, target_date)
        os.chdir("..")
def _get_and_check_response(method, host, url, body=None, headers=None, files=None, data=None, timeout=30):
    """Issue an HTTPS request, raise on a non-OK status, and decode the reply.

    Returns a dict parsed from the body when the response is JSON;
    otherwise returns a ``(body_text, content_type)`` tuple.
    """
    full_url = 'https://' + host + url
    if files:
        res = https_session.post(full_url, files=files, data=data, timeout=timeout)
    elif method == 'POST':
        res = https_session.post(full_url, body, headers, timeout=timeout)
    else:
        res = https_session.get(full_url, timeout=timeout)
    res.raise_for_status()
    content_type = res.headers.get(CONTENT_TYPE, None)
    content = res.text
    if not (content_type and content_type.startswith(CONTENT_TYPE_JSON)):
        return (content, content_type)
    # Quickfix: some API responses repeat the FaxContainerFile key, which
    # breaks json.loads; truncate at the second occurrence.
    key = '"FaxContainerFile":'
    if content.count(key) == 2:
        content = content[:content.rfind(key)].rstrip(',') + "}"
    return json.loads(content)
def moveb_m_human(agents, self_state, self_name, c, goal):
    """HTN method: have the human pick up and stack block *c*.

    Returns the two-step plan (human_pick, human_stack) when the block is
    reachable by this agent, its goal is to be on the stack, and it is not
    there yet; otherwise returns an empty plan.
    """
    # Guard clauses replace the single compound condition; short-circuit
    # order is preserved (goal.isOnStack[c] only read when the key exists).
    if self_name not in self_state.isReachableBy[c]:
        return []
    if c not in goal.isOnStack or not goal.isOnStack[c]:
        return []
    if self_state.isOnStack[c]:
        return []
    return [("human_pick", c), ("human_stack",)]
def test_bootstrap_transformers_panel_format(
    transformer_class, return_actual, expected_index
):
    """The transformed panel keeps the expected index and the series name."""
    y_hat = transformer_class(n_series=2, return_actual=return_actual).fit_transform(y)
    assert expected_index.equals(y_hat.index) and (y_hat.columns[0] == y.name)
def send_credential_without_confirmed_password(page_users, new_user) -> None:
    """I send the credential without the confirmed password."""
    page_users.set_user(new_user)
    # Fill only the name and password, leaving the confirmation empty.
    fill_action = FillUserAction(_page=page_users)
    fill_action.fill_name().fill_password()
    del fill_action
    # Submit the creation form.
    CreateUserAction(_page=page_users).click()
def create_initialized_headless_egl_display():
    """Creates an initialized EGL display directly on a device.

    Candidate devices come from ``eglQueryDevicesEXT``; when the
    ``EGL_DEVICE_ID`` environment variable is set, only that device index
    is tried.  The first device whose platform display initializes cleanly
    is returned.

    Returns:
        An initialized EGL display, or ``EGL.EGL_NO_DISPLAY`` if no device
        could be initialized.
    """
    devices = EGL.eglQueryDevicesEXT()
    if os.environ.get("EGL_DEVICE_ID", None) is not None:
        # Restrict the search to the single device selected via the env var.
        devices = [devices[int(os.environ["EGL_DEVICE_ID"])]]
    for device in devices:
        display = EGL.eglGetPlatformDisplayEXT(
            EGL.EGL_PLATFORM_DEVICE_EXT, device, None)
        if display != EGL.EGL_NO_DISPLAY and EGL.eglGetError() == EGL.EGL_SUCCESS:
            # `eglInitialize` may or may not raise an exception on failure depending
            # on how PyOpenGL is configured. We therefore catch a `GLError` and also
            # manually check the output of `eglGetError()` here.
            try:
                initialized = EGL.eglInitialize(display, None, None)
            except error.GLError:
                pass
            else:
                if initialized == EGL.EGL_TRUE and EGL.eglGetError() == EGL.EGL_SUCCESS:
                    return display
    return EGL.EGL_NO_DISPLAY
def BooleanVar(default, callback=None):
    """
    Return a new (initialized) `tkinter.BooleanVar`.
    @param default the variable initial value
    @param callback function to invoke whenever the variable changes its value
    @return the created variable
    """
    # Delegate to the generic _var factory, which (per its contract above)
    # sets the initial value and wires up the change callback.
    return _var(tkinter.BooleanVar, default, callback)
def parse_header(source):
    """Read the three header lines of a Praat TextGrid file.

    (Copied from textgrid.parse_header.)

    Args:
        source: file-like object positioned at the start of the file.

    Returns:
        (file_type, short): the parsed file-type line and whether the file
        uses Praat's "short" text format.

    Raises:
        ValueError: if the first line is not a valid ooTextFile header.
    """
    header = source.readline()  # header junk
    # Raw string avoids the invalid-escape warning for \w in the pattern.
    m = re.match(r'File type = "([\w ]+)"', header)
    if m is None or not m.groups()[0].startswith('ooTextFile'):
        raise ValueError('The file could not be parsed as a Praat text file as '
                         'it is lacking a proper header.')
    short = 'short' in m.groups()[0]
    file_type = parse_line(source.readline(), short, '')  # header junk
    source.readline()  # skip the third header line (junk), value unused
    return file_type, short
async def setup_private_registry_async(
        loop: asyncio.BaseEventLoop,
        table_client: azure.storage.table.TableService,
        ipaddress: str, container: str, registry_archive: str,
        registry_image_id: str) -> None:
    """Set up a docker private registry if a ticket exists
    :param asyncio.BaseEventLoop loop: event loop
    :param azure.storage.table.TableService table_client: table client
    :param str ipaddress: ip address
    :param str container: container holding registry
    :param str registry_archive: registry archive file
    :param str registry_image_id: registry image id
    """
    # first check if we've registered before
    try:
        entity = table_client.get_entity(
            _STORAGE_CONTAINERS['table_registry'], _PARTITION_KEY, _NODEID)
        exists = True
        print('private registry row already exists: {}'.format(entity))
    except azure.common.AzureMissingResourceHttpError:
        # no prior registration found for this node
        exists = False
    # install/start docker registy container
    # NOTE: the registry instance is (re)started even when a prior
    # registration row exists; only the table insert below is skipped.
    await _start_private_registry_instance_async(
        loop, container, registry_archive, registry_image_id)
    # register self into registry table
    if not exists:
        entity = {
            'PartitionKey': _PARTITION_KEY,
            'RowKey': _NODEID,
            'IpAddress': ipaddress,
            'Port': _DEFAULT_PRIVATE_REGISTRY_PORT,
            'Container': container,
            'StorageAccount': _SHIPYARD_STORAGEACCOUNT,
        }
        # insert_or_replace makes the registration idempotent
        table_client.insert_or_replace_entity(
            _STORAGE_CONTAINERS['table_registry'], entity=entity)
def create_amsterdam(*args):
    """
    Creates a new droplet with sensible defaults
    Usage:
        [name]
    Arguments:
        name: (optional) name to give the droplet; if missing, current timestamp
    """
    # Only compute the timestamp fallback when no name was supplied;
    # the previous bare try/except also masked unrelated errors.
    if args:
        name = args[0]
    else:
        name = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H-%M-%S.%f")
    return create_small_droplet(name, 'ams2', 'ubuntu-17-04-x64')
def convert_inp(float_inp):
"""
Convert inp from decimal value (0.000, 0.333, 0.667, etc) to (0.0, 0.1, 0.2) for cleaner display.
:param float float_inp: inning pitching float value
:return:
"""
# Split inp into integer and decimal parts
i_inp, d_inp = divmod(float_inp, 1)
d_inp = d_inp*10
# Look at first digit of decimal part
# NOTE: repr(3)[0] = 2 and repr(6) = 5, not sure why?
if int(repr(d_inp)[0]) == 0:
disp_inp = i_inp + 0.0
elif int(repr(d_inp)[0]) == 3 or int(repr(d_inp)[0]) == 2:
disp_inp = i_inp + 0.1
elif int(repr(d_inp)[0]) == 6 or int(repr(d_inp)[0]) == 7 or int(repr(d_inp)[0]) == 5:
disp_inp = i_inp + 0.2
else:
print "{0} innings is not a standard amount!".format(float_inp)
return None
return disp_inp | 5,331,682 |
def plot(pulse: PulseTemplate,
         parameters: Dict[str, Parameter]=None,
         sample_rate: Real=10,
         axes: Any=None,
         show: bool=True,
         plot_channels: Optional[Set[ChannelID]]=None,
         plot_measurements: Optional[Set[str]]=None,
         stepped: bool=True,
         maximum_points: int=10**6,
         time_slice: Tuple[Real, Real]=None,
         **kwargs) -> Any:  # pragma: no cover
    """Plots a pulse using matplotlib.

    The given pulse template will first be turned into a pulse program (represented by a Loop object) with the provided
    parameters. The render() function is then invoked to obtain voltage samples over the entire duration of the pulse which
    are then plotted in a matplotlib figure.

    Args:
        pulse: The pulse to be plotted.
        parameters: An optional mapping of parameter names to Parameter
            objects.
        sample_rate: The rate with which the waveforms are sampled for the plot in
            samples per time unit. (default = 10)
        axes: matplotlib Axes object the pulse will be drawn into if provided
        show: If true, the figure will be shown
        plot_channels: If specified only channels from this set will be plotted. If omitted all channels will be.
        stepped: If true pyplot.step is used for plotting
        plot_measurements: If specified measurements in this set will be plotted. If omitted no measurements will be.
        maximum_points: If the sampled waveform is bigger, it is not plotted
        time_slice: The time slice to be plotted. If None, the entire pulse will be shown.
        kwargs: Forwarded to pyplot. Overwrites other settings.

    Returns:
        matplotlib.pyplot.Figure instance in which the pulse is rendered

    Raises:
        PlottingNotPossibleException if the sequencing is interrupted before it finishes, e.g.,
            because a parameter value could not be evaluated
        all Exceptions possibly raised during sequencing
    """
    # Imported lazily so matplotlib is only required when plotting is used.
    from matplotlib import pyplot as plt
    channels = pulse.defined_channels
    if parameters is None:
        parameters = dict()
    # Build the executable pulse program with identity channel/measurement
    # mappings, then sample it into (times, per-channel voltages, measurements).
    program = pulse.create_program(parameters=parameters,
                                   channel_mapping={ch: ch for ch in channels},
                                   measurement_mapping={w: w for w in pulse.measurement_names})
    if program is not None:
        times, voltages, measurements = render(program,
                                               sample_rate,
                                               render_measurements=bool(plot_measurements),
                                               time_slice=time_slice)
    else:
        times, voltages, measurements = np.array([]), dict(), []
    duration = 0
    if times.size == 0:
        warnings.warn("Pulse to be plotted is empty!")
    elif times.size > maximum_points:
        # todo [2018-05-30]: since it results in an empty return value this should arguably be an exception, not just a warning
        warnings.warn("Sampled pulse of size {wf_len} is lager than {max_points}".format(wf_len=times.size,
                                                                                        max_points=maximum_points))
        return None
    else:
        duration = times[-1]
    if time_slice is None:
        time_slice = (0, duration)
    legend_handles = []
    if axes is None:
        # plot to figure
        figure = plt.figure()
        axes = figure.add_subplot(111)
    if plot_channels is not None:
        # Restrict plotting to the requested subset of channels.
        voltages = {ch: voltage
                    for ch, voltage in voltages.items()
                    if ch in plot_channels}
    for ch_name, voltage in voltages.items():
        label = 'channel {}'.format(ch_name)
        # kwargs are merged last so caller-supplied options win.
        if stepped:
            line, = axes.step(times, voltage, **{**dict(where='post', label=label), **kwargs})
        else:
            line, = axes.plot(times, voltage, **{**dict(label=label), **kwargs})
        legend_handles.append(line)
    if plot_measurements:
        # Group measurement windows by name: name -> [(begin, end), ...]
        measurement_dict = dict()
        for name, begin, length in measurements:
            if name in plot_measurements:
                measurement_dict.setdefault(name, []).append((begin, begin+length))
        # One color per measurement name, spread over the colormap.
        color_map = plt.cm.get_cmap('plasma')
        meas_colors = {name: color_map(i/len(measurement_dict))
                       for i, name in enumerate(measurement_dict.keys())}
        for name, begin_end_list in measurement_dict.items():
            for begin, end in begin_end_list:
                poly = axes.axvspan(begin, end, alpha=0.2, label=name, edgecolor='black', facecolor=meas_colors[name])
                legend_handles.append(poly)
        axes.legend(handles=legend_handles)
    max_voltage = max((max(channel, default=0) for channel in voltages.values()), default=0)
    min_voltage = min((min(channel, default=0) for channel in voltages.values()), default=0)
    # add some margins in the presentation
    axes.set_xlim(-0.5+time_slice[0], time_slice[1] + 0.5)
    voltage_difference = max_voltage-min_voltage
    if voltage_difference>0:
        axes.set_ylim(min_voltage - 0.1*voltage_difference, max_voltage + 0.1*voltage_difference)
    axes.set_xlabel('Time (ns)')
    axes.set_ylabel('Voltage (a.u.)')
    if pulse.identifier:
        axes.set_title(pulse.identifier)
    if show:
        axes.get_figure().show()
    return axes.get_figure()
def is_ipv4(line):
    """Return True when "ipv4" occurs at index 6 or later in *line*.

    A missing "ipv4" substring (find() returns -1) or one that appears
    before column 6 yields False, matching the original check.
    """
    return line.find("ipv4") >= 6
def format_ica_lat(ff_lat):
    """
    Convert a latitude in degrees to the GGMM.mmmH format.

    @param ff_lat: latitude in degrees
    @return: string formatted as GGMM.mmmH (H = hemisphere letter, N or S)
    """
    # Break the decimal degrees into degrees / minutes / seconds.
    deg, minutes, seconds = deg2dms(ff_lat)
    # Pack into GGMM.mmm: degrees*100 + minutes, seconds as minute fraction.
    packed = (abs(deg) * 100) + minutes + (seconds / 60.)
    hemisphere = 'S' if ff_lat <= 0 else 'N'
    return "{:4.3f}{}".format(packed, hemisphere)
def fixture_git_dir():
    """Create tmpdir and return its file name."""
    temp_path = tempfile.mkdtemp()
    yield temp_path
    # Best-effort cleanup: the test may already have removed the directory.
    try:
        os.rmdir(temp_path)
    except FileNotFoundError:
        pass
def loadData (x_file="ass1_data/linearX.csv", y_file="ass1_data/linearY.csv"):
    """
    Loads the X and Y vectors from the given CSV files, pairs them so the
    rows stay aligned, shuffles them, and returns the shuffled columns.

    Note: a train/validation/test partition used to be computed here but
    was never returned; that dead code has been removed.

    :return: tuple (X, Y) of shuffled, row-aligned 1-D numpy arrays
    """
    X = np.genfromtxt(x_file)
    Y = np.genfromtxt(y_file)
    # Stack as columns so shuffling keeps X[i] paired with Y[i].
    Z = np.c_[X.reshape(len(X), -1), Y.reshape(len(Y), -1)]
    np.random.shuffle(Z)
    return (Z[:, 0], Z[:, 1])
async def retrieve_users():
    """
    Retrieve all users in collection
    """
    # Async comprehension collects every parsed user document.
    return [user_parser(doc) async for doc in user_collection.find()]
def config():
    """
    Commands for the configuration.

    Default config file: ~/.padre.cfg

    NOTE(review): the body is intentionally empty — this appears to be a
    CLI command-group entry point whose subcommands do the actual work;
    confirm against the decorator at the call/definition site.
    """
def get_file_hash(path):
    """Return the MD5 hex digest of the file at *path*.

    Reads in fixed-size chunks so large files need not fit in memory
    (the previous version read the whole file at once and also shadowed
    the builtin ``hash``).
    """
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        # Stream the file in 64 KiB chunks until EOF.
        for chunk in iter(lambda: f.read(65536), b''):
            md5.update(chunk)
    digest = md5.hexdigest()
    info("get_file_hash from {}: {}".format(path, digest))
    return digest
def apply_repro_analysis(dataset, thresholds=[3.0], method = 'crfx'):
    """
    Perform voxel-, cluster- and peak-level reproducibility analyses of a
    2D activation dataset across subjects, one run per threshold.

    NOTE(review): the mutable default ``thresholds=[3.0]`` is only read,
    never mutated, so it is harmless here — but a tuple would be safer.

    Returns:
        kap, clt, pkd: arrays of shape (len(thresholds), niter) with the
        voxel, cluster and peak reproducibility scores respectively.
    """
    from nipy.labs.spatial_models.discrete_domain import \
        grid_domain_from_binary_array
    n_subj, dimx, dimy = dataset.shape
    # Flatten each subject's 2D map into a column vector.
    func = np.reshape(dataset,(n_subj, dimx * dimy)).T
    var = np.ones((dimx * dimy, n_subj))
    # Discrete domain covering the full 2D grid.
    domain = grid_domain_from_binary_array(np.ones((dimx, dimy, 1)))
    ngroups = 5
    sigma = 2.0
    csize = 10
    niter = 5
    verbose = 0
    swap = False
    kap, clt, pkd = [], [], []
    for threshold in thresholds:
        kappa, cls, pks = [], [], []
        kwargs = {'threshold':threshold, 'csize':csize}
        # Repeat each analysis niter times (random subject grouping).
        for i in range(niter):
            k = voxel_reproducibility(func, var, domain, ngroups,
                                  method, swap, verbose, **kwargs)
            kappa.append(k)
            cld = cluster_reproducibility(func, var, domain, ngroups, sigma,
                                      method, swap, verbose, **kwargs)
            cls.append(cld)
            pk = peak_reproducibility(func, var, domain, ngroups, sigma,
                                      method, swap, verbose, **kwargs)
            pks.append(pk)
        kap.append(np.array(kappa))
        clt.append(np.array(cls))
        pkd.append(np.array(pks))
    kap = np.array(kap)
    clt = np.array(clt)
    pkd = np.array(pkd)
    return kap, clt, pkd
def api_root(request):
    """
    Logging root
    """
    greeting = "Hello, {}. You're at the logs api index.".format(request.user.username)
    return Response({'message': greeting})
def test_weekly__Weekly____call____3(DateTime, interval_start, interval_end):
    """It returns an empty iterable if the recurrence should start after ...

    ... `interval_end`.
    """
    recurrence = Weekly(DateTime(2014, 5, 1, 21, 45))
    assert list(recurrence(interval_start, interval_end)) == []
def load_data(filename: str):
    """
    Load house prices dataset and preprocess data.

    Parameters
    ----------
    filename: str
        Path to house prices dataset

    Returns
    -------
    Design matrix and response vector (prices) - either as a single
    DataFrame or a Tuple[DataFrame, Series]
    """
    df = pd.read_csv(filename)
    df.dropna(inplace=True)
    # Raw date, coordinates and the row id are not used as features.
    df.drop(['long', 'date', 'lat', 'id'], axis=1, inplace=True)
    # Discard physically impossible rows with one combined mask
    # (equivalent to the sequential per-column row drops).
    valid = (
        (df.bedrooms > 0)
        & (df.sqft_living > 0)
        & (df.floors > 0)
        & (df.bathrooms >= 0)
        & (df.price >= 0)
    )
    df = df[valid]
    # Collapse build/renovation years into the most recent of the two.
    df['yr_built_or_renovated'] = df[['yr_built', 'yr_renovated']].max(axis=1)
    df = df.drop(['yr_built', 'yr_renovated'], axis=1)
    prices = df.pop('price')
    return df, prices
def parse_esim_inst(line):
    """Parse a single line of an e-sim trace.
    Keep the original line for debugging purposes.
    >>> i0 = parse_esim_inst('0x000000 b.l 0x0000000000000058 - pc <- 0x58 - nbit <- 0x0')
    >>> ex0 = {'pc': 0, 'AN': False, 'instruction': 'b.l', 'line': '0x000000 b.l 0x0000000000000058 - pc <- 0x58 - nbit <- 0x0'}
    >>> i0 == ex0
    True
    >>> i1 = parse_esim_inst('0x0000b0 --- _epiphany_star strd r2,[r0],+0x1 - memaddr <- 0x2f8, memory <- 0x0, memaddr <- 0x2fc, memory <- 0x0, registers <- 0x300')
    >>> ex1 = {'instruction': 'strd', 'line': '0x0000b0 --- _epiphany_star strd r2,[r0],+0x1 - memaddr <- 0x2f8, memory <- 0x0, memaddr <- 0x2fc, memory <- 0x0, registers <- 0x300', 'mem': [(760, 0), (764, 0)], 'pc': 176, 'reg': [768]}
    >>> i1 == ex1
    True
    """
    # Resulting keys: 'line', 'pc', 'instruction', and optionally
    # 'reg' (register writes), 'mem' ((addr, value) writes) and flag bits.
    inst = dict()
    tokens = line.split()
    if not tokens:
        # Blank trace line: return an empty dict.
        return inst
    inst['line'] = line
    inst['pc'] = int(tokens[0], 16)
    # Lines of the form "0x... --- <symbol> <mnemonic> ..." carry the
    # mnemonic in the 4th token; otherwise it is the 2nd token.
    inst['instruction'] = tokens[3] if tokens[1] == '---' else tokens[1]
    # The remaining tokens are "<what> <- <value>" triples; index offsets
    # (+2, +5) below step over the '<-' arrows within tokens[1:].
    for index, tok in enumerate(tokens[1:]):
        if tok == 'registers' or tok == 'core-registers': # Writing to a register.
            value = int(tokens[1:][index + 2].split(',')[0], 16)
            if 'reg' in inst:
                inst['reg'].append(value)
            else:
                inst['reg'] = [value]
        elif tok == 'memaddr': # Writing to memory.
            addr = tokens[1:][index + 2].split(',')[0]
            addr = int(addr, 16)
            value = tokens[1:][index + 5].split(',')[0]
            value = int(value, 16)
            if 'mem' in inst:
                inst['mem'].append((addr, value))
            else:
                inst['mem'] = [(addr, value)]
        else: # Next tok might be a flag.
            if tok in _e_flags.keys():
                # Map the simulator flag name to its key and store as bool.
                state = tokens[1:][index + 2].split(',')[0]
                inst[_e_flags[tok]] = state == '0x1'
            # Otherwise ignore and continue.
    return inst
def getcollength(a):
    """
    Get the column length of a matrix view object.

    Dispatches on the view's type string to the matching VSIPL binding.
    """
    t = getType(a)
    dispatch = {
        'mview_f': vsip_mgetcollength_f,
        'mview_d': vsip_mgetcollength_d,
        'mview_i': vsip_mgetcollength_i,
        'mview_si': vsip_mgetcollength_si,
        'mview_uc': vsip_mgetcollength_uc,
        'cmview_f': vsip_cmgetcollength_f,
        'cmview_d': vsip_cmgetcollength_d,
        'mview_bl': vsip_mgetcollength_bl,
    }
    assert t[0] and t[1] in dispatch, \
        'Type <:%s:> not a supported type for for getcollength' % t[1]
    return dispatch[t[1]](a)
def process_files( optD, fileL, fileG, **kwargs ):
    """
    Apply -g and -d options to the 'fileL' list, in place. The order
    of 'fileL' is retained, but each glob list is sorted by ascending file
    date stamp. If 'fileG' is not None, it will be filled with the files
    glob'ed using the -G option, if present.
    The -d option applies to files of form "results.YYYY_MM_DD.*".
    The -p option to form "results.YYYY_MM_DD.platform.*".
    The -o option to form "results.YYYY_MM_DD.platform.options.*", where the
    options are separated by a plus sign.
    The -t option to form "results.YYYY_MM_DD.platform.options.tag".
    If '-d' is not in 'optD' and 'default_d' is contained in 'kwargs', then
    that value is used for the -d option.

    All filters below rewrite each list in place (del fL[:] then extend)
    so that caller-held references to the lists see the changes.
    """
    if '-g' in optD:
        # Expand each -g glob pattern, ordering matches by mtime, and
        # prepend them to fileL.
        gL = []
        for pat in optD['-g']:
            L = [ (os.path.getmtime(f),f) for f in glob.glob( pat ) ]
            L.sort()
            gL.extend( [ f for t,f in L ] )
        tmpL = gL + fileL
        del fileL[:]
        fileL.extend( tmpL )
    fLL = [ fileL ]
    if fileG != None and '-G' in optD:
        # Same expansion for -G, collected into fileG instead.
        for pat in optD['-G']:
            L = [ (os.path.getmtime(f),f) for f in glob.glob( pat ) ]
            L.sort()
            fileG.extend( [ f for t,f in L ] )
        fLL.append( fileG )
    # Apply the remaining filters to both lists.
    for fL in fLL:
        dval = optD.get( '-d', kwargs.get( 'default_d', None ) )
        if dval != None:
            dval = int(dval)
            # filter out results files that are too old
            cutoff = fmtresults.date_round_down( int( time.time() - dval*24*60*60 ) )
            newL = []
            for f in fL:
                ft,plat,opts,tag = fmtresults.parse_results_filename( f )
                # Files without a parseable date (ft == None) are kept.
                if ft == None or ft >= cutoff:
                    newL.append( f )
            del fL[:]
            fL.extend( newL )
        platL = None
        if '-p' in optD or '--plat' in optD:
            platL = optD.get( '-p', [] ) + optD.get( '--plat', [] )
        xplatL = optD.get( '-P', None )
        if platL != None or xplatL != None:
            # include/exclude results files based on platform name
            newL = []
            for f in fL:
                ft,plat,opts,tag = fmtresults.parse_results_filename( f )
                # NOTE(review): 'or' binds looser than 'and', so this reads
                # plat == None or ((include ok) and (not excluded)) — files
                # with no platform always pass; confirm that is intended.
                if plat == None or \
                   ( platL == None or plat in platL ) and \
                   ( xplatL == None or plat not in xplatL ):
                    newL.append( f )
            del fL[:]
            fL.extend( newL )
        if '-o' in optD:
            # keep results files that are in the -o list
            optnL = '+'.join( optD['-o'] ).split('+')
            newL = []
            for f in fL:
                ft,plat,opts,tag = fmtresults.parse_results_filename( f )
                if opts != None:
                    # if at least one of the -o values from the command line
                    # is contained in the file name options, then keep the file
                    foptL = opts.split('+')
                    for op in optnL:
                        if op in foptL:
                            newL.append( f )
                            break
                else:
                    newL.append( f ) # don't apply filter to this file
            del fL[:]
            fL.extend( newL )
        if '-O' in optD:
            # exclude results files that are in the -O list
            optnL = '+'.join( optD['-O'] ).split('+')
            newL = []
            for f in fL:
                ft,plat,opts,tag = fmtresults.parse_results_filename( f )
                if opts != None:
                    # if at least one of the -O values from the command line is
                    # contained in the file name options, then exclude the file
                    foptL = opts.split('+')
                    keep = True
                    for op in optnL:
                        if op in foptL:
                            keep = False
                            break
                    if keep:
                        newL.append( f )
                else:
                    newL.append( f ) # don't apply filter to this file
            del fL[:]
            fL.extend( newL )
        tagL = optD.get( '-t', None )
        xtagL = optD.get( '-T', None )
        if tagL != None or xtagL != None:
            # include/exclude based on tag
            newL = []
            for f in fL:
                ft,plat,opts,tag = fmtresults.parse_results_filename( f )
                # Same precedence pattern as the platform filter above.
                if tag == None or \
                   ( tagL == None or tag in tagL ) and \
                   ( xtagL == None or tag not in xtagL ):
                    newL.append( f )
            del fL[:]
            fL.extend( newL )
def test_list_date_white_space_nistxml_sv_iv_list_date_white_space_1_3(mode, save_output, output_format):
    """
    Type list/date is restricted by facet whiteSpace with value collapse.
    """
    base_dir = "nistData/list/date/Schema+Instance/"
    assert_bindings(
        schema=base_dir + "NISTSchema-SV-IV-list-date-whiteSpace-1.xsd",
        instance=base_dir + "NISTXML-SV-IV-list-date-whiteSpace-1-3.xml",
        class_name="NistschemaSvIvListDateWhiteSpace1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_plotcals():
    """calibration param inspector routine with sensible default values"""
    data = mkh5.mkh5(TEST_H5)
    data.reset_all()  # start from an empty file
    data.create_mkdata(S01["gid"], S01["eeg_f"], S01["log_f"], S01["yhdr_f"])
    # Pre-calibration, view-only inspector (like garv).
    data.plotcals(TEST_H5, S01["gid"], **CAL_ARGS)
    os.remove(TEST_H5)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.