| content | id |
|---|---|
def convert_hdf_to_gaintable(f):
""" Convert HDF root to a GainTable
:param f: HDF5 file or group containing the GainTable data
:return: GainTable
"""
assert f.attrs['ARL_data_model'] == "GainTable", "Not a GainTable"
receptor_frame = ReceptorFrame(f.attrs['receptor_frame'])
frequency = numpy.array(f.attrs['frequency'])
data = numpy.array(f['data'])
gt = GainTable(data=data, receptor_frame=receptor_frame, frequency=frequency)
return gt
| 5,339,100
|
def test_buffer_lag_increasing() -> None:
"""Test that each state only learns from observations buffer_timeout prior to the most recent observations.
Test using the increasing scheduler."""
buffer_timeout = 50
increase_rate = 1.0
buffered_classifier = BaseBufferedAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
buffer_timeout_max=buffer_timeout,
buffer_timeout_scheduler=get_increasing_buffer_scheduler(increase_rate),
)
dataset_0 = synth.STAGGER(classification_function=0, seed=0)
dataset_1 = synth.STAGGER(classification_function=1, seed=0)
dataset_2 = synth.STAGGER(classification_function=2, seed=0)
t = 0
for dataset in [dataset_0, dataset_1, dataset_2] * 3:
for x, y in dataset.take(500):
initial_seen_weight = buffered_classifier.get_active_state().seen_weight
_ = buffered_classifier.predict_one(x, t)
buffered_classifier.learn_one(x, y, timestep=t)
assert (
buffered_classifier.get_active_state().last_trained_active_timestep
< 0 # At the very start our test will be off due to initialization
or buffered_classifier.get_active_state().weight_since_last_active
!= 0 # When we transition to a new state will be off until we see an ob
or buffered_classifier.get_active_state().seen_weight
== 0 # When we transition to a new state will be off until we see an ob
or buffered_classifier.get_active_state().last_trained_active_timestep
== t
- min(
round(initial_seen_weight * increase_rate), buffer_timeout
) # Test that we never train on unbuffered obs
# This last check tests that each observation we train on has been buffered at least
# initial_seen_weight * increase_rate observations, up to the buffer_timeout_max.
)
t += 1
| 5,339,101
|
def list_files(directory):
"""Returns all files in a given directory
"""
return [f for f in pathlib.Path(directory).iterdir() if f.is_file() and not f.name.startswith('.')]
| 5,339,102
|
def test_bgp_attributes_for_evpn_address_family_p1(request, attribute):
"""
BGP attributes for EVPN address-family.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
check_router_status(tgen)
reset_config_on_routers(tgen)
add_default_routes(tgen)
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
step(
"Advertise prefixes from VNF routers R1 and R2 in associated "
"VRFs for both address-family."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"r1": {
"static_routes": [
{
"network": NETWORK1_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED",
}
]
},
"r2": {
"static_routes": [
{
"network": NETWORK2_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE",
},
{
"network": NETWORK3_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
},
]
},
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
topo_local = deepcopy(topo)
logger.info("Modifying topology b/w e1 and d1 from iBGP to eBGP")
step("Delete BGP config for vrf RED.")
if attribute == "locPrf":
input_dict_vni = {
"d1": {
"vrfs": [
{"name": "RED", "no_vni": VNI_1},
{"name": "BLUE", "no_vni": VNI_2},
{"name": "GREEN", "no_vni": VNI_3},
]
}
}
result = create_vrf_cfg(tgen, topo, input_dict=input_dict_vni)
assert result is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, result
)
input_dict_2 = {}
for dut in ["d1"]:
temp = {dut: {"bgp": []}}
input_dict_2.update(temp)
INDEX = [0, 1, 2, 3]
VRFS = ["RED", "BLUE", "GREEN", None]
AS_NUM = [100, 100, 100, 100]
for index, vrf, as_num in zip(INDEX, VRFS, AS_NUM):
topo_local["routers"][dut]["bgp"][index]["local_as"] = 200
if vrf:
temp[dut]["bgp"].append(
{"local_as": as_num, "vrf": vrf, "delete": True}
)
else:
temp[dut]["bgp"].append({"local_as": as_num, "delete": True})
result = create_router_bgp(tgen, topo, input_dict_2)
assert result is True, "Testcase {} on d1 :Failed \n Error: {}".format(
tc_name, result
)
result = create_router_bgp(tgen, topo_local["routers"])
assert result is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, result
)
step("Advertise VRF routes in EVPN address-family from DCG-1 " "and DCG-2 routers.")
for addr_type in ADDR_TYPES:
input_dict_1 = {
"r3": {
"static_routes": [
{
"network": NETWORK1_2[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED",
}
]
},
"r4": {
"static_routes": [
{
"network": NETWORK1_3[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE",
},
{
"network": NETWORK1_4[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
},
]
},
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Redistribute static in (IPv4 and IPv6) address-family "
"on Edge-1 for all VRFs."
)
input_dict_2 = {}
for dut in ["r3", "r4"]:
temp = {dut: {"bgp": []}}
input_dict_2.update(temp)
if dut == "r3":
VRFS = ["RED"]
AS_NUM = [3]
if dut == "r4":
VRFS = ["BLUE", "GREEN"]
AS_NUM = [4, 4]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_2)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step(
"Verify on router Edge-1 that EVPN routes corresponding to "
"all VRFs are received from both routers DCG-1 and DCG-2"
)
for addr_type in ADDR_TYPES:
input_routes = {
"r3": {
"static_routes": [
{
"network": NETWORK1_2[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED",
}
]
},
"r4": {
"static_routes": [
{
"network": NETWORK1_3[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE",
},
{
"network": NETWORK1_4[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
},
]
},
}
result = verify_rib(tgen, addr_type, "e1", input_routes)
assert result is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, result
)
step(
"Configure a route-map on Edge-1 to modify below BGP attributes "
"for EVPN address-family:"
)
if attribute == "path":
input_dict_1 = {
"e1": {
"route_maps": {
"rmap_d1": [
{
"action": "permit",
"set": {
attribute: {
"as_num": "123 231 321",
"as_action": "prepend",
}
},
}
],
"rmap_d2": [
{
"action": "permit",
"set": {
attribute: {"as_num": "121", "as_action": "prepend"}
},
}
],
}
}
}
else:
input_dict_1 = {
"e1": {
"route_maps": {
"rmap_d1": [{"action": "permit", "set": {attribute: 120}}],
"rmap_d2": [{"action": "permit", "set": {attribute: 150}}],
}
}
}
result = create_route_maps(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
input_dict_2 = {
"e1": {
"bgp": [
{
"local_as": "100",
"address_family": {
"l2vpn": {
"evpn": {
"neighbor": {
"d1": {
"ipv4": {
"e1-link1": {
"route_maps": [
{
"name": "rmap_d1",
"direction": "in",
}
]
}
}
},
"d2": {
"ipv4": {
"e1-link1": {
"route_maps": [
{
"name": "rmap_d2",
"direction": "in",
}
]
}
}
},
}
}
}
},
}
]
}
}
result = create_router_bgp(tgen, topo, input_dict_2)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step(
"Verify on router Edge-1 that EVPN routes are preferred via"
" DCG-1 or DCG-2 based on best path selection criteria "
"(according to the configured BGP attribute values in route-map)."
)
for addr_type in ADDR_TYPES:
input_routes = {
"r3": {
"static_routes": [
{
"network": NETWORK1_2[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED",
}
]
},
"r4": {
"static_routes": [
{
"network": NETWORK1_3[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE",
},
{
"network": NETWORK1_4[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "GREEN",
},
]
},
}
result = verify_best_path_as_per_bgp_attribute(
tgen, addr_type, "e1", input_routes, attribute
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
write_test_footer(tc_name)
| 5,339,103
|
def post_process(ctx, result_folder, the_executor):
"""
This method is invoked once the test generation is over.
"""
# Plot the stats on the console
log.info("Test Generation Statistics:")
log.info(the_executor.get_stats())
# Generate the actual summary files
create_experiment_description(result_folder, ctx.params)
# Generate the other reports
create_summary(result_folder, the_executor.get_stats())
| 5,339,104
|
def condition_conjunction(conditions):
"""Do conjuction of conditions if there are more than one, otherwise just
return the single condition."""
if not conditions:
return None
elif len(conditions) == 1:
return conditions[0]
else:
return sql.expression.and_(*conditions)
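# A minimal usage sketch (not part of the original source); it assumes SQLAlchemy is
# installed and that `sql` above refers to `sqlalchemy.sql`.
from sqlalchemy import column
conditions = [column("age") > 18, column("country") == "NL"]
print(condition_conjunction(conditions))        # age > :age_1 AND country = :country_1
print(condition_conjunction(conditions[:1]))    # single condition is returned unchanged
print(condition_conjunction([]))                # None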
| 5,339,105
|
def get_claimed_referrals(char):
""" Return how many claimed referrals this character has. """
return db((db.referral.referrer==char) & (db.referral.claimed==True)).count()
| 5,339,106
|
def groupsplit(X, y, valsplit):
"""
Used to split the dataset by datapoint_id into train and test sets.
The data is split to ensure all datapoints for each datapoint_id occur completely in the respective dataset split.
Note that when there is a validation set, the data is split with 70% for training and 30% for the test set.
Otherwise, the test set is split further, with 60% as the test set and 40% as the validation set.
Args:
X: data excluding the target_variable
y: target variable with datapoint_id
valsplit: flag to indicate if there is a dataframe for the validation set. Accepted values are "yes" or "no"
Returns:
X_train: X trainset
y_train: y trainset
X_test: X testset
y_test_complete: Dataframe containing the target variable with corresponding datapointid
"""
logger.info("groupsplit with valsplit: %s", valsplit)
if valsplit == 'yes':
gs = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42)
else:
gs = GroupShuffleSplit(n_splits=2, test_size=.4, random_state=42)
train_ix, test_ix = next(gs.split(X, y, groups=X.datapoint_id))
X_train = X.loc[train_ix]
y_train = y.loc[train_ix]
X_test = X.loc[test_ix]
y_test_complete = y.loc[test_ix]
return X_train, y_train, X_test, y_test_complete
| 5,339,107
|
def multiprocessed_read():
"""
Get all the symbols from the database
Assign chunks of 50 symbols to each process worker and let them read all rows for the given symbol
1000 symbols, 100 rows
"""
conn = sqlite3.connect(os.path.realpath('database.db'))
symbols = conn.execute("SELECT DISTINCT SYMBOL from database_threading_test").fetchall()
with ProcessPoolExecutor(max_workers=8) as e:
results = e.map(read_task, symbols, chunksize=50)
for result in results:
pass
| 5,339,108
|
def efficientnet_b0(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
"""EfficientNet-B0"""
model_name = "tf_efficientnet_b0"
default_cfg = default_cfgs[model_name]
# NOTE for train, drop_rate should be 0.2
# kwargs['drop_connect_rate'] = 0.2 # set when training, TODO add as cmd arg
model = _gen_efficientnet(
model_name=model_name,
channel_multiplier=1.0,
depth_multiplier=1.0,
num_classes=num_classes,
in_chans=in_chans,
**kwargs
)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfgs[model_name], num_classes)
return model
| 5,339,109
|
def application(environ, start_response):
"""
make Passenger interpret PATH_INFO the same way that the WSGI standard
does
"""
environ["PATH_INFO"] = urllib.parse.unquote(environ["PATH_INFO"])
return app.app(environ, start_response)
| 5,339,110
|
def run_experiment_here(
experiment_function,
variant=None,
exp_id=0,
seed=0,
use_gpu=True,
# Logger params:
exp_prefix="default",
snapshot_mode='last',
snapshot_gap=1,
git_infos=None,
script_name=None,
logger=default_logger,
trial_dir_suffix=None,
randomize_seed=False,
**setup_logger_kwargs
):
"""
Run an experiment locally without any serialization.
:param experiment_function: Function. `variant` will be passed in as its
only argument.
:param exp_prefix: Experiment prefix for the save file.
:param variant: Dictionary passed in to `experiment_function`.
:param exp_id: Experiment ID. Should be unique across all
experiments. Note that one experiment may correspond to multiple seeds.
:param seed: Seed used for this experiment.
:param use_gpu: Run with GPU. By default True.
:param script_name: Name of the running script
:param log_dir: If set, set the log directory to this. Otherwise,
the directory will be auto-generated based on the exp_prefix.
:return:
"""
if variant is None:
variant = {}
variant['exp_id'] = str(exp_id)
if randomize_seed or (seed is None and 'seed' not in variant):
seed = random.randint(0, 100000)
variant['seed'] = str(seed)
reset_execution_environment(logger=logger)
actual_log_dir = setup_logger(
exp_prefix=exp_prefix,
variant=variant,
exp_id=exp_id,
seed=seed,
snapshot_mode=snapshot_mode,
snapshot_gap=snapshot_gap,
git_infos=git_infos,
script_name=script_name,
logger=logger,
trial_dir_suffix=trial_dir_suffix,
**setup_logger_kwargs
)
set_seed(seed)
from railrl.torch.pytorch_util import set_gpu_mode
set_gpu_mode(use_gpu)
run_experiment_here_kwargs = dict(
variant=variant,
exp_id=exp_id,
seed=seed,
use_gpu=use_gpu,
exp_prefix=exp_prefix,
snapshot_mode=snapshot_mode,
snapshot_gap=snapshot_gap,
git_infos=git_infos,
script_name=script_name,
**setup_logger_kwargs
)
save_experiment_data(
dict(
run_experiment_here_kwargs=run_experiment_here_kwargs
),
actual_log_dir
)
return experiment_function(variant)
| 5,339,111
|
def normalize_chunks(chunks: Tuple[Tuple[int, int]]) -> Tuple[Tuple[int, int]]:
"""
Minimize the amount of chunks needed to describe a smaller portion of a file.
:param chunks: A tuple with (start, end,) offsets
:return: A tuple containing as few as possible (start, end,) offsets
"""
out = []
start1, end1 = chunks[0]
if len(chunks) > 1:
for start2, end2 in chunks[1:]:
if start2 == end1:
end1 = end2
else:
out.append((start1, end1))
start1, end1 = start2, end2
out.append((start1, end1))
return tuple(out)
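# A small usage sketch (not part of the original source): chunks whose boundaries
# touch are merged into one span, while non-adjacent chunks stay separate.
chunks = ((0, 10), (10, 20), (25, 30), (30, 40))
print(normalize_chunks(chunks))  # ((0, 20), (25, 40))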
| 5,339,112
|
def learning_rate_schedule(adjusted_learning_rate, lr_warmup_init,
lr_warmup_step, first_lr_drop_step,
second_lr_drop_step, global_step):
"""Handles linear scaling rule, gradual warmup, and LR decay."""
# lr_warmup_init is the starting learning rate; the learning rate is linearly
# scaled up to the full learning rate after `lr_warmup_steps` before decaying.
linear_warmup = (lr_warmup_init +
(tf.cast(global_step, dtype=tf.float32) / lr_warmup_step *
(adjusted_learning_rate - lr_warmup_init)))
learning_rate = tf.where(global_step < lr_warmup_step,
linear_warmup, adjusted_learning_rate)
lr_schedule = [[1.0, lr_warmup_step],
[0.1, first_lr_drop_step],
[0.01, second_lr_drop_step]]
for mult, start_global_step in lr_schedule:
learning_rate = tf.where(global_step < start_global_step, learning_rate,
adjusted_learning_rate * mult)
return learning_rate
| 5,339,113
|
def test_vgroup_remove(using_opengl_renderer):
"""Test the VGroup remove method."""
a = OpenGLVMobject()
c = OpenGLVMobject()
b = VGroup(c)
obj = VGroup(a, b)
assert len(obj.submobjects) == 2
assert len(b.submobjects) == 1
obj.remove(a)
b.remove(c)
assert len(obj.submobjects) == 1
assert len(b.submobjects) == 0
obj.remove(b)
assert len(obj.submobjects) == 0
| 5,339,114
|
def set_power_state_server(power_state: ServerPowerState) -> ServerPowerState:
"""Record the current power limits and set the power limit using nvidia-smi."""
# Record current power limits.
current_limits = []
if power_state.power_limit:
cmd = "nvidia-smi --query-gpu=power.limit --format=csv,noheader,nounits"
logging.info(f"Getting current GPU power limits: {cmd}")
output = run_command(cmd, get_output=True, tee=False)
current_limits = [float(line) for line in output]
# Set power limit to the specified value.
cmd = f"sudo nvidia-smi -pl {power_state.power_limit}"
logging.info(f"Setting current GPU power limits: {cmd}")
run_command(cmd)
if power_state.cpu_freq:
set_cpufreq(power_state.cpu_freq)
return ServerPowerState(current_limits, None)
| 5,339,115
|
def score_normalization(extracted_score: Union[str, None]):
"""
SOFA score normalization.
If available, returns the integer value of the SOFA score.
"""
score_range = list(range(0, 30))
if (extracted_score is not None) and (int(extracted_score) in score_range):
return int(extracted_score)
| 5,339,116
|
def get_session():
"""Creates an authorized Requests Session."""
credentials = service_account.Credentials.from_service_account_file(
filename=os.environ["GOOGLE_APPLICATION_CREDENTIALS"],
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
# Create a requests Session object with the credentials.
session = requests.AuthorizedSession(credentials)
return session
| 5,339,117
|
def wrap(wrapping_key_public, plaintext):
"""
RSA-OAEP key wrapping.
Args:
wrapping_key_public: The public key of the RSA wrapping key
plaintext: The plaintext key to wrap
"""
rsa_cipher = PKCS1_OAEP.new(
key=wrapping_key_public, hashAlgo=SHA256, mgfunc=lambda x, y: pss.MGF1(x, y, SHA1))
return rsa_cipher.encrypt(plaintext)
| 5,339,118
|
def get_output_filenames(output_path: str):
"""Returns a dict of output filenames."""
now = datetime.datetime.now()
now_string = now.strftime("%Y%m%d_%H%M%S")
filenames = {
'train': os.path.join(output_path, "train_split_"+now_string+".csv"),
'val': os.path.join(output_path, "val_split_"+now_string+".csv")
}
write_file("/tmp/train.txt", filenames['train'])
write_file("/tmp/val.txt", filenames['val'])
return filenames
| 5,339,119
|
def compute_horizontal_vessel_purchase_cost(W, D, F_M):
"""
Return the purchase cost [Cp; in USD] of a horizontal vessel,
including the cost of platforms and ladders.
Parameters
----------
W : float
Weight [lb].
D : float
Diameter [ft].
F_M : float
Vessel material factor.
Notes
-----
The purchase cost is given by [1]_. See source code for details.
The purchase cost is scaled according to BioSTEAM's Chemical
Plant Cost Index, `biosteam.CE`.
References
----------
.. [1] Seider, W. D., Lewin, D. R., Seader, J. D., Widagdo, S., Gani, R.,
& Ng, M. K. (2017). Product and Process Design Principles. Wiley.
Cost Accounting and Capital Cost Estimation (Chapter 16)
"""
# C_v: Vessel cost
# C_pl: Platforms and ladders cost
C_v = exp(5.6336 - 0.4599*ln(W) + 0.00582*ln(W)**2)
C_pl = 2275*D**0.20294
return bst.CE/567 * (F_M * C_v + C_pl)
| 5,339,120
|
def format_dev_sub_dev_id(pciIdPair):
"""
pciIdPair (int pci device id, int pci sub device id or None)
"""
if pciIdPair[1] is None:
return "(0x%08X, None)" % pciIdPair[0]
return "(0x%08X, 0x%08X)" % pciIdPair
| 5,339,121
|
def mychats():
"""
Show Chats where I can write
:return:
{
error: 0,
chats: [...Chat]
}
"""
result = {
'error': 0,
'chats': []
}
if 'user_id' in session:
chats_rows = query_db('SELECT * FROM chats WHERE user1_id = ? OR user2_id = ?', [session['user_id'], session['user_id']])
result['chats'] = chats_rows
# for chat in query_db('select * from chats'):
# print(chat['name'])
return result
| 5,339,122
|
def polynom_prmzt(x, t, order):
"""
Polynomial (deterministic) parameterization of fast variables (Y).
NB: Only valid for system settings of Wilks'2005.
Note: In order to observe an improvement in DA performance with
higher orders, the EnKF must be reasonably tuned.
There is very little improvement gained above order=1.
"""
if order == 4:
# From Wilks
d = 0.262 + 1.45*x - 0.0121*x**2 - 0.00713*x**3 + 0.000296*x**4
elif order == 3:
# From Arnold
d = 0.341 + 1.30*x - 0.0136*x**2 - 0.00235*x**3
elif order == 1:
# From me -- see AdInf/illust_parameterizations.py
d = 0.74 + 0.82*x
elif order == 0:
# From me -- see AdInf/illust_parameterizations.py
d = 3.82
elif order == -1:
# Leave as dxdt_trunc
d = 0
else:
raise NotImplementedError
return d
| 5,339,123
|
def new_binning(xmin, xmax, nbin=25, bin_type='lin', out_type=int, custom_bins=None):
"""
Define the new binning.
Parameters
----------
xmin, xmax : float
lower and upper edges of the binning range
nbin : int
number of bins
bin_type : str
'lin' for linear or 'log' for logarithmic spacing
out_type : type
dtype of the returned bin edges
custom_bins : list or numpy.ndarray, optional
explicit bin edges; overrides the other options
Returns
-------
array
the array with the edges of the new binning
"""
if bin_type == 'lin' and custom_bins is None:
binning_ = np.linspace(xmin, xmax, num=nbin+1, dtype=out_type)
elif bin_type == 'log' and custom_bins is None:
if xmin == 0:
xmin = 1
binning_ = np.logspace(np.log10(xmin), np.log10(xmax), num=nbin+1, dtype=out_type)
elif type(custom_bins) == list or type(custom_bins) == np.ndarray:
binning_ = np.array(custom_bins)
else:
logger.info('ERROR: Invalid binning type. Choose lin or log, or customize it.')
sys.exit()
logger.info('Multipole binning:%s'%str(binning_))
return binning_
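# A brief usage sketch (not part of the original source); it assumes `numpy as np`,
# `sys` and a module-level `logger` are available, as the function above requires.
print(new_binning(0, 100, nbin=4))                    # [  0  25  50  75 100]
print(new_binning(1, 1000, nbin=3, bin_type='log'))   # [   1   10  100 1000]
print(new_binning(0, 0, custom_bins=[0, 5, 10]))      # [ 0  5 10]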
| 5,339,124
|
def GenerateSurfaceAndBuriedResiduesVisualization():
"""Generate visualization for surface and buried residues."""
Outfile = OptionsInfo["PMLOutfile"]
OutFH = open(Outfile, "w")
if OutFH is None:
MiscUtil.PrintError("Failed to open output file %s " % Outfile)
MiscUtil.PrintInfo("\nGenerating file %s..." % Outfile)
# Setup header...
WritePMLHeader(OutFH, ScriptName)
WritePyMOLParameters(OutFH)
# Load reffile for alignment..
if OptionsInfo["Align"]:
WriteAlignReference(OutFH)
# Setup view for each input file...
FirstComplex = True
FirstComplexFirstChainName = None
for FileIndex in range(0, len(OptionsInfo["InfilesInfo"]["InfilesNames"])):
# Setup PyMOL object names...
PyMOLObjectNames = SetupPyMOLObjectNames(FileIndex)
# Setup complex view...
WriteComplexView(OutFH, FileIndex, PyMOLObjectNames, FirstComplex)
SpecifiedChainsAndLigandsInfo = OptionsInfo["InfilesInfo"]["SpecifiedChainsAndLigandsInfo"][FileIndex]
FirstChain = True
for ChainID in SpecifiedChainsAndLigandsInfo["ChainIDs"]:
if FirstComplex and FirstChain:
FirstComplexFirstChainName = PyMOLObjectNames["Chains"][ChainID]["ChainAlone"]
WriteChainView(OutFH, FileIndex, PyMOLObjectNames, ChainID)
# Setup ligand views...
FirstLigand = True
for LigandID in SpecifiedChainsAndLigandsInfo["LigandIDs"][ChainID]:
WriteChainLigandView(OutFH, FileIndex, PyMOLObjectNames, ChainID, LigandID)
# Set up ligand level group...
Enable, Action = [False, "close"]
if FirstLigand:
FirstLigand = False
Enable, Action = [True, "open"]
GenerateAndWritePMLForGroup(OutFH, PyMOLObjectNames["Ligands"][ChainID][LigandID]["ChainLigandGroup"], PyMOLObjectNames["Ligands"][ChainID][LigandID]["ChainLigandGroupMembers"], Enable, Action)
# Setup Chain level group...
Enable, Action = [False, "close"]
if FirstChain:
FirstChain = False
Enable, Action = [True, "open"]
GenerateAndWritePMLForGroup(OutFH, PyMOLObjectNames["Chains"][ChainID]["ChainGroup"], PyMOLObjectNames["Chains"][ChainID]["ChainGroupMembers"], Enable, Action)
# Set up complex level group...
Enable, Action = [False, "close"]
if FirstComplex:
FirstComplex = False
Enable, Action = [True, "open"]
GenerateAndWritePMLForGroup(OutFH, PyMOLObjectNames["PDBGroup"], PyMOLObjectNames["PDBGroupMembers"], Enable, Action)
# Delete empty PyMOL objects...
DeleteEmptyPyMOLObjects(OutFH, FileIndex, PyMOLObjectNames)
if OptionsInfo["Align"]:
DeleteAlignReference(OutFH)
if FirstComplexFirstChainName is not None:
OutFH.write("""\ncmd.orient("%s", animate = -1)\n""" % FirstComplexFirstChainName)
else:
OutFH.write("""\ncmd.orient("visible", animate = -1)\n""")
OutFH.close()
# Generate PSE file as needed...
if OptionsInfo["PSEOut"]:
GeneratePyMOLSessionFile()
| 5,339,125
|
def decode(s):
"""
Deserialize an EDS object from an EDS string.
"""
lexer = _EDSLexer.lex(s.splitlines())
return _decode_eds(lexer)
| 5,339,126
|
def pid_exists(pid):
"""Check whether pid exists in the current process table."""
if pid < 0:
return False
import errno
try:
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True
| 5,339,127
|
def allowed_file(filename):
""" Verifies if file extension is compatible """
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
| 5,339,128
|
def clap_convert(txt):
"""convert string of clap values on medium to actualy number
Args:
txt (str): claps values
Returns:
number of claps (int)
"""
# Medium annotation
if txt[-1] == "K":
output = int(float(txt[:-1]) * 1000)
return output
else:
return int(txt)
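# A quick usage sketch (not part of the original source): Medium displays large
# clap counts with a trailing "K", e.g. "1.2K" stands for 1200 claps.
print(clap_convert("1.2K"))  # 1200
print(clap_convert("87"))    # 87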
| 5,339,129
|
def hello(count: int, name: str):
""" Simple program that greets NAME for a total of COUNT times.
:param count: The number of times to repeat
:param name: The name to repeat"""
for _ in range(count):
print(f"Hello, {name}!")
| 5,339,130
|
def write_phase1_capsummary(inst, isStringIO=True):
"""
Write out a multiweek summary of capacity, demand, understaffing.
:param inst: Model instance
:param isStringIO: True (default) to return StringIO object, False to return string
:return: capacity summary as StringIO object or a string.
"""
param = 'period,day,week,dmd,cap,us1,us2,ustot\n'
rows = [(i, j, w,
inst.dmd_staff[i, j, w],
inst.cov[i, j, w].value,
inst.under1[i, j, w].value,
inst.under2[i, j, w].value,
inst.under1[i, j, w].value + inst.under2[i, j, w].value)
for i in inst.PERIODS
for j in inst.DAYS
for w in inst.WEEKS
]
for row in rows:
row = [str(r) for r in row]
data_row = ','.join(row)
data_row += '\n'
param += data_row
if isStringIO:
param_out = io.StringIO()
param_out.write(param)
return param_out  # return the StringIO object, as documented
else:
return param
| 5,339,131
|
def glorot_uniform(shape):
"""
:param shape: tuple with the shape of the wanted output (filters_amount, depth, height, width)
:return: array (it's shape=param shape) with initialized values using 'glorot uniform' initializer
"""
fan_in, fan_out = _calc_fans(shape)
scale = 1. / ((fan_in + fan_out) / 2.)
limit = np.sqrt(3.0 * scale)
return np.random.uniform(low=-limit, high=limit, size=shape)
| 5,339,132
|
def _test_atomic_write(file_size):
"""
Create a file of A's, use it to set_contents_from_file.
Verify the contents are all A's.
Create a file of B's, use it to re-set_contents_from_file.
Before re-set continues, verify content's still A's
Re-read the contents, and confirm we get B's
"""
bucket_name = get_new_bucket()
client = get_client()
objname = 'testobj'
# create <file_size> file of A's
fp_a = FakeWriteFile(file_size, 'A')
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
# verify A's
_verify_atomic_key_data(bucket_name, objname, file_size, 'A')
# create <file_size> file of B's
# but try to verify the file before we finish writing all the B's
fp_b = FakeWriteFile(file_size, 'B',
lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
# verify B's
_verify_atomic_key_data(bucket_name, objname, file_size, 'B')
| 5,339,133
|
def main():
"""Creates a directory with a file for each article
:return: directory with a file for each article
"""
os.system('mkdir -p intermediary_corpus || true')
get_pdf_file_content('PDF_files/', 'intermediary_corpus/')
edit_text('intermediary_corpus/', 'corpus_B/')
os.system('rm -rf intermediary_corpus/')
return
| 5,339,134
|
def get_server() -> str:
"""Generate a server information.
:return: server info
:rtype: str
"""
uname = platform.uname()
fmt_plat = f"OS: {uname.system} {uname.release} v{uname.version}\n"
fmt_plat += f"CPU: {uname.processor} ({os.cpu_count()} threads)\n"
fmt_plat += f"PID: {os.getpid()}"
return quote(fmt_plat, True, "py")
| 5,339,135
|
def second_order_moments(n_components, e2, m1, alpha0):
"""Second-Order Moments
To prevent creating 2nd order moments explicitly, we construct its
decomposition with `n_components`. check reference [?] section 5.2
for details.
Parameters
----------
n_components: int
Number of components
e2: sparse matrix, shape=(n_features, n_features)
Expectation of word pairs. e2[i, j] is the expectation of word `i`
and `j` in the same document.
m1: array, shape=(n_features,)
Expectation of each words.
alpha0: double
Sum of the topic concentration parameters
Returns
-------
m2_vals : array, shape=(n_components,)
eigenvalues of the second-order moments
m2_vecs : array, shape=(n_features, n_components)
eigenvectors of the second-order moments
"""
# eigen values and vectors of E2
n_features = e2.shape[0]
#print("%d ; %d" % (n_features, n_components))
if n_components == n_features:
# run full svd, convert e2 to dense array first
e2_vecs, e2_vals, _ = LA.svd(e2.toarray())
else:
#e2_vals, e2_vecs = sp.linalg.eigsh(e2, k=n_components, which='LM')
e2_vecs, e2_vals, _ = sp.linalg.svds(e2, k=n_components, which='LM',
return_singular_vectors=True)
e2_vals *= (alpha0 + 1.)
m1_p = np.dot(e2_vecs.T, m1)
# section 5.2 part 1.
m2_p = (-1. * alpha0) * (m1_p * m1_p[:, np.newaxis])
m2_p[np.diag_indices_from(m2_p)] += e2_vals
# section 5.2 part 1.
# eigen values and vectors of M2 prime
try:
m2p_vecs, m2p_vals, _ = LA.svd(m2_p)
m2_vals = m2p_vals
m2_vecs = np.dot(e2_vecs, m2p_vecs)
except LA.LinAlgError:
# In order to pass `check_estimator` test.
# convert this error to warnings.
warnings.warn("SVD in second_order_moments did not converge. "
"the algorithm will not work.",
ConvergenceWarning)
m2_vals = np.ones(m2_p.shape[0])
m2_vecs = m2_p
return (m2_vals, m2_vecs)
| 5,339,136
|
def callback_query_wrapper(func):
"""Create a session, handle permissions and exceptions for callback queries."""
def wrapper(update, context):
user = None
if context.user_data.get("ban"):
return
temp_ban_time = context.user_data.get("temporary-ban-time")
if temp_ban_time is not None and temp_ban_time == date.today():
update.callback_query.answer(i18n.t("callback.spam"))
return
session = get_session()
try:
user, statistic = get_user(session, update.callback_query.from_user)
# Cache ban value, so we don't have to lookup the value in our database
if user.banned:
context.user_data["ban"] = True
return
# Cache temporary-ban time, so we don't have to create a connection to our database
if statistic.votes > config["telegram"]["max_user_votes_per_day"]:
update.callback_query.answer(
i18n.t("callback.spam", locale=user.locale)
)
context.user_data["temporary-ban-time"] = date.today()
return
func(context.bot, update, session, user)
session.commit()
except Exception as e:
if not ignore_exception(e):
if config["logging"]["debug"]:
traceback.print_exc()
sentry.captureException()
locale = "English"
if user is not None:
locale = user.locale
update.callback_query.answer(i18n.t("callback.error", locale=locale))
finally:
session.close()
return wrapper
| 5,339,137
|
def remove_names(df: pd.DataFrame) -> pd.DataFrame:
"""Convert personal names to numerical values."""
df = df.reset_index()
df.drop(columns='Name', inplace=True)
return df
| 5,339,138
|
def handle_epoch_metrics(step_metrics, epoch_labels, epoch_predictions):
"""
Function that handles the metrics per epoch.
Inputs:
step_metrics - Dictionary containing the results of the steps of an epoch
epoch_labels - List of labels from the different steps
epoch_predictions - List of predictions from the different steps
Outputs:
epoch_metrics - Dictionary containing the averaged results of an epoch
"""
# compute the loss
loss = torch.mean(torch.stack(step_metrics['losses'], dim=0), dim=0)
loss = round(loss.item(), 4)
# compute the accuracy and f1
accuracy, f1 = compute_accuracy_f1(step_metrics['predictions'], step_metrics['labels'])
# create a new epoch dictionary
epoch_metrics = {'loss': loss, 'accuracy': accuracy, 'f1': f1}
# return the epoch dictionary
return epoch_metrics
| 5,339,139
|
def project_disk_sed(bulge_sed, disk_sed):
"""Project the disk SED onto the space where it is bluer
For the majority of observed galaxies, it appears that
the difference between the bulge and the disk SEDs is
roughly monotonic, making the disk bluer.
This projection operator projects colors that are redder onto
the same difference in color as the previous wavelength,
similar to the way monotonicity works for the morphological
`S` matrix of the model.
While a single iteration of this model is unlikely to yield
results that are as good as those in `project_disk_sed_mean`,
after many iterations it is expected to converge to a better value.
"""
new_sed = disk_sed.copy()
diff = bulge_sed - disk_sed
for s in range(1, len(diff)-1):
if diff[s]<diff[s-1]:
new_sed[s] = new_sed[s] + diff[s-1]
diff[s] = diff[s-1]
return new_sed
| 5,339,140
|
def load_image_url(image_url, image_size=(256, 256), preserve_aspect_ratio=True):
"""Loads and preprocesses images from a given url."""
# Cache image file locally.
image_path = tf.keras.utils.get_file(
os.path.basename(image_url)[-128:], image_url)
# Load and convert to float32 numpy array, add batch dimension, and normalize to range [0, 1].
img = plt.imread(image_path).astype(np.float32)[np.newaxis, ...]
if img.max() > 1.0:
img = img / 255.
if len(img.shape) == 3:
img = tf.stack([img, img, img], axis=-1)
img = crop_center(img)
img = tf.image.resize(img, image_size, preserve_aspect_ratio=preserve_aspect_ratio)
return img
| 5,339,141
|
def qlog_numpy(q):
"""
Applies logarithm map to q
:param q: (4,)
:return: (3,)
"""
if all(q[1:] == 0):
q = np.zeros(3)
else:
q = np.arccos(q[0]) * q[1:] / np.linalg.norm(q[1:])
return q
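# A short numeric sketch (not part of the original source), assuming `numpy as np`:
# the identity quaternion maps to the zero vector, and a unit quaternion with
# scalar part cos(pi/4) maps to arccos(w) times the normalized vector part.
import numpy as np
print(qlog_numpy(np.array([1.0, 0.0, 0.0, 0.0])))               # [0. 0. 0.]
q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
print(qlog_numpy(q))                                            # [0. 0. 0.78539816]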
| 5,339,142
|
def ngrams(n, word):
"""
Generator of all *n*-grams of *word*.
Args:
n (int): The length of character ngrams to be extracted
word (str): The word of which the ngrams are to be extracted
Yields:
str: ngram
"""
for i in range(len(word) - n + 1):  # +1 so the trailing n-grams are included
yield word[i:i+n]
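# A quick usage sketch (not part of the original source), using the corrected
# range above so that the trailing n-grams are included.
print(list(ngrams(2, "word")))    # ['wo', 'or', 'rd']
print(list(ngrams(3, "carbon")))  # ['car', 'arb', 'rbo', 'bon']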
| 5,339,143
|
def merge_flow_relationship(flow_data, tx=None):
"""
This function focuses on just creating the starting/ending switch relationship for a flow.
"""
query = (
"MERGE " # MERGE .. create if doesn't exist .. being cautious
" (src:switch {{name:'{src_switch}'}}) "
" ON CREATE SET src.state = 'inactive' "
"MERGE "
" (dst:switch {{name:'{dst_switch}'}}) "
" ON CREATE SET dst.state = 'inactive' "
"MERGE (src)-[f:flow {{" # Should only use the relationship primary keys in a match
" flowid:'{flowid}', "
" cookie: {cookie} }} ]->(dst) "
"SET "
" f.meter_id = {meter_id}, "
" f.bandwidth = {bandwidth}, "
" f.ignore_bandwidth = {ignore_bandwidth}, "
" f.src_port = {src_port}, "
" f.dst_port = {dst_port}, "
" f.src_switch = '{src_switch}', "
" f.dst_switch = '{dst_switch}', "
" f.src_vlan = {src_vlan}, "
" f.dst_vlan = {dst_vlan}, "
" f.transit_vlan = {transit_vlan}, "
" f.description = '{description}', "
" f.last_updated = '{last_updated}', "
" f.flowpath = '{flowpath}' "
)
flow_data['flowpath'].pop('clazz', None) # don't store the clazz info, if it is there.
flow_data['last_updated'] = calendar.timegm(time.gmtime())
flow_data['flowpath'] = json.dumps(flow_data['flowpath'])
if tx:
tx.run(query.format(**flow_data))
else:
graph.run(query.format(**flow_data))
| 5,339,144
|
def get_ML_features(df: pd.DataFrame, protease: str='trypsin', **kwargs) -> pd.DataFrame:
"""
Adds feature columns to the psms table that are used for subsequent machine-learning scoring.
Args:
df (pd.DataFrame): psms table of search results from alphapept.
protease (str, optional): string specifying the protease that was used for proteolytic digestion. Defaults to 'trypsin'.
Returns:
pd.DataFrame: df including additional scores for subsequent ML.
"""
df['decoy'] = df['sequence'].str[-1].str.islower()
df['abs_delta_m_ppm'] = np.abs(df['delta_m_ppm'])
df['naked_sequence'] = df['sequence'].apply(lambda x: ''.join([_ for _ in x if _.isupper()]))
df['n_AA']= df['naked_sequence'].str.len()
df['matched_ion_fraction'] = df['hits']/(2*df['n_AA'])
df['n_missed'] = df['naked_sequence'].apply(lambda x: count_missed_cleavages(x, protease))
df['n_internal'] = df['naked_sequence'].apply(lambda x: count_internal_cleavages(x, protease))
df['x_tandem'] = get_x_tandem_score(df)
return df
| 5,339,145
|
def revcomp(sequence):
"""
Find reverse complementary sequence
:param sequence: The RNA sequence in string form
:return: The reverse complement sequence in string form
"""
complement = {"A": "U", "U": "A", "C": "G", "G": "C", "N": "N"}
revcompseq = ""
sequence_list = list(sequence)
sequence_list.reverse()
for letter in sequence_list:
revcompseq += complement[letter.upper()]
return revcompseq
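# A small usage sketch (not part of the original source): A pairs with U and
# C pairs with G, and the complemented sequence is read in reverse order.
print(revcomp("AUGC"))     # GCAU
print(revcomp("gauuaca"))  # UGUAAUC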
| 5,339,146
|
def J(X, mean, r):
"""K-meansの目的関数(最小化を目指す)"""
summation = 0.0
for n in range(len(X)):
temp = 0.0
for k in range(K):
temp += r[n, k] * np.linalg.norm(X[n] - mean[k]) ** 2
summation += temp
return summation
| 5,339,147
|
def get_all_subs():
""" Temporary function until we work out a better autocomplete
for createpost """
# TODO
return [x.name for x in Sub.select(Sub.name)]
| 5,339,148
|
def timesince():
"""
Return the number of seconds elapsed since 00:00 on 1 January 1970 (the Unix epoch),
i.e. the raw timestamp before any formatting.
"""
return time.time()
| 5,339,149
|
def gencoords_outside(N, d, rad=None, truncmask=False, trunctype='circ'):
""" generate coordinates of all points in an NxN..xN grid with d dimensions
coords in each dimension are [-N/2, N/2)
N should be even"""
if not truncmask:
_, truncc, _ = gencoords_outside(N, d, rad, True)
return truncc
c = geometry.gencoords_base(N, d)
if rad is not None:
if trunctype == 'circ':
r2 = np.sum(c**2, axis=1)
trunkmask = r2 > (rad*N/2.0)**2
elif trunctype == 'square':
r = np.max(np.abs(c), axis=1)
trunkmask = r > (rad*N/2.0)
truncc = c[trunkmask, :]
else:
trunkmask = np.ones((c.shape[0],), dtype=np.bool8)
truncc = c
return c, truncc, trunkmask
| 5,339,150
|
def map_orientation(cur_orientation, cur_count):
""" . . . . . x
. . . . . x
. . . . . x
. . . . . x
. . . . . x
. . . . . x
"""
right_edge = 34905131040
""" . . . . . .
. . . . . .
. . . . . .
. . . . . .
. . . . . .
x x x x x x
"""
bottom_edge = 67645734912
""" we will check if each position of the game peice is valid
by investigating if it touches the right edge or the bottom edge
using a logica AND (&) operation. The & will be 0 if there is
no overlap and <> 0 if there is
Pass in peices positioned in the upper left corner so that this
check can walk right and down to checkk all conditions
"""
room_to_move_right = True
room_to_move_down = True
safe_down = True
while safe_down:
room_to_move_right = True
safe_right = True
row_start = cur_orientation
while safe_right:
peice_orientation_list[cur_count] = cur_orientation
cur_count += 1
""" moving piece right 1 bit is the same as multiplying by 2^1
. x . . . .
x x x . . . = 450
. . x . . .
. x x x . . = 900
"""
if room_to_move_right:
cur_orientation = cur_orientation << 1
room_to_move_right = ((cur_orientation & right_edge) == 0)
else:
safe_right = False
""" moving down is the same as shifting right 6 times or multiplying by 2^6, aka 64
. x . . . .
x x x . . . = 450
. x . . . .
x x x . . . = 28,800
"""
if room_to_move_down:
cur_orientation = row_start << 6
room_to_move_down = ((cur_orientation & bottom_edge) == 0)
else:
safe_down = False
return cur_count
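# An illustrative sketch of the edge check described in the comments above (not
# part of the original source): AND-ing a piece bitmask with the right-edge mask
# is non-zero exactly when the piece occupies the right-most column of the 6x6 board.
right_edge = 34905131040
piece = 450                       # the two-row piece drawn in the comments above
while piece & right_edge == 0:    # shift right one column at a time
    piece <<= 1
print(bin(piece))                 # stops once the piece reaches the right edge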
| 5,339,151
|
def glGetShaderInfoLog( baseOperation, obj ):
"""Retrieve the shader's error messages as a Python string
returns string which is '' if no message
"""
target = GLsizei()
glGetShaderiv(obj, GL_INFO_LOG_LENGTH,target)
length = target.value
if length > 0:
log = ctypes.create_string_buffer(length)
baseOperation(obj, length, None, log)
return log.value.strip(_NULL_8_BYTE) # null-termination
return ''
| 5,339,152
|
def _CheckGrdTranslations(grd_file, grd_lines, wanted_locales):
"""Check all <file> elements that correspond to an .xtb output file.
Args:
grd_file: Input .grd file path.
grd_lines: List of input .grd lines.
wanted_locales: set of wanted Chromium locale names.
Returns:
List of error message strings. Empty on success.
"""
wanted_locales = wanted_locales - set([_DEFAULT_LOCALE])
intervals = _BuildIntervalList(grd_lines, _IsTranslationGrdOutputLine)
errors = []
for start, end in intervals:
errors += _CheckGrdElementRangeLang(grd_lines, start, end, wanted_locales)
errors += _CheckGrdTranslationElementRange(grd_lines, start, end,
wanted_locales)
return errors
| 5,339,153
|
def choose_diverging_palette(as_cmap=False):
"""Launch an interactive widget to choose a diverging color palette.
This corresponds with the :func:`diverging_palette` function. This kind
of palette is good for data that range between interesting low values
and interesting high values with a meaningful midpoint. (For example,
change scores relative to some baseline value).
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
diverging_palette : Create a diverging color palette or colormap.
choose_colorbrewer_palette : Interactively choose palettes from the
colorbrewer set, including diverging palettes.
"""
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
@interact
def choose_diverging_palette(
h_neg=IntSlider(min=0,
max=359,
value=220),
h_pos=IntSlider(min=0,
max=359,
value=10),
s=IntSlider(min=0, max=99, value=74),
l=IntSlider(min=0, max=99, value=50), # noqa: E741
sep=IntSlider(min=1, max=50, value=10),
n=(2, 16),
center=["light", "dark"]
):
if as_cmap:
colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)
palplot(pal)
if as_cmap:
return cmap
return pal
| 5,339,154
|
def get_lm_model(args, device, config):
"""Get language model(based on GPT-2) used for sequence prediction."""
ninp = config["ninp"]
nhead = config["nhead"]
initrange = config["initrange"]
dropout = config["dropout"]
vocab_size = config["vocab_size"]
nhid = config["nhid"]
ndecoder = config["num_decoder_layers"]
if args.ssd_offload:
return transformer_lm.TransformerLM(vocab_size, ninp, nhead, nhid, dropout, initrange, ndecoder)
else:
return transformer_lm.TransformerLM(vocab_size, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
| 5,339,155
|
def get_meminfo():
"""
Get and format the content of /proc/meminfo
"""
buf = open('/proc/meminfo').read()
buf = ','.join([v.replace(' ', '') for v in
buf.split('\n') if v])
return buf
| 5,339,156
|
def effective_area(true_energy, reco_energy, simu_area):
"""
Compute the effective area from a list of simulated energy and reconstructed energy
Parameters
----------
true_energy: 1d numpy array
reco_energy: 1d numpy array
simu_area: float - area on which events are simulated
Returns
-------
float = effective area
"""
return simu_area * len(reco_energy) / len(true_energy)
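# A tiny numeric sketch (not part of the original source), assuming `numpy as np`:
# if 250 of 1000 simulated events survive reconstruction over a 1e6 m^2 area,
# the effective area is 2.5e5 m^2.
import numpy as np
true_energy = np.ones(1000)
reco_energy = np.ones(250)
print(effective_area(true_energy, reco_energy, 1.0e6))  # 250000.0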
| 5,339,157
|
def remove_vlan(duthost, vlan_id):
"""
Remove VLANs configuraation on DUT
Args:
duthost: DUT host object
vlan_id: VLAN id
"""
duthost.shell('config vlan del {}'.format(vlan_id))
pytest_assert(wait_until(3, 1, __check_vlan, duthost, vlan_id, True),
"VLAN RIF Vlan{} didn't remove as expected".format(vlan_id))
| 5,339,158
|
def execute(queries, arglists, fetchone=False):
"""Execute multiple queries to the sqlite3 jobtracker database.
All queries will be executed as a single transaction.
Return the result of the last query, or the ID of the last
INSERT, whichever is applicable.
Inputs:
queries: A list of queries to be executed.
arglists: A list (same length as queries).
Each entry contains the parameters to be
substituted into the corresponding query.
fetchone: If True, fetch and return only a single row.
Otherwise, fetch and return all rows.
(Only applies for SELECT statements.
Default: fetch all rows).
Outputs:
results: Single row, or list of rows (for SELECT statements),
depending on 'fetchone'. Or, the ID of the last
entry INSERT'ed (for INSERT statements).
"""
not_connected = True
count = 0
while not_connected:
try:
db_conn = sqlite3.connect(config.background.jobtracker_db,timeout=40.0)
db_conn.isolation_level = 'DEFERRED'
db_conn.row_factory = sqlite3.Row
db_cur = db_conn.cursor()
for q, args in zip(queries, arglists):
db_cur.execute(q, args)
db_conn.commit()
if db_cur.lastrowid:
results = db_cur.lastrowid
else:
if fetchone:
results = db_cur.fetchone()
else:
results = db_cur.fetchall()
db_conn.close()
not_connected = False
except sqlite3.OperationalError, e:
try:
db_conn.rollback()
db_conn.close()
except NameError:
# Connection wasn't established, 'db_conn' is not defined.
pass
if (count % 60) == 0:
print "Couldn't connect to DB for %d seconds. Will continue trying. " \
"Error message: %s" % (count, str(e))
time.sleep(1)
count+=1
return results
| 5,339,159
|
def physical_cpu_mhz(vir_connection):
""" Get the CPU frequency in MHz using libvirt.
:param vir_connection: A libvirt connection object.
:type vir_connection: virConnect
:return: The CPU frequency in MHz.
:rtype: int
"""
return vir_connection.getInfo()[3]
| 5,339,160
|
def test_pcall_sitepackages(venv, mocker, actioncls):
"""
Check that the sitepackages configuration in tox is passed to pipenv
"""
action = actioncls()
mocker.patch.object(os, "environ", autospec=True)
mocker.patch("subprocess.Popen")
venv.envconfig.sitepackages = True
result = tox_testenv_create(venv, action)
assert result == True
# Check that pipenv was executed with the correct arguments
subprocess.Popen.assert_called_once_with(
[sys.executable, "-m", "pipenv", "--site-packages", "--python", "test-python"],
action=action,
cwd=venv.path.dirpath(),
venv=False,
)
assert venv.tmpdir.ensure("Pipfile")
| 5,339,161
|
def addstream(bot, input):
"""Add a stream from the notify list"""
if not input.admin: return False
if not input.group(2): return
stream = input.group(2).lower()
if not stream in bot.config.streams:
bot.config.set_add('streams', stream)
bot.reply("Added {0} to stream list".format(stream))
else:
bot.reply("{0} is already in the stream list".format(stream))
| 5,339,162
|
def add_selection_logger(viewer: MjViewerBasic, sim: MjSim, callback=None):
"""
Adds a click handler that prints information about the body clicked with the middle mouse button.
Make sure to call env.render() so that a viewer exists before calling this function.
:param viewer: the MuJoCo viewer in use.
:param sim: the MuJoCo simulation object.
:param callback: optional callback to be called when the user clicks.
"""
import glfw
def mouse_callback(window, button, act, mods):
viewer._mouse_button_callback(window, button, act, mods)
middle_pressed = glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_MIDDLE) == glfw.PRESS
viewer.pert.active = 0
if middle_pressed:
w, h = glfw.get_window_size(window)
aspect_ratio = w / h
x, y = viewer._last_mouse_x, viewer._last_mouse_y
sel_point = np.zeros(3)
res = mujoco_py.functions.mjv_select(sim.model, sim.data, viewer.vopt,
aspect_ratio, x/w, (h-y)/h, viewer.scn, sel_point)
sel_body, sel_geom = '?', '?'
if res != -1:
sel_body = sim.model.body_id2name(sim.model.geom_bodyid[res])
sel_geom = sim.model.geom_id2name(res)
print(f'Selected {sel_body} ({sel_geom}) at {sel_point}')
if callable(callback):
callback(sel_point, x/w, (h-y)/h)
def cursor_pos_callback(window, xpos, ypos):
viewer._cursor_pos_callback(window, xpos, ypos)
middle_pressed = glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_MIDDLE) == glfw.PRESS
if middle_pressed:
w, h = glfw.get_window_size(window)
aspect_ratio = w / h
x, y = xpos, ypos
sel_point = np.zeros(3)
res = mujoco_py.functions.mjv_select(sim.model, sim.data, viewer.vopt,
aspect_ratio, x/w, (h-y)/h, viewer.scn, sel_point)
sel_body, sel_geom = '?', '?'
if res != -1:
sel_body = sim.model.body_id2name(sim.model.geom_bodyid[res])
sel_geom = sim.model.geom_id2name(res)
print(f'Selected {sel_body} ({sel_geom}) at {sel_point}')
if callable(callback):
callback(sel_point, x/w, (h-y)/h)
glfw.set_cursor_pos_callback(viewer.window, cursor_pos_callback)
glfw.set_mouse_button_callback(viewer.window, mouse_callback)
| 5,339,163
|
def get_author(search):
"""
Queries google scholar to find an author given a
search string. If the search does not return exactly one author, an error is raised.
"""
authors = list(scholarly.search_author(search))
if len(authors) > 1:
raise ValueError(f'Found >1 authors with search string: {search}, try something more specific')
elif not authors:
raise ValueError(f'Could not find authors with search string: {search}')
return authors[0].fill(sections=['basics', 'indices', 'publications'])
| 5,339,164
|
def _named_tensor_generic_operation(
tensor: torch.Tensor,
tensor_ops_pre: callable = dummy,
tensor_ops_post: callable = dummy,
name_ops: callable = dummy) -> torch.Tensor:
"""
Generic base function used by the other named-tensor operations.
It first stores the tensor's names, removes them, applies the operations, and re-applies the transformed names.
Args:
tensor (): the named tensor to work on
tensor_ops_pre (): the operation applied before the names are removed
tensor_ops_post (): the operation applied after the names are removed that acts on
the tensor
name_ops (): the operation that acts on the names
Returns:
the transformed tensor with the transformed names re-applied
"""
# Save the names in names_old and then remove the names from the tensor
tensor = tensor_ops_pre(tensor)
names_old = tensor.names
tensor = tensor.rename(None)
# operations
names_new = name_ops(names_old) # modify the names
tensor = tensor_ops_post(tensor) # change the tensor accordingly
return tensor.refine_names(*names_new)
| 5,339,165
|
def test_lico():
"""Example basic usage where everything works"""
input_list = Table.init_from_path(RESOURCE_PATH / "a_csv_file")
output_list = lico.process(input_list, Concatenate(columns=['patient', 'date']))
file = StringIO()
output_list.save(file)
file.seek(0)
content = file.read()
assert "patient,date,concatenated" in content
| 5,339,166
|
def test_stack_validation_fails_if_a_components_validator_fails(
stack_with_mock_components, failing_stack_validator
):
"""Tests that the stack validation fails if one of its components validates
fails to validate the stack."""
stack_with_mock_components.orchestrator.validator = failing_stack_validator
stack_with_mock_components.metadata_store.validator = None
stack_with_mock_components.artifact_store.validator = None
with pytest.raises(StackValidationError):
stack_with_mock_components.validate()
| 5,339,167
|
def normalize_address_components(parsed_addr):
# type: (MutableMapping[str, str]) -> MutableMapping[str, str]
"""Normalize parsed sections of address as appropriate.
Processes parsed address through subsets of normalization rules.
:param parsed_addr: address parsed into ordereddict per usaddress.
:type parsed_addr:Mapping
:return: parsed_addr with normalization processing applied to elements.
:rtype: dict
"""
parsed_addr = normalize_numbered_streets(parsed_addr)
parsed_addr = normalize_directionals(parsed_addr)
parsed_addr = normalize_street_types(parsed_addr)
parsed_addr = normalize_occupancy_type(parsed_addr)
return parsed_addr
| 5,339,168
|
def test_models_edx_ui_problem_graded_with_valid_statement(statement):
"""Tests that a `problem_graded` browser statement has the expected `event_type` and
`name`."""
assert statement.event_type == "problem_graded"
assert statement.name == "problem_graded"
| 5,339,169
|
def model_inputs():
"""
Construct the model inputs.
Returns: inputs, targets, learning_rate, source_sequence_len, target_sequence_len, max_target_sequence_len, all of type tensor
"""
inputs = tf.placeholder(tf.int32, [None, None], name="inputs")
targets = tf.placeholder(tf.int32, [None, None], name="targets")
learning_rate = tf.placeholder(tf.float32, name="learning_rate")
source_sequence_len = tf.placeholder(tf.int32, (None,), name="source_sequence_len") # TODO is this exactly the same as [None]?
target_sequence_len = tf.placeholder(tf.int32, (None,), name="target_sequence_len")
max_target_sequence_len = tf.placeholder(tf.int32, (None,), name="max_target_sequence_len")
return inputs, targets, learning_rate, source_sequence_len, target_sequence_len, max_target_sequence_len
| 5,339,170
|
def is_key_in_store(loc, key):
"""
A quick check to determine whether the :class:`pandas.HDFStore`
has data for ``key``
:ARGS:
loc: :class:`string` of path to :class:`pandas.HDFStore`
key: :class:`string` of the ticker to check if currently
available
:RETURNS:
whether ``key`` is currently a part of the data set
"""
try:
store = pandas.HDFStore(path = loc, mode = 'r')
except IOError:
print loc + " is not a valid path to an HDFStore Object"
return
store_keys = store.keys()
store.close()
return key in map(lambda x: x.strip('/'), store_keys )
| 5,339,171
|
def transform_rows_nonlinear06(data, **kwargs):
"""
Nonlinear row transformation 06. 12 simulated data sources; Functions: 1.0, 0.5*(x+1)^2, sin(pi*x), sin(2*pi*x), cos(pi*x), cos(2*pi*x), x^5, exp2, log10(x-x.min()), boxcox(2), boxcox(4), boxcox(6).
"""
sources_transformers = [
1.0,
lambda x: 0.5 * np.power((x+1), 2),
lambda x: np.sin(np.pi * x),
lambda x: np.sin(2.0 * np.pi * x),
lambda x: np.cos(np.pi * x),
lambda x: np.cos(2.0 * np.pi * x),
lambda x: np.power(x, 5),
lambda x: np.exp2(x),
lambda x: np.log10(x + (-1.0 * x.min()) + 0.01),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 2.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 6.00),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
| 5,339,172
|
def catch_parameter(opt):
"""Change the captured parameters names"""
switch = {'-h': 'help', '-o': 'one_timestamp', '-a': 'activity',
'-f': 'file_name', '-i': 'imp', '-l': 'lstm_act',
'-d': 'dense_act', '-p': 'optim', '-n': 'norm_method',
'-m': 'model_type', '-z': 'n_size', '-y': 'l_size',
'-c': 'folder', '-b': 'model_file', '-x': 'is_single_exec',
'-t': 'max_trace_size', '-e': 'splits', '-g': 'sub_group'}
try:
return switch[opt]
except:
raise Exception('Invalid option ' + opt)
| 5,339,173
|
def generate_code_v2(program):
"""
Returns an instance of :class:`CodeGenerationResult`.
:param program: An instance of :class:`loopy.TranslationUnit`.
"""
from loopy.kernel import LoopKernel
from loopy.translation_unit import make_program
# {{{ cache retrieval
from loopy import CACHING_ENABLED
if CACHING_ENABLED:
input_program = program
try:
result = code_gen_cache[input_program]
logger.debug(f"TranslationUnit with entrypoints {program.entrypoints}:"
" code generation cache hit")
return result
except KeyError:
pass
# }}}
if isinstance(program, LoopKernel):
program = make_program(program)
from loopy.kernel import KernelState
if program.state < KernelState.PREPROCESSED:
# Note that we cannot have preprocessing separately for everyone.
# Since, now the preprocessing of each one depends on the other.
# So we check if any one of the callable kernels is not preprocessed;
# if so, we have to do the preprocessing of every other kernel.
from loopy.preprocess import preprocess_program
program = preprocess_program(program)
from loopy.type_inference import infer_unknown_types
program = infer_unknown_types(program, expect_completion=True)
from loopy.schedule import linearize
program = linearize(program)
# Why diverge? Generated code for a non-entrypoint kernel and an entrypoint
# kernel isn't the same for a general loopy target. For example in OpenCL, a
# kernel callable from host and the one supposed to be callable from device
# have different function signatures. To generate correct code, each
# callable should be exclusively an entrypoint or a non-entrypoint kernel.
program = diverge_callee_entrypoints(program)
from loopy.check import pre_codegen_checks
pre_codegen_checks(program)
host_programs = {}
device_programs = []
device_preambles = []
callee_fdecls = []
implemented_data_infos = {}
# {{{ collect host/device programs
for func_id in sorted(key for key, val in program.callables_table.items()
if isinstance(val, CallableKernel)):
cgr = generate_code_for_a_single_kernel(program[func_id],
program.callables_table,
program.target,
func_id in program.entrypoints)
if func_id in program.entrypoints:
host_programs[func_id] = cgr.host_program
implemented_data_infos[func_id] = cgr.implemented_data_info
else:
assert len(cgr.device_programs) == 1
callee_fdecls.append(cgr.device_programs[0].ast.fdecl)
device_programs.extend(cgr.device_programs)
device_preambles.extend(cgr.device_preambles)
# }}}
# {{{ collect preambles
for clbl in program.callables_table.values():
device_preambles.extend(list(clbl.generate_preambles(program.target)))
# }}}
# adding the callee fdecls to the device_programs
device_programs = ([device_programs[0].copy(
ast=program.target.get_device_ast_builder().ast_module.Collection(
callee_fdecls+[device_programs[0].ast]))] +
device_programs[1:])
cgr = TranslationUnitCodeGenerationResult(
host_programs=host_programs,
device_programs=device_programs,
device_preambles=device_preambles,
implemented_data_infos=implemented_data_infos)
if CACHING_ENABLED:
code_gen_cache.store_if_not_present(input_program, cgr)
return cgr
| 5,339,174
|
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(str(port))
except (OSError, serial.SerialException):
pass
return result
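# Usage sketch: list the ports detected on this machine. Requires pyserial
# plus the `sys` and `glob` imports the function above relies on.
if __name__ == '__main__':
    for available_port in serial_ports():
        print(available_port)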
| 5,339,175
|
def train_concise_ch11(trainer_fn, hyperparams, data_iter, num_epochs=4):
"""Defined in :numref:`sec_minibatches`"""
# Initialization
net = nn.Sequential(nn.Linear(5, 1))
def init_weights(module):
if type(module) == nn.Linear:
torch.nn.init.normal_(module.weight, std=0.01)
net.apply(init_weights)
optimizer = trainer_fn(net.parameters(), **hyperparams)
loss = nn.MSELoss(reduction='none')
animator = d2l.Animator(xlabel='epoch', ylabel='loss',
xlim=[0, num_epochs], ylim=[0.22, 0.35])
n, timer = 0, d2l.Timer()
for _ in range(num_epochs):
for X, y in data_iter:
optimizer.zero_grad()
out = net(X)
y = y.reshape(out.shape)
l = loss(out, y)
l.mean().backward()
optimizer.step()
n += X.shape[0]
if n % 200 == 0:
timer.stop()
# `MSELoss` computes squared error without the 1/2 factor
animator.add(n / X.shape[0] / len(data_iter),
(d2l.evaluate_loss(net, data_iter, loss) / 2,))
timer.start()
print(f'loss: {animator.Y[0][-1]:.3f}, {timer.avg():.3f} sec/epoch')
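# Usage sketch (hedged): assumes the companion d2l helper from the same
# chapter, e.g. `d2l.get_data_ch11`, which yields minibatches with the
# 5 features expected by nn.Linear(5, 1) above.
# data_iter, _ = d2l.get_data_ch11(batch_size=10)
# train_concise_ch11(torch.optim.SGD, {'lr': 0.01}, data_iter)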
| 5,339,176
|
def list(tag_name=None, start_date=None):
""" List records for a given tag (optional: starting from a given date)
"""
if tag_name is not None:
if tag_name not in get_tags():
log.error(ERROR_UNKNOWN_TAG_NAME)
return
if start_date is not None:
if start_date == OPTION_TODAY:
start_date = get_date(DEFAULT_DATE)
else:
start_date = get_date(start_date)
with open(FILE, 'r') as f:
lines = f.read().splitlines()
for line in lines:
parts = line.split(" ")
if parts[2] == tag_name:
if start_date is not None:
if get_date(parts[0].split("/")[1]) >= start_date:
print(nice(line))
else:
with open(FILE, 'r') as f:
lines = f.read().splitlines()
for line in lines:
parts = line.split(" ")
if parts[2] == tag_name:
print(nice(line))
else:
with open(FILE, 'r') as f:
lines = f.read().splitlines()
for line in lines:
print(line)
| 5,339,177
|
def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None):
"""
Fetches info about mgr pods in the cluster
Args:
mgr_label (str): label associated with mgr pods
(default: constants.MGR_APP_LABEL)
namespace (str): Namespace in which the ceph cluster lives
(default: config.ENV_DATA['cluster_namespace'])
Returns:
list : of mgr pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
mgrs = get_pods_having_label(mgr_label, namespace)
mgr_pods = [Pod(**mgr) for mgr in mgrs]
return mgr_pods
| 5,339,178
|
def normalise_architecture(architecture: Union[str, int]):
"""Convert any valid architecture alias to either 'x86_64' or 'i686'.
Raise an error for invalid input.
"""
for (true_name, aliases) in architecture_aliases.items():
if architecture in aliases:
return true_name
raise ValueError(
f"Invalid architecture {repr(architecture)}. "
f"Legal 64 bit values are:\n {architecture_aliases['x86_64']}\n"
f"And legal 32 bit values are:\n {architecture_aliases['i686']}\n"
)
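# Usage sketch (hypothetical alias table; the module's real
# `architecture_aliases` mapping may differ):
# architecture_aliases = {
#     'x86_64': ['x86_64', 'amd64', 'AMD64', 64, '64'],
#     'i686': ['i686', 'i386', 'x86', 'win32', 32, '32'],
# }
# normalise_architecture('amd64')  # -> 'x86_64'
# normalise_architecture(32)       # -> 'i686'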
| 5,339,179
|
def process_m(filename, m, estimator):
"""Returns the list of file sizes and PSNR values for
compression method m.
"""
filesize, psnr = [], []
for q in range(0, 101, 5):
_size, _psnr = process_q(filename, q, m, estimator)
filesize.append(_size / 1024) # in kilobyte(s)
psnr.append(_psnr)
return filesize, psnr
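# Usage sketch (process_q and the estimator are assumed to be defined elsewhere
# in this module; the filename and method id are illustrative only):
# sizes_kb, psnrs = process_m('sample.png', m=4, estimator=psnr_estimator)
# plt.plot(sizes_kb, psnrs, marker='o')  # rate-distortion curve for method 4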
| 5,339,180
|
def fmt(n):
"""format number with a space in front if it is single digit"""
if n < 10:
return " " + str(n)
else:
return str(n)
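# Tiny self-check: single-digit numbers gain a leading space so columns align.
assert fmt(7) == ' 7'
assert fmt(42) == '42'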
| 5,339,181
|
def test_registration():
"""Test registering a magic and getting a copy of it and de-registering."""
manager.MagicManager.clear_magics()
def my_magic(cell=None, line=None):
"""This is a magic."""
if not cell:
cell = 'foo'
if not line:
line = 'bar'
return f'{cell}{line}'
my_magic.magic_name = 'magical_function'
my_magic.fn = my_magic
manager.MagicManager.register_magic(my_magic)
magic_from_manager = manager.MagicManager.get_magic('magical_function')
assert magic_from_manager() == 'foobar'
my_magic.magic_name = 'other_magic'
def conditional():
return False
manager.MagicManager.register_magic(my_magic, conditional=conditional)
magic_from_manager = manager.MagicManager.get_magic('other_magic')
assert magic_from_manager is None
manager.MagicManager.register_magic(my_magic)
magic_from_manager = manager.MagicManager.get_magic('other_magic')
assert magic_from_manager() == 'foobar'
manager.MagicManager.deregister_magic('other_magic')
magic_from_manager = manager.MagicManager.get_magic('other_magic')
assert magic_from_manager is None
manager.MagicManager.deregister_magic('magical_function')
magic_from_manager = manager.MagicManager.get_magic('magical_function')
assert magic_from_manager is None
with pytest.raises(KeyError):
manager.MagicManager.deregister_magic('does_not_exist')
| 5,339,182
|
def Show_Method(filename):
""" The function Show_Method(filename) reads the method contained in the file and displays its content
"""
global Method_Content
# Read the method and update the variable "Method_Content"
try:
with open(filename) as file:
Method_Content.set(file.read()) # Reads the content of the method
except OSError:
Method_Content.set('No test - File does not exist')
# Create frame containing the method content
frame_Method_Content = ttk.LabelFrame(tab1, text = 'Segments', borderwidth=2, relief='ridge')
frame_Method_Content.grid(column=0, row=3, columnspan=1, sticky="nsew")
frame_Method_Content.columnconfigure(1, weight=1)
frame_Method_Content.rowconfigure(1, weight=1)
# Create a scrolledtext widget containing the method content
scrol_h = 30
scrol_w = 50
scr = scrolledtext.ScrolledText(frame_Method_Content, width = scrol_w, height = scrol_h)
scr.grid(row=0, column=0, padx=5, pady=5, columnspan=1)
scr.configure(state='normal')
scr.insert('insert', Method_Content.get())
scr.configure(state='disabled') # State is 'disabled' so that the user cannot change the content
| 5,339,183
|
def mailer(recips, subject, report):
"""
Sends an email containing a report from a flushed MessageBuffer.
"""
if not recips:
logging.error("Recips was empty, adding error recip")
recips.append(setting('REPORT_TO', ''))
logging.info('Mailer is emailing, subject = %r, recipients=%r',
subject, recips)
send_email(setting('REPORT_FROM', ''), ', '.join(recips),
subject, report, reply_to=setting('REPLY_TO', ''))
| 5,339,184
|
def stop():
"""Stop cleaning
This is using docstrings for specifications.
---
definitions:
stop:
type: object
properties:
did:
type: string
siid:
type: integer
aiid:
type: integer
code:
type: integer
out:
type: array
items: {}
security:
- Bearer: []
responses:
200:
description: OK
schema:
$ref: '#/definitions/stop'
400:
description: Bad Request
401:
description: Unauthorized
"""
consoleOutput = (
popen("miiocli dreamevacuum --ip " + creds.ip + " --token " + creds.token + " stop")
.read()
.strip()
.rstrip("\n")
)
# 400
if consoleOutput.find("Error") != -1:
return Response(response=consoleOutput.rstrip("\n"), status=400, mimetype="text/plain")
# 200
result = consoleOutput.partition("\n")[2]
print(result)
if result.find("{'did'") != -1:
return Response(response=result.replace("'", '"'), status=200, mimetype="application/json")
# Fallback: return the raw output as plain text when it is not the expected payload
return Response(response=result, status=200, mimetype="text/plain")
| 5,339,185
|
def save_key(access_key, output_filename=DEFAULT_ACCESS_KEY_FILE):
"""
saves access key to .yc json file
"""
with open(output_filename, "w+") as f:
f.write(json.dumps(access_key, indent=4))
return output_filename
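# Usage sketch (the key fields and filename are illustrative; `json` must be
# importable at module level, as save_key above already assumes):
# saved_path = save_key({'id': 'example-key-id', 'secret': 'example-secret'},
#                       output_filename='example_access_key.json')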
| 5,339,186
|
def make_link_targets(proj_name,
user_name,
repo_name,
known_link_fname,
out_link_fname,
url=None,
ml_url=None):
""" Check and make link targets
If url is None or ml_url is None, check if there are links present for these
in `known_link_fname`. If not, raise error. The check is:
Look for a target `proj_name`.
Look for a target `proj_name` + ' mailing list'
Also, look for a target `proj_name` + 'github'. If this exists, don't write
this target into the new file below.
If we are writing any of the url, ml_url, or github address, then write new
file with these links, of form:
.. _`proj_name`
.. _`proj_name`: url
.. _`proj_name` mailing list: url
"""
with open(known_link_fname, 'rt') as link_fh:
link_contents = link_fh.readlines()
have_url = url is not None
have_ml_url = ml_url is not None
have_gh_url = None
for line in link_contents:
if not have_url:
match = re.match(r'..\s+_`%s`:\s+' % proj_name, line)
if match:
have_url = True
if not have_ml_url:
match = re.match(r'..\s+_`%s mailing list`:\s+' % proj_name, line)
if match:
have_ml_url = True
if not have_gh_url:
match = re.match(r'..\s+_`%s github`:\s+' % proj_name, line)
if match:
have_gh_url = True
if not have_url or not have_ml_url:
raise RuntimeError('Need command line or known project '
'and / or mailing list URLs')
lines = []
if url is not None:
lines.append('.. _`%s`: %s\n' % (proj_name, url))
if not have_gh_url:
gh_url = 'http://github.com/%s/%s' % (user_name, repo_name)
lines.append('.. _`%s github`: %s\n' % (proj_name, gh_url))
if ml_url is not None:
lines.append('.. _`%s mailing list`: %s\n' % (proj_name, ml_url))
if len(lines) == 0:
# Nothing to do
return
# A neat little header line
lines = ['.. %s\n' % proj_name] + lines
with open(out_link_fname, 'wt') as out_links:
out_links.writelines(lines)
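# Usage sketch (paths and URLs are illustrative only):
# make_link_targets('myproj', 'myuser', 'myproj',
#                   'doc/links_names.inc', 'doc/project_links.inc',
#                   url='https://example.org/myproj',
#                   ml_url='https://example.org/myproj/mailing-list')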
| 5,339,187
|
def PutObject(object_id: str):
"""Add/replace DRS object with a user-supplied ID.
Args:
object_id: Identifier of DRS object to be created/updated.
Returns:
Identifier of created/updated DRS object.
"""
return register_object(
data=request.json,
object_id=object_id,
)
| 5,339,188
|
def get_handler_name(method: str, url_path: str, path_params: dict):
"""
Returns the name of the handler to invoke reflectively for this request.
:param method: HTTP method
:param url_path: URL path
:param path_params: path parameters (mapping of parameter name to value)
:return: handler name
"""
handler = url_path.replace('/', '_')
for key, value in path_params.items():
handler = handler.replace(value, key)
return method.lower() + handler
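# Usage sketch: GET /users/42 with path_params={'user_id': '42'} resolves to
# the handler name "get_users_user_id".
assert get_handler_name('GET', '/users/42', {'user_id': '42'}) == 'get_users_user_id'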
| 5,339,189
|
def parse_args():
"""Process input arguments"""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('genotypes', metavar='G', help="Genotype table")
parser.add_argument('mutations', metavar='M', help="Mutation table")
parser.add_argument('--zygosity', '-z', default='both', type=str,
help="Minimum number of variant alleles required to be variant\
(het, hom or both)")
parser.add_argument('--nonsense', '-n', default=0, type=float,
help="Only consider nonsense variants occurring in the first\
X portion of the protein")
parser.add_argument('--total', '-t', action="store_true",
help="Return the count of variants in each gene")
parser.add_argument('--worst', '-w', action="store_true",
help="Return the neutral probability of the most impactful variant\
in a gene")
parser.add_argument('--sift', '-i', action="store_true",
help="Use SIFT scores to calculate P(Neutral)")
parser.add_argument('--blosum', '-b', action="store_true",
help="Use BLOSUM62 scores to calculate P(Neutral)")
parser.add_argument('--foldx', '-f', action="store_true",
help="Use FoldX ddG scores to calculate P(Neutral)")
return parser.parse_args()
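# Usage sketch (the script name and input files are illustrative):
#   python gene_burden.py genotypes.tsv mutations.tsv --zygosity hom --sift
# args = parse_args()
# print(args.genotypes, args.zygosity, args.sift)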
| 5,339,190
|
def test_validate_json_invalid_json():
"""
Unit test to check the validate_json function with invalid json_object inputs
"""
json_object = {
"cpu_cores": "0.1"
}
with pytest.raises(AMLConfigurationException):
assert validate_json(
data=json_object,
schema=parameters_schema,
input_name="PARAMETERS_FILE"
)
| 5,339,191
|
def cli_create_subscription_type(args: argparse.Namespace) -> None:
"""
Handler for "mnstr subtypes create".
"""
result = create_subscription_type(
args.id,
args.name,
args.description,
args.icon,
args.choices,
args.stripe_product_id,
args.stripe_price_id,
args.active,
)
print(result.json())
| 5,339,192
|
def get_courses(terms, programs, synergis, whitelist=None):
""" yield a course augmented with sis info from a sorted list of courses for a list of terms & programs
gratitude to http://nedbatchelder.com/text/iter.html#h_customizing_iteration
NOTE: api returns courses in a subaccount AND ITS SUBACCOUNTS """
if whitelist:
for course_sis_id in sorted(whitelist):
course = get_course_by_sis_id(course_sis_id)
course_sis_info = validate_course(course)
if course_sis_info:
course['course_sis_info'] = course_sis_info
yield course
else:
print('>>> no course for {}'.format(course_sis_id))
else:
for term in terms or all_terms():
print(term, '-' * 70)
for program in programs or all_programs(synergis):
print(program, '-' * 70)
courses = api.get_list('accounts/{}/courses?enrollment_term_id={}'
.format(program_account(program), term_id_from_name(term)))
for course in sorted([course for course in courses if course['sis_course_id']],
key=operator.itemgetter('sis_course_id')):
course_sis_info = validate_course(course)
if course_sis_info:
course['course_sis_info'] = course_sis_info
yield course
| 5,339,193
|
def write_single(filename, dic, data, overwrite=False):
"""
Write data to a single NMRPipe file from memory.
Write 1D and 2D files completely as well as NMRPipe data streams.
2D planes of 3D and 4D files should be written with this function.
See :py:func:`write` for documentation.
"""
# append imaginary and flatten
if data.dtype == "complex64":
data = append_data(data)
data = unshape_data(data)
# create the fdata array
fdata = dic2fdata(dic)
# write the file
put_data(filename, fdata, data, overwrite)
return
| 5,339,194
|
def plot_AP(file_path: str):
"""Plot a bar chart of per-class AP values read from a result JSON file."""
with open(file_path, encoding='utf-8') as f:
result = json.load(f)
AP = []
classes = []
for k, v in result.items():
if k != 'mAP':
AP.append(v['AP'])
classes.append(k)
fig, ax = plt.subplots(1, 1, num='AP bar chart')
ax.barh(range(len(AP)), AP, height=0.6, tick_label=classes)
ax.set(xlabel='AP', title=f'mAP: {result["mAP"]:.2%}')
return fig, ax
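# Usage sketch (expects a JSON result file shaped like
# {"cat": {"AP": 0.8, ...}, "dog": {"AP": 0.7, ...}, "mAP": 0.75};
# the path below is illustrative):
# fig, ax = plot_AP('eval/result.json')
# fig.savefig('AP_bar_chart.png', dpi=150)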
| 5,339,195
|
def partner_data_ingest_new_files(source, destination):
"""
Check the S3 path for new files and trigger partner_data_ingest for them.
:param source: list of files to process
:param destination: destination to copy validated files to
"""
hook = S3SyncHook(aws_conn_id="aws_default", verify=True)
diff = hook.diff(source, destination)
return partner_data_ingest(new_files=diff, destination=destination)
| 5,339,196
|
def parse_data_describe(args: argparse.Namespace) -> None:
"""
Handler for the "data describe" command
"""
simiotics_client = client_from_env()
data_descriptions = data.describe_data(simiotics_client, args.source, args.ids)
print('*** Data descriptions ***')
for i, description in enumerate(data_descriptions):
print('*** Sample {} ***'.format(i))
print(description)
| 5,339,197
|
def buildGeneRegions(infile, outfile):
"""build a :term:`bed` file of regions spanning whole gene models.
This method outputs a single interval spanning the genomic region
that covers all transcripts within a particular gene.
The name column of the :term:`bed` file is set to the `gene_id`.
Arguments
---------
infile : string
Input filename with geneset in :term:`gtf` format.
outfile : string
Output filename with genomic regions in :term:`bed` format.
"""
statement = """
gunzip < %(infile)s
| cgat gtf2gtf
--method=merge-transcripts
--log=%(outfile)s.log
| cgat gff2bed --is-gtf --set-name=gene_id
--log=%(outfile)s.log
| gzip
> %(outfile)s """
P.run()
| 5,339,198
|
def _mofval(value, indent, maxline, line_pos=0, end_space=0):
"""
Low level function that returns the MOF representation of a non-string
value (i.e. a value that cannot not be split into multiple parts, for
example a numeric or boolean value).
If the MOF representation of the value does not fit into the remaining
space of the current line, it is put into a new line, considering the
specified indentation.
NOTE: This method is derived from pywbem mofval but differs in that we
want to output even if we violate the maxline limit on the new line. This
method favors outputting data over exceptions.
Parameters:
value (:term:`unicode string`): The non-string value. Must not be `None`.
indent (:term:`integer`): Number of spaces to indent any new lines that
are generated.
maxline (:term:`integer`): Maximum line length for the generated MOF.
line_pos (:term:`integer`): Length of content already on the current
line.
end_space (:term:`integer`): Length of space to be left free on the last
line.
Returns:
tuple of
* :term:`unicode string`: MOF string.
* new line_pos
"""
assert isinstance(value, six.text_type)
# Check for output on current line
# if fits or this is first entry on the line
avl_len = maxline - line_pos - end_space
if len(value) <= avl_len or line_pos == 0:
line_pos += len(value)
return value, line_pos
mof_str = u'\n' + _indent_str(indent) + value
line_pos = indent + len(value)
return mof_str, line_pos
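# Worked example (assumes the module's `six` import and an _indent_str(n)
# helper returning n spaces, as used above): with maxline=15 and line_pos=12
# only 3 columns remain, so a 5-character value wraps onto a new indented line.
# _mofval(u'12345', indent=4, maxline=15, line_pos=12)
# -> ('\n    12345', 9)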
| 5,339,199
|