| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def p_nonempty_name_list(p):
"""name_list : ID
| name_list COMMA ID
"""
p[0] = processList(p, 3)
| 6,400
|
def count_infected(pop):
"""
counts number of infected
"""
return sum(p.is_infected() for p in pop)
| 6,401
|
def esum(expr_iterable):
"""
Expression sum
    :param expr_iterable: iterable of Expression objects to sum
    :return: an Expression combining all variable terms and constants
"""
var_dict = {}
constant = 0
for expr in expr_iterable:
for (var_name, coef) in expr.var_dict.items():
            if var_name not in var_dict:
var_dict[var_name] = coef
else:
var_dict[var_name] += coef
constant += expr.constant
return Expression.from_var_dict(
var_dict,
constant
)
| 6,402
|
def _prompt(func, prompt):
"""Prompts user for data. This is for testing."""
return func(prompt)
| 6,403
|
def fsflow():
"""
fsflow()
Defined at ../src/fsflow.f lines 93-211
"""
_min3p.f90wrap_fsflow()
| 6,404
|
def show_trace(func, *args, **kwargs):
# noinspection PyShadowingNames
"""
Display epic argument and context call information of given function.
    >>> @show_trace
    ... def complex_function(a, b, c, **kwargs):
    ...     ...
    >>> complex_function('alpha', 'beta', False, debug=True)
    Calling haystack.submodule.complex_function with
     args: ('alpha', 'beta', False)
     kwargs: {'debug': True}
>>>
:param func: the decorated function
:param args: the positional args of the function
:param kwargs: the keyword args of the function
:return: the function return value
"""
func = Func(func)
print(
f"Calling {func.full_name} with: \n "
f"args: {args} \n "
f"kwargs: {kwargs}"
)
return func(*args, **kwargs)
| 6,405
|
def download(token, data_dir, file=None):
"""
Download produced data files from Zenodo
    token : Your access token; get it from
            https://zenodo.org/account/settings/applications/tokens/new/
    data_dir : Directory in which to save the downloaded files
    file : Name of the particular file you want to download, e.g. descendants.zip. If None,
           it downloads all the data
"""
import requests
import numpy as np
import os
record_id = 5770883 # Our repository's record ID on zenodo.org
r = requests.get(f"https://zenodo.org/api/records/{record_id}",
params={'access_token': token})
download_urls = np.array([f['links']['self'] for f in r.json()['files']])
filenames = np.array([f['key'] for f in r.json()['files']])
print(r.status_code)
if file is None:
file = filenames
    else:
        file = [file]
for fname in file:
ind = np.where(filenames==fname)[0][0]
print("Downloading:", fname, ' from ', download_urls[ind])
r = requests.get(download_urls[ind], params={'access_token': token})
with open(os.path.join(data_dir, fname), 'wb') as f:
f.write(r.content)
| 6,406
|
def record_editor(project_path: Path, debug: bool = False) -> List[Path]:
"""
Records ezvi running for each instructions file in a project.
    The files to record are found using `fetch_project_editor_instructions()`.
Args:
project_path (Path): The path towards the project from which the
`ezvi` instructions will be recorded.
debug (bool, optional): Whether or not to use this function in
debug mode. Debug mode shows `ezvi`'s output on the user's terminal.
Defaults to False.
Returns:
List[Path]: A list of paths towards each recording that has been created.
"""
all_editor_instructions: List[Path] = fetch_project_editor_instructions(project_path)
all_editor_recordings: List[Path] = []
console: Console = Console()
with console.status("[bold green]Recording editor...") as status:
for instruction in all_editor_instructions:
save_path: Path = (
instruction.parent.parent / Path("asciicasts") / instruction.name
).with_suffix(".cast")
if save_path.exists():
os.remove(save_path)
subprocess.run(
["asciinema", "rec", "-c", f"runner {instruction}", str(save_path)],
capture_output=not debug,
)
console.log(f"Video contents in file {instruction} have been recorded.")
all_editor_recordings.append(save_path)
return all_editor_recordings
| 6,407
|
def to_percent__xy(x, y):
"""
    To percent with 2 decimal places by dividing x by y.
:param x:
:param y:
:return:
"""
return '{:.2%}'.format(x / y)
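# Minimal usage sketch (hypothetical values, assuming the function above is in scope):
# dividing 1 by 4 yields the formatted string '25.00%'.
assert to_percent__xy(1, 4) == '25.00%'
assert to_percent__xy(2, 3) == '66.67%'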
| 6,408
|
def test_compute_distances13(adata_cdr3, adata_cdr3_mock_distance_calculator):
"""Test for #174. Gracefully handle the case when there are IR."""
adata_cdr3.obs["IR_VJ_1_junction_aa"] = np.nan
adata_cdr3.obs["IR_VDJ_1_junction_aa"] = np.nan
adata_cdr3.obs["has_ir"] = "False"
# test both receptor arms, primary chain only
ir.pp.ir_dist(adata_cdr3, metric=adata_cdr3_mock_distance_calculator, sequence="aa")
with pytest.raises(ValueError):
cn = ClonotypeNeighbors(
adata_cdr3,
receptor_arms="all",
dual_ir="primary_only",
distance_key="ir_dist_aa_custom",
sequence_key="junction_aa",
)
| 6,409
|
def test_update_auto_refresh(ctx):
""" Test the ViewContext.update_auto_refresh method. """
# reset parameter because called in constructor
del ctx.parameters[AUTO]
# test call with default valid value
ctx.update_auto_refresh()
assert not ctx.parameters[AUTO]
# reset parameter
del ctx.parameters[AUTO]
# test call with other valid value
ctx.http_context.form[AUTO] = 't'
ctx.update_auto_refresh()
assert ctx.parameters[AUTO]
# reset parameter
del ctx.parameters[AUTO]
# test call with invalid value
ctx.http_context.form[AUTO] = 'not a boolean'
ctx.update_auto_refresh()
assert not ctx.parameters[AUTO]
| 6,410
|
def generic_constructor(value, name=None, strict=False, allow_downcast=None):
"""SharedVariable Constructor"""
return SharedVariable(type=generic, value=value, name=name, strict=strict,
allow_downcast=allow_downcast)
| 6,411
|
def u_scheme(tree, neighbours):
"""Calculates the u-:ref:`scheme <presolve>`.
"""
unique_neighbours = torch.sort(neighbours, 1, descending=True).values
unique_neighbours[:, 1:][unique_neighbours[:, 1:] == unique_neighbours[:, :-1]] = -1
pairs = torch.stack([tree.id[:, None].expand_as(neighbours), unique_neighbours], -1)
pairs = pairs[(pairs >= 0).all(-1) & tree.terminal[pairs].all(-1)]
partner_is_larger = tree.depths[pairs[:, 0]] > tree.depths[pairs[:, 1]]
smaller_partners = torch.flip(pairs[partner_is_larger], (1,))
pairs = torch.cat([pairs, smaller_partners])
return ragged.from_pairs(pairs, len(tree.id), len(tree.id))
| 6,412
|
def find_and_open_file(f):
"""
Looks in open windows for `f` and focuses the related view.
Opens file if not found. Returns associated view in both cases.
"""
for w in sublime.windows():
for v in w.views():
if normpath(f) == v.file_name():
w.focus_view(v)
return v
return sublime.active_window().open_file(f)
| 6,413
|
def stochastic_fit(input_data: object) -> FitParams:
"""
Acquire parameters for the stochastic input signals.
"""
params = FitParams(0.000036906289210966747, 0.014081285145600045)
return params
| 6,414
|
def unconfigure_radius_automate_tester(device, server_name, username):
""" Unconfigure Radius Automate Tester.
Args:
device (`obj`): Device object
server_name ('str'): Radius server name
username ('str'): Identity Username to query radius server
Return:
None
Raise:
        SubCommandFailure: Failed unconfiguring radius automate tester
"""
try:
device.configure([
"radius server {server_name}".format(server_name=server_name),
"no automate-tester username {username}".format(username=username)
])
except SubCommandFailure:
raise SubCommandFailure(
"Could not unconfigure Radius automate tester"
)
| 6,415
|
def sort_features_by_normalization(
normalization_parameters: Dict[int, NormalizationParameters]
) -> Tuple[List[int], List[int]]:
"""
Helper function to return a sorted list from a normalization map.
Also returns the starting index for each feature type"""
# Sort features by feature type
sorted_features: List[int] = []
feature_starts: List[int] = []
    assert isinstance(
        list(normalization_parameters.keys())[0], int
    ), "Normalization Parameters keys need to be int"
for feature_type in identify_types.FEATURE_TYPES:
feature_starts.append(len(sorted_features))
for feature in sorted(normalization_parameters.keys()):
norm = normalization_parameters[feature]
if norm.feature_type == feature_type:
sorted_features.append(feature)
return sorted_features, feature_starts
| 6,416
|
def main():
"""Main method that retrieves the devices"""
# Calculate Execution Time - Start
now = datetime.datetime.now()
print("Current date and time when script starts: " + now.strftime("%Y-%m-%d %H:%M:%S"))
# Connect to Switch
conn_pre = paramiko.SSHClient()
conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
conn_pre.connect(HOST_SW, 22, USER_SW, PASS_SW)
conn = conn_pre.invoke_shell()
conn.send("term len 0 \n")
time.sleep(.5)
output = conn.recv(65535)
Config_Profile(conn, SatIP, SatPort)
Register(conn, token)
conn.send("show license summary \n")
time.sleep(1)
output = conn.recv(65535)
print(output)
# Calculate Execution Time - End
now = datetime.datetime.now()
print("Current date and time when script finished: " + now.strftime("%Y-%m-%d %H:%M:%S"))
conn.close()
| 6,417
|
def block_sort():
"""
Do from here: https://en.wikipedia.org/wiki/Block_sort
:return: None
"""
return None
| 6,418
|
def improve_dictionary(file_to_open):
"""Implementation of the -w option. Improve a dictionary by
interactively questioning the user."""
kombinacija = {}
komb_unique = {}
if not os.path.isfile(file_to_open):
exit("Error: file " + file_to_open + " does not exist.")
chars = CONFIG["global"]["chars"]
years = CONFIG["global"]["years"]
numfrom = CONFIG["global"]["numfrom"]
numto = CONFIG["global"]["numto"]
fajl = open(file_to_open, "r")
listic = fajl.readlines()
listica = []
for x in listic:
listica += x.split()
print("\r\n *************************************************")
print(" * \033[1;31mWARNING!!!\033[1;m *")
print(" * Using large wordlists in some *")
print(" * options bellow is NOT recommended! *")
print(" *************************************************\r\n")
conts = input(
"> Do you want to concatenate all words from wordlist? Y/[N]: "
).lower()
if conts == "y" and len(listic) > CONFIG["global"]["threshold"]:
print(
"\r\n[-] Maximum number of words for concatenation is "
+ str(CONFIG["global"]["threshold"])
)
print("[-] Check configuration file for increasing this number.\r\n")
conts = input(
"> Do you want to concatenate all words from wordlist? Y/[N]: "
).lower()
cont = [""]
if conts == "y":
for cont1 in listica:
for cont2 in listica:
if listica.index(cont1) != listica.index(cont2):
cont.append(cont1 + cont2)
spechars = [""]
spechars1 = input(
"> Do you want to add special chars at the end of words? Y/[N]: "
).lower()
if spechars1 == "y":
for spec1 in chars:
spechars.append(spec1)
for spec2 in chars:
spechars.append(spec1 + spec2)
for spec3 in chars:
spechars.append(spec1 + spec2 + spec3)
randnum = input(
"> Do you want to add some random numbers at the end of words? Y/[N]:"
).lower()
leetmode = input("> Leet mode? (i.e. leet = 1337) Y/[N]: ").lower()
# init
for i in range(6):
kombinacija[i] = [""]
kombinacija[0] = list(komb(listica, years))
if conts == "y":
kombinacija[1] = list(komb(cont, years))
if spechars1 == "y":
kombinacija[2] = list(komb(listica, spechars))
if conts == "y":
kombinacija[3] = list(komb(cont, spechars))
if randnum == "y":
kombinacija[4] = list(concats(listica, numfrom, numto))
if conts == "y":
kombinacija[5] = list(concats(cont, numfrom, numto))
print("\r\n[+] Now making a dictionary...")
print("[+] Sorting list and removing duplicates...")
for i in range(6):
komb_unique[i] = list(dict.fromkeys(kombinacija[i]).keys())
komb_unique[6] = list(dict.fromkeys(listica).keys())
komb_unique[7] = list(dict.fromkeys(cont).keys())
# join the lists
uniqlist = []
for i in range(8):
uniqlist += komb_unique[i]
unique_lista = list(dict.fromkeys(uniqlist).keys())
unique_leet = []
if leetmode == "y":
        for x in unique_lista:
            # if you want to add more leet chars, you will need to add more lines in cupp.cfg too...
x = make_leet(x) # convert to leet
unique_leet.append(x)
unique_list = unique_lista + unique_leet
unique_list_finished = []
unique_list_finished = [
x
for x in unique_list
if len(x) > CONFIG["global"]["wcfrom"] and len(x) < CONFIG["global"]["wcto"]
]
print_to_file(file_to_open + ".cupp.txt", unique_list_finished)
fajl.close()
| 6,419
|
def prep_ground_truth(paths, box_data, qgt):
"""adds dbidx column to box data, sets dbidx in qgt and sorts qgt by dbidx
"""
orig_box_data = box_data
orig_qgt = qgt
path2idx = dict(zip(paths, range(len(paths))))
mapfun = lambda x : path2idx.get(x,-1)
box_data = box_data.assign(dbidx=box_data.file_path.map(mapfun).astype('int'))
box_data = box_data[box_data.dbidx >= 0].reset_index(drop=True)
new_ids = qgt.index.map(mapfun)
qgt = qgt[new_ids >= 0]
qgt = qgt.set_index(new_ids[new_ids >= 0])
qgt = qgt.sort_index()
## Add entries for files with no labels...
qgt = qgt.reindex(np.arange(len(paths))) # na values will be ignored...
assert len(paths) == qgt.shape[0], 'every path should be in the ground truth'
return box_data, qgt
| 6,420
|
def top_1_pct_share(df, col, w=None):
"""Calculates top 1% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 1%.
"""
return top_x_pct_share(df, col, 0.01, w)
| 6,421
|
def containsdupevalues(structure) -> bool or None:
"""Returns True if the passed dict has duplicate items/values, False otherwise. If the passed structure is not a dict, returns None."""
if isinstance(structure, dict):
# fast check for dupe keys
rev_dict = {}
for key, value in structure.items():
rev_dict.setdefault(value, set()).add(key)
dupes = list(filter(lambda x: len(x) > 1, rev_dict.values()))
if dupes:
return True
else:
return False
return None
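# Minimal usage sketch (hypothetical inputs): duplicate values map to True,
# all-unique values to False, and non-dict inputs to None.
assert containsdupevalues({"a": 1, "b": 1}) is True
assert containsdupevalues({"a": 1, "b": 2}) is False
assert containsdupevalues([1, 1]) is None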
| 6,422
|
def test_energy_one_socket(fs_one_socket, sensor_param):
"""
Create a sensor with given parameters (see printed output) and get energy of monitored devices
The machine contains only one socket
Test:
- return value of the function
"""
sensor = Sensor(sensor_param.devices, sensor_param.sockets)
assert sensor.energy() == sensor_param.one_socket_result
| 6,423
|
def only_letters(answer):
"""Checks if the string contains alpha-numeric characters
Args:
answer (string):
Returns:
bool:
"""
match = re.match("^[a-z0-9]*$", answer)
return bool(match)
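# Minimal usage sketch (hypothetical inputs, assumes `re` is imported as in the function above):
# the regex admits only lowercase letters and digits, so uppercase or punctuation fails.
assert only_letters("abc123") is True
assert only_letters("Hello!") is False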
| 6,424
|
def get_initial_conditions(scenario, reporting_unit):
""" Retreive the initial conditions from a given reporting unit. """
feature = dict(
geometry=json.loads(reporting_unit.polygon.json),
type='Feature',
properties={}
)
# Collect zonal stats from rasters
bps_stats, bps_raster = zonal_stats(feature, BPS_TIF)
sclass_stats, sclass_raster = zonal_stats(feature, SCLASS_TIF)
# The count of the area that *is not* masked, i.e. the count within the reporting unit
count = bps_raster.count()
# Yield each set of initial conditions
for value in bps_stats:
if value in BPS_MAPPING:
# If the raster value is not found, skip it
try:
bps_model_code = int(BPS_MAPPING[value])
except ValueError:
continue
stratum = Stratum.objects.filter(name=BPS_MAPPING[value], project=scenario.project)
# Not all BpS vegetation types have a STM model. Since we can't model them, we skip them.
if not stratum:
continue
stratum = stratum.first()
stateclass_names = []
for sclass_type, lookup in SCLASS_ALL_MAPPINGS:
if bps_model_code in lookup:
name = lookup[bps_model_code]
if name:
stateclass_names.append((sclass_type, name))
sclass_locations = sclass_raster[numpy.where(bps_raster == value)]
sclass_keys_found, sclass_counts = numpy.unique(sclass_locations, return_counts=True)
for i, name_tuple in enumerate(stateclass_names):
name, stateclass = name_tuple
if i not in sclass_keys_found:
relative_amount = 0.0
else:
sclass_idx = list(sclass_keys_found).index(i)
relative_amount = sclass_counts[sclass_idx] / count * 100.0 # Percent of the state class over the total number of cells
stateclass = StateClass.objects.filter(name=stateclass, project=scenario.project).first()
yield {
'scenario': scenario,
'relative_amount': relative_amount,
'stratum': stratum,
'stateclass': stateclass,
'reporting_unit': reporting_unit
}
| 6,425
|
def test_input1():
"""input1"""
run('input1.txt', ' 1:foo boo', 0)
| 6,426
|
def is_finally_visible_func(*args):
"""
is_finally_visible_func(pfn) -> bool
    Is the function visible (even after considering 'SCF_SHHID_FUNC')?
@param pfn (C++: func_t *)
"""
return _ida_funcs.is_finally_visible_func(*args)
| 6,427
|
def _losetup_list():
"""
List all the loopback devices on the system.
:returns: A ``list`` of
2-tuple(FilePath(device_file), FilePath(backing_file))
"""
output = check_output(
["losetup", "--all"]
).decode('utf8')
return _losetup_list_parse(output)
| 6,428
|
def test_compartmentModel_fit_model_returns_bool(preparedmodel):
"""Test whether the fit routine reports sucess of fitting
"""
return_value = preparedmodel.fit_model()
assert (isinstance(return_value, bool))
| 6,429
|
def gaussian_product_center(a,A,b,B):
"""
"""
A = np.array(A)
B = np.array(B)
return (a*A+b*B)/(a+b)
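# Minimal usage sketch (hypothetical values; np is already used by the function above):
# with equal exponents the product Gaussian is centered at the midpoint of A and B.
assert np.allclose(gaussian_product_center(1.0, [0, 0, 0], 1.0, [2, 0, 0]), [1.0, 0.0, 0.0])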
| 6,430
|
def test_gms_get_assertions_on_dataset_field():
"""lists all assertion urns including those which may not have executed"""
dataset_urn = make_dataset_urn("postgres", "fooTable")
field_urn = make_schema_field_urn(dataset_urn, "col1")
response = requests.get(
f"{GMS_ENDPOINT}/relationships?direction=INCOMING&urn={urllib.parse.quote(field_urn)}&types=Asserts"
)
response.raise_for_status()
data = response.json()
assert len(data["relationships"]) == 1
| 6,431
|
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None
): # pylint: disable=unused-argument
"""Setup binary_sensor platform."""
name = discovery_info[CONF_NAME]
entities = []
for resource in discovery_info[CONF_RESOURCES]:
sensor_type = resource.lower()
if sensor_type not in BINARY_SENSOR_TYPES:
continue
entities.append(WattBoxBinarySensor(hass, name, sensor_type))
async_add_entities(entities, True)
| 6,432
|
def starting(date):
"""validate starting"""
validate_time(date)
validate_time_range(date)
| 6,433
|
def test_stack_describe_contains_local_stack() -> None:
"""Test that the stack describe command contains the default local stack"""
runner = CliRunner()
result = runner.invoke(describe_stack)
assert result.exit_code == 0
assert "default" in result.output
| 6,434
|
def comparison_plot(cn_see,
option='corr',
plot_orientation='horizontal',
cbar_orientation='vertical',
cbar_indiv_range=None,
title=True,
title_suffix='',
titles_='',
share_colorbar=False,
plot_colormap='jet',
plot_num_samples=1000,
remove_small_val_th=3,
remove_small_val=False,
plot_size = 12,
cbar_ticks_number=None,
save_fig=False,
save_fig_name='corr_'):
"""
"""
num_plots = len(cn_see)
if share_colorbar:
min_dim=4
else:
min_dim=3
if titles_=='' and num_plots==2:
titles_=['original ','denoised ']
if num_plots==2:
cn_see.append(cn_see[0]-cn_see[1])
titles_.append('residual ')
if plot_orientation == 'horizontal':
d1, d2 = min_dim,1
sharex = True
sharey = False
elif plot_orientation =='vertical':
d1, d2 = 1,min_dim
sharex = False
sharey = True
Cn_all =[]
#######################
# Calculate Cn to plot
#######################
for ii, array in enumerate(cn_see):
#print(array.shape)
if option =='corr': # Correlation
Cn, _ = correlation_pnr(array,
gSig=None,
remove_small_val=remove_small_val,
remove_small_val_th=remove_small_val_th,
center_psf=False)#,
#swap_dim=True) # 10 no ds
title_prefix = 'Local correlation: '
elif option =='var': #Variance
Cn = array.var(2)/array.shape[2]
title_prefix = 'Pixel variance: '
#print(Cn.min())
#print(Cn.max())
elif option =='pnr': # PNR
_, Cn = correlation_pnr(array,
gSig=None,
remove_small_val=remove_small_val,
remove_small_val_th=remove_small_val_th,
center_psf=False)#,
#swap_dim=True)
title_prefix = 'PNR: '
elif option=='input':
Cn =array - array.min()
Cn = Cn/Cn.max()
title_prefix = 'Single Frame: '
elif option=='snr':
Cn1 = array.std(2)
Cn2 = denoise.noise_level(array)
Cn = Cn1/Cn2
title_prefix = 'SNR: '
else:
title_prefix = ''
print ('%s range [%.1e %.1e]'%(title_prefix,
Cn.min(),
Cn.max()))
Cn_all.append(Cn)
#######################
# Plot configuration
#######################
vmax_ = list(map(np.max,Cn_all))
vmin_ = list(map(np.min,Cn_all))
if share_colorbar:
vmax_ = [max(vmax_)]*3
vmin_ = [min(vmin_)]*3
if cbar_indiv_range is not None:
for ii,range_ in enumerate(cbar_indiv_range):
vmin_[ii]=range_[0]
vmax_[ii]=range_[1]
dim2, dim1 = Cn.shape
x_ticks= np.linspace(0,dim1,5).astype('int')
y_ticks= np.linspace(0,dim2,5).astype('int')
fig, axarr = plt.subplots(d1,d2,
figsize=(d1*plot_size,d2*plot_size),
sharex=sharex,
sharey=sharey)
#cbar_enable= [False,False,True]
cbar_enable= not share_colorbar
for ii, Cn in enumerate(Cn_all):
show_img(Cn,
ax =axarr[ii],
cbar_orientation=cbar_orientation,
vmin=vmin_[ii],
vmax=vmax_[ii],
plot_colormap=plot_colormap,
cbar_ticks_number=cbar_ticks_number,
cbar_enable=cbar_enable)
axarr[ii].set_xticks(x_ticks)
axarr[ii].set_yticks(y_ticks)
axarr[ii].set_xticklabels([])
axarr[ii].set_yticklabels([])
if title:
axarr[ii].set_title(title_prefix
+ titles_[ii]
+ title_suffix)
plt.tight_layout()
if save_fig:
save_fig_name = save_fig_name+'comparison_plot_'+'.pdf'
plt.savefig(save_fig_name)
else:
plt.show()
return
| 6,435
|
def read_yaml_file(yaml_path):
"""Loads a YAML file.
:param yaml_path: the path to the yaml file.
:return: YAML file parsed content.
"""
if is_file(yaml_path):
try:
file_content = sudo_read(yaml_path)
yaml = YAML(typ='safe', pure=True)
            return yaml.load(file_content)
except YAMLError as e:
raise YAMLError('Failed to load yaml file {0}, due to {1}'
''.format(yaml_path, str(e)))
return None
| 6,436
|
async def test_parse_gcj02_position(caplog):
"""Test conversion of GCJ02 to WGS84 for china."""
account = await get_mocked_account(get_region_from_name("china"))
vehicle = account.get_vehicle(VIN_F48)
vehicle_test_data = {
"properties": {
"vehicleLocation": {
"address": {"formatted": "some_formatted_address"},
"coordinates": {"latitude": 39.83492, "longitude": 116.23221},
"heading": 123,
},
"lastUpdatedAt": "2021-11-14T20:20:21Z",
},
"status": {
"FuelAndBattery": [],
"lastUpdatedAt": "2021-11-14T20:20:21Z",
},
}
vehicle.update_state(dict(vehicle.data, **vehicle_test_data))
# Update twice to test against slowly crawling position due to GCJ02 to WGS84 conversion
vehicle.update_state(dict(vehicle.data, **vehicle_test_data))
assert (39.8337, 116.22617) == (
round(vehicle.vehicle_location.location[0], 5),
round(vehicle.vehicle_location.location[1], 5),
)
assert len(get_deprecation_warning_count(caplog)) == 0
| 6,437
|
def _InUse(resource):
"""All the secret names (local names & remote aliases) in use.
Args:
resource: Revision
Returns:
List of local names and remote aliases.
"""
return ([
source.secretName
for source in resource.template.volumes.secrets.values()
] + [
source.secretKeyRef.name
for source in resource.template.env_vars.secrets.values()
])
| 6,438
|
def filter_dwnmut(gene_data):
"""Removes the variants upstream to Frameshift/StopGain mutation.
Args:
- gene_data(dictionary): gene_transcript wise variants where
there is at least one Frameshift/Stopgain
mutation.
Returns:
- flt_data(dictionary): gene_transcript wise variants where there
is at least one Frameshift/StopGain mutation
and at least one downstream coding exonic
variant.
"""
rfgene = Refgene()
flt_gene_data = {}
for gene_info, val in gene_data.items():
trans_id = gene_info[1]
strand = rfgene.get_strand(trans_id)
if not strand:
continue
for e in val:
t = {}
            variants = list(e.keys())
if strand == '+':
variants.sort()
elif strand == '-':
variants.sort(reverse=True)
size = 0
mut_type = ''
flag = False
for var in variants:
if flag == False and e[var][0] == 'StopGain':
mut_type = 'StopGain'
t[tuple(list(var) + ['#'])] = e[var]
flag = True
elif flag == False and e[var][0].startswith('FrameShift'):
if e[var][0][10:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][10:] == 'Delete':
size -= len(var[3]) - 1
t[tuple(list(var) + ['#'])] = e[var]
flag = True
elif flag == True:
if mut_type == 'StopGain':
t[var] = e[var]
elif e[var][0].startswith('FrameShift'):
if e[var][0][10:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][10:] == 'Delete':
size -= len(var[3]) - 1
t[var] = e[var]
if size == 0 or divmod(size, 3)[1] == 0:
flag = False
elif e[var][0].startswith('NonFrameShift'):
if e[var][0][13:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][13:] == 'Delete':
size -= len(var[3]) - 1
t[var] = e[var]
if size == 0 or divmod(size, 3)[1] == 0:
flag = False
else:
t[var] = e[var]
if len(t) > 1:
key = tuple(list(gene_info) + [strand])
if key not in flt_gene_data:
flt_gene_data[key] = [t]
else:
if t != flt_gene_data[key][0]:
flt_gene_data[key].append(t)
return flt_gene_data
| 6,439
|
def format_scwgbs_file(file_path):
"""
    Format a scwgbs file into a more usable form
:param file_path: The path of the file to format
:type file_path: str
:return: A dict where each key is a chr and the value is an array with all the scwgbs reads
:rtype: dict
"""
chr_dict = extract_cols(file_path)
chr_dict = combine_strands(chr_dict)
return chr_dict
| 6,440
|
def connect_signals():
"""
Hooks up all the event handlers to our callbacks above.
"""
review_request_published.connect(review_request_published_cb,
sender=ReviewRequest)
review_published.connect(review_published_cb, sender=Review)
reply_published.connect(reply_published_cb, sender=Review)
hipchat.config.init_cfg('/home/ubuntu/reviewboard/hipchat.cfg')
| 6,441
|
def test_manifest_v2_all_pass(_, setup_route):
"""
Run a valid manifest through all V2 validators
"""
validators = get_all_validators(False, "2.0.0")
for validator in validators:
# Currently skipping SchemaValidator because of no context object and config
if isinstance(validator, v2_validators.SchemaValidator):
continue
validator.validate('active_directory', JSONDict(input_constants.V2_VALID_MANIFEST), False)
assert not validator.result.failed, validator.result
assert not validator.result.fixed
| 6,442
|
def ls_generator_loss(scores_fake):
"""
Computes the Least-Squares GAN loss for the generator.
Inputs:
- scores_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.
Outputs:
- loss: A PyTorch Tensor containing the loss.
"""
loss = None
####################################
# YOUR CODE HERE #
####################################
labels = torch.ones_like(scores_fake)
loss = 1/2 * mse_loss(scores_fake, labels, reduction = 'mean')
########## END ##########
return loss
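# Minimal usage sketch (hypothetical values, assumes `mse_loss` above is
# torch.nn.functional.mse_loss): fake scores close to the "real" label 1 give a
# small loss, since the loss is 0.5 * mean((s - 1)^2).
#   scores_fake = torch.tensor([0.8, 1.2])
#   ls_generator_loss(scores_fake)  # tensor(0.0200)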
| 6,443
|
def test_d3_4_28v03_d3_4_28v03i(mode, save_output, output_format):
"""
Tests the simpleType dateTimeStamp and its facets pattern, used in
lists
"""
assert_bindings(
schema="ibmData/valid/D3_4_28/d3_4_28v03.xsd",
instance="ibmData/valid/D3_4_28/d3_4_28v03.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 6,444
|
def rmsd(predicted, reference):
"""
Calculate root-mean-square deviation (RMSD) between two variables.
Calculates the root-mean-square deviation between two variables
PREDICTED and REFERENCE. The RMSD is calculated using the
formula:
RMSD^2 = sum_(n=1)^N [(p_n - r_n)^2]/N
where p is the predicted values, r is the reference values, and
N is the total number of values in p & r. Note that p & r must
have the same number of values.
Input:
PREDICTED : predicted values
REFERENCE : reference values
Output:
R : root-mean-square deviation (RMSD)
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
prochford@thesymplectic.com
Created on Dec 9, 2016
"""
# Check that dimensions of predicted and reference fields match
utils.check_arrays(predicted, reference)
    # Calculate the RMSD
r = np.sqrt(np.sum(np.square(predicted - reference)) / len(predicted))
return r
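# Minimal usage sketch (hypothetical values, assumes `utils.check_arrays` accepts
# two same-length numpy arrays as in the function above):
#   predicted = np.array([1.0, 2.0, 3.0])
#   reference = np.array([1.0, 2.0, 5.0])
#   rmsd(predicted, reference)  # sqrt(4 / 3) ~= 1.1547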
| 6,445
|
def _get_ranks_for_sequence(logits: np.ndarray,
labels: np.ndarray) -> List[float]:
"""Returns ranks for a sequence.
Args:
logits: Logits of a single sequence, dim = (num_tokens, vocab_size).
labels: Target labels of a single sequence, dim = (num_tokens, 1).
Returns:
An array of ranks for tokens in the sequence, dim = (num_tokens, 1).
"""
sequence_ranks = []
for logit, label in zip(logits, labels.astype(int)):
rank = rankdata(-logit, method='min')[label] - 1.0
sequence_ranks.append(rank)
return sequence_ranks
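# Minimal usage sketch (hypothetical values, assumes numpy and scipy.stats.rankdata
# are imported as in the function above): the target token with the highest logit
# gets rank 0, the second highest gets rank 1, and so on.
#   logits = np.array([[2.0, 0.5, 1.0]])
#   labels = np.array([0])
#   _get_ranks_for_sequence(logits, labels)  # [0.0]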
| 6,446
|
def eval_model_on_grid(model, bbox, tx, voxel_grid_size, cell_vox_min=None, cell_vox_max=None, print_message=True):
"""
Evaluate the trained model (output of fit_model_to_pointcloud) on a voxel grid.
:param model: The trained model returned from fit_model_to_pointcloud
:param bbox: The bounding box defining the region of space on which to evaluate the model
(represented as the pair (origin, size))
:param tx: An affine transformation which transforms points in world coordinates to model
coordinates before evaluating the model (the second return value of fit_model_to_grid).
The transformation is represented as a tuple (t, s) where t is a translation and s is scale.
:param voxel_grid_size: The size of the voxel grid on which to reconstruct
:param cell_vox_min: If not None, reconstruct on the subset of the voxel grid starting at these indices.
:param cell_vox_max: If not None, reconstruct on the subset of the voxel grid ending at these indices.
:param print_message: If true, print status messages to stdout.
:return: A tensor representing the model evaluated on a grid.
"""
bbox_origin, bbox_size = bbox
voxel_size = bbox_size / voxel_grid_size # size of a single voxel cell
if cell_vox_min is None:
cell_vox_min = torch.tensor([0, 0, 0], dtype=torch.int32)
if cell_vox_max is None:
cell_vox_max = voxel_grid_size
if print_message:
print(f"Evaluating model on grid of size {[_.item() for _ in (cell_vox_max - cell_vox_min)]}.")
eval_start_time = time.time()
xmin = bbox_origin + (cell_vox_min + 0.5) * voxel_size
xmax = bbox_origin + (cell_vox_max - 0.5) * voxel_size
xmin = affine_transform_pointcloud(xmin.unsqueeze(0), tx).squeeze()
xmax = affine_transform_pointcloud(xmax.unsqueeze(0), tx).squeeze()
xmin, xmax = xmin.numpy(), xmax.numpy()
cell_vox_size = (cell_vox_max - cell_vox_min).numpy()
xgrid = np.stack([_.ravel() for _ in np.mgrid[xmin[0]:xmax[0]:cell_vox_size[0] * 1j,
xmin[1]:xmax[1]:cell_vox_size[1] * 1j,
xmin[2]:xmax[2]:cell_vox_size[2] * 1j]], axis=-1)
xgrid = torch.from_numpy(xgrid).to(model.alpha_.dtype)
xgrid = torch.cat([xgrid, torch.ones(xgrid.shape[0], 1).to(xgrid)], dim=-1).to(model.alpha_.dtype)
    ygrid = model.predict(xgrid).reshape(tuple(cell_vox_size.astype(int))).detach().cpu()
if print_message:
print(f"Evaluated model in {time.time() - eval_start_time}s.")
return ygrid
| 6,447
|
def create_gop(mpeg_file_object: IO[bytes]) -> bytes:
"""Create an index that allows faster seeking.
Note: as far as I can tell, this is not a standard GOP / group of pictures
structure. It is an index that maps frame numbers to stream offsets.
This is referred to as `GOPList` in MoonShell:
misctools/DPGTools/sources/_encvideo.pas
and simply as `GOP` in other implementations."""
def row_to_frame(row: Iterable[str]) -> Mapping[str, str]:
frame = {}
for item in row:
if item == "frame":
continue
key, value = item.split("=", 1)
frame[key] = value
return frame
mpeg_file_object.seek(0)
cmd = [
FFPROBE,
"-hide_banner",
"-print_format", "compact",
"-show_frames", "-select_streams", "v",
"-",
]
gop = b""
frame_number = 0
try:
process = subprocess.Popen(
cmd,
stdin=mpeg_file_object,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
except OSError as os_err:
if os_err.errno == errno.ENOENT:
raise ExternalCommandNotFoundError(cmd)
raise os_err
for row in csv.reader(process.stdout, delimiter="|"):
if not row or row[0] != "frame":
continue
frame = row_to_frame(row)
if frame["pict_type"] == "I":
gop += struct.pack("<l", frame_number)
gop += struct.pack("<l", int(frame["pkt_pos"]))
frame_number += 1
process.wait()
if process.returncode != 0:
stderr = process.stderr.read()
raise ExternalCommandFailedError(process.returncode, process.args, stderr)
return gop
| 6,448
|
def add_path_arguments(parser) -> None:
"""Adds common presubmit check options to an argument parser."""
parser.add_argument(
'paths',
nargs='*',
type=Path,
help=(
'Paths to which to restrict the presubmit checks. '
'Directories are expanded with git ls-files. '
'If --base is provided, all paths are interpreted as Git paths.'))
parser.add_argument(
'-b',
'--base',
metavar='COMMIT',
help=('Git revision against which to diff for changed files. '
'If none is provided, the entire repository is used.'))
parser.add_argument(
'-e',
'--exclude',
metavar='REGULAR_EXPRESSION',
default=[],
action='append',
type=re.compile,
help='Exclude paths matching any of these regular expressions.')
| 6,449
|
def beam_search(model, test_data_src, beam_size, max_decoding_time_step):
""" Run beam search to construct hypotheses for a list of src-language sentences.
@param model : Model
@param test_data_src (List[List[str]]): List of sentences (words) in source language, from test set.
@param beam_size (int): beam_size (# of hypotheses to hold for a translation at every step)
@param max_decoding_time_step (int): maximum sentence length that Beam search can produce
@returns hypotheses (List[List[Hypothesis]]): List of Hypothesis translations for every source sentence.
"""
model.eval()
hypotheses = []
with torch.no_grad():
for src_sent in tqdm(test_data_src, desc='Decoding'):
example_hyps = model.beam_search(src_sent, beam_size=beam_size, max_decoding_time_step=max_decoding_time_step)
hypotheses.append(example_hyps)
return hypotheses
| 6,450
|
def main():
"""
Example of partial loading of a scene
Loads only some objects (by category) and in some room types
"""
logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
settings = MeshRendererSettings(enable_shadow=True, msaa=False)
if platform == "darwin":
settings.texture_scale = 0.5
s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
scene = InteractiveIndoorScene(
"Rs_int",
texture_randomization=False,
object_randomization=False,
load_object_categories=["swivel_chair"],
load_room_types=["living_room"],
)
s.import_scene(scene)
while True:
s.step()
| 6,451
|
def filter_months(c, months):
"""Filters the collection by matching its date-time index with the specified months."""
indices = find_all_in(get_months(c), get_months(months))
return take_at(c, indices)
| 6,452
|
def kmode_fisher(ks,mus,param_list,dPgg,dPgv,dPvv,fPgg,fPgv,fPvv,Ngg,Nvv, \
verbose=False):
"""
Fisher matrix for fields g(k,mu) and v(k,mu).
Returns F[g+v] and F[g]
dPgg, dPgv, dPvv are dictionaries of derivatives.
fPgg, fPgv, fPvv are fiducial powers.
"""
from orphics.stats import FisherMatrix
# Populate Fisher matrix
num_params = len(param_list)
param_combs = itertools.combinations_with_replacement(param_list,2)
Fisher = np.zeros((num_params,num_params))
FisherG = np.zeros((num_params,num_params))
for param1,param2 in param_combs:
i = param_list.index(param1)
j = param_list.index(param2)
if verbose: print("Calculating Fisher for ",param1,param2)
integral = 0.
integralG = 0.
dCov1 = np.array([[dPgg[param1],dPgv[param1]],
[dPgv[param1],dPvv[param1]]])
dCov2 = np.array([[dPgg[param2],dPgv[param2]],
[dPgv[param2],dPvv[param2]]])
        Cov = np.array([[fPgg+Ngg,fPgv],
                        [fPgv,fPvv+Nvv]])
# Integrate over mu and k
for mu_id,mu in enumerate(mus[:-1]):
dmu = mus[mu_id+1]-mus[mu_id]
for k_id,k in enumerate(ks[:-1]):
dk = ks[k_id+1]-ks[k_id]
dC1 = dCov1[:,:,mu_id,k_id]
dC2 = dCov2[:,:,mu_id,k_id]
Cinv = np.linalg.inv(Cov[:,:,mu_id,k_id])
CinvG = 1./Cov[0,0,mu_id,k_id]
trace = np.trace(np.dot(np.dot(dC1,Cinv),np.dot(dC2,Cinv)))
traceG = dC1[0,0]*dC2[0,0]*CinvG**2.
pref = (k**2.)*dk*V/(2.*np.pi)**2./2.*dmu
integral += pref*trace
integralG += pref*traceG
Fisher[i,j] = integral
if j!=i: Fisher[j,i] = integral
FisherG[i,j] = integralG
if j!=i: FisherG[j,i] = integralG
    return FisherMatrix(Fisher,param_list), \
           FisherMatrix(FisherG,param_list)
| 6,453
|
def main(args):
"""
Downloads the necessary catalogues to perform the 1- and 2-halo
conformity analysis
"""
## Reading all elements and converting to python dictionary
param_dict = vars(args)
## ---- Adding to `param_dict` ----
param_dict = add_to_dict(param_dict)
## Checking for correct input
param_vals_test(param_dict)
# Creating instance of `ReadML` with the input parameters
param_dict['rs_args'] = RedSeq(**param_dict)
## Program message
Prog_msg = param_dict['Prog_msg']
## Creating folder directory
proj_dict = param_dict['rs_args'].proj_dict
proj_dict = directory_skeleton(param_dict, proj_dict)
## Downloading data
download_directory(param_dict, proj_dict)
# Cleaning up the data and saving to file
param_dict['rs_args'].extract_filtered_data(catl_kind='master',
return_pd=False, remove_file=param_dict['remove_files'])
| 6,454
|
def detect_device(model):
"""
Tries to determine the best-matching device for the given model
"""
model = model.lower()
# Try matching based on prefix, this is helpful to map e.g.
# FY2350H to FY2300
for device in wavedef.SUPPORTED_DEVICES:
if device[:4] == model[:4]:
return device
raise wavedef.UnsupportedDeviceError(
"Unable to autodetect device '%s'. "
"Use FYGen(device_name='fy2300') with one of the supported devices, "
"beware that the waveforms might not match up."
"Supported devices: %s"
% (
model,
', '.join(wavedef.SUPPORTED_DEVICES)
)
)
| 6,455
|
def re_identify_image_metadata(filename, image_names_pattern):
"""
Apply a regular expression to the *filename* and return metadata
:param filename:
:param image_names_pattern:
:return: a list with metadata derived from the image filename
"""
match = re.match(image_names_pattern, filename)
return None if match is None else match.groups()
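# Minimal usage sketch (hypothetical pattern and filename):
# each capture group in the pattern becomes one metadata field.
assert re_identify_image_metadata("sample_042.tif", r"(\w+)_(\d+)\.tif") == ("sample", "042")
assert re_identify_image_metadata("no_match.png", r"(\w+)_(\d+)\.tif") is None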
| 6,456
|
def test_superoperator_to_kraus_fixed_values(superoperator, expected_kraus_operators):
"""Verifies that cirq.kraus_to_superoperator computes the correct channel matrix."""
actual_kraus_operators = cirq.superoperator_to_kraus(superoperator)
for i in (0, 1):
for j in (0, 1):
input_rho = np.zeros((2, 2))
input_rho[i, j] = 1
actual_rho = apply_kraus_operators(actual_kraus_operators, input_rho)
expected_rho = apply_kraus_operators(expected_kraus_operators, input_rho)
assert np.allclose(actual_rho, expected_rho)
| 6,457
|
def train():
"""given 20 players, run the tournament to make qlearner learn
with variable tournament parameters"""
players = setup_opponents()
#players = [axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random(), axl.Random()]
#players.append(axl.TitForTat())
for player in tqdm(players):
match = axl.Match([axl.RiskyQLearner(), player], prob_end = 0.001, p_A = random.choice([0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]))
match.play()
| 6,458
|
def load_config(config_file: str) -> dict:
"""
Function to load yaml configuration file
:param config_file: name of config file in directory
"""
try:
with open(config_file) as file:
config = yaml.safe_load(file)
except IOError as e:
print(e)
sys.exit(1)
return config
| 6,459
|
def LoginGoogleAccount(action_runner,
credential='googletest', # Recommended credential.
credentials_path=login_utils.DEFAULT_CREDENTIAL_PATH):
"""Logs in into Google account.
This function navigates the tab into Google's login page and logs in a user
using credentials in |credential| part of the |credentials_path| file.
Args:
action_runner: Action runner responsible for running actions on the page.
credential: The credential to retrieve from the credentials file
(type string).
credentials_path: The string that specifies the path to credential file.
NOTE: it's recommended to use 'googletest' credential from
page_sets/data/credentials.json credential since it is a Google test account
and will not trigger anti-bot verification. Other google credentials are kept
until all telemetry pages are updated to use the 'googletest' credential.
Raises:
exceptions.Error: See ExecuteJavaScript()
for a detailed list of possible exceptions.
"""
account_name, password = login_utils.GetAccountNameAndPassword(
credential, credentials_path=credentials_path)
action_runner.Navigate(
'https://accounts.google.com/ServiceLogin?continue='
'https%3A%2F%2Faccounts.google.com%2FManageAccount')
# Wait until either the email or password input is visible.
action_runner.WaitForJavaScriptCondition('{{ @a }} || {{ @b }}',
a=_EMAIL_INPUT_VISIBLE_CONDITION, b=_PASSWORD_INPUT_VISIBLE_CONDITION)
# If the email input is visible, this is the first Google login within the
# browser session, so we must enter both email and password. Otherwise, only
# password is required.
if action_runner.EvaluateJavaScript(_EMAIL_INPUT_VISIBLE_CONDITION):
login_utils.InputWithSelector(action_runner, account_name, _EMAIL_SELECTOR)
action_runner.ClickElement(selector=_EMAIL_NEXT_SELECTOR)
login_utils.InputWithSelector(action_runner, password, _PASSWORD_SELECTOR)
action_runner.ClickElement(selector=_SIGNIN_SELECTOR)
action_runner.WaitForElement(text='My Account')
| 6,460
|
async def test_application_state(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_device: RokuDevice,
mock_roku: MagicMock,
) -> None:
"""Test the creation and values of the Roku selects."""
entity_registry = er.async_get(hass)
entity_registry.async_get_or_create(
SELECT_DOMAIN,
DOMAIN,
"1GU48T017973_application",
suggested_object_id="my_roku_3_application",
disabled_by=None,
)
mock_config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("select.my_roku_3_application")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:application"
assert state.attributes.get(ATTR_OPTIONS) == [
"Home",
"Amazon Video on Demand",
"Free FrameChannel Service",
"MLB.TV" + "\u00AE",
"Mediafly",
"Netflix",
"Pandora",
"Pluto TV - It's Free TV",
"Roku Channel Store",
]
assert state.state == "Home"
entry = entity_registry.async_get("select.my_roku_3_application")
assert entry
assert entry.unique_id == "1GU48T017973_application"
await hass.services.async_call(
SELECT_DOMAIN,
SERVICE_SELECT_OPTION,
{
ATTR_ENTITY_ID: "select.my_roku_3_application",
ATTR_OPTION: "Netflix",
},
blocking=True,
)
assert mock_roku.launch.call_count == 1
mock_roku.launch.assert_called_with("12")
mock_device.app = mock_device.apps[1]
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
state = hass.states.get("select.my_roku_3_application")
assert state
assert state.state == "Netflix"
await hass.services.async_call(
SELECT_DOMAIN,
SERVICE_SELECT_OPTION,
{
ATTR_ENTITY_ID: "select.my_roku_3_application",
ATTR_OPTION: "Home",
},
blocking=True,
)
assert mock_roku.remote.call_count == 1
mock_roku.remote.assert_called_with("home")
mock_device.app = Application(
app_id=None, name="Roku", version=None, screensaver=None
)
async_fire_time_changed(hass, dt_util.utcnow() + (SCAN_INTERVAL * 2))
await hass.async_block_till_done()
state = hass.states.get("select.my_roku_3_application")
assert state
assert state.state == "Home"
| 6,461
|
def remove(filepath_list):
"""移除中间文件"""
for path in filepath_list + [CONCAT_FILE]:
if os.path.exists(path):
os.remove(path)
| 6,462
|
async def admin_cmd_make_role_menu(message : discord.Message, args : str, isDM : bool):
"""Create a reaction role menu, allowing users to self-assign or remove roles by adding and removing reactions.
Each guild may have a maximum of cfg.maxRoleMenusPerGuild role menus active at any one time.
Option reactions must be either unicode, or custom to the server where the menu is being created.
args must contain a menu subject and new line, followed by a newline-separated list of emoji-option pairs,
where each pair is separated with a space.
For example: 'Number Roles\n0️⃣ @Role-1\n1️⃣ @Role-2\n2️⃣ @Role-3' will produce three options:
- Toggling the 0️⃣ reaction will toggle user ownership of @Role-1
- Toggling the 1️⃣ reaction will toggle user ownership of @Role-2
- Toggling the 2️⃣ reaction will toggle user ownership of @Role-3
Where the subject of the menu is 'Number Roles'.
The menu subject is optional. To not provide a subject, simply start args with a new line.
args may also optionally contain the following keyword arguments, given as argname=value
- target : A role or user to restrict participants by. Must be a user or role mention, not ID.
- days : The number of days that the menu should run for. Must be at least one, or unspecified.
- hours : The number of hours that the menu should run for. Must be at least one, or unspecified.
- minutes : The number of minutes that the menu should run for. Must be at least one, or unspecified.
- seconds : The number of seconds that the menu should run for. Must be at least one, or unspecified.
Reaction menus can be forced to run forever. To do this, specify ALL run time kwargs as 'off'.
TODO: Change options list formatting from comma separated to new line separated
TODO: Support target IDs
TODO: Implement single choice/grouped roles
TODO: Change non-expiring menu specification from all kwargs 'off' to a special kwarg 'on'
:param discord.Message message: the discord message calling the command
:param str args: A comma-separated list of space-separated emoji-option pairs, and optionally any kwargs
as specified in this function's docstring
:param bool isDM: Whether or not the command is being called from a DM channel
"""
requestedBBGuild = botState.guildsDB.getGuild(message.guild.id)
if requestedBBGuild.ownedRoleMenus >= cfg.maxRoleMenusPerGuild:
await message.reply(mention_author=False, content=":x: Guilds can have at most " + str(cfg.maxRoleMenusPerGuild) + " role menus!")
return
requestedBBGuild.ownedRoleMenus += 1
botRole = None
potentialRoles = []
for currRole in message.guild.me.roles:
if currRole.name == message.guild.me.name and currRole.managed:
potentialRoles.append(currRole)
if potentialRoles == []:
await message.reply(mention_author=False, content=":x: I can't find my '" + message.guild.me.name + "' role! Have you renamed it?")
return
botRole = potentialRoles[-1]
reactionRoles = {}
kwArgs = {}
argsSplit = args.split("\n")
if len(argsSplit) < 2:
await message.reply(mention_author=False, content=":x: Invalid arguments! Please provide your menu title, followed by a new line, " \
+ "then a new line-separated series of options.\nFor more info, see `" \
+ requestedBBGuild.commandPrefix + "admin-help`")
return
menuSubject = argsSplit[0]
argPos = 0
for arg in argsSplit[1:]:
if arg == "":
continue
argPos += 1
try:
roleStr, dumbReact = arg.strip(" ").split(" ")[1], lib.emojis.BasedEmoji.fromStr(arg.strip(" ").split(" ")[0])
except (ValueError, IndexError):
for kwArg in ["target=", "days=", "hours=", "seconds=", "minutes=", "multiplechoice="]:
if arg.lower().startswith(kwArg):
kwArgs[kwArg[:-1]] = arg[len(kwArg):]
break
# except lib.emojis.UnrecognisedCustomEmoji:
# await message.reply(mention_author=False, content=":x: I don't know your " + str(argPos) + lib.stringTyping.getNumExtension(argPos) \
# + " emoji!\nYou can only use built in emojis, or custom emojis " \
# + "that are in this server.")
# return
else:
if dumbReact.sendable == "None":
await message.reply(mention_author=False, content=":x: I don't know your " + str(argPos) + lib.stringTyping.getNumExtension(argPos) \
+ " emoji!\nYou can only use built in emojis, or custom emojis that " \
+ "are in this server.")
return
if dumbReact is None:
await message.reply(mention_author=False, content=":x: Invalid emoji: " + arg.strip(" ").split(" ")[1])
return
elif dumbReact.isID:
                localEmoji = False
                for guildEmoji in message.guild.emojis:
                    if guildEmoji.id == dumbReact.id:
                        localEmoji = True
                        break
if not localEmoji:
await message.reply(mention_author=False, content=":x: I don't know your " + str(argPos) \
+ lib.stringTyping.getNumExtension(argPos) + " emoji!\n" \
+ "You can only use built in emojis, or custom emojis " \
+ "that are in this server.")
return
if dumbReact in reactionRoles:
await message.reply(mention_author=False, content=":x: Cannot use the same emoji for two options!")
return
role = message.guild.get_role(int(roleStr.lstrip("<@&").rstrip(">")))
if role is None:
await message.reply(mention_author=False, content=":x: Unrecognised role: " + roleStr)
return
elif role.position > botRole.position:
await message.reply(mention_author=False, content=":x: I can't grant the **" + role.name + "** role!\nMake sure it's below my '" \
+ botRole.name + "' role in the server roles list.")
reactionRoles[dumbReact] = role
if len(reactionRoles) == 0:
await message.reply(mention_author=False, content=":x: No roles given!")
return
targetRole = None
targetMember = None
if "target" in kwArgs:
if lib.stringTyping.isRoleMention(kwArgs["target"]):
targetRole = message.guild.get_role(int(kwArgs["target"].lstrip("<@&").rstrip(">")))
if targetRole is None:
await message.reply(mention_author=False, content=":x: Unknown target role!")
return
elif lib.stringTyping.isMention(kwArgs["target"]):
targetMember = message.guild.get_member(int(kwArgs["target"].lstrip("<@!").rstrip(">")))
if targetMember is None:
await message.reply(mention_author=False, content=":x: Unknown target user!")
return
else:
await message.reply(mention_author=False, content=":x: Invalid target role/user!")
return
timeoutDict = {}
for timeName in ["days", "hours", "minutes", "seconds"]:
if timeName in kwArgs:
if kwArgs[timeName].lower() == "off":
timeoutDict[timeName] = -1
else:
if not lib.stringTyping.isInt(kwArgs[timeName]) or int(kwArgs[timeName]) < 1:
await message.reply(mention_author=False, content=":x: Invalid number of " + timeName + " before timeout!")
return
timeoutDict[timeName] = int(kwArgs[timeName])
timeoutExists = False
for timeName in timeoutDict:
if timeoutDict[timeName] != -1:
timeoutExists = True
timeoutExists = timeoutExists or timeoutDict == {}
menuMsg = await message.reply(mention_author=False, content="")
if timeoutExists:
timeoutDelta = timedelta(**cfg.timeouts.roleMenuExpiry if timeoutDict == {} else timeoutDict)
timeoutTT = timedTask.TimedTask(expiryDelta=timeoutDelta, expiryFunction=reactionRolePicker.markExpiredRoleMenu,
expiryFunctionArgs=menuMsg.id)
botState.reactionMenusTTDB.scheduleTask(timeoutTT)
else:
timeoutTT = None
menu = reactionRolePicker.ReactionRolePicker(menuMsg, reactionRoles, message.guild, targetRole=targetRole,
targetMember=targetMember, timeout=timeoutTT, titleTxt=menuSubject)
await menu.updateMessage()
botState.reactionMenusDB[menuMsg.id] = menu
| 6,463
|
def PSF_Moffat(alpha,beta,x,y):
""" Compute the PSF of the instrument with a Moffat function
Parameters
-----------
alpha: float
radial parameter
beta: float
        power index of the function
    x: float
        position along the x axis
    y: float
        position along the y axis
Returns:
---------
psf: array
psf of the instrument
"""
psf = (beta-1.)/(np.pi*alpha*alpha) * (1.+(x*x+y*y)/(alpha*alpha))**(-beta)
return psf
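# Minimal usage sketch (hypothetical parameters; np is already used by the function above):
# at the center (x = y = 0) the profile reduces to (beta - 1) / (pi * alpha**2).
assert abs(PSF_Moffat(2.0, 3.0, 0.0, 0.0) - 2.0 / (np.pi * 4.0)) < 1e-9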
| 6,464
|
def _tf_range_for_stmt(iter_,
extra_test,
body,
get_state,
set_state,
init_vars,
basic_symbol_names,
composite_symbol_names,
opts):
"""Overload of for_stmt that iterates over a TF range (and elides it)."""
_disallow_undefs_into_loop(*init_vars)
start, limit, delta = iter_.op.inputs
def while_body(iterate, *loop_vars):
new_vars = body(iterate, *loop_vars)
loop_vars = (iterate + delta,)
if new_vars:
loop_vars += new_vars
return loop_vars
def while_cond(iterate, *loop_vars):
"""Cond function for `tf.while_loop`."""
main_test = math_ops.logical_or(
math_ops.logical_and(delta >= 0, iterate < limit),
math_ops.logical_and(delta < 0, iterate > limit))
if extra_test is not None:
return control_flow_ops.cond(
main_test,
lambda: extra_test(*loop_vars),
lambda: False,
)
return main_test
opts['maximum_iterations'] = math_ops.cast(
misc.get_range_len(start, limit, delta), dtypes.int32)
results = _tf_while_stmt(
while_cond,
while_body,
get_state,
set_state,
(start,) + init_vars,
('<internal iterate>',) + basic_symbol_names,
composite_symbol_names,
opts,
)
# Note: the iteration index is not returned by the while loop, however
# if a symbol with the same name exists outside the loop, it will be captured
# by the loop variables and ultimately updated correctly.
if isinstance(results, (tuple, list)):
assert len(results) >= 1 # Has at least the iterate.
if len(results) > 1:
results = results[1:]
else:
results = ()
return results
| 6,465
|
def _import_class(module_and_class_name: str) -> type:
"""Import class from a module, e.g. 'text_recognizer.models.MLP'"""
module_name, class_name = module_and_class_name.rsplit(".", 1) # splits into 2 elements at "."
module = importlib.import_module(module_name)
class_ = getattr(module, class_name) # gives us model.class_name attribute (ex: jacques = Person(), jacques.age -> 28)
return class_
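# Minimal usage sketch (hypothetical target class): any importable
# "module.ClassName" string resolves to the class object itself.
OrderedDict = _import_class("collections.OrderedDict")
assert OrderedDict(a=1)["a"] == 1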
| 6,466
|
def mark_task(func):
"""Mark function as a defacto task (for documenting purpose)"""
func._is_task = True
return func
| 6,467
|
def dict_diff(left: Map, right: Map) -> t.List[t.Dict]:
"""Get the difference between 2 dict-like objects
Args:
left (Map): The left dict-like object
right (Map): The right dict-like object
The value returned is a list of dictionaries with keys ["path", "left", "right"]
which contain the query path and the differences between the left and right mapping.
If a key is missing in either mapping, it will be indicated as a "None".
`math.nan` (not-a-number) is used for default values in the comparison because of
the property: `math.nan != math.nan`. Simple None cannot be used, since it would
not handle keys that both have a value of None. In general, this function might
report false-positives for keys that contain the math.nan (or np.nan) value simply
due to this property. There is no workaround available.
"""
left_paths = set(get_valid_access_paths(left, _leaf_only=True, _use_lists=False))
right_paths = set(get_valid_access_paths(right, _leaf_only=True, _use_lists=False))
return list(
{
"path": path,
"left": getitem(left, path, math.nan),
"right": getitem(right, path, math.nan),
}
for path in left_paths.union(right_paths)
if getitem(left, path, math.nan) != getitem(right, path, math.nan)
)
| 6,468
|
def make_slack_message_divider() -> dict:
"""Generates a simple divider for a Slack message.
Returns:
The generated divider.
"""
return {'type': 'divider'}
| 6,469
|
def sandbox_multibranch(log_dir, request):
"""Multi-branch sandbox fixture. Parameterized by map of branches.
This fixture is identical to `sandbox` except that each node_id is
mapped to a pair (git revision, protocol version). For instance,
suppose a mapping:
        MAP = { 0: ('zeronet', 'alpha'), 1: ('mainnet', '004-Pt24m4xi'),
                2: ('alphanet', '003-PsddFKi3') }
If we annotate the class test as follows.
@pytest.mark.parametrize('sandbox_multibranch', [MAP], indirect=True)
The executables (node, baker, endorser)
- for node_id 0 will be looked up in `TEZOS_BINARY/zeronet`,
- for node_id 1 will be looked up in `TEZOS_BINARY/mainnet` and so on...
baker and endorser will use the specified protocol version, according
to the tezos executables naming conventions.
"""
if paths.TEZOS_BINARIES is None:
pytest.skip()
branch_map = request.param
assert branch_map is not None
num_peers = max(branch_map) + 1
with SandboxMultiBranch(paths.TEZOS_BINARIES,
constants.IDENTITIES,
constants.GENESIS_PK,
num_peers=num_peers,
log_dir=log_dir,
branch_map=branch_map) as sandbox:
yield sandbox
        # this assertion checks that daemons (baker, endorser, node...) didn't
        # fail unexpectedly.
assert sandbox.are_daemons_alive()
| 6,470
|
def ensure_access(file):
"""Ensure we can access a directory and die with an error if we can't."""
if not can_access(file):
tty.die("Insufficient permissions for %s" % file)
| 6,471
|
def username():
""" Return username from env. """
return os.environ["USER"]
| 6,472
|
def get_history(filename: str, extension: int = 0) -> str:
"""
    Returns the HISTORY header lines.
Args:
filename: image filename.
extension: image extension number.
Returns:
string containing all HISTORY lines.
"""
filename = azcam.utils.make_image_filename(filename)
hdr = pyfits.getheader(filename, extension)
history = hdr["HISTORY"]
return str(history)
| 6,473
|
def product_detail(request, product_id):
""" A view to show one product's details """
product = get_object_or_404(Product, pk=product_id)
review_form = ReviewForm()
reviews = Review.objects.filter(product_id=product_id).order_by('-created_at')
context = {
'product': product,
'review_form': review_form,
'reviews': reviews,
}
return render(request, 'products/product_detail.html', context)
| 6,474
|
def deserialize_response_content(response):
"""Convert utf-8 encoded string to a dict.
Since the response is encoded in utf-8, it gets decoded to regular python
string that will be a json string. That gets converted to python
dictionary.
Note: Do not use this method to process non-json response.content
:param requests.models.Response response: object that includes attributes
status code and content
:return: response content as decoded dictionary
:rtype: dict
"""
if response.content:
decoded = response.content.decode("utf-8")
if len(decoded) > 0:
return json.loads(decoded)
return {}
| 6,475
|
def _remove_header_create_bad_object(remove, client=None):
""" Create a new bucket, add an object without a header. This should cause a failure
"""
bucket_name = get_new_bucket()
    if client is None:
client = get_client()
key_name = 'foo'
# remove custom headers before PutObject call
def remove_header(**kwargs):
if (remove in kwargs['params']['headers']):
del kwargs['params']['headers'][remove]
client.meta.events.register('before-call.s3.PutObject', remove_header)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')
return e
| 6,476
|
def SpawnObjectsTab():
"""This function creates a layout containing the object spawning functionality.
Returns:
str : The reference to the layout.
"""
### Create main Layout for the tab
mainTab = cmds.columnLayout(adjustableColumn=True, columnAttach=('both', 20))
cmds.separator(height=10, style="none")
cmds.text(label="Asset Gallery:", align="left")
### Asset Name Text Field
cmds.separator(height=10, style="none")
SpawnObjectsTab.UserField = cmds.textFieldButtonGrp(placeholderText="Write Asset's Name", buttonLabel="Save Asset", buttonCommand=lambda: saveAsset(),
ann="Assign a name for the asset that will be used in the outliner and in the directory hierarchy.")
### Asset Gallery Layout
cmds.separator(height=10, style="none")
cmds.scrollLayout(childResizable=True, height=305, width=455, backgroundColor=(.2,.2,.2))
global objectScroll
objectScroll = cmds.gridLayout(cellWidthHeight=(150,150), autoGrow=True)
populateGallery() # Creates Icons
cmds.setParent(mainTab) # Exit scroll layout
### Choose between Arnold StandIn and Assembly Reference
cmds.separator(height=10, style="none")
global loadMethodRadio
cmds.rowLayout(numberOfColumns=3, adjustableColumn=3)
loadMethodRadio = cmds.radioCollection()
cmds.radioButton("standin", label="Load as Arnold StandIn", select=True,
ann="Arnold standIns bring materials. Render in Arnold to see them.") # Radio button for StandIn
cmds.separator(width=20, style="none")
cmds.radioButton("assembly", label="Load as Assembly Reference",
ann="Assembly references can change their representation mode.") # Radio button for Assembly
cmds.setParent(mainTab)
### Choose how to set the location of the object
cmds.separator(height=10)
cmds.text(label="Spawning method:", align="left")
cmds.separator(height=5, style="none")
cmds.rowLayout(numberOfColumns=4, adjustableColumn=4, columnAttach4=("both","both","both","both"), columnOffset4=(10,10,10,10))
global placingRadio
placingRadio = cmds.radioCollection()
# Create only one copy
cmds.radioButton("single", label="Single Object", select=True,
onCommand=lambda x: cmds.columnLayout(randomControlLayout, edit=True, enable=False),
offCommand=lambda x: cmds.columnLayout(randomControlLayout, edit=True, enable=True),
ann="Create one single object. MMC and drag to scene does the same.")
# Create copies along a curve
cmds.radioButton("curve", label="Along Curve",
ann="Spawn assets along a previously created curve")
# Create copies between a range in world space
cmds.radioButton("range", label="In Range",
onCommand=lambda x: cmds.columnLayout(rangeLayout, edit=True, visible=True),
offCommand=lambda x: cmds.columnLayout(rangeLayout, edit=True, visible=False),
ann="Creates objects in a defined range of coordinates.")
# Create copies on a mesh's surface
cmds.radioButton("mesh", label="On Mesh",
ann="Locate assets on the surface of a selected mesh.")
cmds.setParent(mainTab)
### Randomization parameters
cmds.separator(height=10, style="none")
randomControlLayout = cmds.columnLayout(enable=False)
# How many copies
SpawnObjectsTab.BuildingAmount = cmds.intSliderGrp(label="Building Number", field=True, value=10, min=2, max=50, fieldMaxValue=200)
# Deviation from original rotation
SpawnObjectsTab.RandomRotation = cmds.floatSliderGrp(label="Random Rotation", field=True, value=15, min=0, max=360)
    # Deviation from original scale
SpawnObjectsTab.RandomScale = cmds.floatSliderGrp(label="Random Scale", field=True, value=0, min=0, max=10)
cmds.setParent(mainTab)
### Range spawning parameters
rangeLayout = cmds.columnLayout(visible=False)
# Min x, y and z coordinates
SpawnObjectsTab.MinimumField = cmds.floatFieldGrp(label="Minimum Range: ", numberOfFields=3)
# Max x, y and z coordinates
SpawnObjectsTab.MaximumField = cmds.floatFieldGrp(label="Maximum Range: ", numberOfFields=3)
cmds.setParent(mainTab)
### Finalize
cmds.separator(height=10, style="none")
cmds.button(label='Load Selected Objects', command=lambda x: choosePlacement(x))
cmds.setParent('..') # Exit column layout
return mainTab
| 6,477
|
def train_val_test_split(relevant_data: List[str], seed: int = 42) -> Tuple[List[str], List[str], List[str]]:
"""Splits a list in seperate train, validate and test datasets.
TODO: add params for train / val / test sizes
    :param relevant_data: The list to be divided, generally a list of filenames.
:dtype relevant_data: List[str]
"""
relevant_data = sorted(relevant_data) # Ensures the input to the split is always the same
train, rest = train_test_split(relevant_data, test_size=0.3, shuffle=True, random_state=seed) # 70% to train
val, test = train_test_split(rest, test_size=0.5, shuffle=True, random_state=seed) # Divide the remaining 30% equally over val and test
return train, val, test
| 6,478
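# A hedged usage sketch for the splitter above: 100 filenames are divided
# 70/15/15. Assumes scikit-learn's train_test_split is importable, as the
# function requires.
filenames = [f"sample_{i:03d}.png" for i in range(100)]
train, val, test = train_val_test_split(filenames, seed=42)
print(len(train), len(val), len(test))  # 70 15 15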
|
def create_boxplots_ratio_3(arr1, arr2, arr3, labels, m, n, lambda_max, title,
ticks, no_vars, range_1, range_2, region,
function_type, func_evals):
"""Create boxplots."""
plt.figure(figsize=(5, 5))
plt.ylim(range_1, range_2)
bpl = plt.boxplot(arr1.T,
positions=np.array(range(len(arr1)))*3.0-0.6)
bpc = plt.boxplot(arr2.T,
positions=np.array(range(len(arr1)))*3.0)
bpr = plt.boxplot(arr3.T,
positions=np.array(range(len(arr2)))*3.0+0.6)
set_box_color(bpl, 'green')
set_box_color(bpc, 'navy')
set_box_color(bpr, 'purple')
plt.plot([], c='green', label=labels[0])
plt.plot([], c='navy', label=labels[1])
plt.plot([], c='purple', label=labels[2])
plt.xlabel(r'SNR', size=14)
plt.xticks(np.arange(0, len(ticks) * 3, 3), ticks, size=15)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig('%s_ratio_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s.png' %
(title, m, n, lambda_max,
no_vars, region, function_type, func_evals))
| 6,479
|
def getShdomDirections(Y_shdom, X_shdom, fov=math.pi/2):
"""Calculate the (SHDOM) direction of each pixel.
    Directions are calculated in the SHDOM convention, where the direction is
    that of the photons.
"""
PHI_shdom = np.pi + np.arctan2(Y_shdom, X_shdom)
PSI_shdom = -np.pi + fov * np.sqrt(X_shdom**2 + Y_shdom**2)
return PHI_shdom, PSI_shdom
| 6,480
|
def twos_comp(val, bits):
"""returns the 2's complement of int value val with n bits
- https://stackoverflow.com/questions/1604464/twos-complement-in-python"""
    if (val & (1 << (bits - 1))) != 0:  # if sign bit is set e.g., 8bit: 128-255
        val = val - (1 << bits)         # compute negative value
    return val                          # positive values are returned unchanged
| 6,481
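# Usage sketch for twos_comp above: interpreting raw 8-bit register values
# as signed integers.
assert twos_comp(0b11111111, 8) == -1
assert twos_comp(0b10000000, 8) == -128
assert twos_comp(0b01111111, 8) == 127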
|
def get_batch(source, i, cnf):
"""
Gets a batch shifted over by shift length
"""
seq_len = min(cnf.batch_size, len(source) - cnf.forecast_window - i)
data = source[i : i + seq_len]
target = source[
i + cnf.forecast_window : i + cnf.forecast_window + seq_len
].reshape(-1)
return data, target
| 6,482
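# A hedged usage sketch for get_batch above, with a hypothetical config object:
# batch_size=5 and forecast_window=3, so the target is the input series shifted
# forward by 3 steps. Assumes the source is a torch tensor, as reshape(-1) requires.
from types import SimpleNamespace

import torch

cnf = SimpleNamespace(batch_size=5, forecast_window=3)
source = torch.arange(20.0).reshape(-1, 1)
data, target = get_batch(source, 0, cnf)
print(data.flatten())  # tensor([0., 1., 2., 3., 4.])
print(target)          # tensor([3., 4., 5., 6., 7.])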
|
def test_list_language_min_length_2_nistxml_sv_iv_list_language_min_length_3_1(mode, save_output, output_format):
"""
Type list/language is restricted by facet minLength with value 7.
"""
assert_bindings(
schema="nistData/list/language/Schema+Instance/NISTSchema-SV-IV-list-language-minLength-3.xsd",
instance="nistData/list/language/Schema+Instance/NISTXML-SV-IV-list-language-minLength-3-1.xml",
class_name="NistschemaSvIvListLanguageMinLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 6,483
|
def verify_df(df, constraints_path, epsilon=None, type_checking=None,
repair=True, report='all', **kwargs):
"""
Verify that (i.e. check whether) the Pandas DataFrame provided
satisfies the constraints in the JSON ``.tdda`` file provided.
Mandatory Inputs:
*df*:
A Pandas DataFrame, to be checked.
*constraints_path*:
The path to a JSON ``.tdda`` file (possibly
generated by the discover_df function, below)
containing constraints to be checked.
Or, alternatively, an in-memory dictionary
containing the structured contents of a ``.tdda``
file.
Optional Inputs:
*epsilon*:
When checking minimum and maximum values
for numeric fields, this provides a
tolerance. The tolerance is a proportion
of the constraint value by which the
constraint can be exceeded without causing
a constraint violation to be issued.
For example, with epsilon set to 0.01 (i.e. 1%),
values can be up to 1% larger than a max constraint
without generating constraint failure,
and minimum values can be up to 1% smaller
            than the minimum constraint value without
generating a constraint failure. (These
are modified, as appropriate, for negative
values.)
If not specified, an *epsilon* of 0 is used,
so there is no tolerance.
NOTE: A consequence of the fact that these
are proportionate is that min/max values
of zero do not have any tolerance, i.e.
the wrong sign always generates a failure.
*type_checking*:
``strict`` or ``sloppy``.
Because Pandas silently, routinely and
automatically "promotes" integer and boolean
columns to reals and objects respectively
if they contain nulls, strict type checking
can be problematical in Pandas. For this reason,
``type_checking`` defaults to ``sloppy``, meaning
that type changes that could plausibly be
attributed to Pandas type promotion will not
            generate constraint failures.
If this is set to strict, a Pandas ``float``
            column ``c`` will only be allowed to satisfy
            an ``int`` type constraint if::
c.dropnulls().astype(int) == c.dropnulls()
Similarly, Object fields will satisfy a
``bool`` constraint only if::
c.dropnulls().astype(bool) == c.dropnulls()
*repair*:
A boolean to specify whether to try to use the
information in the constraints to attempt to
            repair potentially-incorrect type inferences
made when constructing the dataframe. When the
dataframe has been loaded from a .csv file, this
can often be useful (but should not be used with
dataframes that have come from a more reliable
source).
*report*:
``all`` or ``fields``.
This controls the behaviour of the
:py:meth:`~tdda.constraints.pd.constraints.PandasVerification.__str__` method on
the resulting :py:class:`~tdda.constraints.pd.constraints.PandasVerification`
object (but not its content).
The default is ``all``, which means that
all fields are shown, together with the
verification status of each constraint
for that field.
If report is set to ``fields``, only fields for
which at least one constraint failed are shown.
Returns:
:py:class:`~tdda.constraints.pd.constraints.PandasVerification` object.
This object has attributes:
        - *passes* --- Number of passing constraints
- *failures* --- Number of failing constraints
It also has a :py:meth:`~tdda.constraints.pd.constraints.PandasVerification.to_frame()` method for
converting the results of the verification to a Pandas DataFrame,
and a :py:meth:`~tdda.constraints.pd.constraints.PandasVerification.__str__` method to print
both the detailed and summary results of the verification.
Example usage::
import pandas as pd
from tdda.constraints import verify_df
df = pd.DataFrame({'a': [0, 1, 2, 10, np.NaN],
'b': ['one', 'one', 'two', 'three', np.NaN]})
v = verify_df(df, 'example_constraints.tdda')
print('Constraints passing: %d\\n' % v.passes)
print('Constraints failing: %d\\n' % v.failures)
print(str(v))
print(v.to_frame())
See *simple_verification.py* in the :ref:`constraint_examples`
for a slightly fuller example.
"""
pdv = PandasConstraintVerifier(df, epsilon=epsilon,
type_checking=type_checking)
if isinstance(constraints_path, dict):
constraints = DatasetConstraints()
constraints.initialize_from_dict(native_definite(constraints_path))
else:
constraints = DatasetConstraints(loadpath=constraints_path)
if repair:
pdv.repair_field_types(constraints)
return pdv.verify(constraints,
VerificationClass=PandasVerification,
report=report, **kwargs)
| 6,484
|
def logical_and(image1, image2):
"""Logical AND between two videos. At least one of the videos must have
mode "1".
.. code-block:: python
out = ((image1 and image2) % MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im))
| 6,485
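# A usage sketch with two mode "1" (bilevel) Pillow images; Pillow's public
# ImageChops.logical_and exposes the same operation as the helper above.
from PIL import Image, ImageChops

white = Image.new("1", (2, 2), 1)
black = Image.new("1", (2, 2), 0)
out = ImageChops.logical_and(white, black)
print(list(out.getdata()))  # [0, 0, 0, 0]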
|
def crusader_action(party_sorted_by_rank, hero, raid_info, enemy_formation):
"""[ Ideal skill load-out: smite, stunning blow, holy lance, inspiring cry ]"""
global UpdatedPartyOrder
party = party_sorted_by_rank
list_of_attacks = ['smite', 'holy_lance']
stall_count = raid_info['battle']['round_stall_count']
stall_accelerated = raid_info['battle']['previous_stall_accelerated']
stall = False if stall_count >= 2 or stall_accelerated else True
attack, stun_chance, target = None, None, None
swap_distance = 1
if 'stunning_blow' in hero.skills:
stun_level = hero.skills['stunning_blow']
stun_chance = AttackSkills['stunning_blow'][2][stun_level]
enemies_not_dead_already = [enemy for enemy in enemy_formation if not enemy.alreadyGoingToDie]
enemy = enemies_not_dead_already[0] if len(enemies_not_dead_already) > 0 else None
# stall if only one weak enemy left and need to heal or stress heal
if (len(enemies_not_dead_already) == 0
or (len(enemies_not_dead_already) == 1
and (enemies_not_dead_already[0].threat < 4 or enemies_not_dead_already[0].stunned) and stall)) \
and ((any(ally.percentHp < 80 for ally in party) and any(ally.healer for ally in party))
or (any(ally.stress > 0 for ally in party) and any(ally.stressHealer for ally in party))):
if len(enemies_not_dead_already) == 1 \
and not (enemy.name == 'Bone Arbalist' and (3 in enemy.rank or 4 in enemy.rank)):
enemy = enemies_not_dead_already[0]
can_stun = stun_chance - enemy.stunResist >= 55
if 'inspiring_cry' in hero.skills \
and (any(ally.effectiveHp == 0 for ally in party)
or (enemy.threat <= 2 or enemy.stunned or (enemy.threat < 4 and enemy.canBeKilledIn1Hit)
and any(ally.stress > 0 for ally in party))):
attack = 'inspiring_cry'
elif (1 == hero.rank or 2 == hero.rank) and (2 in enemy.rank or 1 in enemy.rank) \
and not enemy.stunned and can_stun and not enemy.canBeKilledIn1Hit:
attack = 'stunning_blow'
target = enemy
else:
attack = 'swap' if hero.rank != 1 or (hero.rank == 1 and party[1].heroClass not in BackLineClasses) \
else None
swap_distance = -1 if hero.rank == 1 and party[1].heroClass not in BackLineClasses else 1
    # stress heal if the main threat is dealt with, or heal if an ally is on death's door or can be stopped from reaching death's door
if attack is None and stall and 'inspiring_cry' in hero.skills \
and (any(ally.effectiveHp == 0 for ally in party)
or (len(enemies_not_dead_already) < 3 and any(ally.stress > 0 for ally in party)
and not any(enemy.threat > 2 or (enemy.threat > 3 and not enemy.stunned)
for enemy in enemies_not_dead_already))):
attack = 'inspiring_cry'
# stun enemy if can't kill
if attack is None and (hero.rank == 1 or hero.rank == 2) and 'stunning_blow' in hero.skills:
if any(stun_chance - enemy.stunResist >= 55 and not enemy.stunned
and not enemy.canBeKilledIn1Hit and (1 in enemy.rank or 2 in enemy.rank)
for enemy in enemies_not_dead_already):
attack = 'stunning_blow'
elif attack is None or (hero.rank == 3 or hero.rank == 4):
# holy lance if rank 3 or 4, and not front line on next rank, and enemy on rank 2
if any(2 in enemy.rank or 3 in enemy.rank or 4 in enemy.rank for enemy in enemies_not_dead_already) \
and 'holy_lance' in hero.skills and party[hero.rank-2].heroClass not in FrontLineClasses:
attack = 'holy_lance'
        elif (hero.rank == 3 and party[1].heroClass in BackLineClasses) \
or (not any(ally.stress > 0 for ally in party)
and party[hero.rank-2].heroClass not in FrontLineClasses):
attack = 'swap'
elif 'inspiring_cry' in hero.skills:
attack = 'inspiring_cry'
else:
attack = 'swap'
if attack == 'swap':
swap_hero(hero, swap_distance, UpdatedPartyOrder, debug=Debug)
elif attack == 'stunning_blow':
find_target_and_stun(hero, enemies_not_dead_already, attack, stun_chance, UpdatedPartyOrder, target)
elif attack == 'inspiring_cry':
target = next((ally for ally in party if ally.currentHp == 0), None)
if target is None:
target = next((ally for ally in party if ally.effectiveHp == 0), None)
if target is None:
party.sort(key=lambda k: k.stress, reverse=True)
target = party[0]
heal_target(hero, target, attack, debug=Debug)
else:
# Find target and attack enemy
if attack is not None:
list_of_attacks.insert(0, attack)
find_target_and_attack(raid_info, enemy_formation, hero, party, list_of_attacks, UpdatedPartyOrder)
| 6,486
|
def predict(endpoint_id: str, instance: object) -> object:
"""Send a prediction request to a uCAIP model endpoint
Args:
endpoint_id (str): ID of the uCAIP endpoint
instance (object): The prediction instance, should match the input format that the endpoint expects
Returns:
object: Prediction results from the model
"""
return UCAIPService.get().predict_tables(endpoint_id, instance)
| 6,487
|
def positionNoFlag(PosName, coords):
"""This function writes the postion with no flags, on the format P1=(X,Y,Z,A,B,C)"""
definePosition(PosName, coords, True, [0,0])
| 6,488
|
async def resolve_address(ipaddr, *args, **kwargs):
"""Use a resolver to run a reverse query for PTR records.
See ``dns.asyncresolver.Resolver.resolve_address`` for more
information on the parameters.
"""
return await get_default_resolver().resolve_address(ipaddr, *args, **kwargs)
| 6,489
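# A hedged usage sketch: reverse-resolving an address with dnspython's async
# resolver directly (assumes dnspython >= 2.0; the wrapper above only adds the
# module's default-resolver lookup).
import asyncio

import dns.asyncresolver

async def main() -> None:
    answer = await dns.asyncresolver.resolve_address("8.8.8.8")
    for ptr in answer:
        print(ptr.target)  # e.g. dns.google.

asyncio.run(main())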
|
def _stringify(item):
"""
    Private function which wraps all items in quotes to protect paths from
    being broken up. It will also unpack lists and tuples into a single quoted string.
:param item: Item to stringify.
:return: string
"""
if isinstance(item, (list, tuple)):
return '"' + '" "'.join(item) + '"'
if isinstance(item, str) and len(item) == 0:
return None
return '"%s"' % item
| 6,490
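# Usage sketch for _stringify above: paths with spaces stay intact, lists
# collapse into one quoted string, and an empty string maps to None.
print(_stringify("C:/Program Files/app"))  # "C:/Program Files/app"
print(_stringify(["a.txt", "b c.txt"]))    # "a.txt" "b c.txt"
print(_stringify(""))                      # None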
|
def _get_tree_filter(attrs, vecvars):
"""
Pull attributes and input/output vector variables out of a tree System.
Parameters
----------
attrs : list of str
Names of attributes (may contain dots).
vecvars : list of str
Names of variables contained in the input or output vectors.
Returns
-------
function
A function that takes a System and returns a list of name value pairs.
"""
def _finder(system):
found = []
for attr in attrs:
parts = attr.split('.') # allow attrs with dots
try:
obj = system
for p in parts:
obj = getattr(obj, p)
found.append((attr, obj))
except AttributeError:
pass
for var in vecvars:
if var in system._outputs:
found.append((var, system._outputs[var]))
elif var in system._inputs:
found.append((var, system._inputs[var]))
return found
return _finder
| 6,491
|
def db(sql, action):
"""Create or Drop tables from a database"""
db = SQL(sql)
settings.SQL = sql
# auth_mod = importlib.import_module("labfunctions.auth.models")
wf_mod = importlib.import_module("labfunctions.models")
if action == "create":
db.create_all()
click.echo("Created...")
elif action == "drop":
db.drop_all()
click.echo("Droped...")
elif action == "upgrade":
alembic_ugprade(sql)
else:
click.echo("Wrong param...")
| 6,492
|
def gradient2(Y,x,sum_p):
"""
Description
-----------
Used to calculate the gradients of the beta values (excluding the first).
Parameters
----------
Y: label (0 or 1)
x: flux value
sum_p: sum of all beta values (see 'param_sum' function)
Returns
-------
num/denom: gradient value
"""
if Y == 1:
num = -x * np.exp(-sum_p)
denom = 1 + np.exp(-sum_p)
elif Y == 0:
num = x
denom = 1 + np.exp(-sum_p)
return num/denom
| 6,493
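# A worked numeric sketch for gradient2 above: with sum_p = 0 the logistic
# term is 0.5, so the gradient magnitude is half the feature value.
import numpy as np  # gradient2 relies on np being available in its module

print(gradient2(Y=1, x=2.0, sum_p=0.0))  # -2*exp(0) / (1 + exp(0)) = -1.0
print(gradient2(Y=0, x=2.0, sum_p=0.0))  #  2 / (1 + exp(0))        =  1.0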
|
def test_dramatiq_collector_returns_queues_sizes(dramatiq_queue):
"""
DramatiqCollector returns a correct list of queues sizes.
GIVEN: There is a Dramatiq queue in Redis.
    WHEN: The _get_queues_sizes method is called with a list of valid queue names.
    THEN: A list of tuples with the correct queue names and their sizes is returned.
"""
queues_sizes = DramatiqCollector._get_queues_sizes([b"dramatiq:fake.msgs"])
assert queues_sizes == [("dramatiq:fake.msgs", 2)]
| 6,494
|
def test_dataset(args, valid_loader):
"""
    Print the shape and value range of one batch from the validation loader.
"""
test_data = next(iter(valid_loader))
print("Input dimension:")
if args.model_type == "diagnosis":
print(test_data["img"][args.perspectives[0]].shape)
print(torch.min(test_data["img"][args.perspectives[0]]))
print(torch.max(test_data["img"][args.perspectives[0]]))
else:
print(test_data["img"].shape)
print(torch.min(test_data["img"]))
print(torch.max(test_data["img"]))
| 6,495
|
def takeBlock2(aList, row_list, col_list):
"""
    Take the sub-block given by the rows in row_list and the columns in col_list
    from a doubly nested list.
    The convention for the row and column indices is the same as in slicing.
"""
result = []
for row in row_list:
        result.append([aList[row][column] for column in col_list])
return result
| 6,496
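# Usage sketch for takeBlock2 above: pull rows 0 and 2, columns 1 and 2,
# out of a 3x3 grid.
grid = [[1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]]
print(takeBlock2(grid, [0, 2], [1, 2]))  # [[2, 3], [8, 9]]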
|
def err_callback(signum: signal.Signals, frame: Any) -> None:
"""Callback that raises Timeout.ContextTimeout"""
raise ContextTimeout()
| 6,497
|
def param(key, desired_type=None):
"""Return a decorator to parse a JSON request value."""
def decorator(view_func):
"""The actual decorator"""
@wraps(view_func)
def inner(*args, **kwargs):
data = request.get_json() # May raise a 400
try:
value = data[key]
except (KeyError, TypeError):
abort(400, "Missing JSON value '{0}'.".format(key))
if desired_type and not isinstance(value, desired_type):
# For the error message
if desired_type == text_type:
type_name = 'string'
else:
type_name = desired_type.__name__
abort(400, ("Expected '{0}' to be type {1}."
.format(key, type_name)))
# Success, pass through to view function
kwargs[key] = value
return view_func(*args, **kwargs)
return inner
return decorator
| 6,498
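# A hypothetical Flask usage sketch for the param decorator above: `name` is
# pulled out of the JSON body and passed to the view as a keyword argument;
# a missing or wrongly-typed value aborts with a 400.
from flask import Flask, jsonify

app = Flask(__name__)

@app.route("/greet", methods=["POST"])
@param("name", str)
def greet(name):
    return jsonify(message="Hello, {0}!".format(name))

# curl -X POST -H "Content-Type: application/json" -d '{"name": "Ada"}' http://localhost:5000/greet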
|
def make_client(instance):
"""Returns a client to the ClientManager."""
tacker_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
LOG.debug('Instantiating tacker client: %s', tacker_client)
kwargs = {'service_type': 'nfv-orchestration',
'region_name': instance._region_name,
'endpoint_type': instance._interface,
'interface': instance._interface,
'session': instance.session
}
client = tacker_client(**kwargs)
return client
| 6,499
|